// SPDX-License-Identifier: GPL-2.0-only
/*
 * Event char devices, giving access to raw input device events.
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define EVDEV_MINOR_BASE 64
#define EVDEV_MINORS 32
#define EVDEV_MIN_BUFFER_SIZE 64U
#define EVDEV_BUF_PACKETS 8

#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input/mt.h>
#include <linux/major.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include "input-compat.h"

struct evdev {
        int open;
        struct input_handle handle;
        struct evdev_client __rcu *grab;
        struct list_head client_list;
        spinlock_t client_lock; /* protects client_list */
        struct mutex mutex;
        struct device dev;
        struct cdev cdev;
        bool exist;
};

struct evdev_client {
        unsigned int head;
        unsigned int tail;
        unsigned int packet_head; /* [future] position of the first element of next packet */
        spinlock_t buffer_lock; /* protects access to buffer, head and tail */
        wait_queue_head_t wait;
        struct fasync_struct *fasync;
        struct evdev *evdev;
        struct list_head node;
        enum input_clock_type clk_type;
        bool revoked;
        unsigned long *evmasks[EV_CNT];
        unsigned int bufsize;
        struct input_event buffer[];
};

static size_t evdev_get_mask_cnt(unsigned int type)
{
        static const size_t counts[EV_CNT] = {
                /* EV_SYN==0 is EV_CNT, _not_ SYN_CNT, see EVIOCGBIT */
                [EV_SYN] = EV_CNT,
                [EV_KEY] = KEY_CNT,
                [EV_REL] = REL_CNT,
                [EV_ABS] = ABS_CNT,
                [EV_MSC] = MSC_CNT,
                [EV_SW]  = SW_CNT,
                [EV_LED] = LED_CNT,
                [EV_SND] = SND_CNT,
                [EV_FF]  = FF_CNT,
        };

        return (type < EV_CNT) ? counts[type] : 0;
}

/* requires the buffer lock to be held */
static bool __evdev_is_filtered(struct evdev_client *client,
                                unsigned int type,
                                unsigned int code)
{
        unsigned long *mask;
        size_t cnt;

        /* EV_SYN and unknown codes are never filtered */
        if (type == EV_SYN || type >= EV_CNT)
                return false;

        /* first test whether the type is filtered */
        mask = client->evmasks[0];
        if (mask && !test_bit(type, mask))
                return true;

        /* unknown values are never filtered */
        cnt = evdev_get_mask_cnt(type);
        if (!cnt || code >= cnt)
                return false;

        mask = client->evmasks[type];
        return mask && !test_bit(code, mask);
}

/* flush queued events of type @type, caller must hold client->buffer_lock */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
        unsigned int i, head, num;
        unsigned int mask = client->bufsize - 1;
        bool is_report;
        struct input_event *ev;

        BUG_ON(type == EV_SYN);

        head = client->tail;
        client->packet_head = client->tail;

        /* init to 1 so a leading SYN_REPORT will not be dropped */
        num = 1;

        for (i = client->tail; i != client->head; i = (i + 1) & mask) {
                ev = &client->buffer[i];
                is_report = ev->type == EV_SYN && ev->code == SYN_REPORT;

                if (ev->type == type) {
                        /* drop matched entry */
                        continue;
                } else if (is_report && !num) {
                        /* drop empty SYN_REPORT groups */
                        continue;
                } else if (head != i) {
                        /* move entry to fill the gap */
                        client->buffer[head] = *ev;
                }

                num++;
                head = (head + 1) & mask;

                if (is_report) {
                        num = 0;
                        client->packet_head = head;
                }
        }

        client->head = head;
}
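
/*
 * Worked example of the compaction above (illustrative): with a queued
 * sequence [KEY_A, REL_X, SYN_REPORT, KEY_B, SYN_REPORT], flushing
 * EV_KEY leaves [REL_X, SYN_REPORT]: the key events are dropped, the
 * second packet becomes empty and is dropped as a whole, and the
 * surviving entries are compacted towards the tail.
 */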

static void __evdev_queue_syn_dropped(struct evdev_client *client)
{
        ktime_t *ev_time = input_get_timestamp(client->evdev->handle.dev);
        struct timespec64 ts = ktime_to_timespec64(ev_time[client->clk_type]);
        struct input_event ev;

        ev.input_event_sec = ts.tv_sec;
        ev.input_event_usec = ts.tv_nsec / NSEC_PER_USEC;
        ev.type = EV_SYN;
        ev.code = SYN_DROPPED;
        ev.value = 0;

        client->buffer[client->head++] = ev;
        client->head &= client->bufsize - 1;

        if (unlikely(client->head == client->tail)) {
                /* drop queue but keep our SYN_DROPPED event */
                client->tail = (client->head - 1) & (client->bufsize - 1);
                client->packet_head = client->tail;
        }
}

static void evdev_queue_syn_dropped(struct evdev_client *client)
{
        unsigned long flags;

        spin_lock_irqsave(&client->buffer_lock, flags);
        __evdev_queue_syn_dropped(client);
        spin_unlock_irqrestore(&client->buffer_lock, flags);
}

static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
{
        unsigned long flags;
        enum input_clock_type clk_type;

        switch (clkid) {

        case CLOCK_REALTIME:
                clk_type = INPUT_CLK_REAL;
                break;
        case CLOCK_MONOTONIC:
                clk_type = INPUT_CLK_MONO;
                break;
        case CLOCK_BOOTTIME:
                clk_type = INPUT_CLK_BOOT;
                break;
        default:
                return -EINVAL;
        }

        if (client->clk_type != clk_type) {
                client->clk_type = clk_type;

                /*
                 * Flush pending events and queue SYN_DROPPED event,
                 * but only if the queue is not empty.
                 */
                spin_lock_irqsave(&client->buffer_lock, flags);

                if (client->head != client->tail) {
                        client->packet_head = client->head = client->tail;
                        __evdev_queue_syn_dropped(client);
                }

                spin_unlock_irqrestore(&client->buffer_lock, flags);
        }

        return 0;
}
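
/*
 * Userspace selects the timestamp clock via EVIOCSCLOCKID; a minimal
 * sketch (illustrative only, fd is assumed to be an open event node):
 *
 *      int clkid = CLOCK_MONOTONIC;
 *      ioctl(fd, EVIOCSCLOCKID, &clkid);
 */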

static void __pass_event(struct evdev_client *client,
                         const struct input_event *event)
{
        client->buffer[client->head++] = *event;
        client->head &= client->bufsize - 1;

        if (unlikely(client->head == client->tail)) {
                /*
                 * This effectively "drops" all unconsumed events, leaving
                 * EV_SYN/SYN_DROPPED plus the newest event in the queue.
                 */
                client->tail = (client->head - 2) & (client->bufsize - 1);

                client->buffer[client->tail] = (struct input_event) {
                        .input_event_sec = event->input_event_sec,
                        .input_event_usec = event->input_event_usec,
                        .type = EV_SYN,
                        .code = SYN_DROPPED,
                        .value = 0,
                };

                client->packet_head = client->tail;
        }

        if (event->type == EV_SYN && event->code == SYN_REPORT) {
                client->packet_head = client->head;
                kill_fasync(&client->fasync, SIGIO, POLL_IN);
        }
}

static void evdev_pass_values(struct evdev_client *client,
                              const struct input_value *vals, unsigned int count,
                              ktime_t *ev_time)
{
        const struct input_value *v;
        struct input_event event;
        struct timespec64 ts;
        bool wakeup = false;

        if (client->revoked)
                return;

        ts = ktime_to_timespec64(ev_time[client->clk_type]);
        event.input_event_sec = ts.tv_sec;
        event.input_event_usec = ts.tv_nsec / NSEC_PER_USEC;

        /* Interrupts are disabled, just acquire the lock. */
        spin_lock(&client->buffer_lock);

        for (v = vals; v != vals + count; v++) {
                if (__evdev_is_filtered(client, v->type, v->code))
                        continue;

                if (v->type == EV_SYN && v->code == SYN_REPORT) {
                        /* drop empty SYN_REPORT */
                        if (client->packet_head == client->head)
                                continue;

                        wakeup = true;
                }

                event.type = v->type;
                event.code = v->code;
                event.value = v->value;
                __pass_event(client, &event);
        }

        spin_unlock(&client->buffer_lock);

        if (wakeup)
                wake_up_interruptible_poll(&client->wait,
                        EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM);
}

/*
 * Pass incoming events to all connected clients.
 */
static void evdev_events(struct input_handle *handle,
                         const struct input_value *vals, unsigned int count)
{
        struct evdev *evdev = handle->private;
        struct evdev_client *client;
        ktime_t *ev_time = input_get_timestamp(handle->dev);

        rcu_read_lock();

        client = rcu_dereference(evdev->grab);

        if (client)
                evdev_pass_values(client, vals, count, ev_time);
        else
                list_for_each_entry_rcu(client, &evdev->client_list, node)
                        evdev_pass_values(client, vals, count, ev_time);

        rcu_read_unlock();
}

/*
 * Pass incoming event to all connected clients.
 */
static void evdev_event(struct input_handle *handle,
                        unsigned int type, unsigned int code, int value)
{
        struct input_value vals[] = { { type, code, value } };

        evdev_events(handle, vals, 1);
}

static int evdev_fasync(int fd, struct file *file, int on)
{
        struct evdev_client *client = file->private_data;

        return fasync_helper(fd, file, on, &client->fasync);
}

static void evdev_free(struct device *dev)
{
        struct evdev *evdev = container_of(dev, struct evdev, dev);

        input_put_device(evdev->handle.dev);
        kfree(evdev);
}

/*
 * Grabs an event device (along with underlying input device).
 * This function is called with evdev->mutex taken.
 */
static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
{
        int error;

        if (evdev->grab)
                return -EBUSY;

        error = input_grab_device(&evdev->handle);
        if (error)
                return error;

        rcu_assign_pointer(evdev->grab, client);

        return 0;
}

static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
{
        struct evdev_client *grab = rcu_dereference_protected(evdev->grab,
                                        lockdep_is_held(&evdev->mutex));

        if (grab != client)
                return -EINVAL;

        rcu_assign_pointer(evdev->grab, NULL);
        synchronize_rcu();
        input_release_device(&evdev->handle);

        return 0;
}
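
/*
 * Userspace toggles the grab through EVIOCGRAB (see evdev_do_ioctl()
 * below): a non-zero argument routes all events exclusively to this
 * client, a zero argument releases the grab again.
 */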

static void evdev_attach_client(struct evdev *evdev,
                                struct evdev_client *client)
{
        spin_lock(&evdev->client_lock);
        list_add_tail_rcu(&client->node, &evdev->client_list);
        spin_unlock(&evdev->client_lock);
}

static void evdev_detach_client(struct evdev *evdev,
                                struct evdev_client *client)
{
        spin_lock(&evdev->client_lock);
        list_del_rcu(&client->node);
        spin_unlock(&evdev->client_lock);
        synchronize_rcu();
}

static int evdev_open_device(struct evdev *evdev)
{
        int retval;

        retval = mutex_lock_interruptible(&evdev->mutex);
        if (retval)
                return retval;

        if (!evdev->exist)
                retval = -ENODEV;
        else if (!evdev->open++) {
                retval = input_open_device(&evdev->handle);
                if (retval)
                        evdev->open--;
        }

        mutex_unlock(&evdev->mutex);
        return retval;
}

static void evdev_close_device(struct evdev *evdev)
{
        mutex_lock(&evdev->mutex);

        if (evdev->exist && !--evdev->open)
                input_close_device(&evdev->handle);

        mutex_unlock(&evdev->mutex);
}

/*
 * Wake up users waiting for IO so they can disconnect from
 * dead device.
 */
static void evdev_hangup(struct evdev *evdev)
{
        struct evdev_client *client;

        spin_lock(&evdev->client_lock);
        list_for_each_entry(client, &evdev->client_list, node) {
                kill_fasync(&client->fasync, SIGIO, POLL_HUP);
                wake_up_interruptible_poll(&client->wait, EPOLLHUP | EPOLLERR);
        }
        spin_unlock(&evdev->client_lock);
}

static int evdev_release(struct inode *inode, struct file *file)
{
        struct evdev_client *client = file->private_data;
        struct evdev *evdev = client->evdev;
        unsigned int i;

        mutex_lock(&evdev->mutex);

        if (evdev->exist && !client->revoked)
                input_flush_device(&evdev->handle, file);

        evdev_ungrab(evdev, client);
        mutex_unlock(&evdev->mutex);

        evdev_detach_client(evdev, client);

        for (i = 0; i < EV_CNT; ++i)
                bitmap_free(client->evmasks[i]);

        kvfree(client);

        evdev_close_device(evdev);

        return 0;
}

static unsigned int evdev_compute_buffer_size(struct input_dev *dev)
{
        unsigned int n_events =
                max(dev->hint_events_per_packet * EVDEV_BUF_PACKETS,
                    EVDEV_MIN_BUFFER_SIZE);

        return roundup_pow_of_two(n_events);
}
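
/*
 * Example: a device hinting 7 events per packet gets
 * max(7 * 8, 64) = 64 buffer slots, while a multitouch device hinting
 * 60 events per packet gets max(60 * 8, 64) = 480, rounded up to the
 * next power of two, i.e. 512 slots.
 */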

static int evdev_open(struct inode *inode, struct file *file)
{
        struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
        unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
        struct evdev_client *client;
        int error;

        client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
        if (!client)
                return -ENOMEM;

        init_waitqueue_head(&client->wait);
        client->bufsize = bufsize;
        spin_lock_init(&client->buffer_lock);
        client->evdev = evdev;
        evdev_attach_client(evdev, client);

        error = evdev_open_device(evdev);
        if (error)
                goto err_free_client;

        file->private_data = client;
        stream_open(inode, file);

        return 0;

 err_free_client:
        evdev_detach_client(evdev, client);
        kvfree(client);
        return error;
}

static ssize_t evdev_write(struct file *file, const char __user *buffer,
                           size_t count, loff_t *ppos)
{
        struct evdev_client *client = file->private_data;
        struct evdev *evdev = client->evdev;
        struct input_event event;
        int retval = 0;

        if (count != 0 && count < input_event_size())
                return -EINVAL;

        retval = mutex_lock_interruptible(&evdev->mutex);
        if (retval)
                return retval;

        if (!evdev->exist || client->revoked) {
                retval = -ENODEV;
                goto out;
        }

        while (retval + input_event_size() <= count) {

                if (input_event_from_user(buffer + retval, &event)) {
                        retval = -EFAULT;
                        goto out;
                }
                retval += input_event_size();

                input_inject_event(&evdev->handle,
                                   event.type, event.code, event.value);
                cond_resched();
        }

 out:
        mutex_unlock(&evdev->mutex);
        return retval;
}

static int evdev_fetch_next_event(struct evdev_client *client,
                                  struct input_event *event)
{
        int have_event;

        spin_lock_irq(&client->buffer_lock);

        have_event = client->packet_head != client->tail;
        if (have_event) {
                *event = client->buffer[client->tail++];
                client->tail &= client->bufsize - 1;
        }

        spin_unlock_irq(&client->buffer_lock);

        return have_event;
}
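
/*
 * Note that evdev_fetch_next_event() compares against packet_head rather
 * than head: events belonging to a packet that has not yet been closed by
 * SYN_REPORT are not handed out, so readers always see whole packets.
 */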

static ssize_t evdev_read(struct file *file, char __user *buffer,
                          size_t count, loff_t *ppos)
{
        struct evdev_client *client = file->private_data;
        struct evdev *evdev = client->evdev;
        struct input_event event;
        size_t read = 0;
        int error;

        if (count != 0 && count < input_event_size())
                return -EINVAL;

        for (;;) {
                if (!evdev->exist || client->revoked)
                        return -ENODEV;

                if (client->packet_head == client->tail &&
                    (file->f_flags & O_NONBLOCK))
                        return -EAGAIN;

                /*
                 * count == 0 is special - no IO is done but we check
                 * for error conditions (see above).
                 */
                if (count == 0)
                        break;

                while (read + input_event_size() <= count &&
                       evdev_fetch_next_event(client, &event)) {

                        if (input_event_to_user(buffer + read, &event))
                                return -EFAULT;

                        read += input_event_size();
                }

                if (read)
                        break;

                if (!(file->f_flags & O_NONBLOCK)) {
                        error = wait_event_interruptible(client->wait,
                                        client->packet_head != client->tail ||
                                        !evdev->exist || client->revoked);
                        if (error)
                                return error;
                }
        }

        return read;
}

/* No kernel lock - fine */
static __poll_t evdev_poll(struct file *file, poll_table *wait)
{
        struct evdev_client *client = file->private_data;
        struct evdev *evdev = client->evdev;
        __poll_t mask;

        poll_wait(file, &client->wait, wait);

        if (evdev->exist && !client->revoked)
                mask = EPOLLOUT | EPOLLWRNORM;
        else
                mask = EPOLLHUP | EPOLLERR;

        if (client->packet_head != client->tail)
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}

#ifdef CONFIG_COMPAT

#define BITS_PER_LONG_COMPAT (sizeof(compat_long_t) * 8)
#define BITS_TO_LONGS_COMPAT(x) ((((x) - 1) / BITS_PER_LONG_COMPAT) + 1)

#ifdef __BIG_ENDIAN
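/*
 * On 64-bit big-endian kernels the two 32-bit halves of each native long
 * are stored most-significant-half first, while a 32-bit (compat) user
 * expects the half holding bits 0-31 first.  The helpers below therefore
 * copy one compat_long_t at a time and swap each pair of words using the
 * index i + 1 - ((i % 2) << 1).
 */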
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
                        unsigned int maxlen, void __user *p, int compat)
{
        int len, i;

        if (compat) {
                len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
                if (len > maxlen)
                        len = maxlen;

                for (i = 0; i < len / sizeof(compat_long_t); i++)
                        if (copy_to_user((compat_long_t __user *) p + i,
                                         (compat_long_t *) bits +
                                                i + 1 - ((i % 2) << 1),
                                         sizeof(compat_long_t)))
                                return -EFAULT;
        } else {
                len = BITS_TO_LONGS(maxbit) * sizeof(long);
                if (len > maxlen)
                        len = maxlen;

                if (copy_to_user(p, bits, len))
                        return -EFAULT;
        }

        return len;
}

static int bits_from_user(unsigned long *bits, unsigned int maxbit,
                          unsigned int maxlen, const void __user *p, int compat)
{
        int len, i;

        if (compat) {
                if (maxlen % sizeof(compat_long_t))
                        return -EINVAL;

                len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
                if (len > maxlen)
                        len = maxlen;

                for (i = 0; i < len / sizeof(compat_long_t); i++)
                        if (copy_from_user((compat_long_t *) bits +
                                                i + 1 - ((i % 2) << 1),
                                           (compat_long_t __user *) p + i,
                                           sizeof(compat_long_t)))
                                return -EFAULT;
                if (i % 2)
                        *((compat_long_t *) bits + i - 1) = 0;

        } else {
                if (maxlen % sizeof(long))
                        return -EINVAL;

                len = BITS_TO_LONGS(maxbit) * sizeof(long);
                if (len > maxlen)
                        len = maxlen;

                if (copy_from_user(bits, p, len))
                        return -EFAULT;
        }

        return len;
}

#else

static int bits_to_user(unsigned long *bits, unsigned int maxbit,
                        unsigned int maxlen, void __user *p, int compat)
{
        int len = compat ?
                        BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t) :
                        BITS_TO_LONGS(maxbit) * sizeof(long);

        if (len > maxlen)
                len = maxlen;

        return copy_to_user(p, bits, len) ? -EFAULT : len;
}

static int bits_from_user(unsigned long *bits, unsigned int maxbit,
                          unsigned int maxlen, const void __user *p, int compat)
{
        size_t chunk_size = compat ? sizeof(compat_long_t) : sizeof(long);
        int len;

        if (maxlen % chunk_size)
                return -EINVAL;

        len = compat ? BITS_TO_LONGS_COMPAT(maxbit) : BITS_TO_LONGS(maxbit);
        len *= chunk_size;
        if (len > maxlen)
                len = maxlen;

        return copy_from_user(bits, p, len) ? -EFAULT : len;
}

#endif /* __BIG_ENDIAN */

#else

static int bits_to_user(unsigned long *bits, unsigned int maxbit,
                        unsigned int maxlen, void __user *p, int compat)
{
        int len = BITS_TO_LONGS(maxbit) * sizeof(long);

        if (len > maxlen)
                len = maxlen;

        return copy_to_user(p, bits, len) ? -EFAULT : len;
}

static int bits_from_user(unsigned long *bits, unsigned int maxbit,
                          unsigned int maxlen, const void __user *p, int compat)
{
        int len;

        if (maxlen % sizeof(long))
                return -EINVAL;

        len = BITS_TO_LONGS(maxbit) * sizeof(long);
        if (len > maxlen)
                len = maxlen;

        return copy_from_user(bits, p, len) ? -EFAULT : len;
}

#endif /* CONFIG_COMPAT */

static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
{
        int len;

        if (!str)
                return -ENOENT;

        len = strlen(str) + 1;
        if (len > maxlen)
                len = maxlen;

        return copy_to_user(p, str, len) ? -EFAULT : len;
}

static int handle_eviocgbit(struct input_dev *dev,
                            unsigned int type, unsigned int size,
                            void __user *p, int compat_mode)
{
        unsigned long *bits;
        int len;

        switch (type) {

        case 0: bits = dev->evbit; len = EV_MAX; break;
        case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
        case EV_REL: bits = dev->relbit; len = REL_MAX; break;
        case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
        case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
        case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
        case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
        case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
        case EV_SW: bits = dev->swbit; len = SW_MAX; break;
        default: return -EINVAL;
        }

        return bits_to_user(bits, len, size, p, compat_mode);
}

static int evdev_handle_get_keycode(struct input_dev *dev, void __user *p)
{
        struct input_keymap_entry ke = {
                .len = sizeof(unsigned int),
                .flags = 0,
        };
        int __user *ip = (int __user *)p;
        int error;

        /* legacy case */
        if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
                return -EFAULT;

        error = input_get_keycode(dev, &ke);
        if (error)
                return error;

        if (put_user(ke.keycode, ip + 1))
                return -EFAULT;

        return 0;
}

static int evdev_handle_get_keycode_v2(struct input_dev *dev, void __user *p)
{
        struct input_keymap_entry ke;
        int error;

        if (copy_from_user(&ke, p, sizeof(ke)))
                return -EFAULT;

        error = input_get_keycode(dev, &ke);
        if (error)
                return error;

        if (copy_to_user(p, &ke, sizeof(ke)))
                return -EFAULT;

        return 0;
}

static int evdev_handle_set_keycode(struct input_dev *dev, void __user *p)
{
        struct input_keymap_entry ke = {
                .len = sizeof(unsigned int),
                .flags = 0,
        };
        int __user *ip = (int __user *)p;

        if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
                return -EFAULT;

        if (get_user(ke.keycode, ip + 1))
                return -EFAULT;

        return input_set_keycode(dev, &ke);
}

static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
{
        struct input_keymap_entry ke;

        if (copy_from_user(&ke, p, sizeof(ke)))
                return -EFAULT;

        if (ke.len > sizeof(ke.scancode))
                return -EINVAL;

        return input_set_keycode(dev, &ke);
}

/*
 * If we transfer state to the user, we should flush all pending events
 * of the same type from the client's queue. Otherwise, they might end up
 * with duplicate events, which can screw up the client's state tracking.
 * If bits_to_user fails after flushing the queue, we queue a SYN_DROPPED
 * event so user-space will notice missing events.
 *
 * LOCKING:
 * We need to take event_lock before buffer_lock to avoid deadlocks. But we
 * need the event_lock only to guarantee consistent state. We can safely release
 * it while flushing the queue. This allows input-core to handle filters while
 * we flush the queue.
 */
static int evdev_handle_get_val(struct evdev_client *client,
                                struct input_dev *dev, unsigned int type,
                                unsigned long *bits, unsigned int maxbit,
                                unsigned int maxlen, void __user *p,
                                int compat)
{
        int ret;
        unsigned long *mem;

        mem = bitmap_alloc(maxbit, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        spin_lock_irq(&dev->event_lock);
        spin_lock(&client->buffer_lock);

        bitmap_copy(mem, bits, maxbit);

        spin_unlock(&dev->event_lock);

        __evdev_flush_queue(client, type);

        spin_unlock_irq(&client->buffer_lock);

        ret = bits_to_user(mem, maxbit, maxlen, p, compat);
        if (ret < 0)
                evdev_queue_syn_dropped(client);

        bitmap_free(mem);

        return ret;
}

static int evdev_handle_mt_request(struct input_dev *dev,
                                   unsigned int size,
                                   int __user *ip)
{
        const struct input_mt *mt = dev->mt;
        unsigned int code;
        int max_slots;
        int i;

        if (get_user(code, &ip[0]))
                return -EFAULT;
        if (!mt || !input_is_mt_value(code))
                return -EINVAL;

        max_slots = (size - sizeof(__u32)) / sizeof(__s32);
        for (i = 0; i < mt->num_slots && i < max_slots; i++) {
                int value = input_mt_get_value(&mt->slots[i], code);
                if (put_user(value, &ip[1 + i]))
                        return -EFAULT;
        }

        return 0;
}

static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
                        struct file *file)
{
        client->revoked = true;
        evdev_ungrab(evdev, client);
        input_flush_device(&evdev->handle, file);
        wake_up_interruptible_poll(&client->wait, EPOLLHUP | EPOLLERR);

        return 0;
}

/* must be called with evdev-mutex held */
static int evdev_set_mask(struct evdev_client *client,
                          unsigned int type,
                          const void __user *codes,
                          u32 codes_size,
                          int compat)
{
        unsigned long flags, *mask, *oldmask;
        size_t cnt;
        int error;

        /* we allow unknown types and 'codes_size > size' for forward-compat */
        cnt = evdev_get_mask_cnt(type);
        if (!cnt)
                return 0;

        mask = bitmap_zalloc(cnt, GFP_KERNEL);
        if (!mask)
                return -ENOMEM;

        error = bits_from_user(mask, cnt - 1, codes_size, codes, compat);
        if (error < 0) {
                bitmap_free(mask);
                return error;
        }

        spin_lock_irqsave(&client->buffer_lock, flags);
        oldmask = client->evmasks[type];
        client->evmasks[type] = mask;
        spin_unlock_irqrestore(&client->buffer_lock, flags);

        bitmap_free(oldmask);

        return 0;
}
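
/*
 * A minimal userspace sketch of the mask interface (illustrative only;
 * fd is assumed to be an open event node):
 *
 *      unsigned long keys[KEY_CNT / (8 * sizeof(long)) + 1] = { 0 };
 *      struct input_mask m = {
 *              .type = EV_KEY,
 *              .codes_size = sizeof(keys),
 *              .codes_ptr = (__u64)(uintptr_t)keys,
 *      };
 *
 *      keys[KEY_A / (8 * sizeof(long))] |= 1UL << (KEY_A % (8 * sizeof(long)));
 *      ioctl(fd, EVIOCSMASK, &m);
 *
 * which suppresses all EV_KEY events except KEY_A for this client.
 */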

/* must be called with evdev-mutex held */
static int evdev_get_mask(struct evdev_client *client,
                          unsigned int type,
                          void __user *codes,
                          u32 codes_size,
                          int compat)
{
        unsigned long *mask;
        size_t cnt, size, xfer_size;
        int i;
        int error;

        /* we allow unknown types and 'codes_size > size' for forward-compat */
        cnt = evdev_get_mask_cnt(type);
        size = sizeof(unsigned long) * BITS_TO_LONGS(cnt);
        xfer_size = min_t(size_t, codes_size, size);

        if (cnt > 0) {
                mask = client->evmasks[type];
                if (mask) {
                        error = bits_to_user(mask, cnt - 1,
                                             xfer_size, codes, compat);
                        if (error < 0)
                                return error;
                } else {
                        /* fake mask with all bits set */
                        for (i = 0; i < xfer_size; i++)
                                if (put_user(0xffU, (u8 __user *)codes + i))
                                        return -EFAULT;
                }
        }

        if (xfer_size < codes_size)
                if (clear_user(codes + xfer_size, codes_size - xfer_size))
                        return -EFAULT;

        return 0;
}

static long evdev_do_ioctl(struct file *file, unsigned int cmd,
                           void __user *p, int compat_mode)
{
        struct evdev_client *client = file->private_data;
        struct evdev *evdev = client->evdev;
        struct input_dev *dev = evdev->handle.dev;
        struct input_absinfo abs;
        struct input_mask mask;
        struct ff_effect effect;
        int __user *ip = (int __user *)p;
        unsigned int i, t, u, v;
        unsigned int size;
        int error;

        /* First we check for fixed-length commands */
        switch (cmd) {

        case EVIOCGVERSION:
                return put_user(EV_VERSION, ip);

        case EVIOCGID:
                if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
                        return -EFAULT;
                return 0;

        case EVIOCGREP:
                if (!test_bit(EV_REP, dev->evbit))
                        return -ENOSYS;
                if (put_user(dev->rep[REP_DELAY], ip))
                        return -EFAULT;
                if (put_user(dev->rep[REP_PERIOD], ip + 1))
                        return -EFAULT;
                return 0;

        case EVIOCSREP:
                if (!test_bit(EV_REP, dev->evbit))
                        return -ENOSYS;
                if (get_user(u, ip))
                        return -EFAULT;
                if (get_user(v, ip + 1))
                        return -EFAULT;

                input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u);
                input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v);

                return 0;

        case EVIOCRMFF:
                return input_ff_erase(dev, (int)(unsigned long) p, file);

        case EVIOCGEFFECTS:
                i = test_bit(EV_FF, dev->evbit) ?
                                dev->ff->max_effects : 0;
                if (put_user(i, ip))
                        return -EFAULT;
                return 0;

        case EVIOCGRAB:
                if (p)
                        return evdev_grab(evdev, client);
                else
                        return evdev_ungrab(evdev, client);

        case EVIOCREVOKE:
                if (p)
                        return -EINVAL;
                else
                        return evdev_revoke(evdev, client, file);

        case EVIOCGMASK: {
                void __user *codes_ptr;

                if (copy_from_user(&mask, p, sizeof(mask)))
                        return -EFAULT;

                codes_ptr = (void __user *)(unsigned long)mask.codes_ptr;
                return evdev_get_mask(client,
                                      mask.type, codes_ptr, mask.codes_size,
                                      compat_mode);
        }

        case EVIOCSMASK: {
                const void __user *codes_ptr;

                if (copy_from_user(&mask, p, sizeof(mask)))
                        return -EFAULT;

                codes_ptr = (const void __user *)(unsigned long)mask.codes_ptr;
                return evdev_set_mask(client,
                                      mask.type, codes_ptr, mask.codes_size,
                                      compat_mode);
        }

        case EVIOCSCLOCKID:
                if (copy_from_user(&i, p, sizeof(unsigned int)))
                        return -EFAULT;

                return evdev_set_clk_type(client, i);

        case EVIOCGKEYCODE:
                return evdev_handle_get_keycode(dev, p);

        case EVIOCSKEYCODE:
                return evdev_handle_set_keycode(dev, p);

        case EVIOCGKEYCODE_V2:
                return evdev_handle_get_keycode_v2(dev, p);

        case EVIOCSKEYCODE_V2:
                return evdev_handle_set_keycode_v2(dev, p);
        }

        size = _IOC_SIZE(cmd);

        /* Now check variable-length commands */
#define EVIOC_MASK_SIZE(nr) ((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
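        /*
         * EVIOC_MASK_SIZE() clears the size field of the ioctl number, so
         * e.g. EVIOCGNAME(0) below matches EVIOCGNAME(n) for any n; the
         * length actually requested by userspace is the _IOC_SIZE(cmd)
         * value computed above.
         */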
1145*4882a593Smuzhiyun switch (EVIOC_MASK_SIZE(cmd)) {
1146*4882a593Smuzhiyun
1147*4882a593Smuzhiyun case EVIOCGPROP(0):
1148*4882a593Smuzhiyun return bits_to_user(dev->propbit, INPUT_PROP_MAX,
1149*4882a593Smuzhiyun size, p, compat_mode);
1150*4882a593Smuzhiyun
1151*4882a593Smuzhiyun case EVIOCGMTSLOTS(0):
1152*4882a593Smuzhiyun return evdev_handle_mt_request(dev, size, ip);
1153*4882a593Smuzhiyun
1154*4882a593Smuzhiyun case EVIOCGKEY(0):
1155*4882a593Smuzhiyun return evdev_handle_get_val(client, dev, EV_KEY, dev->key,
1156*4882a593Smuzhiyun KEY_MAX, size, p, compat_mode);
1157*4882a593Smuzhiyun
1158*4882a593Smuzhiyun case EVIOCGLED(0):
1159*4882a593Smuzhiyun return evdev_handle_get_val(client, dev, EV_LED, dev->led,
1160*4882a593Smuzhiyun LED_MAX, size, p, compat_mode);
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun case EVIOCGSND(0):
1163*4882a593Smuzhiyun return evdev_handle_get_val(client, dev, EV_SND, dev->snd,
1164*4882a593Smuzhiyun SND_MAX, size, p, compat_mode);
1165*4882a593Smuzhiyun
1166*4882a593Smuzhiyun case EVIOCGSW(0):
1167*4882a593Smuzhiyun return evdev_handle_get_val(client, dev, EV_SW, dev->sw,
1168*4882a593Smuzhiyun SW_MAX, size, p, compat_mode);
1169*4882a593Smuzhiyun
1170*4882a593Smuzhiyun case EVIOCGNAME(0):
1171*4882a593Smuzhiyun return str_to_user(dev->name, size, p);
1172*4882a593Smuzhiyun
1173*4882a593Smuzhiyun case EVIOCGPHYS(0):
1174*4882a593Smuzhiyun return str_to_user(dev->phys, size, p);
1175*4882a593Smuzhiyun
1176*4882a593Smuzhiyun case EVIOCGUNIQ(0):
1177*4882a593Smuzhiyun return str_to_user(dev->uniq, size, p);
1178*4882a593Smuzhiyun
1179*4882a593Smuzhiyun case EVIOC_MASK_SIZE(EVIOCSFF):
1180*4882a593Smuzhiyun if (input_ff_effect_from_user(p, size, &effect))
1181*4882a593Smuzhiyun return -EFAULT;
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun error = input_ff_upload(dev, &effect, file);
1184*4882a593Smuzhiyun if (error)
1185*4882a593Smuzhiyun return error;
1186*4882a593Smuzhiyun
1187*4882a593Smuzhiyun if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
1188*4882a593Smuzhiyun return -EFAULT;
1189*4882a593Smuzhiyun
1190*4882a593Smuzhiyun return 0;
1191*4882a593Smuzhiyun }
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun /* Multi-number variable-length handlers */
1194*4882a593Smuzhiyun if (_IOC_TYPE(cmd) != 'E')
1195*4882a593Smuzhiyun return -EINVAL;
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun if (_IOC_DIR(cmd) == _IOC_READ) {
1198*4882a593Smuzhiyun
1199*4882a593Smuzhiyun if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
1200*4882a593Smuzhiyun return handle_eviocgbit(dev,
1201*4882a593Smuzhiyun _IOC_NR(cmd) & EV_MAX, size,
1202*4882a593Smuzhiyun p, compat_mode);
1203*4882a593Smuzhiyun
1204*4882a593Smuzhiyun if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
1205*4882a593Smuzhiyun
1206*4882a593Smuzhiyun if (!dev->absinfo)
1207*4882a593Smuzhiyun return -EINVAL;
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun t = _IOC_NR(cmd) & ABS_MAX;
1210*4882a593Smuzhiyun abs = dev->absinfo[t];
1211*4882a593Smuzhiyun
1212*4882a593Smuzhiyun if (copy_to_user(p, &abs, min_t(size_t,
1213*4882a593Smuzhiyun size, sizeof(struct input_absinfo))))
1214*4882a593Smuzhiyun return -EFAULT;
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun return 0;
1217*4882a593Smuzhiyun }
1218*4882a593Smuzhiyun }
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun if (_IOC_DIR(cmd) == _IOC_WRITE) {
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
1223*4882a593Smuzhiyun
1224*4882a593Smuzhiyun if (!dev->absinfo)
1225*4882a593Smuzhiyun return -EINVAL;
1226*4882a593Smuzhiyun
1227*4882a593Smuzhiyun t = _IOC_NR(cmd) & ABS_MAX;
1228*4882a593Smuzhiyun
1229*4882a593Smuzhiyun if (copy_from_user(&abs, p, min_t(size_t,
1230*4882a593Smuzhiyun size, sizeof(struct input_absinfo))))
1231*4882a593Smuzhiyun return -EFAULT;
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun if (size < sizeof(struct input_absinfo))
1234*4882a593Smuzhiyun abs.resolution = 0;
1235*4882a593Smuzhiyun
1236*4882a593Smuzhiyun /* We can't change the number of reserved MT slots */
1237*4882a593Smuzhiyun if (t == ABS_MT_SLOT)
1238*4882a593Smuzhiyun return -EINVAL;
1239*4882a593Smuzhiyun
1240*4882a593Smuzhiyun /*
1241*4882a593Smuzhiyun * Take the event lock to ensure that we are not
1242*4882a593Smuzhiyun * changing device parameters in the middle of
1243*4882a593Smuzhiyun * an event.
1244*4882a593Smuzhiyun */
1245*4882a593Smuzhiyun spin_lock_irq(&dev->event_lock);
1246*4882a593Smuzhiyun dev->absinfo[t] = abs;
1247*4882a593Smuzhiyun spin_unlock_irq(&dev->event_lock);
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun return 0;
1250*4882a593Smuzhiyun }
1251*4882a593Smuzhiyun }
1252*4882a593Smuzhiyun
1253*4882a593Smuzhiyun return -EINVAL;
1254*4882a593Smuzhiyun }
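/*
 * Illustrative userspace sketch (not part of the driver): the
 * variable-length EVIOCGABS/EVIOCSABS handlers above are typically
 * driven like this; the file descriptor and the choice of ABS_X are
 * assumptions for the example.
 *
 *	struct input_absinfo info;
 *
 *	if (ioctl(fd, EVIOCGABS(ABS_X), &info) == 0) {
 *		// info is dev->absinfo[ABS_X] as copied out above
 *		info.fuzz = 4;
 *		// note: EVIOCSABS(ABS_MT_SLOT) would fail with -EINVAL
 *		ioctl(fd, EVIOCSABS(ABS_X), &info);
 *	}
 */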
1255*4882a593Smuzhiyun
1256*4882a593Smuzhiyun static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
1257*4882a593Smuzhiyun void __user *p, int compat_mode)
1258*4882a593Smuzhiyun {
1259*4882a593Smuzhiyun struct evdev_client *client = file->private_data;
1260*4882a593Smuzhiyun struct evdev *evdev = client->evdev;
1261*4882a593Smuzhiyun int retval;
1262*4882a593Smuzhiyun
1263*4882a593Smuzhiyun retval = mutex_lock_interruptible(&evdev->mutex);
1264*4882a593Smuzhiyun if (retval)
1265*4882a593Smuzhiyun return retval;
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun if (!evdev->exist || client->revoked) {
1268*4882a593Smuzhiyun retval = -ENODEV;
1269*4882a593Smuzhiyun goto out;
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun retval = evdev_do_ioctl(file, cmd, p, compat_mode);
1273*4882a593Smuzhiyun
1274*4882a593Smuzhiyun out:
1275*4882a593Smuzhiyun mutex_unlock(&evdev->mutex);
1276*4882a593Smuzhiyun return retval;
1277*4882a593Smuzhiyun }
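/*
 * Note for userspace (illustrative, not part of the driver): once the
 * device has gone away or the client was revoked, every ioctl routed
 * through evdev_ioctl_handler() fails with -ENODEV, so a caller should
 * treat that error as "tear down and close", e.g. (usual libc headers
 * assumed):
 *
 *	char name[256];
 *
 *	if (ioctl(fd, EVIOCGNAME(sizeof(name)), name) < 0 && errno == ENODEV)
 *		close(fd);	// device unplugged or access revoked
 */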
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1280*4882a593Smuzhiyun {
1281*4882a593Smuzhiyun return evdev_ioctl_handler(file, cmd, (void __user *)arg, 0);
1282*4882a593Smuzhiyun }
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
1285*4882a593Smuzhiyun static long evdev_ioctl_compat(struct file *file,
1286*4882a593Smuzhiyun unsigned int cmd, unsigned long arg)
1287*4882a593Smuzhiyun {
1288*4882a593Smuzhiyun return evdev_ioctl_handler(file, cmd, compat_ptr(arg), 1);
1289*4882a593Smuzhiyun }
1290*4882a593Smuzhiyun #endif
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun static const struct file_operations evdev_fops = {
1293*4882a593Smuzhiyun .owner = THIS_MODULE,
1294*4882a593Smuzhiyun .read = evdev_read,
1295*4882a593Smuzhiyun .write = evdev_write,
1296*4882a593Smuzhiyun .poll = evdev_poll,
1297*4882a593Smuzhiyun .open = evdev_open,
1298*4882a593Smuzhiyun .release = evdev_release,
1299*4882a593Smuzhiyun .unlocked_ioctl = evdev_ioctl,
1300*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
1301*4882a593Smuzhiyun .compat_ioctl = evdev_ioctl_compat,
1302*4882a593Smuzhiyun #endif
1303*4882a593Smuzhiyun .fasync = evdev_fasync,
1304*4882a593Smuzhiyun .llseek = no_llseek,
1305*4882a593Smuzhiyun };
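/*
 * Illustrative userspace sketch (not part of the driver): these file
 * operations back /dev/input/event<N>, and a minimal consumer simply
 * reads struct input_event records in a loop.  The device path is an
 * assumption; the usual <linux/input.h>, <fcntl.h>, <unistd.h> and
 * <stdio.h> includes are implied.
 *
 *	struct input_event ev;
 *	int fd = open("/dev/input/event0", O_RDONLY);
 *
 *	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
 *		if (ev.type == EV_KEY)
 *			printf("key %u value %d\n", (unsigned int)ev.code, ev.value);
 *	}
 */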
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun /*
1308*4882a593Smuzhiyun * Mark the device non-existent. This disables writes and ioctls and
1309*4882a593Smuzhiyun * prevents new users from opening the device. Blocking reads that are
1310*4882a593Smuzhiyun * already posted will stay, but new ones will fail.
1311*4882a593Smuzhiyun */
1312*4882a593Smuzhiyun static void evdev_mark_dead(struct evdev *evdev)
1313*4882a593Smuzhiyun {
1314*4882a593Smuzhiyun mutex_lock(&evdev->mutex);
1315*4882a593Smuzhiyun evdev->exist = false;
1316*4882a593Smuzhiyun mutex_unlock(&evdev->mutex);
1317*4882a593Smuzhiyun }
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun static void evdev_cleanup(struct evdev *evdev)
1320*4882a593Smuzhiyun {
1321*4882a593Smuzhiyun struct input_handle *handle = &evdev->handle;
1322*4882a593Smuzhiyun
1323*4882a593Smuzhiyun evdev_mark_dead(evdev);
1324*4882a593Smuzhiyun evdev_hangup(evdev);
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun /* evdev is marked dead so no one else accesses evdev->open */
1327*4882a593Smuzhiyun if (evdev->open) {
1328*4882a593Smuzhiyun input_flush_device(handle, NULL);
1329*4882a593Smuzhiyun input_close_device(handle);
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun /*
1334*4882a593Smuzhiyun * Create new evdev device. Note that input core serializes calls
1335*4882a593Smuzhiyun * to connect and disconnect.
1336*4882a593Smuzhiyun */
1337*4882a593Smuzhiyun static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
1338*4882a593Smuzhiyun const struct input_device_id *id)
1339*4882a593Smuzhiyun {
1340*4882a593Smuzhiyun struct evdev *evdev;
1341*4882a593Smuzhiyun int minor;
1342*4882a593Smuzhiyun int dev_no;
1343*4882a593Smuzhiyun int error;
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun minor = input_get_new_minor(EVDEV_MINOR_BASE, EVDEV_MINORS, true);
1346*4882a593Smuzhiyun if (minor < 0) {
1347*4882a593Smuzhiyun error = minor;
1348*4882a593Smuzhiyun pr_err("failed to reserve new minor: %d\n", error);
1349*4882a593Smuzhiyun return error;
1350*4882a593Smuzhiyun }
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL);
1353*4882a593Smuzhiyun if (!evdev) {
1354*4882a593Smuzhiyun error = -ENOMEM;
1355*4882a593Smuzhiyun goto err_free_minor;
1356*4882a593Smuzhiyun }
1357*4882a593Smuzhiyun
1358*4882a593Smuzhiyun INIT_LIST_HEAD(&evdev->client_list);
1359*4882a593Smuzhiyun spin_lock_init(&evdev->client_lock);
1360*4882a593Smuzhiyun mutex_init(&evdev->mutex);
1361*4882a593Smuzhiyun evdev->exist = true;
1362*4882a593Smuzhiyun
1363*4882a593Smuzhiyun dev_no = minor;
1364*4882a593Smuzhiyun /* Normalize the device number if it falls into the legacy range */
1365*4882a593Smuzhiyun if (dev_no < EVDEV_MINOR_BASE + EVDEV_MINORS)
1366*4882a593Smuzhiyun dev_no -= EVDEV_MINOR_BASE;
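/*
 * e.g. a legacy minor of 64 yields "event0" and 95 yields "event31",
 * while dynamically allocated minors outside the legacy window keep
 * their value (illustrative mapping only).
 */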
1367*4882a593Smuzhiyun dev_set_name(&evdev->dev, "event%d", dev_no);
1368*4882a593Smuzhiyun
1369*4882a593Smuzhiyun evdev->handle.dev = input_get_device(dev);
1370*4882a593Smuzhiyun evdev->handle.name = dev_name(&evdev->dev);
1371*4882a593Smuzhiyun evdev->handle.handler = handler;
1372*4882a593Smuzhiyun evdev->handle.private = evdev;
1373*4882a593Smuzhiyun
1374*4882a593Smuzhiyun evdev->dev.devt = MKDEV(INPUT_MAJOR, minor);
1375*4882a593Smuzhiyun evdev->dev.class = &input_class;
1376*4882a593Smuzhiyun evdev->dev.parent = &dev->dev;
1377*4882a593Smuzhiyun evdev->dev.release = evdev_free;
1378*4882a593Smuzhiyun device_initialize(&evdev->dev);
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun error = input_register_handle(&evdev->handle);
1381*4882a593Smuzhiyun if (error)
1382*4882a593Smuzhiyun goto err_free_evdev;
1383*4882a593Smuzhiyun
1384*4882a593Smuzhiyun cdev_init(&evdev->cdev, &evdev_fops);
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun error = cdev_device_add(&evdev->cdev, &evdev->dev);
1387*4882a593Smuzhiyun if (error)
1388*4882a593Smuzhiyun goto err_cleanup_evdev;
1389*4882a593Smuzhiyun
1390*4882a593Smuzhiyun return 0;
1391*4882a593Smuzhiyun
1392*4882a593Smuzhiyun err_cleanup_evdev:
1393*4882a593Smuzhiyun evdev_cleanup(evdev);
1394*4882a593Smuzhiyun input_unregister_handle(&evdev->handle);
1395*4882a593Smuzhiyun err_free_evdev:
1396*4882a593Smuzhiyun put_device(&evdev->dev);
1397*4882a593Smuzhiyun err_free_minor:
1398*4882a593Smuzhiyun input_free_minor(minor);
1399*4882a593Smuzhiyun return error;
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun static void evdev_disconnect(struct input_handle *handle)
1403*4882a593Smuzhiyun {
1404*4882a593Smuzhiyun struct evdev *evdev = handle->private;
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun cdev_device_del(&evdev->cdev, &evdev->dev);
1407*4882a593Smuzhiyun evdev_cleanup(evdev);
1408*4882a593Smuzhiyun input_free_minor(MINOR(evdev->dev.devt));
1409*4882a593Smuzhiyun input_unregister_handle(handle);
1410*4882a593Smuzhiyun put_device(&evdev->dev);
1411*4882a593Smuzhiyun }
1412*4882a593Smuzhiyun
1413*4882a593Smuzhiyun static const struct input_device_id evdev_ids[] = {
1414*4882a593Smuzhiyun { .driver_info = 1 }, /* Matches all devices */
1415*4882a593Smuzhiyun { }, /* Terminating zero entry */
1416*4882a593Smuzhiyun };
1417*4882a593Smuzhiyun
1418*4882a593Smuzhiyun MODULE_DEVICE_TABLE(input, evdev_ids);
1419*4882a593Smuzhiyun
1420*4882a593Smuzhiyun static struct input_handler evdev_handler = {
1421*4882a593Smuzhiyun .event = evdev_event,
1422*4882a593Smuzhiyun .events = evdev_events,
1423*4882a593Smuzhiyun .connect = evdev_connect,
1424*4882a593Smuzhiyun .disconnect = evdev_disconnect,
1425*4882a593Smuzhiyun .legacy_minors = true,
1426*4882a593Smuzhiyun .minor = EVDEV_MINOR_BASE,
1427*4882a593Smuzhiyun .name = "evdev",
1428*4882a593Smuzhiyun .id_table = evdev_ids,
1429*4882a593Smuzhiyun };
1430*4882a593Smuzhiyun
1431*4882a593Smuzhiyun static int __init evdev_init(void)
1432*4882a593Smuzhiyun {
1433*4882a593Smuzhiyun return input_register_handler(&evdev_handler);
1434*4882a593Smuzhiyun }
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun static void __exit evdev_exit(void)
1437*4882a593Smuzhiyun {
1438*4882a593Smuzhiyun input_unregister_handler(&evdev_handler);
1439*4882a593Smuzhiyun }
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun module_init(evdev_init);
1442*4882a593Smuzhiyun module_exit(evdev_exit);
1443*4882a593Smuzhiyun
1444*4882a593Smuzhiyun MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
1445*4882a593Smuzhiyun MODULE_DESCRIPTION("Input driver event char devices");
1446*4882a593Smuzhiyun MODULE_LICENSE("GPL");