1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * ACPI event handling for Wilco Embedded Controller
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright 2019 Google LLC
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * The Wilco Embedded Controller can create custom events that
8*4882a593Smuzhiyun * are not handled as standard ACPI objects. These events can
9*4882a593Smuzhiyun * contain information about changes in EC controlled features,
10*4882a593Smuzhiyun * such as errors and events in the dock or display. For example,
11*4882a593Smuzhiyun * an event is triggered if the dock is plugged into a display
12*4882a593Smuzhiyun * incorrectly. These events are needed for telemetry and
13*4882a593Smuzhiyun * diagnostics reasons, and for possibly alerting the user.
 *
15*4882a593Smuzhiyun * These events are triggered by the EC with an ACPI Notify(0x90),
16*4882a593Smuzhiyun * and then the BIOS reads the event buffer from EC RAM via an
17*4882a593Smuzhiyun * ACPI method. When the OS receives these events via ACPI,
18*4882a593Smuzhiyun * it passes them along to this driver. The events are put into
19*4882a593Smuzhiyun * a queue which can be read by a userspace daemon via a char device
20*4882a593Smuzhiyun * that implements read() and poll(). The event queue acts as a
21*4882a593Smuzhiyun * circular buffer of size 64, so if there are no userspace consumers
22*4882a593Smuzhiyun * the kernel will not run out of memory. The char device will appear at
23*4882a593Smuzhiyun * /dev/wilco_event{n}, where n is some small non-negative integer,
24*4882a593Smuzhiyun * starting from 0. Standard ACPI events such as the battery getting
25*4882a593Smuzhiyun * plugged/unplugged can also come through this path, but they are
26*4882a593Smuzhiyun * dealt with via other paths, and are ignored here.
 *
28*4882a593Smuzhiyun * To test, you can tail the binary data with
29*4882a593Smuzhiyun * $ cat /dev/wilco_event0 | hexdump -ve '1/1 "%x\n"'
30*4882a593Smuzhiyun * and then create an event by plugging/unplugging the battery.
31*4882a593Smuzhiyun */
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun #include <linux/acpi.h>
34*4882a593Smuzhiyun #include <linux/cdev.h>
35*4882a593Smuzhiyun #include <linux/device.h>
36*4882a593Smuzhiyun #include <linux/fs.h>
37*4882a593Smuzhiyun #include <linux/idr.h>
38*4882a593Smuzhiyun #include <linux/io.h>
39*4882a593Smuzhiyun #include <linux/list.h>
40*4882a593Smuzhiyun #include <linux/module.h>
41*4882a593Smuzhiyun #include <linux/poll.h>
42*4882a593Smuzhiyun #include <linux/spinlock.h>
43*4882a593Smuzhiyun #include <linux/uaccess.h>
44*4882a593Smuzhiyun #include <linux/wait.h>
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun /* ACPI Notify event code indicating event data is available. */
47*4882a593Smuzhiyun #define EC_ACPI_NOTIFY_EVENT 0x90
48*4882a593Smuzhiyun /* ACPI Method to execute to retrieve event data buffer from the EC. */
49*4882a593Smuzhiyun #define EC_ACPI_GET_EVENT "QSET"
50*4882a593Smuzhiyun /* Maximum number of words in event data returned by the EC. */
51*4882a593Smuzhiyun #define EC_ACPI_MAX_EVENT_WORDS 6
52*4882a593Smuzhiyun #define EC_ACPI_MAX_EVENT_SIZE \
53*4882a593Smuzhiyun (sizeof(struct ec_event) + (EC_ACPI_MAX_EVENT_WORDS) * sizeof(u16))
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun /* Node will appear in /dev/EVENT_DEV_NAME */
56*4882a593Smuzhiyun #define EVENT_DEV_NAME "wilco_event"
57*4882a593Smuzhiyun #define EVENT_CLASS_NAME EVENT_DEV_NAME
58*4882a593Smuzhiyun #define DRV_NAME EVENT_DEV_NAME
59*4882a593Smuzhiyun #define EVENT_DEV_NAME_FMT (EVENT_DEV_NAME "%d")
/* Device class under which the /dev/wilco_event{n} char devices appear. */
static struct class event_class = {
	.owner = THIS_MODULE,
	.name = EVENT_CLASS_NAME,
};
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun /* Keep track of all the device numbers used. */
66*4882a593Smuzhiyun #define EVENT_MAX_DEV 128
67*4882a593Smuzhiyun static int event_major;
68*4882a593Smuzhiyun static DEFINE_IDA(event_ida);
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun /* Size of circular queue of events. */
71*4882a593Smuzhiyun #define MAX_NUM_EVENTS 64
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun /**
74*4882a593Smuzhiyun * struct ec_event - Extended event returned by the EC.
75*4882a593Smuzhiyun * @size: Number of 16bit words in structure after the size word.
76*4882a593Smuzhiyun * @type: Extended event type, meaningless for us.
77*4882a593Smuzhiyun * @event: Event data words. Max count is %EC_ACPI_MAX_EVENT_WORDS.
78*4882a593Smuzhiyun */
struct ec_event {
	u16 size;	/* Count of 16-bit words following this field */
	u16 type;
	u16 event[];	/* Flexible array of event data words */
} __packed;	/* Must match the EC's wire layout exactly; no padding */
84*4882a593Smuzhiyun
/*
 * Number of 16-bit data words in @ev, and total size in bytes of @ev
 * including the header. Arguments are fully parenthesized so the macros
 * are safe to use with arbitrary pointer expressions.
 */
#define ec_event_num_words(ev) ((ev)->size - 1)
#define ec_event_size(ev) (sizeof(*(ev)) + (ec_event_num_words(ev) * sizeof(u16)))
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun /**
89*4882a593Smuzhiyun * struct ec_event_queue - Circular queue for events.
90*4882a593Smuzhiyun * @capacity: Number of elements the queue can hold.
91*4882a593Smuzhiyun * @head: Next index to write to.
92*4882a593Smuzhiyun * @tail: Next index to read from.
93*4882a593Smuzhiyun * @entries: Array of events.
94*4882a593Smuzhiyun */
struct ec_event_queue {
	int capacity;
	int head;	/* Slot at head is non-NULL only when the queue is full */
	int tail;
	struct ec_event *entries[];	/* Flexible array; length == capacity */
};
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun /* Maximum number of events to store in ec_event_queue */
103*4882a593Smuzhiyun static int queue_size = 64;
104*4882a593Smuzhiyun module_param(queue_size, int, 0644);
105*4882a593Smuzhiyun
event_queue_new(int capacity)106*4882a593Smuzhiyun static struct ec_event_queue *event_queue_new(int capacity)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun struct ec_event_queue *q;
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun q = kzalloc(struct_size(q, entries, capacity), GFP_KERNEL);
111*4882a593Smuzhiyun if (!q)
112*4882a593Smuzhiyun return NULL;
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun q->capacity = capacity;
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun return q;
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun
event_queue_empty(struct ec_event_queue * q)119*4882a593Smuzhiyun static inline bool event_queue_empty(struct ec_event_queue *q)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun /* head==tail when both full and empty, but head==NULL when empty */
122*4882a593Smuzhiyun return q->head == q->tail && !q->entries[q->head];
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun
event_queue_full(struct ec_event_queue * q)125*4882a593Smuzhiyun static inline bool event_queue_full(struct ec_event_queue *q)
126*4882a593Smuzhiyun {
127*4882a593Smuzhiyun /* head==tail when both full and empty, but head!=NULL when full */
128*4882a593Smuzhiyun return q->head == q->tail && q->entries[q->head];
129*4882a593Smuzhiyun }
130*4882a593Smuzhiyun
event_queue_pop(struct ec_event_queue * q)131*4882a593Smuzhiyun static struct ec_event *event_queue_pop(struct ec_event_queue *q)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun struct ec_event *ev;
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun if (event_queue_empty(q))
136*4882a593Smuzhiyun return NULL;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun ev = q->entries[q->tail];
139*4882a593Smuzhiyun q->entries[q->tail] = NULL;
140*4882a593Smuzhiyun q->tail = (q->tail + 1) % q->capacity;
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun return ev;
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun /*
146*4882a593Smuzhiyun * If full, overwrite the oldest event and return it so the caller
147*4882a593Smuzhiyun * can kfree it. If not full, return NULL.
148*4882a593Smuzhiyun */
event_queue_push(struct ec_event_queue * q,struct ec_event * ev)149*4882a593Smuzhiyun static struct ec_event *event_queue_push(struct ec_event_queue *q,
150*4882a593Smuzhiyun struct ec_event *ev)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun struct ec_event *popped = NULL;
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun if (event_queue_full(q))
155*4882a593Smuzhiyun popped = event_queue_pop(q);
156*4882a593Smuzhiyun q->entries[q->head] = ev;
157*4882a593Smuzhiyun q->head = (q->head + 1) % q->capacity;
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun return popped;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun
/* Free every queued event, then the queue itself. */
static void event_queue_free(struct ec_event_queue *q)
{
	struct ec_event *ev;

	for (ev = event_queue_pop(q); ev; ev = event_queue_pop(q))
		kfree(ev);

	kfree(q);
}
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun /**
173*4882a593Smuzhiyun * struct event_device_data - Data for a Wilco EC device that responds to ACPI.
174*4882a593Smuzhiyun * @events: Circular queue of EC events to be provided to userspace.
175*4882a593Smuzhiyun * @queue_lock: Protect the queue from simultaneous read/writes.
176*4882a593Smuzhiyun * @wq: Wait queue to notify processes when events are available or the
177*4882a593Smuzhiyun * device has been removed.
178*4882a593Smuzhiyun * @cdev: Char dev that userspace reads() and polls() from.
179*4882a593Smuzhiyun * @dev: Device associated with the %cdev.
 * @exist: Has the device not been removed? Once a device has been removed,
181*4882a593Smuzhiyun * writes, reads, and new opens will fail.
182*4882a593Smuzhiyun * @available: Guarantee only one client can open() file and read from queue.
183*4882a593Smuzhiyun *
184*4882a593Smuzhiyun * There will be one of these structs for each ACPI device registered. This data
185*4882a593Smuzhiyun * is the queue of events received from ACPI that still need to be read from
186*4882a593Smuzhiyun * userspace, the device and char device that userspace is using, a wait queue
187*4882a593Smuzhiyun * used to notify different threads when something has changed, plus a flag
188*4882a593Smuzhiyun * on whether the ACPI device has been removed.
189*4882a593Smuzhiyun */
struct event_device_data {
	struct ec_event_queue *events;
	spinlock_t queue_lock;
	wait_queue_head_t wq;
	struct device dev;	/* Embedded; its refcount controls our lifetime */
	struct cdev cdev;
	bool exist;
	atomic_t available;	/* 1 while no client holds the device open */
};
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun /**
201*4882a593Smuzhiyun * enqueue_events() - Place EC events in queue to be read by userspace.
202*4882a593Smuzhiyun * @adev: Device the events came from.
203*4882a593Smuzhiyun * @buf: Buffer of event data.
204*4882a593Smuzhiyun * @length: Length of event data buffer.
205*4882a593Smuzhiyun *
206*4882a593Smuzhiyun * %buf contains a number of ec_event's, packed one after the other.
207*4882a593Smuzhiyun * Each ec_event is of variable length. Start with the first event, copy it
208*4882a593Smuzhiyun * into a persistent ec_event, store that entry in the queue, move on
209*4882a593Smuzhiyun * to the next ec_event in buf, and repeat.
210*4882a593Smuzhiyun *
211*4882a593Smuzhiyun * Return: 0 on success or negative error code on failure.
212*4882a593Smuzhiyun */
enqueue_events(struct acpi_device * adev,const u8 * buf,u32 length)213*4882a593Smuzhiyun static int enqueue_events(struct acpi_device *adev, const u8 *buf, u32 length)
214*4882a593Smuzhiyun {
215*4882a593Smuzhiyun struct event_device_data *dev_data = adev->driver_data;
216*4882a593Smuzhiyun struct ec_event *event, *queue_event, *old_event;
217*4882a593Smuzhiyun size_t num_words, event_size;
218*4882a593Smuzhiyun u32 offset = 0;
219*4882a593Smuzhiyun
220*4882a593Smuzhiyun while (offset < length) {
221*4882a593Smuzhiyun event = (struct ec_event *)(buf + offset);
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun num_words = ec_event_num_words(event);
224*4882a593Smuzhiyun event_size = ec_event_size(event);
225*4882a593Smuzhiyun if (num_words > EC_ACPI_MAX_EVENT_WORDS) {
226*4882a593Smuzhiyun dev_err(&adev->dev, "Too many event words: %zu > %d\n",
227*4882a593Smuzhiyun num_words, EC_ACPI_MAX_EVENT_WORDS);
228*4882a593Smuzhiyun return -EOVERFLOW;
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun /* Ensure event does not overflow the available buffer */
232*4882a593Smuzhiyun if ((offset + event_size) > length) {
233*4882a593Smuzhiyun dev_err(&adev->dev, "Event exceeds buffer: %zu > %d\n",
234*4882a593Smuzhiyun offset + event_size, length);
235*4882a593Smuzhiyun return -EOVERFLOW;
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun /* Point to the next event in the buffer */
239*4882a593Smuzhiyun offset += event_size;
240*4882a593Smuzhiyun
241*4882a593Smuzhiyun /* Copy event into the queue */
242*4882a593Smuzhiyun queue_event = kmemdup(event, event_size, GFP_KERNEL);
243*4882a593Smuzhiyun if (!queue_event)
244*4882a593Smuzhiyun return -ENOMEM;
245*4882a593Smuzhiyun spin_lock(&dev_data->queue_lock);
246*4882a593Smuzhiyun old_event = event_queue_push(dev_data->events, queue_event);
247*4882a593Smuzhiyun spin_unlock(&dev_data->queue_lock);
248*4882a593Smuzhiyun kfree(old_event);
249*4882a593Smuzhiyun wake_up_interruptible(&dev_data->wq);
250*4882a593Smuzhiyun }
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun return 0;
253*4882a593Smuzhiyun }
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun /**
256*4882a593Smuzhiyun * event_device_notify() - Callback when EC generates an event over ACPI.
257*4882a593Smuzhiyun * @adev: The device that the event is coming from.
258*4882a593Smuzhiyun * @value: Value passed to Notify() in ACPI.
259*4882a593Smuzhiyun *
260*4882a593Smuzhiyun * This function will read the events from the device and enqueue them.
261*4882a593Smuzhiyun */
event_device_notify(struct acpi_device * adev,u32 value)262*4882a593Smuzhiyun static void event_device_notify(struct acpi_device *adev, u32 value)
263*4882a593Smuzhiyun {
264*4882a593Smuzhiyun struct acpi_buffer event_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
265*4882a593Smuzhiyun union acpi_object *obj;
266*4882a593Smuzhiyun acpi_status status;
267*4882a593Smuzhiyun
268*4882a593Smuzhiyun if (value != EC_ACPI_NOTIFY_EVENT) {
269*4882a593Smuzhiyun dev_err(&adev->dev, "Invalid event: 0x%08x\n", value);
270*4882a593Smuzhiyun return;
271*4882a593Smuzhiyun }
272*4882a593Smuzhiyun
273*4882a593Smuzhiyun /* Execute ACPI method to get event data buffer. */
274*4882a593Smuzhiyun status = acpi_evaluate_object(adev->handle, EC_ACPI_GET_EVENT,
275*4882a593Smuzhiyun NULL, &event_buffer);
276*4882a593Smuzhiyun if (ACPI_FAILURE(status)) {
277*4882a593Smuzhiyun dev_err(&adev->dev, "Error executing ACPI method %s()\n",
278*4882a593Smuzhiyun EC_ACPI_GET_EVENT);
279*4882a593Smuzhiyun return;
280*4882a593Smuzhiyun }
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun obj = (union acpi_object *)event_buffer.pointer;
283*4882a593Smuzhiyun if (!obj) {
284*4882a593Smuzhiyun dev_err(&adev->dev, "Nothing returned from %s()\n",
285*4882a593Smuzhiyun EC_ACPI_GET_EVENT);
286*4882a593Smuzhiyun return;
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun if (obj->type != ACPI_TYPE_BUFFER) {
289*4882a593Smuzhiyun dev_err(&adev->dev, "Invalid object returned from %s()\n",
290*4882a593Smuzhiyun EC_ACPI_GET_EVENT);
291*4882a593Smuzhiyun kfree(obj);
292*4882a593Smuzhiyun return;
293*4882a593Smuzhiyun }
294*4882a593Smuzhiyun if (obj->buffer.length < sizeof(struct ec_event)) {
295*4882a593Smuzhiyun dev_err(&adev->dev, "Invalid buffer length %d from %s()\n",
296*4882a593Smuzhiyun obj->buffer.length, EC_ACPI_GET_EVENT);
297*4882a593Smuzhiyun kfree(obj);
298*4882a593Smuzhiyun return;
299*4882a593Smuzhiyun }
300*4882a593Smuzhiyun
301*4882a593Smuzhiyun enqueue_events(adev, obj->buffer.pointer, obj->buffer.length);
302*4882a593Smuzhiyun kfree(obj);
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun
event_open(struct inode * inode,struct file * filp)305*4882a593Smuzhiyun static int event_open(struct inode *inode, struct file *filp)
306*4882a593Smuzhiyun {
307*4882a593Smuzhiyun struct event_device_data *dev_data;
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun dev_data = container_of(inode->i_cdev, struct event_device_data, cdev);
310*4882a593Smuzhiyun if (!dev_data->exist)
311*4882a593Smuzhiyun return -ENODEV;
312*4882a593Smuzhiyun
313*4882a593Smuzhiyun if (atomic_cmpxchg(&dev_data->available, 1, 0) == 0)
314*4882a593Smuzhiyun return -EBUSY;
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun /* Increase refcount on device so dev_data is not freed */
317*4882a593Smuzhiyun get_device(&dev_data->dev);
318*4882a593Smuzhiyun stream_open(inode, filp);
319*4882a593Smuzhiyun filp->private_data = dev_data;
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun return 0;
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun
event_poll(struct file * filp,poll_table * wait)324*4882a593Smuzhiyun static __poll_t event_poll(struct file *filp, poll_table *wait)
325*4882a593Smuzhiyun {
326*4882a593Smuzhiyun struct event_device_data *dev_data = filp->private_data;
327*4882a593Smuzhiyun __poll_t mask = 0;
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun poll_wait(filp, &dev_data->wq, wait);
330*4882a593Smuzhiyun if (!dev_data->exist)
331*4882a593Smuzhiyun return EPOLLHUP;
332*4882a593Smuzhiyun if (!event_queue_empty(dev_data->events))
333*4882a593Smuzhiyun mask |= EPOLLIN | EPOLLRDNORM | EPOLLPRI;
334*4882a593Smuzhiyun return mask;
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun
337*4882a593Smuzhiyun /**
338*4882a593Smuzhiyun * event_read() - Callback for passing event data to userspace via read().
339*4882a593Smuzhiyun * @filp: The file we are reading from.
340*4882a593Smuzhiyun * @buf: Pointer to userspace buffer to fill with one event.
341*4882a593Smuzhiyun * @count: Number of bytes requested. Must be at least EC_ACPI_MAX_EVENT_SIZE.
342*4882a593Smuzhiyun * @pos: File position pointer, irrelevant since we don't support seeking.
343*4882a593Smuzhiyun *
344*4882a593Smuzhiyun * Removes the first event from the queue, places it in the passed buffer.
345*4882a593Smuzhiyun *
 * If there are no events in the queue, then one of two things happens,
347*4882a593Smuzhiyun * depending on if the file was opened in nonblocking mode: If in nonblocking
348*4882a593Smuzhiyun * mode, then return -EAGAIN to say there's no data. If in blocking mode, then
349*4882a593Smuzhiyun * block until an event is available.
350*4882a593Smuzhiyun *
351*4882a593Smuzhiyun * Return: Number of bytes placed in buffer, negative error code on failure.
352*4882a593Smuzhiyun */
static ssize_t event_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct event_device_data *dev_data = filp->private_data;
	struct ec_event *event;
	ssize_t n_bytes_written = 0;
	int err;

	/* We only will give them the entire event at once */
	if (count != 0 && count < EC_ACPI_MAX_EVENT_SIZE)
		return -EINVAL;

	spin_lock(&dev_data->queue_lock);
	while (event_queue_empty(dev_data->events)) {
		/* Must drop the lock before we can sleep or return */
		spin_unlock(&dev_data->queue_lock);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* Sleep until an event arrives or the device goes away */
		err = wait_event_interruptible(dev_data->wq,
					       !event_queue_empty(dev_data->events) ||
					       !dev_data->exist);
		if (err)
			return err;

		/* Device was removed as we waited? */
		if (!dev_data->exist)
			return -ENODEV;
		/* Re-take the lock, then re-check the queue at the loop top */
		spin_lock(&dev_data->queue_lock);
	}
	/* Queue is non-empty and we hold the lock: pop cannot return NULL */
	event = event_queue_pop(dev_data->events);
	spin_unlock(&dev_data->queue_lock);
	n_bytes_written = ec_event_size(event);
	if (copy_to_user(buf, event, n_bytes_written))
		n_bytes_written = -EFAULT;
	kfree(event);

	return n_bytes_written;
}
391*4882a593Smuzhiyun
event_release(struct inode * inode,struct file * filp)392*4882a593Smuzhiyun static int event_release(struct inode *inode, struct file *filp)
393*4882a593Smuzhiyun {
394*4882a593Smuzhiyun struct event_device_data *dev_data = filp->private_data;
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun atomic_set(&dev_data->available, 1);
397*4882a593Smuzhiyun put_device(&dev_data->dev);
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun return 0;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun
/* File operations for the /dev/wilco_event{n} char devices. */
static const struct file_operations event_fops = {
	.open = event_open,
	.poll = event_poll,
	.read = event_read,
	.release = event_release,
	.llseek = no_llseek,
	.owner = THIS_MODULE,
};
410*4882a593Smuzhiyun
411*4882a593Smuzhiyun /**
412*4882a593Smuzhiyun * free_device_data() - Callback to free the event_device_data structure.
413*4882a593Smuzhiyun * @d: The device embedded in our device data, which we have been ref counting.
414*4882a593Smuzhiyun *
415*4882a593Smuzhiyun * This is called only after event_device_remove() has been called and all
416*4882a593Smuzhiyun * userspace programs have called event_release() on all the open file
417*4882a593Smuzhiyun * descriptors.
418*4882a593Smuzhiyun */
free_device_data(struct device * d)419*4882a593Smuzhiyun static void free_device_data(struct device *d)
420*4882a593Smuzhiyun {
421*4882a593Smuzhiyun struct event_device_data *dev_data;
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun dev_data = container_of(d, struct event_device_data, dev);
424*4882a593Smuzhiyun event_queue_free(dev_data->events);
425*4882a593Smuzhiyun kfree(dev_data);
426*4882a593Smuzhiyun }
427*4882a593Smuzhiyun
hangup_device(struct event_device_data * dev_data)428*4882a593Smuzhiyun static void hangup_device(struct event_device_data *dev_data)
429*4882a593Smuzhiyun {
430*4882a593Smuzhiyun dev_data->exist = false;
431*4882a593Smuzhiyun /* Wake up the waiting processes so they can close. */
432*4882a593Smuzhiyun wake_up_interruptible(&dev_data->wq);
433*4882a593Smuzhiyun put_device(&dev_data->dev);
434*4882a593Smuzhiyun }
435*4882a593Smuzhiyun
436*4882a593Smuzhiyun /**
437*4882a593Smuzhiyun * event_device_add() - Callback when creating a new device.
438*4882a593Smuzhiyun * @adev: ACPI device that we will be receiving events from.
439*4882a593Smuzhiyun *
440*4882a593Smuzhiyun * This finds a free minor number for the device, allocates and initializes
441*4882a593Smuzhiyun * some device data, and creates a new device and char dev node.
442*4882a593Smuzhiyun *
443*4882a593Smuzhiyun * The device data is freed in free_device_data(), which is called when
444*4882a593Smuzhiyun * %dev_data->dev is release()ed. This happens after all references to
445*4882a593Smuzhiyun * %dev_data->dev are dropped, which happens once both event_device_remove()
446*4882a593Smuzhiyun * has been called and every open()ed file descriptor has been release()ed.
447*4882a593Smuzhiyun *
448*4882a593Smuzhiyun * Return: 0 on success, negative error code on failure.
449*4882a593Smuzhiyun */
static int event_device_add(struct acpi_device *adev)
{
	struct event_device_data *dev_data;
	int error, minor;

	/* Reserve a minor number; freed on every error path below */
	minor = ida_alloc_max(&event_ida, EVENT_MAX_DEV-1, GFP_KERNEL);
	if (minor < 0) {
		error = minor;
		dev_err(&adev->dev, "Failed to find minor number: %d\n", error);
		return error;
	}

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data) {
		error = -ENOMEM;
		goto free_minor;
	}

	/* Initialize the device data. */
	adev->driver_data = dev_data;
	dev_data->events = event_queue_new(queue_size);
	if (!dev_data->events) {
		/* Before device_initialize(): plain kfree is correct here */
		kfree(dev_data);
		error = -ENOMEM;
		goto free_minor;
	}
	spin_lock_init(&dev_data->queue_lock);
	init_waitqueue_head(&dev_data->wq);
	dev_data->exist = true;
	atomic_set(&dev_data->available, 1);

	/* Initialize the device. */
	dev_data->dev.devt = MKDEV(event_major, minor);
	dev_data->dev.class = &event_class;
	/* free_device_data() runs when the last reference is dropped */
	dev_data->dev.release = free_device_data;
	dev_set_name(&dev_data->dev, EVENT_DEV_NAME_FMT, minor);
	device_initialize(&dev_data->dev);

	/* Initialize the character device, and add it to userspace. */
	cdev_init(&dev_data->cdev, &event_fops);
	error = cdev_device_add(&dev_data->cdev, &dev_data->dev);
	if (error)
		goto free_dev_data;

	return 0;

free_dev_data:
	/* After device_initialize(): drop the reference, not kfree */
	hangup_device(dev_data);
free_minor:
	ida_simple_remove(&event_ida, minor);
	return error;
}
502*4882a593Smuzhiyun
static int event_device_remove(struct acpi_device *adev)
{
	struct event_device_data *dev_data = adev->driver_data;

	/* Remove the char dev first so no new opens can race the teardown */
	cdev_device_del(&dev_data->cdev, &dev_data->dev);
	ida_simple_remove(&event_ida, MINOR(dev_data->dev.devt));
	/* Wake blocked readers and drop the probe-time reference */
	hangup_device(dev_data);

	return 0;
}
513*4882a593Smuzhiyun
/* ACPI HID of the Wilco EC event device this driver binds to. */
static const struct acpi_device_id event_acpi_ids[] = {
	{ "GOOG000D", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, event_acpi_ids);
519*4882a593Smuzhiyun
/* ACPI driver: add/remove manage the char devices, notify handles events. */
static struct acpi_driver event_driver = {
	.name = DRV_NAME,
	.class = DRV_NAME,
	.ids = event_acpi_ids,
	.ops = {
		.add = event_device_add,
		.notify = event_device_notify,
		.remove = event_device_remove,
	},
	.owner = THIS_MODULE,
};
531*4882a593Smuzhiyun
static int __init event_module_init(void)
{
	dev_t dev_num = 0;
	int ret;

	ret = class_register(&event_class);
	if (ret) {
		pr_err(DRV_NAME ": Failed registering class: %d\n", ret);
		return ret;
	}

	/* Request device numbers, starting with minor=0. Save the major num. */
	ret = alloc_chrdev_region(&dev_num, 0, EVENT_MAX_DEV, EVENT_DEV_NAME);
	if (ret) {
		pr_err(DRV_NAME ": Failed allocating dev numbers: %d\n", ret);
		goto destroy_class;
	}
	event_major = MAJOR(dev_num);

	/* Devices themselves are created in event_device_add() on probe */
	ret = acpi_bus_register_driver(&event_driver);
	if (ret < 0) {
		pr_err(DRV_NAME ": Failed registering driver: %d\n", ret);
		goto unregister_region;
	}

	return 0;

unregister_region:
	unregister_chrdev_region(MKDEV(event_major, 0), EVENT_MAX_DEV);
destroy_class:
	class_unregister(&event_class);
	ida_destroy(&event_ida);
	return ret;
}
566*4882a593Smuzhiyun
static void __exit event_module_exit(void)
{
	/* Tear down in reverse order of event_module_init() */
	acpi_bus_unregister_driver(&event_driver);
	unregister_chrdev_region(MKDEV(event_major, 0), EVENT_MAX_DEV);
	class_unregister(&event_class);
	ida_destroy(&event_ida);
}
574*4882a593Smuzhiyun
575*4882a593Smuzhiyun module_init(event_module_init);
576*4882a593Smuzhiyun module_exit(event_module_exit);
577*4882a593Smuzhiyun
578*4882a593Smuzhiyun MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
579*4882a593Smuzhiyun MODULE_DESCRIPTION("Wilco EC ACPI event driver");
580*4882a593Smuzhiyun MODULE_LICENSE("GPL");
581*4882a593Smuzhiyun MODULE_ALIAS("platform:" DRV_NAME);
582