// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

DEFINE_PER_CPU(int, eventfd_wake_count);

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
	int id;
};

__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_count() before calling this function. If
	 * it returns true, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	this_cpu_inc(eventfd_wake_count);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
	this_cpu_dec(eventfd_wake_count);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
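
/*
 * Illustrative sketch (not part of this file's API): a caller on a
 * potentially nested waitqueue path can honor the recursion guard above
 * by checking eventfd_signal_count() and deferring the signal to a work
 * item. The "my_ctx", "my_work" and "trigger" names are hypothetical.
 *
 *	static void my_signal_or_defer(struct my_ctx *c)
 *	{
 *		if (eventfd_signal_count())
 *			schedule_work(&c->my_work);	// worker calls eventfd_signal()
 *		else
 *			eventfd_signal(c->trigger, 1);
 *	}
 */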

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as overflow condition by returning an EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented. This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	return eventfd_signal_mask(ctx, n, 0);
}
EXPORT_SYMBOL_GPL(eventfd_signal);
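
/*
 * Usage sketch (illustrative, hypothetical names): a driver holding a
 * context reference from eventfd_ctx_fdget() can notify userspace from
 * atomic context, e.g. an interrupt handler:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		eventfd_signal(dev->trigger, 1);
 *		return IRQ_HANDLED;
 *	}
 */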

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_simple_remove(&eventfd_ida, ctx->id);
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock. This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it! add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read be ordered properly
	 * against the writes. The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}

static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}
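
/*
 * Semantics note (illustrative): with a counter of 3, a normal eventfd
 * read(2) returns 3 and resets the counter to 0, while an EFD_SEMAPHORE
 * eventfd returns 1 and leaves 2 pending, so three reads succeed before
 * a blocking read would wait.
 */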

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
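
/*
 * Usage sketch (illustrative, hypothetical names): consumers that poll an
 * eventfd through their own wait queue entry, in the style of KVM's irqfd,
 * can tear down with this helper so a pending count is not lost:
 *
 *	static void my_consumer_shutdown(struct my_consumer *c)
 *	{
 *		u64 cnt;
 *
 *		eventfd_ctx_remove_wait_queue(c->eventfd, &c->wait, &cnt);
 *		if (cnt != 0)
 *			my_handle_pending_event(c);	// hypothetical helper
 *		eventfd_ctx_put(c->eventfd);
 *	}
 */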

static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct eventfd_ctx *ctx = file->private_data;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (iov_iter_count(to) < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	if (!ctx->count) {
		if ((file->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -EAGAIN;
		}
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count)
				break;
			if (signal_pending(current)) {
				__remove_wait_queue(&ctx->wqh, &wait);
				__set_current_state(TASK_RUNNING);
				spin_unlock_irq(&ctx->wqh.lock);
				return -ERESTARTSYS;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	eventfd_ctx_do_read(ctx, &ucnt);
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irq(&ctx->wqh.lock);
	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
		return -EFAULT;

	return sizeof(ucnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
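
/*
 * Userspace view (illustrative): reads and writes transfer exactly one
 * host-endian __u64. A write adds to the counter and blocks (or fails
 * with EAGAIN under O_NONBLOCK) while the addition would push the counter
 * past ULLONG_MAX - 1; writing ULLONG_MAX itself is rejected with EINVAL.
 *
 *	uint64_t one = 1;
 *	if (write(efd, &one, sizeof(one)) != sizeof(one))
 *		perror("eventfd write");
 */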

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-id: %d\n", ctx->id);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read_iter	= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference of an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
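
/*
 * Lifecycle sketch (illustrative, hypothetical names): the returned context
 * reference outlives the file descriptor number, so a driver typically
 * resolves it once at setup and drops it with eventfd_ctx_put() on teardown:
 *
 *	dev->trigger = eventfd_ctx_fdget(efd);
 *	if (IS_ERR(dev->trigger))
 *		return PTR_ERR(dev->trigger);
 *	...
 *	eventfd_ctx_put(dev->trigger);
 */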

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @file pointer does not refer to an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	struct file *file;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

	flags &= EFD_SHARED_FCNTL_FLAGS;
	flags |= O_RDWR;
	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		goto err;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		fd = PTR_ERR(file);
		goto err;
	}

	file->f_mode |= FMODE_NOWAIT;
	fd_install(fd, file);
	return fd;
err:
	eventfd_free_ctx(ctx);
	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
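
/*
 * Userspace view (illustrative): glibc's eventfd(2) wrapper reaches
 * eventfd2 above, so its flags map directly to the EFD_* constants:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 *	if (efd < 0)
 *		perror("eventfd");
 */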