// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends the userspace notification about events asynchronously some time after
 * the event happened.  When inotify gets an event it will need to add that
 * event to the group notify queue.  Since a single event might need to be on
 * multiple groups' notification queues we can't add the event directly to each
 * queue and instead add a small "event_holder" to each queue.  This event_holder
 * has a pointer back to the original event.  Since the majority of events are
 * going to end up on one, and only one, notification queue we embed one
 * event_holder into each event.  This means we have a single allocation instead
 * of always needing two.  If the embedded event_holder is already in use by
 * another group, a new event_holder (from fsnotify_event_holder_cachep) will be
 * allocated and used.
 */
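
/*
 * Illustrative sketch, not a definition from this file: in the code below
 * the "embedded holder" takes the form of the list_head embedded in each
 * struct fsnotify_event, so a queued event hangs directly off the group's
 * notification list:
 *
 *	struct fsnotify_event {
 *		struct list_head list;	// links into group->notification_list
 *		...
 *	};
 *
 *	group->notification_list <-> eventA->list <-> eventB->list <-> ...
 */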

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);

/**
 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
 *
 * Called from fsnotify_move, which is inlined into filesystem modules.
 */
u32 fsnotify_get_cookie(void)
{
	return atomic_inc_return(&fsnotify_sync_cookie);
}
EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
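
/*
 * Illustrative sketch (fsnotify() arguments abbreviated): a rename generates
 * two events that share one cookie, which is how userspace later pairs the
 * MOVED_FROM/MOVED_TO halves:
 *
 *	u32 cookie = fsnotify_get_cookie();
 *
 *	fsnotify(old_dir, FS_MOVED_FROM, ..., cookie);
 *	fsnotify(new_dir, FS_MOVED_TO, ..., cookie);
 */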

/* return true if the notify queue is empty, false otherwise */
bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
	assert_spin_locked(&group->notification_lock);
	return list_empty(&group->notification_list);
}

void fsnotify_destroy_event(struct fsnotify_group *group,
			    struct fsnotify_event *event)
{
	/* Overflow events are per-group and we don't want to free them */
	if (!event || event == group->overflow_event)
		return;
	/*
	 * If the event is still queued, we have a problem...  Do an unreliable
	 * lockless check first to avoid locking in the common case.  The
	 * locking may be necessary for permission events which got removed
	 * from the list by a different CPU than the one freeing the event.
	 */
	if (!list_empty(&event->list)) {
		spin_lock(&group->notification_lock);
		WARN_ON(!list_empty(&event->list));
		spin_unlock(&group->notification_lock);
	}
	group->ops->free_event(event);
}

/*
 * Add an event to the group notification queue.  The group can later pull this
 * event off the queue and deal with it.  The function returns 0 if the event
 * was added to the queue, 1 if the event was merged with some other queued
 * event, 2 if the event was not queued - either the queue of events has
 * overflowed or the group is shutting down.
 */
int fsnotify_add_event(struct fsnotify_group *group,
		       struct fsnotify_event *event,
		       int (*merge)(struct list_head *,
				    struct fsnotify_event *))
{
	int ret = 0;
	struct list_head *list = &group->notification_list;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	spin_lock(&group->notification_lock);

	if (group->shutdown) {
		spin_unlock(&group->notification_lock);
		return 2;
	}

	if (event == group->overflow_event ||
	    group->q_len >= group->max_events) {
		ret = 2;
		/* Queue overflow event only if it isn't already queued */
		if (!list_empty(&group->overflow_event->list)) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
		event = group->overflow_event;
		goto queue;
	}

	if (!list_empty(list) && merge) {
		ret = merge(list, event);
		if (ret) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
	}

queue:
	group->q_len++;
	list_add_tail(&event->list, list);
	spin_unlock(&group->notification_lock);

	wake_up(&group->notification_waitq);
	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
	return ret;
}
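
/*
 * Illustrative sketch of a caller, with hypothetical backend names: a
 * non-zero return means the event was not queued (1: merged, 2: dropped),
 * so the caller must dispose of its own copy:
 *
 *	ret = fsnotify_add_event(group, &my_event->fse, my_merge);
 *	if (ret)
 *		fsnotify_destroy_event(group, &my_event->fse);
 */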

void fsnotify_remove_queued_event(struct fsnotify_group *group,
				  struct fsnotify_event *event)
{
	assert_spin_locked(&group->notification_lock);
	/*
	 * We need to init the list head for the case of the overflow event
	 * so that the "already queued" check in fsnotify_add_event() works.
	 */
	list_del_init(&event->list);
	group->q_len--;
}
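
/*
 * Why list_del_init() above and not plain list_del(): the overflow event is
 * reused, and fsnotify_add_event() tests whether it is already queued with
 * list_empty(&group->overflow_event->list).  list_del() poisons the node so
 * that test would stay false forever; list_del_init() leaves the node
 * self-linked so list_empty() returns true again:
 *
 *	list_del(&event->list);       // poisoned, list_empty() == false
 *	list_del_init(&event->list);  // re-initialized, list_empty() == true
 */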

/*
 * Remove and return the first event from the notification list.  It is the
 * responsibility of the caller to destroy the obtained event.
 */
struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p\n", __func__, group);

	event = list_first_entry(&group->notification_list,
				 struct fsnotify_event, list);
	fsnotify_remove_queued_event(group, event);
	return event;
}

/*
 * Return the first event on the notification list without removing it.
 * Removal must be done with fsnotify_remove_first_event().
 */
struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
{
	assert_spin_locked(&group->notification_lock);

	return list_first_entry(&group->notification_list,
				struct fsnotify_event, list);
}
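
/*
 * Illustrative sketch of the usual consumer pattern in a backend's read()
 * path (event_size() stands in for the backend's own length check): peek
 * under the lock and only dequeue once the event is known to fit:
 *
 *	spin_lock(&group->notification_lock);
 *	if (fsnotify_notify_queue_is_empty(group)) {
 *		spin_unlock(&group->notification_lock);
 *		return NULL;
 *	}
 *	event = fsnotify_peek_first_event(group);
 *	if (event_size(event) > count) {
 *		spin_unlock(&group->notification_lock);
 *		return ERR_PTR(-EINVAL);
 *	}
 *	fsnotify_remove_queued_event(group, event);
 *	spin_unlock(&group->notification_lock);
 *	return event;
 */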

/*
 * Called when a group is being torn down to clean up any outstanding
 * event notifications.
 */
void fsnotify_flush_notify(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	spin_lock(&group->notification_lock);
	while (!fsnotify_notify_queue_is_empty(group)) {
		event = fsnotify_remove_first_event(group);
		spin_unlock(&group->notification_lock);
		fsnotify_destroy_event(group, event);
		spin_lock(&group->notification_lock);
	}
	spin_unlock(&group->notification_lock);
}