// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

/*
 * fsnotify inode mark locking/lifetime/refcnting
 *
 * REFCNT:
 * The group->refcnt and mark->refcnt tell how many "things" in the kernel
 * currently are referencing the objects. Both kinds of objects typically will
 * live inside the kernel with a refcnt of 2, one for their creation and one
 * for the reference a group and a mark hold to each other.
 * If you are holding the appropriate locks, you can take a reference and the
 * object itself is guaranteed to survive until the reference is dropped.
 *
 * LOCKING:
 * There are 3 locks involved with fsnotify inode marks and they MUST be taken
 * in order as follows:
 *
 * group->mark_mutex
 * mark->lock
 * mark->connector->lock
 *
 * group->mark_mutex protects the marks_list anchored inside a given group and
 * each mark is hooked via the g_list.  It also protects the group's private
 * data (i.e. group limits).
 *
 * mark->lock protects the mark's attributes, like its masks and flags.
 * Furthermore it protects the access to a reference of the group that the mark
 * is assigned to as well as the access to a reference of the inode/vfsmount
 * that is being watched by the mark.
 *
 * mark->connector->lock protects the list of marks anchored inside an
 * inode / vfsmount and each mark is hooked via the i_list.
 *
 * A list of notification marks relating to inode / mnt is contained in
 * fsnotify_mark_connector. That structure is alive as long as there are any
 * marks in the list and is also protected by fsnotify_mark_srcu. A mark gets
 * detached from fsnotify_mark_connector when the last reference to the mark
 * is dropped.  Thus having a mark reference is enough to protect the
 * mark->connector pointer and to make sure fsnotify_mark_connector cannot
 * disappear. Also, because we remove the mark from g_list before dropping the
 * mark reference associated with that, any mark found through g_list is
 * guaranteed to have mark->connector set until we drop group->mark_mutex.
 *
 * LIFETIME:
 * Inode marks survive between when they are added to an inode and when their
 * refcnt==0. Marks are also protected by fsnotify_mark_srcu.
 *
 * The inode mark can be cleared for a number of different reasons including:
 * - The inode is unlinked for the last time.  (fsnotify_inode_remove)
 * - The inode is being evicted from cache. (fsnotify_inode_delete)
 * - The fs the inode is on is unmounted.  (fsnotify_inode_delete/fsnotify_unmount_inodes)
 * - Something explicitly requests that it be removed.  (fsnotify_destroy_mark)
 * - The fsnotify_group associated with the mark is going away and all such marks
 *   need to be cleaned up. (fsnotify_clear_marks_by_group)
 *
 * This has the very interesting property of being able to run concurrently
 * with any (or all) of the other removal paths.
 */
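
/*
 * Illustrative sketch (disabled, not called anywhere in this file): taking
 * all three locks in the mandated order for a mark found via a group's
 * marks_list. The helper name is hypothetical and real users usually need
 * only a subset of these locks; mark->connector is only guaranteed non-NULL
 * here because holding group->mark_mutex keeps the mark on g_list.
 */
#if 0
static void example_lock_order(struct fsnotify_group *group,
			       struct fsnotify_mark *mark)
{
	mutex_lock(&group->mark_mutex);		/* 1st: group->mark_mutex */
	spin_lock(&mark->lock);			/* 2nd: mark->lock */
	spin_lock(&mark->connector->lock);	/* 3rd: connector->lock */

	/* ... inspect or modify mark state here ... */

	spin_unlock(&mark->connector->lock);
	spin_unlock(&mark->lock);
	mutex_unlock(&group->mark_mutex);
}
#endif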

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/ratelimit.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#define FSNOTIFY_REAPER_DELAY	(1)	/* 1 jiffy */

struct srcu_struct fsnotify_mark_srcu;
struct kmem_cache *fsnotify_mark_connector_cachep;

static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);
static struct fsnotify_mark_connector *connector_destroy_list;

static void fsnotify_mark_destroy_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy_workfn);

static void fsnotify_connector_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);

void fsnotify_get_mark(struct fsnotify_mark *mark)
{
	WARN_ON_ONCE(!refcount_read(&mark->refcnt));
	refcount_inc(&mark->refcnt);
}

static __u32 *fsnotify_conn_mask_p(struct fsnotify_mark_connector *conn)
{
	if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
		return &fsnotify_conn_inode(conn)->i_fsnotify_mask;
	else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT)
		return &fsnotify_conn_mount(conn)->mnt_fsnotify_mask;
	else if (conn->type == FSNOTIFY_OBJ_TYPE_SB)
		return &fsnotify_conn_sb(conn)->s_fsnotify_mask;
	return NULL;
}

__u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn)
{
	if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
		return 0;

	return *fsnotify_conn_mask_p(conn);
}

static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
	u32 new_mask = 0;
	struct fsnotify_mark *mark;

	assert_spin_locked(&conn->lock);
	/* We can get a detached connector here when the inode is being unlinked. */
	if (!fsnotify_valid_obj_type(conn->type))
		return;
	hlist_for_each_entry(mark, &conn->list, obj_list) {
		if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
			new_mask |= mark->mask;
	}
	*fsnotify_conn_mask_p(conn) = new_mask;
}

/*
 * Calculate the mask of events for a list of marks. The caller must make sure
 * that the connector and connector->obj cannot disappear under us.  Callers
 * achieve this by holding mark->lock or mark->group->mark_mutex for a mark on
 * this list.
 */
void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
	if (!conn)
		return;

	spin_lock(&conn->lock);
	__fsnotify_recalc_mask(conn);
	spin_unlock(&conn->lock);
	if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
		__fsnotify_update_child_dentry_flags(
					fsnotify_conn_inode(conn));
}
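
/*
 * Illustrative sketch (disabled): after changing a mark's event mask, the
 * object's cached mask must be recomputed. Holding group->mark_mutex keeps
 * the mark on the group's list and thus keeps mark->connector stable, as
 * described above. The helper name and locking granularity are hypothetical.
 */
#if 0
static void example_update_mask(struct fsnotify_mark *mark, __u32 new_mask)
{
	struct fsnotify_group *group = mark->group;

	mutex_lock(&group->mark_mutex);
	spin_lock(&mark->lock);
	mark->mask = new_mask;
	spin_unlock(&mark->lock);
	fsnotify_recalc_mask(mark->connector);
	mutex_unlock(&group->mark_mutex);
}
#endif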

/* Free all connectors queued for freeing once SRCU period ends */
static void fsnotify_connector_destroy_workfn(struct work_struct *work)
{
	struct fsnotify_mark_connector *conn, *free;

	spin_lock(&destroy_lock);
	conn = connector_destroy_list;
	connector_destroy_list = NULL;
	spin_unlock(&destroy_lock);

	synchronize_srcu(&fsnotify_mark_srcu);
	while (conn) {
		free = conn;
		conn = conn->destroy_next;
		kmem_cache_free(fsnotify_mark_connector_cachep, free);
	}
}

static void *fsnotify_detach_connector_from_object(
					struct fsnotify_mark_connector *conn,
					unsigned int *type)
{
	struct inode *inode = NULL;

	*type = conn->type;
	if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED)
		return NULL;

	if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
		inode = fsnotify_conn_inode(conn);
		inode->i_fsnotify_mask = 0;
		atomic_long_inc(&inode->i_sb->s_fsnotify_inode_refs);
	} else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
		fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0;
	} else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) {
		fsnotify_conn_sb(conn)->s_fsnotify_mask = 0;
	}

	rcu_assign_pointer(*(conn->obj), NULL);
	conn->obj = NULL;
	conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;

	return inode;
}

static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group = mark->group;

	if (WARN_ON_ONCE(!group))
		return;
	group->ops->free_mark(mark);
	fsnotify_put_group(group);
}

/* Drop object reference originally held by a connector */
static void fsnotify_drop_object(unsigned int type, void *objp)
{
	struct inode *inode;
	struct super_block *sb;

	if (!objp)
		return;
	/* Currently only inode references are passed to be dropped */
	if (WARN_ON_ONCE(type != FSNOTIFY_OBJ_TYPE_INODE))
		return;
	inode = objp;
	sb = inode->i_sb;
	iput(inode);
	if (atomic_long_dec_and_test(&sb->s_fsnotify_inode_refs))
		wake_up_var(&sb->s_fsnotify_inode_refs);
}

void fsnotify_put_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_mark_connector *conn = READ_ONCE(mark->connector);
	void *objp = NULL;
	unsigned int type = FSNOTIFY_OBJ_TYPE_DETACHED;
	bool free_conn = false;

	/* Catch marks that were actually never attached to an object */
	if (!conn) {
		if (refcount_dec_and_test(&mark->refcnt))
			fsnotify_final_mark_destroy(mark);
		return;
	}

	/*
	 * We have to be careful so that traversals of obj_list under lock can
	 * safely grab a mark reference.
	 */
	if (!refcount_dec_and_lock(&mark->refcnt, &conn->lock))
		return;

	hlist_del_init_rcu(&mark->obj_list);
	if (hlist_empty(&conn->list)) {
		objp = fsnotify_detach_connector_from_object(conn, &type);
		free_conn = true;
	} else {
		__fsnotify_recalc_mask(conn);
	}
	WRITE_ONCE(mark->connector, NULL);
	spin_unlock(&conn->lock);

	fsnotify_drop_object(type, objp);

	if (free_conn) {
		spin_lock(&destroy_lock);
		conn->destroy_next = connector_destroy_list;
		connector_destroy_list = conn;
		spin_unlock(&destroy_lock);
		queue_work(system_unbound_wq, &connector_reaper_work);
	}
	/*
	 * Note that we didn't update the flags telling whether the inode cares
	 * about what's happening with its children. We update these flags from
	 * __fsnotify_parent() lazily when the next event happens on one of our
	 * children.
	 */
	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);
	queue_delayed_work(system_unbound_wq, &reaper_work,
			   FSNOTIFY_REAPER_DELAY);
}
EXPORT_SYMBOL_GPL(fsnotify_put_mark);

/*
 * Get a mark reference when we found the mark via lockless traversal of the
 * object list. The mark can already be removed from the list by now and on
 * its way to be destroyed once the SRCU period ends.
 *
 * Also pin the group so it doesn't disappear under us.
 */
static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
{
	if (!mark)
		return true;

	if (refcount_inc_not_zero(&mark->refcnt)) {
		spin_lock(&mark->lock);
		if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
			/* mark is attached, group is still alive then */
			atomic_inc(&mark->group->user_waits);
			spin_unlock(&mark->lock);
			return true;
		}
		spin_unlock(&mark->lock);
		fsnotify_put_mark(mark);
	}
	return false;
}

/*
 * Puts marks and wakes up group destruction if necessary.
 *
 * Pairs with fsnotify_get_mark_safe()
 */
static void fsnotify_put_mark_wake(struct fsnotify_mark *mark)
{
	if (mark) {
		struct fsnotify_group *group = mark->group;

		fsnotify_put_mark(mark);
		/*
		 * We abuse notification_waitq on group shutdown to wait until
		 * all marks pinned while waiting for userspace have been
		 * released.
		 */
		if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
			wake_up(&group->notification_waitq);
	}
}

bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
	__releases(&fsnotify_mark_srcu)
{
	int type;

	fsnotify_foreach_obj_type(type) {
		/* This can fail if the mark is being removed */
		if (!fsnotify_get_mark_safe(iter_info->marks[type])) {
			__release(&fsnotify_mark_srcu);
			goto fail;
		}
	}

	/*
	 * Now that both marks are pinned by refcount in the inode / vfsmount
	 * lists, we can drop the SRCU lock, and safely resume the list
	 * iteration once userspace returns.
	 */
	srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx);

	return true;

fail:
	for (type--; type >= 0; type--)
		fsnotify_put_mark_wake(iter_info->marks[type]);
	return false;
}

void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
	__acquires(&fsnotify_mark_srcu)
{
	int type;

	iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
	fsnotify_foreach_obj_type(type)
		fsnotify_put_mark_wake(iter_info->marks[type]);
}
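
/*
 * Illustrative sketch (disabled): how a backend that blocks on userspace,
 * e.g. a permission-event handler, would bracket the wait with the two
 * helpers above. wait_for_userspace_response() is a hypothetical stand-in
 * for the backend's own wait logic.
 */
#if 0
static int example_user_wait(struct fsnotify_iter_info *iter_info)
{
	int ret;

	if (!fsnotify_prepare_user_wait(iter_info))
		return -ENOENT;	/* a mark went away; the caller must bail out */

	/* SRCU is dropped here, so blocking indefinitely is safe. */
	ret = wait_for_userspace_response();

	/* Re-enters SRCU and drops the temporary mark/group pins. */
	fsnotify_finish_user_wait(iter_info);
	return ret;
}
#endif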

/*
 * Mark the mark as detached and remove it from the group list. The mark still
 * stays in the object list until its last reference is dropped. Note that we
 * rely on the mark being removed from the group list before the corresponding
 * reference to it is dropped. In particular we rely on mark->connector being
 * valid while we hold group->mark_mutex if we found the mark through g_list.
 *
 * Must be called with group->mark_mutex held. The caller must either hold a
 * reference to the mark or be protected by fsnotify_mark_srcu.
 */
void fsnotify_detach_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group = mark->group;

	WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
	WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
		     refcount_read(&mark->refcnt) < 1 +
			!!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));

	spin_lock(&mark->lock);
	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&mark->lock);
		return;
	}
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ATTACHED;
	list_del_init(&mark->g_list);
	spin_unlock(&mark->lock);

	atomic_dec(&group->num_marks);

	/* Drop the mark reference acquired in fsnotify_add_mark_locked() */
	fsnotify_put_mark(mark);
}

/*
 * Free an fsnotify mark. The mark is actually only marked as being freed; the
 * freeing happens only once the last reference to the mark is dropped, from a
 * workqueue which first waits for the SRCU period to end.
 *
 * The caller must have a reference to the mark or be protected by
 * fsnotify_mark_srcu.
 */
void fsnotify_free_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group = mark->group;

	spin_lock(&mark->lock);
	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
		spin_unlock(&mark->lock);
		return;
	}
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	spin_unlock(&mark->lock);

	/*
	 * Some groups like to know that marks are being freed.  This is a
	 * callback to the group function to let it know that this mark
	 * is being freed.
	 */
	if (group->ops->freeing_mark)
		group->ops->freeing_mark(mark, group);
}

void fsnotify_destroy_mark(struct fsnotify_mark *mark,
			   struct fsnotify_group *group)
{
	mutex_lock(&group->mark_mutex);
	fsnotify_detach_mark(mark);
	mutex_unlock(&group->mark_mutex);
	fsnotify_free_mark(mark);
}
EXPORT_SYMBOL_GPL(fsnotify_destroy_mark);

/*
 * Sorting function for lists of fsnotify marks.
 *
 * Fanotify supports different notification classes (reflected as priority of
 * notification group). Events shall be passed to notification groups in
 * decreasing priority order. To achieve this marks in notification lists for
 * inodes and vfsmounts are sorted so that priorities of corresponding groups
 * are descending.
 *
 * Furthermore correct handling of the ignore mask requires processing inode
 * and vfsmount marks of each group together. Using the group address as
 * further sort criterion provides a unique sorting order and thus we can
 * merge inode and vfsmount lists of marks in linear time and find groups
 * present in both lists.
 *
 * A return value of 1 signifies that b has priority over a.
 * A return value of 0 signifies that the two marks have to be handled together.
 * A return value of -1 signifies that a has priority over b.
 */
int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
{
	if (a == b)
		return 0;
	if (!a)
		return 1;
	if (!b)
		return -1;
	if (a->priority < b->priority)
		return 1;
	if (a->priority > b->priority)
		return -1;
	if (a < b)
		return 1;
	return -1;
}
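
/*
 * Worked example (illustrative groups, not from this file): with gA at
 * priority 2 and gB, gC both at priority 1, fsnotify_compare_groups(gA, gB)
 * returns -1, so gA's marks sort first. fsnotify_compare_groups(gB, gC)
 * falls through to the address comparison, a unique and stable tie-break,
 * so the inode and vfsmount mark lists see gB and gC in the same relative
 * order and can be merged in a single linear pass.
 */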

static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
					       unsigned int type,
					       __kernel_fsid_t *fsid)
{
	struct inode *inode = NULL;
	struct fsnotify_mark_connector *conn;

	conn = kmem_cache_alloc(fsnotify_mark_connector_cachep, GFP_KERNEL);
	if (!conn)
		return -ENOMEM;
	spin_lock_init(&conn->lock);
	INIT_HLIST_HEAD(&conn->list);
	conn->type = type;
	conn->obj = connp;
	/* Cache the fsid of the filesystem containing the object */
	if (fsid) {
		conn->fsid = *fsid;
		conn->flags = FSNOTIFY_CONN_FLAG_HAS_FSID;
	} else {
		conn->fsid.val[0] = conn->fsid.val[1] = 0;
		conn->flags = 0;
	}
	if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
		inode = igrab(fsnotify_conn_inode(conn));
	/*
	 * cmpxchg() provides the barrier so that readers of *connp can see
	 * only a fully initialized structure.
	 */
	if (cmpxchg(connp, NULL, conn)) {
		/* Someone else created the list structure for us */
		if (inode)
			iput(inode);
		kmem_cache_free(fsnotify_mark_connector_cachep, conn);
	}

	return 0;
}
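
/*
 * Illustrative sketch (disabled) of the lock-free publication idiom used
 * above: fully initialize the object, then attempt a single cmpxchg() from
 * NULL. On failure another thread won the race, so the loser frees its copy
 * and both proceed with the published instance. All names are hypothetical.
 */
#if 0
static struct foo *example_publish(struct foo **slot)
{
	struct foo *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return NULL;
	/* ... fully initialize *new before publishing ... */
	if (cmpxchg(slot, NULL, new)) {
		kfree(new);		/* lost the race; use the winner's */
		return READ_ONCE(*slot);
	}
	return new;
}
#endif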

/*
 * Get the mark connector, make sure it is alive and return with its lock held.
 * This is for users that get the connector pointer from an inode or mount.
 * Users that hold a reference to a mark on the list may directly lock
 * connector->lock as they are sure the list cannot go away under them.
 */
static struct fsnotify_mark_connector *fsnotify_grab_connector(
						fsnotify_connp_t *connp)
{
	struct fsnotify_mark_connector *conn;
	int idx;

	idx = srcu_read_lock(&fsnotify_mark_srcu);
	conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
	if (!conn)
		goto out;
	spin_lock(&conn->lock);
	if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED) {
		spin_unlock(&conn->lock);
		srcu_read_unlock(&fsnotify_mark_srcu, idx);
		return NULL;
	}
out:
	srcu_read_unlock(&fsnotify_mark_srcu, idx);
	return conn;
}

/*
 * Add a mark into the proper place in the given list of marks. These marks
 * may be used by the fsnotify backend to determine which event types should
 * be delivered to which group and for which inodes. These marks are ordered
 * according to priority, highest number first, and then by the group's
 * location in memory.
 */
static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
				  fsnotify_connp_t *connp, unsigned int type,
				  int allow_dups, __kernel_fsid_t *fsid)
{
	struct fsnotify_mark *lmark, *last = NULL;
	struct fsnotify_mark_connector *conn;
	int cmp;
	int err = 0;

	if (WARN_ON(!fsnotify_valid_obj_type(type)))
		return -EINVAL;

	/* Backend is expected to check for zero fsid (e.g. tmpfs) */
	if (fsid && WARN_ON_ONCE(!fsid->val[0] && !fsid->val[1]))
		return -ENODEV;

restart:
	spin_lock(&mark->lock);
	conn = fsnotify_grab_connector(connp);
	if (!conn) {
		spin_unlock(&mark->lock);
		err = fsnotify_attach_connector_to_object(connp, type, fsid);
		if (err)
			return err;
		goto restart;
	} else if (fsid && !(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID)) {
		conn->fsid = *fsid;
		/* Pairs with smp_rmb() in fanotify_get_fsid() */
		smp_wmb();
		conn->flags |= FSNOTIFY_CONN_FLAG_HAS_FSID;
	} else if (fsid && (conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID) &&
		   (fsid->val[0] != conn->fsid.val[0] ||
		    fsid->val[1] != conn->fsid.val[1])) {
		/*
		 * Backend is expected to check for non uniform fsid
		 * (e.g. btrfs), but maybe we missed something?
		 * Only allow setting conn->fsid once to a non zero fsid.
		 * inotify and non-fid fanotify groups do not set nor test
		 * conn->fsid.
		 */
		pr_warn_ratelimited("%s: fsid mismatch on object of type %u: "
				    "%x.%x != %x.%x\n", __func__, conn->type,
				    fsid->val[0], fsid->val[1],
				    conn->fsid.val[0], conn->fsid.val[1]);
		err = -EXDEV;
		goto out_err;
	}

	/* is mark the first mark? */
	if (hlist_empty(&conn->list)) {
		hlist_add_head_rcu(&mark->obj_list, &conn->list);
		goto added;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, &conn->list, obj_list) {
		last = lmark;

		if ((lmark->group == mark->group) &&
		    (lmark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) &&
		    !allow_dups) {
			err = -EEXIST;
			goto out_err;
		}

		cmp = fsnotify_compare_groups(lmark->group, mark->group);
		if (cmp >= 0) {
			hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list);
			goto added;
		}
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
added:
	/*
	 * Since the connector is attached to the object using cmpxchg() we
	 * are guaranteed that the connector initialization is fully visible
	 * to anyone seeing mark->connector set.
	 */
	WRITE_ONCE(mark->connector, conn);
out_err:
	spin_unlock(&conn->lock);
	spin_unlock(&mark->lock);
	return err;
}

/*
 * Attach an initialized mark to a given group and fs object.
 * These marks may be used by the fsnotify backend to determine which
 * event types should be delivered to which group.
 */
int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
			     fsnotify_connp_t *connp, unsigned int type,
			     int allow_dups, __kernel_fsid_t *fsid)
{
	struct fsnotify_group *group = mark->group;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&group->mark_mutex));

	/*
	 * LOCKING ORDER!!!!
	 * group->mark_mutex
	 * mark->lock
	 * mark->connector->lock
	 */
	spin_lock(&mark->lock);
	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED;

	list_add(&mark->g_list, &group->marks_list);
	atomic_inc(&group->num_marks);
	fsnotify_get_mark(mark); /* for g_list */
	spin_unlock(&mark->lock);

	ret = fsnotify_add_mark_list(mark, connp, type, allow_dups, fsid);
	if (ret)
		goto err;

	if (mark->mask)
		fsnotify_recalc_mask(mark->connector);

	return ret;
err:
	spin_lock(&mark->lock);
	mark->flags &= ~(FSNOTIFY_MARK_FLAG_ALIVE |
			 FSNOTIFY_MARK_FLAG_ATTACHED);
	list_del_init(&mark->g_list);
	spin_unlock(&mark->lock);
	atomic_dec(&group->num_marks);

	fsnotify_put_mark(mark);
	return ret;
}

int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp,
		      unsigned int type, int allow_dups, __kernel_fsid_t *fsid)
{
	int ret;
	struct fsnotify_group *group = mark->group;

	mutex_lock(&group->mark_mutex);
	ret = fsnotify_add_mark_locked(mark, connp, type, allow_dups, fsid);
	mutex_unlock(&group->mark_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(fsnotify_add_mark);
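
/*
 * Illustrative sketch (disabled): the typical backend lifecycle for a mark,
 * loosely modeled on what inotify/fanotify do. The helper name and mask are
 * hypothetical, and the sketch assumes the group's free_mark callback frees
 * the allocation; real backends usually allocate marks from their own cache.
 */
#if 0
static int example_watch_inode(struct fsnotify_group *group,
			       struct inode *inode, __u32 mask)
{
	struct fsnotify_mark *mark;
	int ret;

	mark = kzalloc(sizeof(*mark), GFP_KERNEL);
	if (!mark)
		return -ENOMEM;
	fsnotify_init_mark(mark, group);	/* refcnt = 1, pins the group */
	mark->mask = mask;

	/* Attaches the mark and recalculates the inode's cached mask. */
	ret = fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
				FSNOTIFY_OBJ_TYPE_INODE, /*allow_dups=*/0,
				NULL);
	if (ret)
		fsnotify_put_mark(mark);	/* drops the creation reference */
	return ret;
}
#endif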

/*
 * Given a list of marks, find the mark associated with a given group. If
 * found, take a reference to that mark and return it, else return NULL.
 */
struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
					 struct fsnotify_group *group)
{
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark *mark;

	conn = fsnotify_grab_connector(connp);
	if (!conn)
		return NULL;

	hlist_for_each_entry(mark, &conn->list, obj_list) {
		if (mark->group == group &&
		    (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
			fsnotify_get_mark(mark);
			spin_unlock(&conn->lock);
			return mark;
		}
	}
	spin_unlock(&conn->lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(fsnotify_find_mark);

/* Clear any marks in a group with the given type mask */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
				   unsigned int type_mask)
{
	struct fsnotify_mark *lmark, *mark;
	LIST_HEAD(to_free);
	struct list_head *head = &to_free;

	/* Skip the selection step if we want to clear all marks. */
	if (type_mask == FSNOTIFY_OBJ_ALL_TYPES_MASK) {
		head = &group->marks_list;
		goto clear;
	}
	/*
	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
	 * fsnotify_clear_marks_by_inode() can come and free marks. This holds
	 * even for marks on our to_free list, so we have to hold mark_mutex
	 * even when accessing that list. And freeing a mark requires us to
	 * drop mark_mutex. So we can reliably free only the first mark in the
	 * list. That's why we first move the marks to free onto the to_free
	 * list in one go and then free the marks on the to_free list one by
	 * one.
	 */
	mutex_lock(&group->mark_mutex);
	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
		if ((1U << mark->connector->type) & type_mask)
			list_move(&mark->g_list, &to_free);
	}
	mutex_unlock(&group->mark_mutex);

clear:
	while (1) {
		mutex_lock(&group->mark_mutex);
		if (list_empty(head)) {
			mutex_unlock(&group->mark_mutex);
			break;
		}
		mark = list_first_entry(head, struct fsnotify_mark, g_list);
		fsnotify_get_mark(mark);
		fsnotify_detach_mark(mark);
		mutex_unlock(&group->mark_mutex);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
	}
}

/* Destroy all marks attached to an object via connector */
void fsnotify_destroy_marks(fsnotify_connp_t *connp)
{
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark *mark, *old_mark = NULL;
	void *objp;
	unsigned int type;

	conn = fsnotify_grab_connector(connp);
	if (!conn)
		return;
	/*
	 * We have to be careful since we can race with e.g.
	 * fsnotify_clear_marks_by_group() and once we drop conn->lock, the
	 * list can get modified. However we are holding a mark reference and
	 * thus our mark cannot be removed from the obj_list, so we can
	 * continue the iteration after regaining conn->lock.
	 */
	hlist_for_each_entry(mark, &conn->list, obj_list) {
		fsnotify_get_mark(mark);
		spin_unlock(&conn->lock);
		if (old_mark)
			fsnotify_put_mark(old_mark);
		old_mark = mark;
		fsnotify_destroy_mark(mark, mark->group);
		spin_lock(&conn->lock);
	}
	/*
	 * Detach the list from the object now so that we don't pin the inode
	 * until all mark references get dropped. That would lead to strange
	 * results such as delaying inode deletion or blocking unmount.
	 */
	objp = fsnotify_detach_connector_from_object(conn, &type);
	spin_unlock(&conn->lock);
	if (old_mark)
		fsnotify_put_mark(old_mark);
	fsnotify_drop_object(type, objp);
}

/*
 * Nothing fancy, just initialize lists and locks and counters.
 */
void fsnotify_init_mark(struct fsnotify_mark *mark,
			struct fsnotify_group *group)
{
	memset(mark, 0, sizeof(*mark));
	spin_lock_init(&mark->lock);
	refcount_set(&mark->refcnt, 1);
	fsnotify_get_group(group);
	mark->group = group;
	WRITE_ONCE(mark->connector, NULL);
}
EXPORT_SYMBOL_GPL(fsnotify_init_mark);

/*
 * Destroy all marks in destroy_list, waiting for the SRCU period to finish
 * before actually freeing the marks.
 */
static void fsnotify_mark_destroy_workfn(struct work_struct *work)
{
	struct fsnotify_mark *mark, *next;
	struct list_head private_destroy_list;

	spin_lock(&destroy_lock);
	/* exchange the list head */
	list_replace_init(&destroy_list, &private_destroy_list);
	spin_unlock(&destroy_lock);

	synchronize_srcu(&fsnotify_mark_srcu);

	list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
		list_del_init(&mark->g_list);
		fsnotify_final_mark_destroy(mark);
	}
}
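
/*
 * Illustrative sketch (disabled): why the workfn above may free marks only
 * after synchronize_srcu(). A lockless reader walks an object's mark list
 * inside an fsnotify_mark_srcu read-side section; synchronize_srcu() waits
 * out every such section that might still observe a mark already queued on
 * destroy_list. The helper name is hypothetical.
 */
#if 0
static void example_lockless_walk(fsnotify_connp_t *connp)
{
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark *mark;
	int idx;

	idx = srcu_read_lock(&fsnotify_mark_srcu);
	conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
	if (conn) {
		hlist_for_each_entry_rcu(mark, &conn->list, obj_list) {
			/* mark may already be detached, but is not yet freed */
		}
	}
	srcu_read_unlock(&fsnotify_mark_srcu, idx);
}
#endif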

/* Wait for all marks queued for destruction to be actually destroyed */
void fsnotify_wait_marks_destroyed(void)
{
	flush_delayed_work(&reaper_work);
}
EXPORT_SYMBOL_GPL(fsnotify_wait_marks_destroyed);