1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
4*4882a593Smuzhiyun */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #include <linux/list.h>
7*4882a593Smuzhiyun #include <linux/mutex.h>
8*4882a593Smuzhiyun #include <linux/slab.h>
9*4882a593Smuzhiyun #include <linux/srcu.h>
10*4882a593Smuzhiyun #include <linux/rculist.h>
11*4882a593Smuzhiyun #include <linux/wait.h>
12*4882a593Smuzhiyun #include <linux/memcontrol.h>
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #include <linux/fsnotify_backend.h>
15*4882a593Smuzhiyun #include "fsnotify.h"
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #include <linux/atomic.h>
18*4882a593Smuzhiyun
/*
 * Final freeing of a group. Called only from fsnotify_put_group() once the
 * last reference is dropped, so nothing else can reach the group anymore.
 * Order matters: the backend's private state must be released before the
 * group itself is freed.
 */
static void fsnotify_final_destroy_group(struct fsnotify_group *group)
{
	/* Let the backend (inotify/fanotify/...) tear down its private data. */
	if (group->ops->free_group_priv)
		group->ops->free_group_priv(group);

	/* Drop the memcg reference held in group->memcg, if any was taken. */
	mem_cgroup_put(group->memcg);
	mutex_destroy(&group->mark_mutex);

	kfree(group);
}
32*4882a593Smuzhiyun
/*
 * Stop queueing new events for this group. Once this function returns
 * fsnotify_add_event() will not add any new events to the group's queue.
 */
void fsnotify_group_stop_queueing(struct fsnotify_group *group)
{
	/*
	 * Setting the flag under notification_lock serializes against any
	 * queuer holding the same lock: after we drop it, every queue
	 * attempt observes shutdown == true.
	 */
	spin_lock(&group->notification_lock);
	group->shutdown = true;
	spin_unlock(&group->notification_lock);
}
43*4882a593Smuzhiyun
/*
 * Trying to get rid of a group. Remove all marks, flush all events and release
 * the group reference.
 * Note that another thread calling fsnotify_clear_marks_by_group() may still
 * hold a ref to the group.
 *
 * The ordering of the steps below is load-bearing; see the per-step comments.
 */
void fsnotify_destroy_group(struct fsnotify_group *group)
{
	/*
	 * Stop queueing new events. The code below is careful enough to not
	 * require this but fanotify needs to stop queuing events even before
	 * fsnotify_destroy_group() is called and this makes the other callers
	 * of fsnotify_destroy_group() to see the same behavior.
	 */
	fsnotify_group_stop_queueing(group);

	/* Clear all marks for this group and queue them for destruction */
	fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_ALL_TYPES_MASK);

	/*
	 * Some marks can still be pinned when waiting for response from
	 * userspace. Wait for those now. fsnotify_prepare_user_wait() will
	 * not succeed now so this wait is race-free.
	 */
	wait_event(group->notification_waitq, !atomic_read(&group->user_waits));

	/*
	 * Wait until all marks get really destroyed. We could actually destroy
	 * them ourselves instead of waiting for worker to do it, however that
	 * would be racy as worker can already be processing some marks before
	 * we even entered fsnotify_destroy_group().
	 */
	fsnotify_wait_marks_destroyed();

	/*
	 * Since we have waited for fsnotify_mark_srcu in
	 * fsnotify_mark_destroy_list() there can be no outstanding event
	 * notification against this group. So clearing the notification queue
	 * of all events is reliable now.
	 */
	fsnotify_flush_notify(group);

	/*
	 * Destroy overflow event (we cannot use fsnotify_destroy_event() as
	 * that deliberately ignores overflow events).
	 */
	if (group->overflow_event)
		group->ops->free_event(group->overflow_event);

	/* Drop the initial reference taken in fsnotify_alloc_group(). */
	fsnotify_put_group(group);
}
95*4882a593Smuzhiyun
/*
 * Get a reference to a group. The caller must already hold a valid
 * reference; refcount_inc() is not meant to revive a zero refcount.
 */
void fsnotify_get_group(struct fsnotify_group *group)
{
	refcount_inc(&group->refcnt);
}
103*4882a593Smuzhiyun
/*
 * Drop a reference to a group. Free it if it's through, i.e. this was the
 * last reference to the group.
 */
void fsnotify_put_group(struct fsnotify_group *group)
{
	if (refcount_dec_and_test(&group->refcnt))
		fsnotify_final_destroy_group(group);
}
EXPORT_SYMBOL_GPL(fsnotify_put_group);
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun /*
115*4882a593Smuzhiyun * Create a new fsnotify_group and hold a reference for the group returned.
116*4882a593Smuzhiyun */
fsnotify_alloc_group(const struct fsnotify_ops * ops)117*4882a593Smuzhiyun struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun struct fsnotify_group *group;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
122*4882a593Smuzhiyun if (!group)
123*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun /* set to 0 when there a no external references to this group */
126*4882a593Smuzhiyun refcount_set(&group->refcnt, 1);
127*4882a593Smuzhiyun atomic_set(&group->num_marks, 0);
128*4882a593Smuzhiyun atomic_set(&group->user_waits, 0);
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun spin_lock_init(&group->notification_lock);
131*4882a593Smuzhiyun INIT_LIST_HEAD(&group->notification_list);
132*4882a593Smuzhiyun init_waitqueue_head(&group->notification_waitq);
133*4882a593Smuzhiyun group->max_events = UINT_MAX;
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun mutex_init(&group->mark_mutex);
136*4882a593Smuzhiyun INIT_LIST_HEAD(&group->marks_list);
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun group->ops = ops;
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun return group;
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(fsnotify_alloc_group);
143*4882a593Smuzhiyun
fsnotify_fasync(int fd,struct file * file,int on)144*4882a593Smuzhiyun int fsnotify_fasync(int fd, struct file *file, int on)
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun struct fsnotify_group *group = file->private_data;
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun return fasync_helper(fd, file, on, &group->fsn_fa) >= 0 ? 0 : -EIO;
149*4882a593Smuzhiyun }
150