xref: /OK3568_Linux_fs/kernel/mm/mmu_notifier.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/*
	 * WARNING: hdr should be the first member of this structure
	 * so that it can be typecasted into mmu_notifier_subscriptions_hdr.
	 * This is required to avoid KMI CRC breakage.
	 */
	struct mmu_notifier_subscriptions_hdr hdr;
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount, however this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTES cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree, this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case
 * of no mmu_interval_notifier monitoring the VA.
 */
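/*
 * For example, with no invalidation in flight invalidate_seq sits at an even
 * value (init_subscriptions() starts it at 2). When an invalidation begins
 * and mn_itree_inv_start_range() finds an overlapping subscription, the seq
 * is ORed with 1 (odd, fully excluded); once the last concurrent range
 * finishes, mn_itree_inv_end() increments it back to an even value and wakes
 * any readers sleeping in mmu_interval_read_begin().
 */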
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens, then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * require a blocking context.  The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has a different seq value under the user_lock
	 * than we started with then it has collided.
	 *
	 * If the subscription currently has the same seq value as the
	 * subscriptions seq, then it is currently between
	 * invalidate_start/end and is colliding.
	 *
	 * The locking looks broadly like this:
	 *   mn_tree_invalidate_start():          mmu_interval_read_begin():
	 *                                         spin_lock
	 *                                          seq = READ_ONCE(interval_sub->invalidate_seq);
	 *                                          seq == subs->invalidate_seq
	 *                                         spin_unlock
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *     op->invalidate_range():
	 *       user_lock
	 *        mmu_interval_set_seq()
	 *         interval_sub->invalidate_seq = seq
	 *       user_unlock
	 *
	 *                          [Required: mmu_interval_read_retry() == true]
	 *
	 *   mn_itree_inv_end():
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *
	 *                                        user_lock
	 *                                         mmu_interval_read_retry():
	 *                                          interval_sub->invalidate_seq != seq
	 *                                        user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * interval_sub->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point, avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
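
/*
 * A minimal usage sketch of the collision-retry scheme described above
 * (illustrative only; "driver_lock" and "driver_update_sptes()" are
 * hypothetical driver-side names, not part of this file):
 *
 *	unsigned long seq;
 *
 * again:
 *	seq = mmu_interval_read_begin(&interval_sub);
 *	// gather the pages, e.g. via hmm_range_fault(); may sleep
 *	mutex_lock(&driver_lock);
 *	if (mmu_interval_read_retry(&interval_sub, seq)) {
 *		mutex_unlock(&driver_lock);
 *		goto again;
 *	}
 *	driver_update_sptes();		// establish the shadow PTEs
 *	mutex_unlock(&driver_lock);
 */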

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The notifier_subscriptions can't go away from under us because
	 * one mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->change_pte)
			subscription->ops->change_pte(subscription, mm, address,
						      pte);
	}
	srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" :
						"");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				/*
				 * We call all the notifiers on any EAGAIN,
				 * there is no way for a notifier to know if
				 * its start method failed, thus a start that
				 * does EAGAIN can't also do end.
				 */
				WARN_ON(ops->invalidate_range_end);
				ret = _ret;
			}
		}
	}

	if (ret) {
		/*
		 * Must be non-blocking to get here.  If there are multiple
		 * notifiers and one or more failed start, any that succeeded
		 * start are expecting their end to be called.  Do so now.
		 */
		hlist_for_each_entry_rcu(subscription, &subscriptions->list,
					 hlist, srcu_read_lock_held(&srcu)) {
			if (!subscription->ops->invalidate_range_end)
				continue;

			subscription->ops->invalidate_range_end(subscription,
								range);
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range, bool only_end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		/*
		 * Call invalidate_range here too, to avoid the need for the
		 * subsystem to register an invalidate_range_end call-back
		 * when there is an invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end()
		 * or invalidate_range(), so this adds no additional overhead
		 * (besides the pointer check).
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, i.e. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is safe to
		 * do when we know that a call to invalidate_range() already
		 * happened under the page table lock.
		 */
		if (!only_end && subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription,
							    range->mm,
							    range->start,
							    range->end);
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription, mm,
							    start, end);
	}
	srcu_read_unlock(&srcu, id);
}

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT

static inline void mmu_notifier_write_lock(struct mm_struct *mm)
{
	percpu_down_write(
		&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
}

static inline void mmu_notifier_write_unlock(struct mm_struct *mm)
{
	percpu_up_write(
		&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
}

#else /* CONFIG_SPECULATIVE_PAGE_FAULT */

static inline void mmu_notifier_write_lock(struct mm_struct *mm) {}
static inline void mmu_notifier_write_unlock(struct mm_struct *mm) {}

#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */

static void init_subscriptions(struct mmu_notifier_subscriptions *subscriptions)
{
	INIT_HLIST_HEAD(&subscriptions->list);
	spin_lock_init(&subscriptions->lock);
	subscriptions->invalidate_seq = 2;
	subscriptions->itree = RB_ROOT_CACHED;
	init_waitqueue_head(&subscriptions->wq);
	INIT_HLIST_HEAD(&subscriptions->deferred_list);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
 * write mode. A NULL mn signals the notifier is being registered for itree
 * mode.
 */
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	mmap_assert_write_locked(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
		fs_reclaim_release(GFP_KERNEL);
	}

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_lock.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		init_subscriptions(subscriptions);
	}

	mmu_notifier_write_lock(mm);

	ret = mm_take_all_locks(mm);
	if (unlikely(ret)) {
		mmu_notifier_write_unlock(mm);
		goto out_clean;
	}

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 *
	 * release semantics on the initialization of the
	 * mmu_notifier_subscriptions's contents are provided for unlocked
	 * readers.  acquire can only be used while holding the mmgrab or
	 * mmget, and is safe because once created the
	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
	 * As above, users holding the mmap_lock or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);
	mm->notifier_subscriptions->hdr.valid = true;

	if (subscription) {
		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	mmu_notifier_write_unlock(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_lock nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must be always called to
 * unregister the notifier.
 *
 * While the caller has a mmu_notifier get, the subscription->mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = __mmu_notifier_register(subscription, mm);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
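
/*
 * A minimal registration sketch, assuming a driver-defined
 * "my_mmu_notifier_ops" with at least ->release and
 * ->invalidate_range_start callbacks (the names below are hypothetical,
 * not part of this file):
 *
 *	static struct mmu_notifier my_notifier = {
 *		.ops = &my_mmu_notifier_ops,
 *	};
 *
 *	// mm_users must be pinned, e.g. mm == current->mm
 *	ret = mmu_notifier_register(&my_notifier, current->mm);
 *	if (ret)
 *		return ret;
 *	...
 *	mmu_notifier_unregister(&my_notifier, current->mm);
 */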

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
 *
 * While the caller has a mmu_notifier get the mm pointer will remain valid,
 * and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	mmap_assert_write_locked(mm);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(); it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will be run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);
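
/*
 * A minimal get/put lifecycle sketch, assuming an ops structure
 * "my_notifier_ops" that provides ->alloc_notifier() and ->free_notifier()
 * (hypothetical names, not part of this file):
 *
 *	mmap_write_lock(mm);
 *	subscription = mmu_notifier_get_locked(&my_notifier_ops, mm);
 *	mmap_write_unlock(mm);
 *	if (IS_ERR(subscription))
 *		return PTR_ERR(subscription);
 *	...
 *	mmu_notifier_put(subscription);
 *	// and mmu_notifier_synchronize() in the module's __exit path
 */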

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
		return -EINVAL;

	/* pairs with mmdrop in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility for live lock, instead defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the interval_sub->invalidate_seq should be
	 * odd, see mmu_interval_read_begin()
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}
1023*4882a593Smuzhiyun 
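/*
 * Editorial sketch (not compiled): how a consumer of the odd/even
 * invalidate_seq described above typically validates its shadow copy.
 * "demo_fill_sptes" and "my_sub" are hypothetical names; the
 * mmu_interval_read_begin()/mmu_interval_read_retry() pair is the real
 * collision-retry read side that this sequence number feeds.
 */
#if 0
static void demo_fill_sptes(struct mmu_interval_notifier *my_sub)
{
	unsigned long seq;

	do {
		/* Waits for any colliding invalidation to finish */
		seq = mmu_interval_read_begin(my_sub);
		/*
		 * Read the CPU page tables and build the device/shadow PTEs
		 * here, under the driver's own page table lock.
		 */
	} while (mmu_interval_read_retry(my_sub, seq));
}
#endif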
1024*4882a593Smuzhiyun /**
1025*4882a593Smuzhiyun  * mmu_interval_notifier_insert - Insert an interval notifier
1026*4882a593Smuzhiyun  * @interval_sub: Interval subscription to register
1027*4882a593Smuzhiyun  * @mm: mm_struct to attach to
1028*4882a593Smuzhiyun  * @start: Starting virtual address to monitor
1029*4882a593Smuzhiyun  * @length: Length of the range to monitor
1030*4882a593Smuzhiyun  * @ops: Interval notifier operations to be called on matching events
1031*4882a593Smuzhiyun  *
1032*4882a593Smuzhiyun  * This function subscribes the interval notifier for notifications from the
1033*4882a593Smuzhiyun  * mm.  Upon return the ops related to mmu_interval_notifier will be called
1034*4882a593Smuzhiyun  * whenever an event that intersects with the given range occurs.
1035*4882a593Smuzhiyun  *
1036*4882a593Smuzhiyun  * Upon return the subscription may not be present in the interval tree yet.
1037*4882a593Smuzhiyun  * The caller must use the normal interval notifier read flow via
1038*4882a593Smuzhiyun  * mmu_interval_read_begin() to establish SPTEs for this range.
1039*4882a593Smuzhiyun  */
1040*4882a593Smuzhiyun int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
1041*4882a593Smuzhiyun 				 struct mm_struct *mm, unsigned long start,
1042*4882a593Smuzhiyun 				 unsigned long length,
1043*4882a593Smuzhiyun 				 const struct mmu_interval_notifier_ops *ops)
1044*4882a593Smuzhiyun {
1045*4882a593Smuzhiyun 	struct mmu_notifier_subscriptions *subscriptions;
1046*4882a593Smuzhiyun 	int ret;
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun 	might_lock(&mm->mmap_lock);
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun 	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
1051*4882a593Smuzhiyun 	if (!subscriptions || !subscriptions->has_itree) {
1052*4882a593Smuzhiyun 		ret = mmu_notifier_register(NULL, mm);
1053*4882a593Smuzhiyun 		if (ret)
1054*4882a593Smuzhiyun 			return ret;
1055*4882a593Smuzhiyun 		subscriptions = mm->notifier_subscriptions;
1056*4882a593Smuzhiyun 	}
1057*4882a593Smuzhiyun 	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
1058*4882a593Smuzhiyun 					      start, length, ops);
1059*4882a593Smuzhiyun }
1060*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
1061*4882a593Smuzhiyun 
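/*
 * Editorial sketch (not compiled): a minimal user of
 * mmu_interval_notifier_insert().  "demo_invalidate", "demo_ops" and
 * "demo_register" are hypothetical names; struct mmu_interval_notifier_ops
 * and mmu_interval_set_seq() are the real interfaces.
 */
#if 0
static bool demo_invalidate(struct mmu_interval_notifier *interval_sub,
			    const struct mmu_notifier_range *range,
			    unsigned long cur_seq)
{
	/*
	 * Take the driver lock used by the read side, record the new
	 * sequence, and tear down any shadow mappings for the range.
	 */
	mmu_interval_set_seq(interval_sub, cur_seq);
	return true;
}

static const struct mmu_interval_notifier_ops demo_ops = {
	.invalidate = demo_invalidate,
};

static int demo_register(struct mmu_interval_notifier *interval_sub,
			 struct mm_struct *mm, unsigned long start,
			 unsigned long length)
{
	/* The caller is expected to hold an mmget() reference on @mm */
	return mmu_interval_notifier_insert(interval_sub, mm, start, length,
					    &demo_ops);
}
#endif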
1062*4882a593Smuzhiyun int mmu_interval_notifier_insert_locked(
1063*4882a593Smuzhiyun 	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
1064*4882a593Smuzhiyun 	unsigned long start, unsigned long length,
1065*4882a593Smuzhiyun 	const struct mmu_interval_notifier_ops *ops)
1066*4882a593Smuzhiyun {
1067*4882a593Smuzhiyun 	struct mmu_notifier_subscriptions *subscriptions =
1068*4882a593Smuzhiyun 		mm->notifier_subscriptions;
1069*4882a593Smuzhiyun 	int ret;
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 	mmap_assert_write_locked(mm);
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun 	if (!subscriptions || !subscriptions->has_itree) {
1074*4882a593Smuzhiyun 		ret = __mmu_notifier_register(NULL, mm);
1075*4882a593Smuzhiyun 		if (ret)
1076*4882a593Smuzhiyun 			return ret;
1077*4882a593Smuzhiyun 		subscriptions = mm->notifier_subscriptions;
1078*4882a593Smuzhiyun 	}
1079*4882a593Smuzhiyun 	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
1080*4882a593Smuzhiyun 					      start, length, ops);
1081*4882a593Smuzhiyun }
1082*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
1083*4882a593Smuzhiyun 
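/*
 * Editorial sketch (not compiled): the _locked variant is for callers that
 * already hold the mmap write lock; the explicit lock/unlock here only
 * illustrates the requirement.  "demo_register_locked" is a hypothetical
 * name and "demo_ops" is the hypothetical ops table from the sketch above.
 */
#if 0
static int demo_register_locked(struct mmu_interval_notifier *interval_sub,
				struct mm_struct *mm, unsigned long start,
				unsigned long length)
{
	int ret;

	mmap_write_lock(mm);
	ret = mmu_interval_notifier_insert_locked(interval_sub, mm, start,
						  length, &demo_ops);
	mmap_write_unlock(mm);
	return ret;
}
#endif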
1084*4882a593Smuzhiyun static bool
1085*4882a593Smuzhiyun mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
1086*4882a593Smuzhiyun 			  unsigned long seq)
1087*4882a593Smuzhiyun {
1088*4882a593Smuzhiyun 	bool ret;
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	spin_lock(&subscriptions->lock);
1091*4882a593Smuzhiyun 	ret = subscriptions->invalidate_seq != seq;
1092*4882a593Smuzhiyun 	spin_unlock(&subscriptions->lock);
1093*4882a593Smuzhiyun 	return ret;
1094*4882a593Smuzhiyun }
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun /**
1097*4882a593Smuzhiyun  * mmu_interval_notifier_remove - Remove an interval notifier
1098*4882a593Smuzhiyun  * @interval_sub: Interval subscription to unregister
1099*4882a593Smuzhiyun  *
1100*4882a593Smuzhiyun  * This function must be paired with mmu_interval_notifier_insert(). It cannot
1101*4882a593Smuzhiyun  * be called from any ops callback.
1102*4882a593Smuzhiyun  *
1103*4882a593Smuzhiyun  * Once this returns, ops callbacks are no longer running on other CPUs and
1104*4882a593Smuzhiyun  * will not be called in the future.
1105*4882a593Smuzhiyun  */
1106*4882a593Smuzhiyun void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
1107*4882a593Smuzhiyun {
1108*4882a593Smuzhiyun 	struct mm_struct *mm = interval_sub->mm;
1109*4882a593Smuzhiyun 	struct mmu_notifier_subscriptions *subscriptions =
1110*4882a593Smuzhiyun 		mm->notifier_subscriptions;
1111*4882a593Smuzhiyun 	unsigned long seq = 0;
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	might_sleep();
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 	spin_lock(&subscriptions->lock);
1116*4882a593Smuzhiyun 	if (mn_itree_is_invalidating(subscriptions)) {
1117*4882a593Smuzhiyun 		/*
1118*4882a593Smuzhiyun 		 * remove is being called after insert put this subscription on
1119*4882a593Smuzhiyun 		 * the deferred list, but before the deferred list was processed.
1120*4882a593Smuzhiyun 		 */
1121*4882a593Smuzhiyun 		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
1122*4882a593Smuzhiyun 			hlist_del(&interval_sub->deferred_item);
1123*4882a593Smuzhiyun 		} else {
1124*4882a593Smuzhiyun 			hlist_add_head(&interval_sub->deferred_item,
1125*4882a593Smuzhiyun 				       &subscriptions->deferred_list);
1126*4882a593Smuzhiyun 			seq = subscriptions->invalidate_seq;
1127*4882a593Smuzhiyun 		}
1128*4882a593Smuzhiyun 	} else {
1129*4882a593Smuzhiyun 		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
1130*4882a593Smuzhiyun 		interval_tree_remove(&interval_sub->interval_tree,
1131*4882a593Smuzhiyun 				     &subscriptions->itree);
1132*4882a593Smuzhiyun 	}
1133*4882a593Smuzhiyun 	spin_unlock(&subscriptions->lock);
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 	/*
1136*4882a593Smuzhiyun 	 * Waiting for the in-progress invalidation may sleep, so the caller
1137*4882a593Smuzhiyun 	 * must not hold any locks that are also taken by invalidation callbacks.
1138*4882a593Smuzhiyun 	 */
1139*4882a593Smuzhiyun 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
1140*4882a593Smuzhiyun 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
1141*4882a593Smuzhiyun 	if (seq)
1142*4882a593Smuzhiyun 		wait_event(subscriptions->wq,
1143*4882a593Smuzhiyun 			   mmu_interval_seq_released(subscriptions, seq));
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	/* pairs with mmgrab in mmu_interval_notifier_insert() */
1146*4882a593Smuzhiyun 	mmdrop(mm);
1147*4882a593Smuzhiyun }
1148*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);
1149*4882a593Smuzhiyun 
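/*
 * Editorial sketch (not compiled): tearing down a subscription.  Because
 * mmu_interval_notifier_remove() may sleep waiting for a running
 * invalidation, it must be called from sleepable process context with none
 * of the locks taken by the invalidate callback held.  "demo_unregister"
 * is a hypothetical name.
 */
#if 0
static void demo_unregister(struct mmu_interval_notifier *interval_sub)
{
	/* The driver's page table lock must not be held here */
	mmu_interval_notifier_remove(interval_sub);
	/* After this returns the invalidate callback will not run again */
}
#endif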
1150*4882a593Smuzhiyun /**
1151*4882a593Smuzhiyun  * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
1152*4882a593Smuzhiyun  *
1153*4882a593Smuzhiyun  * This function ensures that all outstanding asynchronous SRCU work from
1154*4882a593Smuzhiyun  * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
1155*4882a593Smuzhiyun  * associated with an unused mmu_notifier will no longer be called.
1156*4882a593Smuzhiyun  *
1157*4882a593Smuzhiyun  * Before calling, the caller must ensure that all of its mmu_notifiers have been
1158*4882a593Smuzhiyun  * fully released via mmu_notifier_put().
1159*4882a593Smuzhiyun  *
1160*4882a593Smuzhiyun  * Modules using the mmu_notifier_put() API should call this in their __exit
1161*4882a593Smuzhiyun  * function to avoid module unloading races.
1162*4882a593Smuzhiyun  */
1163*4882a593Smuzhiyun void mmu_notifier_synchronize(void)
1164*4882a593Smuzhiyun {
1165*4882a593Smuzhiyun 	synchronize_srcu(&srcu);
1166*4882a593Smuzhiyun }
1167*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
1168*4882a593Smuzhiyun 
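/*
 * Editorial sketch (not compiled): module teardown for a driver that hands
 * its notifiers to mmu_notifier_put().  "demo_exit" is a hypothetical name;
 * mmu_notifier_synchronize() is the real call.
 */
#if 0
static void __exit demo_exit(void)
{
	/*
	 * All struct mmu_notifier instances were already released with
	 * mmu_notifier_put(); wait for the deferred SRCU free_notifier()
	 * work so module .text cannot be unloaded while it still runs.
	 */
	mmu_notifier_synchronize();
}
module_exit(demo_exit);
#endif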
1169*4882a593Smuzhiyun bool
1170*4882a593Smuzhiyun mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
1171*4882a593Smuzhiyun {
1172*4882a593Smuzhiyun 	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
1173*4882a593Smuzhiyun 		return false;
1174*4882a593Smuzhiyun 	/* Return true if the vma still has the read flag set. */
1175*4882a593Smuzhiyun 	return range->vma->vm_flags & VM_READ;
1176*4882a593Smuzhiyun }
1177*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);
1178*4882a593Smuzhiyun 
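/*
 * Editorial sketch (not compiled): a notifier's invalidate_range_start()
 * implementation could use the helper above to downgrade its secondary
 * mappings to read-only on a write-protect event instead of dropping them.
 * "demo_range_start" is a hypothetical name.
 */
#if 0
static int demo_range_start(struct mmu_notifier *subscription,
			    const struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_update_to_read_only(range)) {
		/* VMA stays readable: write-protect the device mappings */
		return 0;
	}
	/* Otherwise unmap the affected range from the device */
	return 0;
}
#endif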
1179*4882a593Smuzhiyun #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
1182*4882a593Smuzhiyun {
1183*4882a593Smuzhiyun 	struct mmu_notifier_subscriptions *subscriptions;
1184*4882a593Smuzhiyun 	struct percpu_rw_semaphore_atomic *sem;
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	subscriptions = kzalloc(
1187*4882a593Smuzhiyun 		sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
1188*4882a593Smuzhiyun 	if (!subscriptions)
1189*4882a593Smuzhiyun 		return false;
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	sem = kzalloc(sizeof(struct percpu_rw_semaphore_atomic), GFP_KERNEL);
1192*4882a593Smuzhiyun 	if (!sem) {
1193*4882a593Smuzhiyun 		kfree(subscriptions);
1194*4882a593Smuzhiyun 		return false;
1195*4882a593Smuzhiyun 	}
1196*4882a593Smuzhiyun 	percpu_init_rwsem(&sem->rw_sem);
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	init_subscriptions(subscriptions);
1199*4882a593Smuzhiyun 	subscriptions->has_itree = true;
1200*4882a593Smuzhiyun 	subscriptions->hdr.valid = false;
1201*4882a593Smuzhiyun 	subscriptions->hdr.mmu_notifier_lock = sem;
1202*4882a593Smuzhiyun 	mm->notifier_subscriptions = subscriptions;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	return true;
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
1208*4882a593Smuzhiyun {
1209*4882a593Smuzhiyun 	percpu_rwsem_async_destroy(
1210*4882a593Smuzhiyun 			mm->notifier_subscriptions->hdr.mmu_notifier_lock);
1211*4882a593Smuzhiyun 	kfree(mm->notifier_subscriptions);
1212*4882a593Smuzhiyun 	mm->notifier_subscriptions = NULL;
1213*4882a593Smuzhiyun }
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
1216