/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "mmu_rb.h"
#include "trace.h"

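/*
 * This file implements a per-mm cache of address ranges (typically
 * pinned user buffers), keyed by virtual address.  Nodes sit in an
 * interval tree for overlap lookups and on an LRU list for eviction.
 * An mmu_notifier keeps the cache coherent with the address space:
 * when a range is invalidated, overlapping nodes are unlinked under
 * the handler lock and their remove() callbacks are deferred to a
 * workqueue, where sleeping is permitted.  Callers supply the
 * filter/insert/evict/remove/invalidate callbacks through
 * struct mmu_rb_ops; the exact prototypes live in mmu_rb.h.
 */
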
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
		const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
	.invalidate_range_start = mmu_notifier_range_start,
};

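/*
 * INTERVAL_TREE_DEFINE() instantiates the static interval-tree helpers
 * used below: __mmu_int_rb_insert(), __mmu_int_rb_remove(),
 * __mmu_int_rb_iter_first() and __mmu_int_rb_iter_next().  Interval
 * endpoints are inclusive: [mmu_node_start(node), mmu_node_last(node)].
 */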
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

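/*
 * Node bounds are widened to whole pages: the start rounds down to a
 * page boundary and the last byte rounds up to the end of the final
 * page.  For example, with 4 KiB pages, addr == 0x1234 and len == 0x10
 * yield the inclusive interval [0x1000, 0x1fff].
 */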
static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

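/*
 * hfi1_mmu_rb_register() - allocate a handler and attach it to the
 * current process's mm via an mmu_notifier.  Because the notifier is
 * registered against current->mm, the handler is tied to the calling
 * process; the insert/remove/evict entry points below all bail out if
 * invoked from a different mm.
 *
 * Returns 0 and sets *handler on success, or a negative errno
 * (e.g. -ENOMEM) on failure.  A minimal caller sketch, with
 * hypothetical names (my_priv, my_ops, my_wq):
 *
 *	struct mmu_rb_handler *h;
 *	int ret = hfi1_mmu_rb_register(my_priv, &my_ops, my_wq, &h);
 *
 *	if (ret)
 *		return ret;
 */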
int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *h;
	int ret;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->root = RB_ROOT_CACHED;
	h->ops = ops;
	h->ops_arg = ops_arg;
	INIT_HLIST_NODE(&h->mn.hlist);
	spin_lock_init(&h->lock);
	h->mn.ops = &mn_opts;
	INIT_WORK(&h->del_work, handle_remove);
	INIT_LIST_HEAD(&h->del_list);
	INIT_LIST_HEAD(&h->lru_list);
	h->wq = wq;

	ret = mmu_notifier_register(&h->mn, current->mm);
	if (ret) {
		kfree(h);
		return ret;
	}

	*handler = h;
	return 0;
}

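/*
 * hfi1_mmu_rb_unregister() - tear down a handler.  The ordering is
 * deliberate: pin the mm with mmgrab() so it cannot be freed under us,
 * unregister the notifier so no new invalidations arrive, flush any
 * deferred-remove work already queued, then drain the tree and hand
 * every remaining node to the remove() callback.
 */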
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Prevent freeing of mm until we are completely finished. */
	mmgrab(handler->mn.mm);

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mn.mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	/* Now the mm may be freed. */
	mmdrop(handler->mn.mm);

	kfree(handler);
}

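/*
 * hfi1_mmu_rb_insert() - add a node to the interval tree and to the
 * head of the LRU list.  Fails with -EPERM when called from a process
 * other than the one that registered the handler, and with -EINVAL
 * when the range already overlaps a cached entry (or, if a filter
 * callback is set, an entry the filter accepts).  Note that
 * ops->insert() runs under the handler spinlock and therefore must
 * not sleep.
 */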
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);

	if (current->mm != handler->mn.mm)
		return -EPERM;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
	mnode->handler = handler;
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/*
 * Caller must hold the handler lock.  Returns the first node whose
 * interval overlaps [addr, addr + len), or, when a filter callback is
 * set, the first overlapping node the filter accepts; NULL otherwise.
 */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

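/*
 * hfi1_mmu_rb_remove_unless_exact() - look up [addr, addr + len) and,
 * unless the match has exactly the same addr and len, detach it from
 * the tree and the LRU list.  Returns true when a node was detached;
 * *rb_node is set to the node found (or NULL).  An exact match stays
 * cached so the caller can reuse it.  The remove() callback is NOT
 * invoked here; ownership of a detached node passes to the caller.
 */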
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len,
				     struct mmu_rb_node **rb_node)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	bool ret = false;

	if (current->mm != handler->mn.mm)
		return ret;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node) {
		if (node->addr == addr && node->len == len)
			goto unlock;
		__mmu_int_rb_remove(node, &handler->root);
		list_del(&node->list); /* remove from LRU list */
		ret = true;
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	*rb_node = node;
	return ret;
}

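/*
 * hfi1_mmu_rb_evict() - walk the LRU list starting from the
 * least-recently-used end and offer each node to the ops->evict()
 * callback.  Accepted nodes are moved onto a local delete list; their
 * remove() callbacks run only after the handler lock is dropped, so
 * they are free to sleep.  The callback can end the walk early through
 * its *stop argument.
 */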
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	if (current->mm != handler->mn.mm)
		return;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
					 list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}

/*
 * It is up to the caller to ensure that this function does not race
 * with the mmu invalidate notifier, which may be invoking the user's
 * remove callback on 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
			struct mmu_rb_node *node)
{
	unsigned long flags;

	if (current->mm != handler->mn.mm)
		return;

	/* Validity of handler and node pointers has been checked by caller. */
	trace_hfi1_mmu_rb_remove(node->addr, node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, &handler->root);
	list_del(&node->list); /* remove from LRU list */
	spin_unlock_irqrestore(&handler->lock, flags);

	handler->ops->remove(handler->ops_arg, node);
}

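/*
 * mmu_notifier_range_start() - MMU invalidation callback.  This can
 * run in a context where sleeping is not allowed, so nodes whose
 * invalidate() callback requests removal are only unlinked here and
 * parked on del_list; the actual remove() callbacks run later from
 * handle_remove() on the workqueue.
 */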
static int mmu_notifier_range_start(struct mmu_notifier *mn,
		const struct mmu_notifier_range *range)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, range->start,
					    range->end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, range->start,
					     range->end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);

	return 0;
}

/*
 * Call the remove function for the given handler and the list.  This
 * is expected to be called with a delete list extracted from handler.
 * The caller should not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed.  The key feature is that mm->mmap_lock is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						struct mmu_rb_handler,
						del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}