// SPDX-License-Identifier: GPL-2.0-only
/*
 * klist.c - Routines for manipulating klists.
 *
 * Copyright (C) 2005 Patrick Mochel
 *
 * This klist interface provides a couple of structures that wrap around
 * struct list_head to provide explicit list "head" (struct klist) and list
 * "node" (struct klist_node) objects. For struct klist, a spinlock is
 * included that protects access to the actual list itself. struct
 * klist_node provides a pointer to the klist that owns it and a kref
 * reference count that indicates the number of current users of that node
 * in the list.
 *
 * The entire point is to provide an interface for iterating over a list
 * that is safe and allows for modification of the list during the
 * iteration (e.g. insertion and removal), including modification of the
 * current node on the list.
 *
 * It works using a 3rd object type - struct klist_iter - that is declared
 * and initialized before an iteration. klist_next() is used to acquire the
 * next element in the list. It returns NULL if there are no more items.
 * Internally, that routine takes the klist's lock, decrements the
 * reference count of the previous klist_node and increments the count of
 * the next klist_node. It then drops the lock and returns.
 *
 * There are primitives for adding and removing nodes to/from a klist.
 * When deleting, klist_del() will simply decrement the reference count.
 * Only when the count goes to 0 is the node removed from the list.
 * klist_remove() will try to delete the node from the list and block until
 * it is actually removed. This is useful for objects (like devices) that
 * have been removed from the system and must be freed (but must wait until
 * all accessors have finished).
 */
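
/*
 * Typical usage (illustrative sketch only, not part of this file): a
 * caller embeds a struct klist_node in its own object, adds the node to a
 * klist, and walks the list with a klist_iter. The "struct my_obj" and
 * "my_list" names below are hypothetical.
 *
 *	struct my_obj {
 *		struct klist_node knode;
 *		...
 *	};
 *
 *	static struct klist my_list;
 *
 *	klist_init(&my_list, NULL, NULL);
 *	klist_add_tail(&obj->knode, &my_list);
 *
 *	struct klist_iter iter;
 *	struct klist_node *kn;
 *
 *	klist_iter_init(&my_list, &iter);
 *	while ((kn = klist_next(&iter))) {
 *		struct my_obj *obj = container_of(kn, struct my_obj, knode);
 *		...
 *	}
 *	klist_iter_exit(&iter);
 */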

#include <linux/klist.h>
#include <linux/export.h>
#include <linux/sched.h>

/*
 * Use the lowest bit of n_klist to mark deleted nodes and exclude
 * dead ones from iteration.
 */
#define KNODE_DEAD		1LU
#define KNODE_KLIST_MASK	~KNODE_DEAD

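/*
 * Note: tagging the low bit assumes struct klist pointers are at least
 * 2-byte aligned, which holds for any normally allocated struct klist.
 */
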
static struct klist *knode_klist(struct klist_node *knode)
{
	return (struct klist *)
		((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
}

static bool knode_dead(struct klist_node *knode)
{
	return (unsigned long)knode->n_klist & KNODE_DEAD;
}

static void knode_set_klist(struct klist_node *knode, struct klist *klist)
{
	knode->n_klist = klist;
	/* no knode deserves to start its life dead */
	WARN_ON(knode_dead(knode));
}

static void knode_kill(struct klist_node *knode)
{
	/* and no knode should die twice ever either, see we're very humane */
	WARN_ON(knode_dead(knode));
	*(unsigned long *)&knode->n_klist |= KNODE_DEAD;
}

/**
 * klist_init - Initialize a klist structure.
 * @k: The klist we're initializing.
 * @get: The get function for the embedding object (NULL if none)
 * @put: The put function for the embedding object (NULL if none)
 *
 * Initialises the klist structure.  If the klist_node structures are
 * going to be embedded in refcounted objects (necessary for safe
 * deletion) then the get/put arguments are used to initialise
 * functions that take and release references on the embedding
 * objects.
 */
void klist_init(struct klist *k, void (*get)(struct klist_node *),
		void (*put)(struct klist_node *))
{
	INIT_LIST_HEAD(&k->k_list);
	spin_lock_init(&k->k_lock);
	k->get = get;
	k->put = put;
}
EXPORT_SYMBOL_GPL(klist_init);
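
/*
 * Illustrative sketch (not part of this file): when the klist_node is
 * embedded in a refcounted object, get/put typically pin and release the
 * embedding object. "struct my_obj", its kref member "ref" and
 * "my_obj_release" below are hypothetical.
 *
 *	static void my_obj_get(struct klist_node *n)
 *	{
 *		struct my_obj *obj = container_of(n, struct my_obj, knode);
 *
 *		kref_get(&obj->ref);
 *	}
 *
 *	static void my_obj_put(struct klist_node *n)
 *	{
 *		struct my_obj *obj = container_of(n, struct my_obj, knode);
 *
 *		kref_put(&obj->ref, my_obj_release);
 *	}
 *
 *	klist_init(&my_list, my_obj_get, my_obj_put);
 */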

static void add_head(struct klist *k, struct klist_node *n)
{
	spin_lock(&k->k_lock);
	list_add(&n->n_node, &k->k_list);
	spin_unlock(&k->k_lock);
}

static void add_tail(struct klist *k, struct klist_node *n)
{
	spin_lock(&k->k_lock);
	list_add_tail(&n->n_node, &k->k_list);
	spin_unlock(&k->k_lock);
}

static void klist_node_init(struct klist *k, struct klist_node *n)
{
	INIT_LIST_HEAD(&n->n_node);
	kref_init(&n->n_ref);
	knode_set_klist(n, k);
	if (k->get)
		k->get(n);
}

/**
 * klist_add_head - Initialize a klist_node and add it to front.
 * @n: node we're adding.
 * @k: klist it's going on.
 */
void klist_add_head(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_head(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_head);

/**
 * klist_add_tail - Initialize a klist_node and add it to back.
 * @n: node we're adding.
 * @k: klist it's going on.
 */
void klist_add_tail(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_tail(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_tail);

/**
 * klist_add_behind - Init a klist_node and add it after an existing node
 * @n: node we're adding.
 * @pos: node to put @n after
 */
void klist_add_behind(struct klist_node *n, struct klist_node *pos)
{
	struct klist *k = knode_klist(pos);

	klist_node_init(k, n);
	spin_lock(&k->k_lock);
	list_add(&n->n_node, &pos->n_node);
	spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_behind);

/**
 * klist_add_before - Init a klist_node and add it before an existing node
 * @n: node we're adding.
 * @pos: node to put @n before
 */
void klist_add_before(struct klist_node *n, struct klist_node *pos)
{
	struct klist *k = knode_klist(pos);

	klist_node_init(k, n);
	spin_lock(&k->k_lock);
	list_add_tail(&n->n_node, &pos->n_node);
	spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_before);

struct klist_waiter {
	struct list_head list;
	struct klist_node *node;
	struct task_struct *process;
	int woken;
};

static DEFINE_SPINLOCK(klist_remove_lock);
static LIST_HEAD(klist_remove_waiters);

static void klist_release(struct kref *kref)
{
	struct klist_waiter *waiter, *tmp;
	struct klist_node *n = container_of(kref, struct klist_node, n_ref);

	WARN_ON(!knode_dead(n));
	list_del(&n->n_node);
	spin_lock(&klist_remove_lock);
	list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
		if (waiter->node != n)
			continue;

		list_del(&waiter->list);
		waiter->woken = 1;
		mb();
		wake_up_process(waiter->process);
	}
	spin_unlock(&klist_remove_lock);
	knode_set_klist(n, NULL);
}

static int klist_dec_and_del(struct klist_node *n)
{
	return kref_put(&n->n_ref, klist_release);
}

static void klist_put(struct klist_node *n, bool kill)
{
	struct klist *k = knode_klist(n);
	void (*put)(struct klist_node *) = k->put;

	spin_lock(&k->k_lock);
	if (kill)
		knode_kill(n);
	if (!klist_dec_and_del(n))
		put = NULL;
	spin_unlock(&k->k_lock);
	if (put)
		put(n);
}

/**
 * klist_del - Decrement the reference count of node and try to remove.
 * @n: node we're deleting.
 */
void klist_del(struct klist_node *n)
{
	klist_put(n, true);
}
EXPORT_SYMBOL_GPL(klist_del);

/**
 * klist_remove - Decrement the refcount of node and wait for it to go away.
 * @n: node we're removing.
 */
void klist_remove(struct klist_node *n)
{
	struct klist_waiter waiter;

	waiter.node = n;
	waiter.process = current;
	waiter.woken = 0;
	spin_lock(&klist_remove_lock);
	list_add(&waiter.list, &klist_remove_waiters);
	spin_unlock(&klist_remove_lock);

	klist_del(n);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (waiter.woken)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(klist_remove);
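
/*
 * Illustrative note (not part of this file): klist_del() is the
 * non-blocking form and may return while iterators still hold references
 * to the node. klist_remove() additionally sleeps until the last
 * reference is dropped, so a caller may free the embedding object right
 * after it returns, e.g. (with a hypothetical "obj"):
 *
 *	klist_remove(&obj->knode);
 *	kfree(obj);
 */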

/**
 * klist_node_attached - Say whether a node is bound to a list or not.
 * @n: Node that we're testing.
 */
int klist_node_attached(struct klist_node *n)
{
	return (n->n_klist != NULL);
}
EXPORT_SYMBOL_GPL(klist_node_attached);

/**
 * klist_iter_init_node - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: klist_iter we're filling.
 * @n: node to start with.
 *
 * Similar to klist_iter_init(), but starts the action off with @n,
 * instead of with the list head.
 */
void klist_iter_init_node(struct klist *k, struct klist_iter *i,
			  struct klist_node *n)
{
	i->i_klist = k;
	i->i_cur = NULL;
	if (n && kref_get_unless_zero(&n->n_ref))
		i->i_cur = n;
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);

/**
 * klist_iter_init - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: klist_iter structure we're filling.
 *
 * Similar to klist_iter_init_node(), but starts with the list head.
 */
void klist_iter_init(struct klist *k, struct klist_iter *i)
{
	klist_iter_init_node(k, i, NULL);
}
EXPORT_SYMBOL_GPL(klist_iter_init);

/**
 * klist_iter_exit - Finish a list iteration.
 * @i: Iterator structure.
 *
 * Must be called when done iterating over list, as it decrements the
 * refcount of the current node. Necessary in case iteration exited before
 * the end of the list was reached, and always good form.
 */
void klist_iter_exit(struct klist_iter *i)
{
	if (i->i_cur) {
		klist_put(i->i_cur, false);
		i->i_cur = NULL;
	}
}
EXPORT_SYMBOL_GPL(klist_iter_exit);

static struct klist_node *to_klist_node(struct list_head *n)
{
	return container_of(n, struct klist_node, n_node);
}

/**
 * klist_prev - Ante up prev node in list.
 * @i: Iterator structure.
 *
 * First grab list lock. Decrement the reference count of the previous
 * node, if there was one. Grab the prev node, increment its reference
 * count, drop the lock, and return that prev node.
 */
struct klist_node *klist_prev(struct klist_iter *i)
{
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *prev;
	unsigned long flags;

	spin_lock_irqsave(&i->i_klist->k_lock, flags);

	if (last) {
		prev = to_klist_node(last->n_node.prev);
		if (!klist_dec_and_del(last))
			put = NULL;
	} else
		prev = to_klist_node(i->i_klist->k_list.prev);

	i->i_cur = NULL;
	while (prev != to_klist_node(&i->i_klist->k_list)) {
		if (likely(!knode_dead(prev))) {
			kref_get(&prev->n_ref);
			i->i_cur = prev;
			break;
		}
		prev = to_klist_node(prev->n_node.prev);
	}

	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

	if (put && last)
		put(last);
	return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_prev);

/**
 * klist_next - Ante up next node in list.
 * @i: Iterator structure.
 *
 * First grab list lock. Decrement the reference count of the previous
 * node, if there was one. Grab the next node, increment its reference
 * count, drop the lock, and return that next node.
 */
struct klist_node *klist_next(struct klist_iter *i)
{
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *next;
	unsigned long flags;

	spin_lock_irqsave(&i->i_klist->k_lock, flags);

	if (last) {
		next = to_klist_node(last->n_node.next);
		if (!klist_dec_and_del(last))
			put = NULL;
	} else
		next = to_klist_node(i->i_klist->k_list.next);

	i->i_cur = NULL;
	while (next != to_klist_node(&i->i_klist->k_list)) {
		if (likely(!knode_dead(next))) {
			kref_get(&next->n_ref);
			i->i_cur = next;
			break;
		}
		next = to_klist_node(next->n_node.next);
	}

	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

	if (put && last)
		put(last);
	return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_next);