/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * Why is there no list_empty_rcu()? Because list_empty() serves this
 * purpose. The list_empty() function fetches the RCU-protected pointer
 * and compares it to the address of the list head, but neither dereferences
 * this pointer itself nor provides this pointer to the caller. Therefore,
 * it is not necessary to use rcu_dereference(), so that list_empty() can
 * be used anywhere you would want to use a list_empty_rcu().
 */

/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
	WRITE_ONCE(list->next, list);
	WRITE_ONCE(list->prev, list);
}

/*
 * Return the ->next pointer of a list_head in an rcu safe
 * way; we must not access it directly.
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))

/**
 * list_tail_rcu - returns the prev pointer of the head of the list
 * @head: the head of the list
 *
 * Note: This should only be used with the list header, and even then
 * only if list_del() and similar primitives are not also used on the
 * list header.
 */
#define list_tail_rcu(head)	(*((struct list_head __rcu **)(&(head)->prev)))

/*
 * Check during list traversal that we are within an RCU reader
 */

#define check_arg_count_one(dummy)

#ifdef CONFIG_PROVE_RCU_LIST
#define __list_check_rcu(dummy, cond, extra...)				\
	({								\
	check_arg_count_one(extra);					\
	RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(),		\
			 "RCU-list traversed in non-reader section!");	\
	})

#define __list_check_srcu(cond)						\
	({								\
	RCU_LOCKDEP_WARN(!(cond),					\
		"RCU-list traversed without holding the required lock!");\
	})
#else
#define __list_check_rcu(dummy, cond, extra...)				\
	({ check_arg_count_one(extra); })

#define __list_check_srcu(cond) ({ })
#endif

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;

	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(list_next_rcu(prev), new);
	next->prev = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}
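
/*
 * Example update-side insertion (a minimal sketch, not part of this
 * API; "struct foo", "foo_list", and "foo_lock" are hypothetical.
 * The spinlock excludes other updaters, not readers):
 *
 *	struct foo {
 *		int data;
 *		struct list_head list;
 *	};
 *	static LIST_HEAD(foo_list);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	void foo_push(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_add_rcu(&p->list, &foo_list);
 *		spin_unlock(&foo_lock);
 *	}
 */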

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	entry->prev = LIST_POISON2;
}
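
/*
 * Example of removal with deferred freeing (a minimal sketch; it
 * reuses the hypothetical "struct foo" and "foo_lock" from the
 * list_add_rcu() example above and assumes the entry was allocated
 * with kmalloc()):
 *
 *	void foo_remove(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&p->list);
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();
 *		kfree(p);
 *	}
 */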

/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node returns true after this. It is
 * useful for RCU-based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we cannot poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		WRITE_ONCE(n->pprev, NULL);
	}
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	old->prev = LIST_POISON2;
}
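
/*
 * Example (a minimal sketch using the hypothetical names from the
 * examples above; the old entry may be freed only after a grace
 * period has elapsed):
 *
 *	void foo_swap_in(struct foo *old, struct foo *new)
 *	{
 *		spin_lock(&foo_lock);
 *		list_replace_rcu(&old->list, &new->list);
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();
 *		kfree(old);
 *	}
 */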

/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @prev: points to the last element of the existing list
 * @next: points to the first element of the existing list
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list. In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created. But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
					  struct list_head *prev,
					  struct list_head *next,
					  void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	/*
	 * "first" and "last" now track the source list's contents, so
	 * reinitialize the source list itself. RCU readers have access
	 * to this list, so we must use INIT_LIST_HEAD_RCU() instead of
	 * INIT_LIST_HEAD().
	 */

	INIT_LIST_HEAD_RCU(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list. Any new readers will see
	 * an empty list.
	 */

	sync();
	ASSERT_EXCLUSIVE_ACCESS(*first);
	ASSERT_EXCLUSIVE_ACCESS(*last);

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers. Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = next;
	rcu_assign_pointer(list_next_rcu(prev), first);
	first->prev = prev;
	next->prev = last;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 *			  designed for stacks.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head, head->next, sync);
}
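
/*
 * Example (a minimal sketch; "foo_mutex", "pending", and "active" are
 * hypothetical. A mutex rather than a spinlock is used because the
 * sync argument, here synchronize_rcu(), blocks):
 *
 *	static DEFINE_MUTEX(foo_mutex);
 *
 *	void foo_flush_pending(struct list_head *pending,
 *			       struct list_head *active)
 *	{
 *		mutex_lock(&foo_mutex);
 *		list_splice_init_rcu(pending, active, synchronize_rcu);
 *		mutex_unlock(&foo_mutex);
 *	}
 */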

/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 *			       list, designed for queues.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
					     struct list_head *head,
					     void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head->prev, head, sync);
}

/**
 * list_entry_rcu - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(READ_ONCE(ptr), type, member)

/*
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * Implementing those functions following their counterparts list_empty() and
 * list_first_entry() is not advisable because they lead to subtle race
 * conditions as the following snippet shows:
 *
 *	if (!list_empty_rcu(mylist)) {
 *		struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *		do_something(bar);
 *	}
 *
 * The list may not be empty when list_empty_rcu() checks it, but it may be
 * when list_first_entry_rcu() rereads the ->next pointer.
 *
 * Rereading the ->next pointer is not a problem for list_empty() and
 * list_first_entry() because they would be protected by a lock that blocks
 * writers.
 *
 * See list_first_or_null_rcu() for an alternative.
 */

/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
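
/*
 * Example (a minimal sketch; "foo_list" and do_something_with() are
 * hypothetical, and the entry may be used only inside the read-side
 * critical section):
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = list_first_or_null_rcu(&foo_list, struct foo, list);
 *	if (p)
 *		do_something_with(p->data);
 *	rcu_read_unlock();
 */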

/**
 * list_next_or_null_rcu - get the next element from a list
 * @head: the head for the list.
 * @ptr: the list head to take the next element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
	struct list_head *__head = (head); \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__next != __head) ? list_entry_rcu(__next, type, \
						  member) : NULL; \
})

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 * @cond: optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
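
/*
 * Example reader (a minimal sketch with the hypothetical names used
 * in the examples above):
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(p, &foo_list, list)
 *		do_something_with(p->data);
 *	rcu_read_unlock();
 *
 * If the caller instead holds the update-side lock, the optional cond
 * argument documents that fact to lockdep:
 *
 *	list_for_each_entry_rcu(p, &foo_list, list,
 *				lockdep_is_held(&foo_lock))
 *		do_something_with(p->data);
 */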

/**
 * list_for_each_entry_srcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 * @cond: lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 */
#define list_for_each_entry_srcu(pos, head, member, cond)		\
	for (__list_check_srcu(cond),					\
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
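
/*
 * Example (a minimal sketch; "foo_srcu" is a hypothetical srcu_struct
 * guarding the hypothetical "foo_list"):
 *
 *	DEFINE_SRCU(foo_srcu);
 *
 *	struct foo *p;
 *	int idx;
 *
 *	idx = srcu_read_lock(&foo_srcu);
 *	list_for_each_entry_srcu(p, &foo_list, list,
 *				 srcu_read_lock_held(&foo_srcu))
 *		do_something_with(p->data);
 *	srcu_read_unlock(&foo_srcu, idx);
 */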

/**
 * list_entry_lockless - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding. One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked. Another example is when items are added to the list,
 * but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
	container_of((typeof(ptr))READ_ONCE(ptr), type, member)

/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding. One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked. Another example is when items are added to the list,
 * but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position which must have been in the list when the RCU read
 * lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_from_rcu() except
 * this starts after the given position and that one starts at the given
 * position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member)		\
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);					\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_from_rcu - iterate over a list from current point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_node within the struct.
 *
 * Iterate over the tail of a list starting from a given position,
 * which must have been in the list when the RCU read lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_continue_rcu() except
 * this starts from the given position and that one starts from the position
 * after the given position.
 */
#define list_for_each_entry_from_rcu(pos, head, member)			\
	for (; &(pos)->member != (head);				\
	     pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	WRITE_ONCE(n->pprev, LIST_POISON2);
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	WRITE_ONCE(new->pprev, old->pprev);
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		WRITE_ONCE(new->next->pprev, &new->next);
	WRITE_ONCE(old->pprev, LIST_POISON2);
}

/**
 * hlists_swap_heads_rcu - swap the lists the hlist heads point to
 * @left:  The hlist head on the left
 * @right: The hlist head on the right
 *
 * The lists start out as [@left  ][node1 ... ] and
 *                        [@right ][node2 ... ]
 * The lists end up as    [@left  ][node2 ... ]
 *                        [@right ][node1 ... ]
 */
static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
{
	struct hlist_node *node1 = left->first;
	struct hlist_node *node2 = right->first;

	rcu_assign_pointer(left->first, node2);
	rcu_assign_pointer(right->first, node1);
	WRITE_ONCE(node2->pprev, &left->first);
	WRITE_ONCE(node1->pprev, &right->first);
}

/*
 * return the first or the next element in an RCU protected hlist
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	WRITE_ONCE(n->pprev, &h->first);
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		WRITE_ONCE(first->pprev, &n->next);
}
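
/*
 * Example hash-table insertion (a minimal sketch; "struct bar",
 * "bar_hash", and "bar_lock" are hypothetical):
 *
 *	struct bar {
 *		int key;
 *		struct hlist_node node;
 *	};
 *	static struct hlist_head bar_hash[16];
 *	static DEFINE_SPINLOCK(bar_lock);
 *
 *	void bar_insert(struct bar *b)
 *	{
 *		spin_lock(&bar_lock);
 *		hlist_add_head_rcu(&b->node, &bar_hash[b->key & 15]);
 *		spin_unlock(&bar_lock);
 *	}
 */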

/**
 * hlist_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_tail_rcu(struct hlist_node *n,
				      struct hlist_head *h)
{
	struct hlist_node *i, *last = NULL;

	/* Note: write side code, so rcu accessors are not needed. */
	for (i = h->first; i; i = i->next)
		last = i;

	if (last) {
		n->next = last->next;
		WRITE_ONCE(n->pprev, &last->next);
		rcu_assign_pointer(hlist_next_rcu(last), n);
	} else {
		hlist_add_head_rcu(n, h);
	}
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	WRITE_ONCE(n->pprev, next->pprev);
	n->next = next;
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	WRITE_ONCE(next->pprev, &n->next);
}

/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
					struct hlist_node *prev)
{
	n->next = prev->next;
	WRITE_ONCE(n->pprev, &prev->next);
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		WRITE_ONCE(n->next->pprev, &n->next);
}

#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
	     pos;						\
	     pos = rcu_dereference(hlist_next_rcu(pos)))

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 * @cond: optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
				    typeof(*(pos)), member);		\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
				    &(pos)->member)), typeof(*(pos)), member))
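
/*
 * Example lookup (a minimal sketch reusing the hypothetical "bar_hash"
 * from the hlist_add_head_rcu() example; the entry may be used only
 * inside the read-side critical section):
 *
 *	struct bar *b;
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(b, &bar_hash[key & 15], node)
 *		if (b->key == key) {
 *			do_something_with(b);
 *			break;
 *		}
 *	rcu_read_unlock();
 */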

/**
 * hlist_for_each_entry_srcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 * @cond: lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 */
#define hlist_for_each_entry_srcu(pos, head, member, cond)		\
	for (__list_check_srcu(cond),					\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
				    typeof(*(pos)), member);		\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
				    &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
				    typeof(*(pos)), member);		\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
				    &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock_bh().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
				    typeof(*(pos)), member);		\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
				    &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from_rcu(pos, member)			\
	for (; pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))

#endif	/* __KERNEL__ */
#endif