/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_BL_H
#define _LINUX_RCULIST_BL_H

/*
 * RCU-protected bl list version. See include/linux/list_bl.h.
 */
#include <linux/list_bl.h>
#include <linux/rcupdate.h>

static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
					struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
							LIST_BL_LOCKMASK);
	rcu_assign_pointer(h->first,
		(struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
}

static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
{
	return (struct hlist_bl_node *)
		((unsigned long)rcu_dereference_check(h->first, hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK);
}
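
/*
 * Editorial note, not part of the original header: bit 0 of h->first
 * doubles as the per-bucket bit spinlock (LIST_BL_LOCKMASK, see
 * include/linux/list_bl.h).  That is why hlist_bl_set_first_rcu()
 * asserts the bit is already set (the bucket is locked) before
 * publishing, and why hlist_bl_first_rcu() masks it off before handing
 * the head pointer to a lockless reader.  Writers are expected to
 * bracket any mutation with hlist_bl_lock()/hlist_bl_unlock() on the
 * bucket head.
 */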

/**
 * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry().
 */
static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	n->pprev = LIST_POISON2;
}
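
/*
 * Editorial sketch of a typical deletion path, not part of the original
 * header; "struct foo", foo_remove() and the kfree_rcu()-based
 * reclamation are assumptions for the example.  The writer serializes
 * against other mutations with the bucket bit lock, then defers the
 * free until a grace period has elapsed so that concurrent lockless
 * readers never touch freed memory:
 *
 *	struct foo {
 *		int key;
 *		struct hlist_bl_node node;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_remove(struct hlist_bl_head *head, struct foo *f)
 *	{
 *		hlist_bl_lock(head);
 *		hlist_bl_del_rcu(&f->node);
 *		hlist_bl_unlock(head);
 *		kfree_rcu(f, rcu);
 *	}
 */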

/**
 * hlist_bl_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_bl,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first;

	/* don't need hlist_bl_first_rcu because we're under lock */
	first = hlist_bl_first(h);

	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;

	/* need _rcu because we can have concurrent lock free readers */
	hlist_bl_set_first_rcu(h, n);
}
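
/*
 * Editorial sketch of a typical insertion path, not part of the
 * original header; "struct foo" and foo_insert() are assumptions
 * carried over from the example above.  The bucket bit lock serializes
 * the writer while lockless RCU readers may be traversing concurrently:
 *
 *	static void foo_insert(struct hlist_bl_head *head, struct foo *f)
 *	{
 *		hlist_bl_lock(head);
 *		hlist_bl_add_head_rcu(&f->node, head);
 *		hlist_bl_unlock(head);
 *	}
 */
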
/**
 * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 */
#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member)		\
	for (pos = hlist_bl_first_rcu(head);				\
		pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
		pos = rcu_dereference_raw(pos->next))
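
/*
 * Editorial sketch of a lockless lookup, not part of the original
 * header; "struct foo" and foo_lookup() are assumptions from the
 * examples above.  Readers only need rcu_read_lock(), no bucket lock.
 * A real lookup would normally take a reference on the object before
 * dropping the RCU read lock; that is omitted here for brevity:
 *
 *	static struct foo *foo_lookup(struct hlist_bl_head *head, int key)
 *	{
 *		struct foo *f;
 *		struct hlist_bl_node *pos;
 *
 *		rcu_read_lock();
 *		hlist_bl_for_each_entry_rcu(f, pos, head, node) {
 *			if (f->key == key) {
 *				rcu_read_unlock();
 *				return f;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return NULL;
 *	}
 */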

#endif