// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO AG
 * written by Arne Jansen <sensille@gmx.net>
 */

#include <linux/slab.h>
#include "ulist.h"
#include "ctree.h"

/*
 * ulist is a generic data structure to hold a collection of unique u64
 * values. The only operations it supports are adding to the list and
 * enumerating it.
 * It is possible to store an auxiliary value along with the key.
 *
 * A sample usage for ulists is the enumeration of directed graphs without
 * visiting a node twice. The pseudo-code could look like this:
 *
 * ulist = ulist_alloc();
 * ulist_add(ulist, root);
 * ULIST_ITER_INIT(&uiter);
 *
 * while ((elem = ulist_next(ulist, &uiter))) {
 * 	for (all child nodes n in elem)
 *		ulist_add(ulist, n);
 *	do something useful with the node;
 * }
 * ulist_free(ulist);
 *
 * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
 * It is also useful for tree enumeration which could be done elegantly
 * recursively, but is not possible due to kernel stack limitations. The
 * loop would be similar to the above.
 */

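/*
 * A more concrete sketch of the pattern above, with hypothetical names
 * (root_bytenr and the child enumeration are illustrative assumptions, not
 * part of this API). Note that ulist_add() can fail with -ENOMEM, which the
 * pseudo-code above glosses over:
 *
 *	struct ulist *ulist;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *elem;
 *	int ret = 0;
 *
 *	ulist = ulist_alloc(GFP_NOFS);
 *	if (!ulist)
 *		return -ENOMEM;
 *	ret = ulist_add(ulist, root_bytenr, 0, GFP_NOFS);
 *	if (ret < 0)
 *		goto out;
 *	ULIST_ITER_INIT(&uiter);
 *	while ((elem = ulist_next(ulist, &uiter))) {
 *		for each child c of elem->val:
 *			ret = ulist_add(ulist, c, 0, GFP_NOFS);
 *			if (ret < 0)
 *				goto out;
 *	}
 * out:
 *	ulist_free(ulist);
 *	return ret;
 */
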
/**
 * ulist_init - freshly initialize a ulist
 * @ulist:	the ulist to initialize
 *
 * Note: don't use this function to init an already used ulist, use
 * ulist_reinit instead.
 */
void ulist_init(struct ulist *ulist)
{
	INIT_LIST_HEAD(&ulist->nodes);
	ulist->root = RB_ROOT;
	ulist->nnodes = 0;
}

/**
 * ulist_release - free up additionally allocated memory for the ulist
 * @ulist:	the ulist from which to free the additional memory
 *
 * This is useful in cases where the base 'struct ulist' has been statically
 * allocated.
 */
void ulist_release(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_node *next;

	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
		kfree(node);
	}
	ulist->root = RB_ROOT;
	INIT_LIST_HEAD(&ulist->nodes);
}

/**
 * ulist_reinit - prepare a ulist for reuse
 * @ulist:	ulist to be reused
 *
 * Free up all additional memory allocated for the list elements and reinit
 * the ulist.
 */
void ulist_reinit(struct ulist *ulist)
{
	ulist_release(ulist);
	ulist_init(ulist);
}

/**
 * ulist_alloc - dynamically allocate a ulist
 * @gfp_mask:	allocation flags to use for the base allocation
 *
 * The allocated ulist will be returned in an initialized state.
 */
struct ulist *ulist_alloc(gfp_t gfp_mask)
{
	struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);

	if (!ulist)
		return NULL;

	ulist_init(ulist);

	return ulist;
}

/**
 * ulist_free - free dynamically allocated ulist
 * @ulist:	ulist to free
 *
 * It is not necessary to call ulist_release before.
 */
void ulist_free(struct ulist *ulist)
{
	if (!ulist)
		return;
	ulist_release(ulist);
	kfree(ulist);
}

/* Find the node with the given value in the rbtree, or NULL if not present. */
static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
	struct rb_node *n = ulist->root.rb_node;
	struct ulist_node *u = NULL;

	while (n) {
		u = rb_entry(n, struct ulist_node, rb_node);
		if (u->val < val)
			n = n->rb_right;
		else if (u->val > val)
			n = n->rb_left;
		else
			return u;
	}
	return NULL;
}

/* Unlink a node from both the rbtree and the list, then free it. */
static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
{
	rb_erase(&node->rb_node, &ulist->root);
	list_del(&node->list);
	kfree(node);
	BUG_ON(ulist->nnodes == 0);
	ulist->nnodes--;
}

/* Insert a node into the rbtree; returns -EEXIST if the value already exists. */
static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
	struct rb_node **p = &ulist->root.rb_node;
	struct rb_node *parent = NULL;
	struct ulist_node *cur = NULL;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct ulist_node, rb_node);

		if (cur->val < ins->val)
			p = &(*p)->rb_right;
		else if (cur->val > ins->val)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}
	rb_link_node(&ins->rb_node, parent, p);
	rb_insert_color(&ins->rb_node, &ulist->root);
	return 0;
}

/**
 * ulist_add - add an element to the ulist
 * @ulist:	ulist to add the element to
 * @val:	value to add to ulist
 * @aux:	auxiliary value to store along with val
 * @gfp_mask:	flags to use for allocation
 *
 * Note: locking must be provided by the caller. In case of rwlocks write
 *       locking is needed
 *
 * Add an element to a ulist. The @val will only be added if it doesn't
 * already exist. If it is added, the auxiliary value @aux is stored along with
 * it. In case @val already exists in the ulist, @aux is ignored, even if
 * it differs from the already stored value.
 *
 * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
 * inserted.
 * In case of allocation failure -ENOMEM is returned and the ulist stays
 * unaltered.
 */
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
{
	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}

/*
 * Same as ulist_add(), but if @val already exists and @old_aux is non-NULL,
 * the aux value already stored for it is reported back through @old_aux so
 * the caller can merge the two. The new @aux is discarded in that case.
 */
int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
		    u64 *old_aux, gfp_t gfp_mask)
{
	int ret;
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	if (node) {
		if (old_aux)
			*old_aux = node->aux;
		return 0;
	}
	node = kmalloc(sizeof(*node), gfp_mask);
	if (!node)
		return -ENOMEM;

	node->val = val;
	node->aux = aux;

	ret = ulist_rbtree_insert(ulist, node);
	ASSERT(!ret);
	list_add_tail(&node->list, &ulist->nodes);
	ulist->nnodes++;

	return 1;
}

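/*
 * Minimal usage sketch for ulist_add_merge(); struct my_rec and bytenr are
 * illustrative assumptions, mirroring the common pattern of stashing a
 * pointer in the aux field. When @val is already present (return 0), the
 * stored aux comes back through @old_aux instead of the new record:
 *
 *	struct my_rec *rec = ...;	(hypothetical per-value record)
 *	u64 old_aux = 0;
 *	int ret;
 *
 *	ret = ulist_add_merge(ulist, bytenr, (u64)(uintptr_t)rec, &old_aux,
 *			      GFP_NOFS);
 *	if (ret < 0)
 *		return ret;		(allocation failed, ulist unaltered)
 *	if (ret == 0)
 *		rec = (struct my_rec *)(uintptr_t)old_aux;
 */
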
/*
 * ulist_del - delete one node from ulist
 * @ulist:	ulist to remove node from
 * @val:	value to delete
 * @aux:	aux to delete
 *
 * The deletion will only be done when *BOTH* val and aux match.
 * Return 0 for successful delete.
 * Return > 0 for not found.
 */
int ulist_del(struct ulist *ulist, u64 val, u64 aux)
{
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	/* Not found */
	if (!node)
		return 1;

	if (node->aux != aux)
		return 1;

	/* Found and delete */
	ulist_rbtree_erase(ulist, node);
	return 0;
}

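/*
 * Sketch of the matching rule above (bytenr and flags are illustrative): an
 * entry stored under @val with a different aux is deliberately left in place.
 *
 *	if (ulist_del(ulist, bytenr, flags))
 *		pr_debug("no (%llu, %llu) pair in the ulist\n", bytenr, flags);
 */
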
/**
 * ulist_next - iterate ulist
 * @ulist:	ulist to iterate
 * @uiter:	iterator variable, initialized with ULIST_ITER_INIT(&iterator)
 *
 * Note: locking must be provided by the caller. In case of rwlocks only read
 *       locking is needed
 *
 * This function is used to iterate a ulist.
 * It returns the next element from the ulist or %NULL when the
 * end is reached. No guarantee is made with respect to the order in which
 * the elements are returned. They might neither be returned in order of
 * addition nor in ascending order.
 * It is allowed to call ulist_add during an enumeration. Newly added items
 * are guaranteed to show up in the running enumeration.
 */
struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
{
	struct ulist_node *node;

	if (list_empty(&ulist->nodes))
		return NULL;
	if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
		return NULL;
	if (uiter->cur_list) {
		uiter->cur_list = uiter->cur_list->next;
	} else {
		uiter->cur_list = ulist->nodes.next;
	}
	node = list_entry(uiter->cur_list, struct ulist_node, list);
	return node;
}
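
/*
 * Iteration sketch (the pr_debug() line is illustrative): the iterator lives
 * on the caller's stack and must be reset with ULIST_ITER_INIT() before each
 * fresh walk.
 *
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(ulist, &uiter)))
 *		pr_debug("val=%llu aux=%llu\n", node->val, node->aux);
 */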