// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive.  The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.  If an error occurred,
 * @nextid is unchanged.
 */
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
                        unsigned long max, gfp_t gfp)
{
        struct radix_tree_iter iter;
        void __rcu **slot;
        unsigned int base = idr->idr_base;
        unsigned int id = *nextid;

        if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
                idr->idr_rt.xa_flags |= IDR_RT_MARKER;

        id = (id < base) ? 0 : id - base;
        radix_tree_iter_init(&iter, id);
        slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
        if (IS_ERR(slot))
                return PTR_ERR(slot);

        *nextid = iter.index + base;
        /* there is a memory barrier inside radix_tree_iter_replace() */
        radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
        radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

        return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
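
/*
 * Usage sketch: storing the new ID directly inside the object, as the
 * kernel-doc above suggests, so that a concurrent RCU lookup never
 * observes an object with an uninitialised ID.  struct foo, foo_idr and
 * foo_lock are hypothetical names.
 *
 *      static DEFINE_IDR(foo_idr);
 *      static DEFINE_MUTEX(foo_lock);
 *
 *      struct foo {
 *              u32 id;
 *      };
 *
 *      static int foo_register(struct foo *foo)
 *      {
 *              int err;
 *
 *              mutex_lock(&foo_lock);
 *              err = idr_alloc_u32(&foo_idr, foo, &foo->id, UINT_MAX,
 *                                  GFP_KERNEL);
 *              mutex_unlock(&foo_lock);
 *              return err;
 *      }
 */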

/**
 * idr_alloc() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
        u32 id = start;
        int ret;

        if (WARN_ON_ONCE(start < 0))
                return -EINVAL;

        ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
        if (ret)
                return ret;

        return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
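
/*
 * Usage sketch: the usual pattern for calling idr_alloc() under a
 * spinlock is to preallocate with idr_preload() outside the lock and
 * pass GFP_NOWAIT to the allocation itself (foo_idr, foo_lock and ptr
 * are hypothetical names).
 *
 *      int id;
 *
 *      idr_preload(GFP_KERNEL);
 *      spin_lock(&foo_lock);
 *      id = idr_alloc(&foo_idr, ptr, 1, 0, GFP_NOWAIT);
 *      spin_unlock(&foo_lock);
 *      idr_preload_end();
 *      if (id < 0)
 *              return id;
 */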

/**
 * idr_alloc_cyclic() - Allocate an ID cyclically.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 * The search for an unused ID will start at the last ID allocated and will
 * wrap around to @start if no free IDs are found before reaching @end.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
        u32 id = idr->idr_next;
        int err, max = end > 0 ? end - 1 : INT_MAX;

        if ((int)id < start)
                id = start;

        err = idr_alloc_u32(idr, ptr, &id, max, gfp);
        if ((err == -ENOSPC) && (id > start)) {
                id = start;
                err = idr_alloc_u32(idr, ptr, &id, max, gfp);
        }
        if (err)
                return err;

        idr->idr_next = id + 1;
        return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
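
/*
 * Usage sketch: cyclic allocation is useful where rapid reuse of a
 * just-freed ID is undesirable, e.g. so a stale handle held elsewhere
 * is unlikely to name a new object (foo_idr, foo_lock and ptr are
 * hypothetical).
 *
 *      mutex_lock(&foo_lock);
 *      id = idr_alloc_cyclic(&foo_idr, ptr, 0, 0, GFP_KERNEL);
 *      mutex_unlock(&foo_lock);
 *      if (id < 0)
 *              return id;
 */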

/**
 * idr_remove() - Remove an ID from the IDR.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Removes this ID from the IDR.  If the ID was not previously in the IDR,
 * this function returns %NULL.
 *
 * Since this function modifies the IDR, the caller should provide their
 * own locking to ensure that concurrent modification of the same IDR is
 * not possible.
 *
 * Return: The pointer formerly associated with this ID.
 */
void *idr_remove(struct idr *idr, unsigned long id)
{
        return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
}
EXPORT_SYMBOL_GPL(idr_remove);
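
/*
 * Usage sketch: removal must be serialised against other writers; if
 * readers look the object up under RCU, only free it after a grace
 * period (foo_idr, foo_lock and the rcu member of struct foo are
 * hypothetical).
 *
 *      mutex_lock(&foo_lock);
 *      foo = idr_remove(&foo_idr, id);
 *      mutex_unlock(&foo_lock);
 *      if (foo)
 *              kfree_rcu(foo, rcu);
 */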

/**
 * idr_find() - Return pointer for given ID.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Looks up the pointer associated with this ID.  A %NULL pointer may
 * indicate that @id is not allocated or that the %NULL pointer was
 * associated with this ID.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 *
 * Return: The pointer associated with this ID.
 */
void *idr_find(const struct idr *idr, unsigned long id)
{
        return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
}
EXPORT_SYMBOL_GPL(idr_find);
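
/*
 * Usage sketch: an RCU-side lookup.  The returned pointer is only stable
 * while the RCU read lock is held, so take a reference before dropping
 * it (foo_idr and the ref member of struct foo are hypothetical).
 *
 *      rcu_read_lock();
 *      foo = idr_find(&foo_idr, id);
 *      if (foo && !kref_get_unless_zero(&foo->ref))
 *              foo = NULL;
 *      rcu_read_unlock();
 */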

/**
 * idr_for_each() - Iterate through all stored pointers.
 * @idr: IDR handle.
 * @fn: Function to be called for each pointer.
 * @data: Data passed to callback function.
 *
 * The callback function will be called for each entry in @idr, passing
 * the ID, the entry and @data.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU.  Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
                int (*fn)(int id, void *p, void *data), void *data)
{
        struct radix_tree_iter iter;
        void __rcu **slot;
        int base = idr->idr_base;

        radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
                int ret;
                unsigned long id = iter.index + base;

                if (WARN_ON_ONCE(id > INT_MAX))
                        break;
                ret = fn(id, rcu_dereference_raw(*slot), data);
                if (ret)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL(idr_for_each);
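
/*
 * Usage sketch: a callback that dumps every entry; returning 0 keeps the
 * iteration going (foo_idr and the seq_file argument are hypothetical).
 *
 *      static int foo_show(int id, void *p, void *data)
 *      {
 *              struct seq_file *m = data;
 *
 *              seq_printf(m, "%d: %p\n", id, p);
 *              return 0;
 *      }
 *
 *      idr_for_each(&foo_idr, foo_show, m);
 */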

/**
 * idr_get_next_ul() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
        struct radix_tree_iter iter;
        void __rcu **slot;
        void *entry = NULL;
        unsigned long base = idr->idr_base;
        unsigned long id = *nextid;

        id = (id < base) ? 0 : id - base;
        radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
                entry = rcu_dereference_raw(*slot);
                if (!entry)
                        continue;
                if (!xa_is_internal(entry))
                        break;
                if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
                        break;
                slot = radix_tree_iter_retry(&iter);
        }
        if (!slot)
                return NULL;

        *nextid = iter.index + base;
        return entry;
}
EXPORT_SYMBOL(idr_get_next_ul);
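
/*
 * Usage sketch: open-coding the iteration loop.  The caller bumps the
 * index between calls, as the kernel-doc above requires; this is
 * essentially the pattern wrapped by the idr_for_each_entry_ul() macro
 * in <linux/idr.h> (foo_idr and do_something are hypothetical).
 *
 *      unsigned long id = 0;
 *      void *entry;
 *
 *      while ((entry = idr_get_next_ul(&foo_idr, &id)) != NULL) {
 *              do_something(entry, id);
 *              id++;
 *      }
 */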

/**
 * idr_get_next() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
        unsigned long id = *nextid;
        void *entry = idr_get_next_ul(idr, &id);

        if (WARN_ON_ONCE(id > INT_MAX))
                return NULL;
        *nextid = id;
        return entry;
}
EXPORT_SYMBOL(idr_get_next);

/**
 * idr_replace() - replace pointer for given ID.
 * @idr: IDR handle.
 * @ptr: New pointer to associate with the ID.
 * @id: ID to change.
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success.  %-ENOENT indicates that @id was not
 * found.  %-EINVAL indicates that @ptr was not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
        struct radix_tree_node *node;
        void __rcu **slot = NULL;
        void *entry;

        id -= idr->idr_base;

        entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
        if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
                return ERR_PTR(-ENOENT);

        __radix_tree_replace(&idr->idr_rt, node, slot, ptr);

        return entry;
}
EXPORT_SYMBOL(idr_replace);
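
/*
 * Usage sketch: swapping in a new object for an existing ID.  The old
 * pointer comes back on success, so check with IS_ERR() rather than
 * against NULL (foo_idr and the rcu member are hypothetical).
 *
 *      old = idr_replace(&foo_idr, new, id);
 *      if (IS_ERR(old))
 *              return PTR_ERR(old);
 *      if (old)
 *              kfree_rcu(old, rcu);
 */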

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer.  As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()).  To allocate a new ID, call
 * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
 * To free an ID, call ida_free().
 *
 * ida_destroy() can be used to dispose of an IDA without needing to
 * free the individual IDs in it.  You can use ida_is_empty() to find
 * out whether the IDA has any IDs currently allocated.
 *
 * The IDA handles its own locking.  It is safe to call any of the IDA
 * functions without synchronisation in your code.
 *
 * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */
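
/*
 * Usage sketch: since the IDA handles its own locking, allocation and
 * freeing are single calls (foo_ida is a hypothetical name).
 *
 *      static DEFINE_IDA(foo_ida);
 *
 *      int id = ida_alloc(&foo_ida, GFP_KERNEL);
 *      if (id < 0)
 *              return id;
 *      ...
 *      ida_free(&foo_ida, id);
 */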

/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the XArray to store bitmaps in
 * each entry.  The XA_FREE_MARK is only cleared when all bits in the bitmap
 * have been set.
 *
 * I considered telling the XArray that each slot is an order-10 node
 * and indexing by bit number, but the XArray can't allow a single multi-index
 * entry in the head, which would significantly increase memory consumption
 * for the IDA.  So instead we divide the index by the number of bits in the
 * leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we store the bits
 * as a value entry.  Value entries never have the XA_FREE_MARK cleared
 * because we can always convert them into a bitmap entry.
 *
 * It would be possible to optimise further; once we've run out of a
 * single 128-byte bitmap, we currently switch to a 576-byte node, put
 * the 128-byte bitmap in the first entry and then start allocating extra
 * 128-byte entries.  We could instead use the 512 bytes of the node's
 * data as a bitmap before moving to that scheme.  I do not believe this
 * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
 * users of the IDA and almost none of them use more than 1024 entries.
 * Those that do use more than the 8192 IDs that the 512 bytes would
 * provide.
 *
 * The IDA always uses a lock to alloc/free.  If we add a 'test_bit'
 * equivalent, it will still need locking.  Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */
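
/*
 * Worked example of the index split described above: with 128-byte leaf
 * bitmaps (1024 bits each), an ID maps to an XArray index and a bit
 * within that leaf, so ID 2500 lives at index 2 (2500 / 1024),
 * bit 452 (2500 % 1024).
 *
 *      index = id / IDA_BITMAP_BITS;
 *      bit = id % IDA_BITMAP_BITS;
 */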

/**
 * ida_alloc_range() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and @max, inclusive.  The allocated ID will
 * not exceed %INT_MAX, even if @max is larger.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
                        gfp_t gfp)
{
        XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
        unsigned bit = min % IDA_BITMAP_BITS;
        unsigned long flags;
        struct ida_bitmap *bitmap, *alloc = NULL;

        if ((int)min < 0)
                return -ENOSPC;

        if ((int)max < 0)
                max = INT_MAX;

retry:
        xas_lock_irqsave(&xas, flags);
next:
        bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
        if (xas.xa_index > min / IDA_BITMAP_BITS)
                bit = 0;
        if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
                goto nospc;

        if (xa_is_value(bitmap)) {
                unsigned long tmp = xa_to_value(bitmap);

                if (bit < BITS_PER_XA_VALUE) {
                        bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
                        if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
                                goto nospc;
                        if (bit < BITS_PER_XA_VALUE) {
                                tmp |= 1UL << bit;
                                xas_store(&xas, xa_mk_value(tmp));
                                goto out;
                        }
                }
                bitmap = alloc;
                if (!bitmap)
                        bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
                if (!bitmap)
                        goto alloc;
                bitmap->bitmap[0] = tmp;
                xas_store(&xas, bitmap);
                if (xas_error(&xas)) {
                        bitmap->bitmap[0] = 0;
                        goto out;
                }
        }

        if (bitmap) {
                bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
                if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
                        goto nospc;
                if (bit == IDA_BITMAP_BITS)
                        goto next;

                __set_bit(bit, bitmap->bitmap);
                if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
                        xas_clear_mark(&xas, XA_FREE_MARK);
        } else {
                if (bit < BITS_PER_XA_VALUE) {
                        bitmap = xa_mk_value(1UL << bit);
                } else {
                        bitmap = alloc;
                        if (!bitmap)
                                bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
                        if (!bitmap)
                                goto alloc;
                        __set_bit(bit, bitmap->bitmap);
                }
                xas_store(&xas, bitmap);
        }
out:
        xas_unlock_irqrestore(&xas, flags);
        if (xas_nomem(&xas, gfp)) {
                xas.xa_index = min / IDA_BITMAP_BITS;
                bit = min % IDA_BITMAP_BITS;
                goto retry;
        }
        if (bitmap != alloc)
                kfree(alloc);
        if (xas_error(&xas))
                return xas_error(&xas);
        return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
        xas_unlock_irqrestore(&xas, flags);
        alloc = kzalloc(sizeof(*bitmap), gfp);
        if (!alloc)
                return -ENOMEM;
        xas_set(&xas, min / IDA_BITMAP_BITS);
        bit = min % IDA_BITMAP_BITS;
        goto retry;
nospc:
        xas_unlock_irqrestore(&xas, flags);
        kfree(alloc);
        return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
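
/*
 * Usage sketch: allocating from a bounded range, e.g. one of eight
 * hypothetical device minor numbers between 8 and 15 inclusive (foo_ida
 * is a hypothetical name).
 *
 *      int minor = ida_alloc_range(&foo_ida, 8, 15, GFP_KERNEL);
 *      if (minor < 0)
 *              return minor;
 */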

/**
 * ida_free() - Release an allocated ID.
 * @ida: IDA handle.
 * @id: Previously allocated ID.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 */
void ida_free(struct ida *ida, unsigned int id)
{
        XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
        unsigned bit = id % IDA_BITMAP_BITS;
        struct ida_bitmap *bitmap;
        unsigned long flags;

        if ((int)id < 0)
                return;

        xas_lock_irqsave(&xas, flags);
        bitmap = xas_load(&xas);

        if (xa_is_value(bitmap)) {
                unsigned long v = xa_to_value(bitmap);
                if (bit >= BITS_PER_XA_VALUE)
                        goto err;
                if (!(v & (1UL << bit)))
                        goto err;
                v &= ~(1UL << bit);
                if (!v)
                        goto delete;
                xas_store(&xas, xa_mk_value(v));
        } else {
                if (!test_bit(bit, bitmap->bitmap))
                        goto err;
                __clear_bit(bit, bitmap->bitmap);
                xas_set_mark(&xas, XA_FREE_MARK);
                if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
                        kfree(bitmap);
delete:
                        xas_store(&xas, NULL);
                }
        }
        xas_unlock_irqrestore(&xas, flags);
        return;
 err:
        xas_unlock_irqrestore(&xas, flags);
        WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_free);

/**
 * ida_destroy() - Free all IDs.
 * @ida: IDA handle.
 *
 * Calling this function frees all IDs and releases all resources used
 * by an IDA.  When this call returns, the IDA is empty and can be reused
 * or freed.  If the IDA is already empty, there is no need to call this
 * function.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 */
void ida_destroy(struct ida *ida)
{
        XA_STATE(xas, &ida->xa, 0);
        struct ida_bitmap *bitmap;
        unsigned long flags;

        xas_lock_irqsave(&xas, flags);
        xas_for_each(&xas, bitmap, ULONG_MAX) {
                if (!xa_is_value(bitmap))
                        kfree(bitmap);
                xas_store(&xas, NULL);
        }
        xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);

#ifndef __KERNEL__
extern void xa_dump_index(unsigned long index, unsigned int shift);
#define IDA_CHUNK_SHIFT		ilog2(IDA_BITMAP_BITS)

static void ida_dump_entry(void *entry, unsigned long index)
{
        unsigned long i;

        if (!entry)
                return;

        if (xa_is_node(entry)) {
                struct xa_node *node = xa_to_node(entry);
                unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
                        XA_CHUNK_SHIFT;

                xa_dump_index(index * IDA_BITMAP_BITS, shift);
                xa_dump_node(node);
                for (i = 0; i < XA_CHUNK_SIZE; i++)
                        ida_dump_entry(node->slots[i],
                                        index | (i << node->shift));
        } else if (xa_is_value(entry)) {
                xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
                pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
        } else {
                struct ida_bitmap *bitmap = entry;

                xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
                pr_cont("bitmap: %p data", bitmap);
                for (i = 0; i < IDA_BITMAP_LONGS; i++)
                        pr_cont(" %lx", bitmap->bitmap[i]);
                pr_cont("\n");
        }
}

static void ida_dump(struct ida *ida)
{
        struct xarray *xa = &ida->xa;
        pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
                                xa->xa_flags >> ROOT_TAG_SHIFT);
        ida_dump_entry(xa->xa_head, 0);
}
#endif