/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

struct idr {
	struct radix_tree_root	idr_rt;
	unsigned int		idr_base;
	unsigned int		idr_next;
};

/*
 * The IDR API does not expose the tagging functionality of the radix tree
 * to users.  Use tag 0 to track whether a node has free space below it.
 */
#define IDR_FREE	0

/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER	(ROOT_IS_IDR | (__force gfp_t)			\
					(1 << (ROOT_TAG_SHIFT + IDR_FREE)))

#define IDR_INIT_BASE(name, base) {					\
	.idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER),			\
	.idr_base = (base),						\
	.idr_next = 0,							\
}

/**
 * IDR_INIT() - Initialise an IDR.
 * @name: Name of IDR.
 *
 * A freshly-initialised IDR contains no IDs.
 */
#define IDR_INIT(name)	IDR_INIT_BASE(name, 0)

/**
 * DEFINE_IDR() - Define a statically-allocated IDR.
 * @name: Name of IDR.
 *
 * An IDR defined using this macro is ready for use with no additional
 * initialisation required.  It contains no IDs.
 */
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)

/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 */
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
	return READ_ONCE(idr->idr_next);
}

/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->idr_next, val);
}

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */
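
/*
 * A minimal lookup sketch under the rules above (my_idr, struct my_obj
 * and id are hypothetical; assumes entries are only freed after an RCU
 * grace period, e.g. via kfree_rcu() following idr_remove()):
 *
 *	struct my_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		do_something(obj);
 *	rcu_read_unlock();
 */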

#define idr_lock(idr)		xa_lock(&(idr)->idr_rt)
#define idr_unlock(idr)		xa_unlock(&(idr)->idr_rt)
#define idr_lock_bh(idr)	xa_lock_bh(&(idr)->idr_rt)
#define idr_unlock_bh(idr)	xa_unlock_bh(&(idr)->idr_rt)
#define idr_lock_irq(idr)	xa_lock_irq(&(idr)->idr_rt)
#define idr_unlock_irq(idr)	xa_unlock_irq(&(idr)->idr_rt)
#define idr_lock_irqsave(idr, flags) \
				xa_lock_irqsave(&(idr)->idr_rt, flags)
#define idr_unlock_irqrestore(idr, flags) \
				xa_unlock_irqrestore(&(idr)->idr_rt, flags)

void idr_preload(gfp_t gfp_mask);

int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
				unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
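
/*
 * A sketch of the typical allocate/use/remove lifecycle (my_idr, my_lock
 * and obj are hypothetical).  Modifications need external serialisation,
 * here a spinlock, so the allocation uses GFP_NOWAIT; a negative return
 * from idr_alloc() is -ENOMEM or -ENOSPC.  Passing 0 as @end places no
 * upper bound on the allocated ID:
 *
 *	int id;
 *
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	if (id < 0)
 *		return id;
 *	...
 *	spin_lock(&my_lock);
 *	idr_remove(&my_idr, id);
 *	spin_unlock(&my_lock);
 */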

/**
 * idr_init_base() - Initialise an IDR.
 * @idr: IDR handle.
 * @base: The base value for the IDR.
 *
 * This variation of idr_init() creates an IDR which will allocate IDs
 * starting at @base.
 */
static inline void idr_init_base(struct idr *idr, int base)
{
	INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
	idr->idr_base = base;
	idr->idr_next = 0;
}

/**
 * idr_init() - Initialise an IDR.
 * @idr: IDR handle.
 *
 * Initialise a dynamically allocated IDR.  To initialise a
 * statically allocated IDR, use DEFINE_IDR().
 */
static inline void idr_init(struct idr *idr)
{
	idr_init_base(idr, 0);
}

/**
 * idr_is_empty() - Are there any IDs allocated?
 * @idr: IDR handle.
 *
 * Return: %true if no IDs are allocated in this IDR.
 */
static inline bool idr_is_empty(const struct idr *idr)
{
	return radix_tree_empty(&idr->idr_rt) &&
		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	local_unlock(&radix_tree_preloads.lock);
}
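
/*
 * A preload sketch (my_idr, my_lock and obj are hypothetical): the
 * GFP_KERNEL preload may sleep and fills a per-cpu cache, so the
 * subsequent atomic allocation is unlikely to fail for lack of memory:
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 */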

/**
 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idr, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
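
/*
 * An iteration sketch (my_idr and struct my_obj are hypothetical); the
 * caller must exclude concurrent modification, for example by holding
 * the lock that protects insertions and removals:
 *
 *	struct my_obj *obj;
 *	int id;
 *
 *	idr_for_each_entry(&my_idr, obj, id)
 *		pr_info("id %d -> %p\n", id, obj);
 */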

/**
 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry_ul(idr, entry, tmp, id)			\
	for (tmp = 0, id = 0;						\
	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
	     tmp = id, ++id)

/**
 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue(idr, entry, id)			\
	for ((entry) = idr_get_next((idr), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idr), &(id)))

/**
 * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id)		\
	for (tmp = id;							\
	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
	     tmp = id, ++id)

/*
 * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS		(IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

struct ida {
	struct xarray xa;
};

#define IDA_INIT_FLAGS	(XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)

#define IDA_INIT(name)	{						\
	.xa = XARRAY_INIT(name, IDA_INIT_FLAGS)				\
}
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)

int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);

/**
 * ida_alloc() - Allocate an unused ID.
 * @ida: IDA handle.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc(struct ida *ida, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, ~0, gfp);
}

/**
 * ida_alloc_min() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
	return ida_alloc_range(ida, min, ~0, gfp);
}

/**
 * ida_alloc_max() - Allocate an unused ID.
 * @ida: IDA handle.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and @max, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, max, gfp);
}

static inline void ida_init(struct ida *ida)
{
	xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
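
/*
 * A minimal IDA sketch (my_ida is hypothetical): allocate the lowest
 * free ID up to an inclusive maximum, then return it to the pool:
 *
 *	static DEFINE_IDA(my_ida);
 *
 *	int id = ida_alloc_max(&my_ida, 1023, GFP_KERNEL);
 *
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_free(&my_ida, id);
 */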

/*
 * ida_simple_get() and ida_simple_remove() are deprecated. Use
 * ida_alloc() and ida_free() instead respectively.
 */
#define ida_simple_get(ida, start, end, gfp) \
			ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id)	ida_free(ida, id)
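
/*
 * Migration sketch (call sites hypothetical): ida_simple_get() took an
 * exclusive @end, while ida_alloc_range() takes an inclusive @max, so
 *
 *	id = ida_simple_get(&my_ida, 0, 1024, GFP_KERNEL);
 *
 * becomes
 *
 *	id = ida_alloc_range(&my_ida, 0, 1023, GFP_KERNEL);
 */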

static inline bool ida_is_empty(const struct ida *ida)
{
	return xa_empty(&ida->xa);
}
#endif /* __IDR_H__ */