// SPDX-License-Identifier: GPL-2.0
/*
 * I/O Address Space ID allocator. There is one global IOASID space, split into
 * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
 * free IOASIDs with ioasid_alloc() and ioasid_free().
 */
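
/*
 * A minimal usage sketch (illustrative only; my_ioasid_set, my_mm and the
 * PASID range below are hypothetical and not part of this file):
 *
 *      DECLARE_IOASID_SET(my_ioasid_set);
 *
 *      ioasid_t pasid = ioasid_alloc(&my_ioasid_set, 1, (1U << 20) - 1, my_mm);
 *
 *      if (pasid == INVALID_IOASID)
 *              return -ENOSPC;
 *      ...
 *      ioasid_free(pasid);
 */
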
#include <linux/ioasid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct ioasid_data {
        ioasid_t id;
        struct ioasid_set *set;
        void *private;
        struct rcu_head rcu;
};

/*
 * struct ioasid_allocator_data - Internal data structure to hold information
 * about an allocator. There are two types of allocators:
 *
 * - Default allocator always has its own XArray to track the IOASIDs allocated.
 * - Custom allocators may share allocation helpers with different private data.
 *   Custom allocators that share the same helper functions also share the same
 *   XArray.
 * Rules:
 * 1. Default allocator is always available, not dynamically registered. This is
 *    to prevent race conditions with early boot code that wants to register
 *    custom allocators or allocate IOASIDs.
 * 2. Custom allocators take precedence over the default allocator.
 * 3. When all custom allocators sharing the same helper functions are
 *    unregistered (e.g. due to hotplug), all outstanding IOASIDs must be
 *    freed. Otherwise, outstanding IOASIDs will be lost and orphaned.
 * 4. When switching between custom allocators sharing the same helper
 *    functions, outstanding IOASIDs are preserved.
 * 5. When switching between custom allocator and default allocator, all IOASIDs
 *    must be freed to ensure unadulterated space for the new allocator.
 *
 * @ops:   allocator helper functions and its data
 * @list:  registered custom allocators
 * @slist: allocators sharing the same ops but different data
 * @flags: attributes of the allocator
 * @xa:    xarray holding the IOASID space
 * @rcu:   used for kfree_rcu when unregistering allocator
 */
struct ioasid_allocator_data {
        struct ioasid_allocator_ops *ops;
        struct list_head list;
        struct list_head slist;
#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
        unsigned long flags;
        struct xarray xa;
        struct rcu_head rcu;
};

static DEFINE_SPINLOCK(ioasid_allocator_lock);
static LIST_HEAD(allocators_list);

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
static void default_free(ioasid_t ioasid, void *opaque);

static struct ioasid_allocator_ops default_ops = {
        .alloc = default_alloc,
        .free = default_free,
};

static struct ioasid_allocator_data default_allocator = {
        .ops = &default_ops,
        .flags = 0,
        .xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
};

static struct ioasid_allocator_data *active_allocator = &default_allocator;

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
{
        ioasid_t id;

        if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
                pr_err("Failed to alloc ioasid from %d to %d\n", min, max);
                return INVALID_IOASID;
        }

        return id;
}

static void default_free(ioasid_t ioasid, void *opaque)
{
        struct ioasid_data *ioasid_data;

        ioasid_data = xa_erase(&default_allocator.xa, ioasid);
        kfree_rcu(ioasid_data, rcu);
}

/* Allocate and initialize a new custom allocator with its helper functions */
static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
{
        struct ioasid_allocator_data *ia_data;

        ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
        if (!ia_data)
                return NULL;

        xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
        INIT_LIST_HEAD(&ia_data->slist);
        ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
        ia_data->ops = ops;

        /* For tracking custom allocators that share the same ops */
        list_add_tail(&ops->list, &ia_data->slist);

        return ia_data;
}

static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
{
        return (a->free == b->free) && (a->alloc == b->alloc);
}

/**
 * ioasid_register_allocator - register a custom allocator
 * @ops: the custom allocator ops to be registered
 *
 * Custom allocators take precedence over the default xarray based allocator.
 * Private data associated with the IOASIDs allocated by the custom allocators
 * is managed by the IOASID framework, just like data stored in the xa by the
 * default allocator.
 *
 * There can be multiple allocators registered but only one is active. In case
 * of runtime removal of a custom allocator, the next one is activated based
 * on the registration ordering.
 *
 * Multiple allocators can share the same alloc() function, in which case the
 * IOASID space is shared.
 */
int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
{
        struct ioasid_allocator_data *ia_data;
        struct ioasid_allocator_data *pallocator;
        int ret = 0;

        spin_lock(&ioasid_allocator_lock);

        ia_data = ioasid_alloc_allocator(ops);
        if (!ia_data) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        /*
         * No particular preference, we activate the first one and keep
         * the later registered allocators in a list in case the first one gets
         * removed due to hotplug.
         */
        if (list_empty(&allocators_list)) {
                WARN_ON(active_allocator != &default_allocator);
                /* Use this new allocator if default is not active */
                if (xa_empty(&active_allocator->xa)) {
                        rcu_assign_pointer(active_allocator, ia_data);
                        list_add_tail(&ia_data->list, &allocators_list);
                        goto out_unlock;
                }
                pr_warn("Default allocator active with outstanding IOASID\n");
                ret = -EAGAIN;
                goto out_free;
        }

        /* Check if the allocator is already registered */
        list_for_each_entry(pallocator, &allocators_list, list) {
                if (pallocator->ops == ops) {
                        pr_err("IOASID allocator already registered\n");
                        ret = -EEXIST;
                        goto out_free;
                } else if (use_same_ops(pallocator->ops, ops)) {
                        /*
                         * If the new allocator shares the same ops,
                         * then they will share the same IOASID space.
                         * We should put them under the same xarray.
                         */
                        list_add_tail(&ops->list, &pallocator->slist);
                        goto out_free;
                }
        }
        list_add_tail(&ia_data->list, &allocators_list);

        spin_unlock(&ioasid_allocator_lock);
        return 0;
out_free:
        kfree(ia_data);
out_unlock:
        spin_unlock(&ioasid_allocator_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(ioasid_register_allocator);
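
/*
 * A hedged sketch of registering a custom allocator, e.g. from a
 * paravirtualized IOMMU driver that must ask the host for PASIDs.
 * my_vcmd_alloc, my_vcmd_free and my_iommu are hypothetical placeholders,
 * not part of this file:
 *
 *      static struct ioasid_allocator_ops my_ops = {
 *              .alloc = my_vcmd_alloc,
 *              .free  = my_vcmd_free,
 *              .pdata = &my_iommu,
 *      };
 *
 *      if (ioasid_register_allocator(&my_ops))
 *              pr_warn("custom IOASID allocator not active\n");
 *
 * On hot removal, the driver unregisters its ops again:
 *
 *      ioasid_unregister_allocator(&my_ops);
 */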

/**
 * ioasid_unregister_allocator - Remove a custom IOASID allocator ops
 * @ops: the custom allocator to be removed
 *
 * Remove an allocator from the list and activate the next allocator in
 * the order it was registered, or revert to the default allocator if all
 * custom allocators are unregistered without outstanding IOASIDs.
 */
void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
{
        struct ioasid_allocator_data *pallocator;
        struct ioasid_allocator_ops *sops;

        spin_lock(&ioasid_allocator_lock);
        if (list_empty(&allocators_list)) {
                pr_warn("No custom IOASID allocators active!\n");
                goto exit_unlock;
        }

        list_for_each_entry(pallocator, &allocators_list, list) {
                if (!use_same_ops(pallocator->ops, ops))
                        continue;

                if (list_is_singular(&pallocator->slist)) {
                        /* No shared helper functions */
                        list_del(&pallocator->list);
                        /*
                         * All IOASIDs should have been freed before
                         * the last allocator that shares the same ops
                         * is unregistered.
                         */
                        WARN_ON(!xa_empty(&pallocator->xa));
                        if (list_empty(&allocators_list)) {
                                pr_info("No custom IOASID allocators, switch to default.\n");
                                rcu_assign_pointer(active_allocator, &default_allocator);
                        } else if (pallocator == active_allocator) {
                                rcu_assign_pointer(active_allocator,
                                                   list_first_entry(&allocators_list,
                                                                    struct ioasid_allocator_data, list));
                                pr_info("IOASID allocator changed\n");
                        }
                        kfree_rcu(pallocator, rcu);
                        break;
                }
                /*
                 * Find the matching shared ops to delete,
                 * but keep outstanding IOASIDs
                 */
                list_for_each_entry(sops, &pallocator->slist, list) {
                        if (sops == ops) {
                                list_del(&ops->list);
                                break;
                        }
                }
                break;
        }

exit_unlock:
        spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);

/**
 * ioasid_set_data - Set private data for an allocated ioasid
 * @ioasid: the ID to set data
 * @data: the private data
 *
 * For an IOASID that is already allocated, private data can be set via this
 * API. Future lookups can be done via ioasid_find().
 */
int ioasid_set_data(ioasid_t ioasid, void *data)
{
        struct ioasid_data *ioasid_data;
        int ret = 0;

        spin_lock(&ioasid_allocator_lock);
        ioasid_data = xa_load(&active_allocator->xa, ioasid);
        if (ioasid_data)
                rcu_assign_pointer(ioasid_data->private, data);
        else
                ret = -ENOENT;
        spin_unlock(&ioasid_allocator_lock);

        /*
         * Wait for readers to stop accessing the old private data, so the
         * caller can free it.
         */
        if (!ret)
                synchronize_rcu();

        return ret;
}
EXPORT_SYMBOL_GPL(ioasid_set_data);
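
/*
 * A minimal sketch of attaching and detaching private data. my_ioasid_set,
 * max_pasid and my_sva_data are hypothetical; the teardown relies on the
 * synchronize_rcu() above so the old pointer can be freed safely:
 *
 *      ioasid_t pasid = ioasid_alloc(&my_ioasid_set, 1, max_pasid, NULL);
 *
 *      ret = ioasid_set_data(pasid, my_sva_data);
 *      ...
 *      ioasid_set_data(pasid, NULL);
 *      kfree(my_sva_data);
 */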

/**
 * ioasid_alloc - Allocate an IOASID
 * @set: the IOASID set
 * @min: the minimum ID (inclusive)
 * @max: the maximum ID (inclusive)
 * @private: data private to the caller
 *
 * Allocate an ID between @min and @max. The @private pointer is stored
 * internally and can be retrieved with ioasid_find().
 *
 * Return: the allocated ID on success, or %INVALID_IOASID on failure.
 */
ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
                      void *private)
{
        struct ioasid_data *data;
        void *adata;
        ioasid_t id;

        data = kzalloc(sizeof(*data), GFP_ATOMIC);
        if (!data)
                return INVALID_IOASID;

        data->set = set;
        data->private = private;

        /*
         * Custom allocator needs allocator data to perform platform specific
         * operations.
         */
        spin_lock(&ioasid_allocator_lock);
        adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
        id = active_allocator->ops->alloc(min, max, adata);
        if (id == INVALID_IOASID) {
                pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
                goto exit_free;
        }

        if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
            xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
                /* Custom allocator needs framework to store and track allocation results */
                pr_err("Failed to alloc ioasid from %d\n", id);
                active_allocator->ops->free(id, active_allocator->ops->pdata);
                goto exit_free;
        }
        data->id = id;

        spin_unlock(&ioasid_allocator_lock);
        return id;
exit_free:
        spin_unlock(&ioasid_allocator_lock);
        kfree(data);
        return INVALID_IOASID;
}
EXPORT_SYMBOL_GPL(ioasid_alloc);

/**
 * ioasid_free - Free an IOASID
 * @ioasid: the ID to remove
 */
void ioasid_free(ioasid_t ioasid)
{
        struct ioasid_data *ioasid_data;

        spin_lock(&ioasid_allocator_lock);
        ioasid_data = xa_load(&active_allocator->xa, ioasid);
        if (!ioasid_data) {
                pr_err("Trying to free unknown IOASID %u\n", ioasid);
                goto exit_unlock;
        }

        active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
        /* Custom allocator needs additional steps to free the xa element */
        if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
                ioasid_data = xa_erase(&active_allocator->xa, ioasid);
                kfree_rcu(ioasid_data, rcu);
        }

exit_unlock:
        spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_free);

/**
 * ioasid_find - Find IOASID data
 * @set: the IOASID set
 * @ioasid: the IOASID to find
 * @getter: function to call on the found object
 *
 * The optional getter function allows the caller to take a reference to the
 * found object under the rcu lock. The function can also check if the object
 * is still valid: if @getter returns false, then the object is invalid and
 * NULL is returned.
 *
 * If the IOASID exists, return the private pointer passed to ioasid_alloc().
 * Private data can be NULL if not set. Return an error if the IOASID is not
 * found, or if @set is not NULL and the IOASID does not belong to the set.
 */
void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
                  bool (*getter)(void *))
{
        void *priv;
        struct ioasid_data *ioasid_data;
        struct ioasid_allocator_data *idata;

        rcu_read_lock();
        idata = rcu_dereference(active_allocator);
        ioasid_data = xa_load(&idata->xa, ioasid);
        if (!ioasid_data) {
                priv = ERR_PTR(-ENOENT);
                goto unlock;
        }
        if (set && ioasid_data->set != set) {
                /* data found but does not belong to the set */
                priv = ERR_PTR(-EACCES);
                goto unlock;
        }
        /* Now IOASID and its set is verified, we can return the private data */
        priv = rcu_dereference(ioasid_data->private);
        if (getter && !getter(priv))
                priv = NULL;
unlock:
        rcu_read_unlock();

        return priv;
}
EXPORT_SYMBOL_GPL(ioasid_find);
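
/*
 * A minimal sketch of a refcounting getter for ioasid_find(). struct my_data,
 * its refs field and my_ioasid_set are hypothetical:
 *
 *      static bool get_my_data(void *p)
 *      {
 *              struct my_data *d = p;
 *
 *              return refcount_inc_not_zero(&d->refs);
 *      }
 *
 *      d = ioasid_find(&my_ioasid_set, pasid, get_my_data);
 *      if (IS_ERR_OR_NULL(d))
 *              return -EINVAL;
 *
 * The reference taken by the getter keeps the object valid after the RCU
 * read-side critical section ends.
 */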

MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
MODULE_LICENSE("GPL");