// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2019-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/xarray.h>
#include <linux/module.h>
static unsigned int tests_run;
static unsigned int tests_passed;

static const unsigned int order_limit =
		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;

#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
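/*
 * Outside of XA_DEBUG builds, XA_BUG_ON() counts every check as a test
 * run and, on failure, prints the location, dumps the array and the
 * stack, then carries on so the rest of the suite can still execute.
 */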
#undef XA_BUG_ON
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif

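/*
 * Helpers used throughout the suite: xa_mk_index() turns an index into
 * the canonical value entry for that index, and the *_index() wrappers
 * store, insert, allocate or erase that entry at the given index,
 * asserting that the operation succeeded.
 */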
static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

static void xa_insert_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
				GFP_KERNEL) != 0);
}

static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

static void xa_erase_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c. We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

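/*
 * Check that xa_err() extracts the right errno from the entries
 * returned by store and erase: 0 on success and, in the userspace test
 * harness only, -ENOMEM when a GFP_NOWAIT allocation fails.
 */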
static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

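/*
 * Retry entries are left behind when a node is deleted under RCU.
 * Check that a reader holding a stale xa_state sees the retry entry on
 * reload, that xas_retry() resets the state, and that iteration steps
 * over retry entries transparently.
 */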
static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}

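/*
 * Store entries at indices 0-1023 one at a time and, after each store,
 * confirm that every index below the high-water mark loads its value
 * while everything above it is still NULL; then erase them in the same
 * order and check the mirror-image property.
 */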
static noinline void check_xa_load(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

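/*
 * Exercise the mark API at a single index: marks cannot be set on NULL
 * entries, are independent of each other and of neighbouring indices,
 * and are cleared when the entry is erased; a multi-index entry stored
 * over marked entries inherits the union of their marks.
 */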
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

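/*
 * Exercise xas_init_marks(), which resets an entry's marks to their
 * default state, both on a single entry and from inside an iteration
 * over a thousand marked entries spread across multiple nodes.
 */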
static noinline void check_xa_mark_2(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}

static noinline void check_xa_mark_3(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	XA_STATE(xas, xa, 0x41);
	void *entry;
	int count = 0;

	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
	xa_set_mark(xa, 0x41, XA_MARK_0);

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
		count++;
		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
	}
	XA_BUG_ON(xa, count != 1);
	rcu_read_unlock();
	xa_destroy(xa);
#endif
}

static noinline void check_xa_mark(struct xarray *xa)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(xa, index);

	check_xa_mark_2(xa);
	check_xa_mark_3(xa);
}

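/*
 * Erasing the entry that forced the tree to grow should shrink it
 * again: the root is replaced, the dead node is filled with retry
 * entries, and a stale xa_state pointing into it notices.  Also check
 * that growing the tree for ULONG_MAX and erasing that entry restores
 * the previous head node.
 */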
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}

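/*
 * xa_insert() at an index must not disturb its neighbours: probe each
 * power of two, the index just below it, and ~0UL, checking that the
 * adjacent slots stay empty.
 */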
static noinline void check_insert(struct xarray *xa)
{
	unsigned long i;

	for (i = 0; i < 1024; i++) {
		xa_insert_index(xa, i);
		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
		xa_erase_index(xa, i);
	}

	for (i = 10; i < BITS_PER_LONG; i++) {
		xa_insert_index(xa, 1UL << i);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
		xa_erase_index(xa, 1UL << i);

		xa_insert_index(xa, (1UL << i) - 1);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
		xa_erase_index(xa, (1UL << i) - 1);
	}

	xa_insert_index(xa, ~0UL);
	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
	xa_erase_index(xa, ~0UL);

	XA_BUG_ON(xa, !xa_empty(xa));
}

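/*
 * xa_cmpxchg() only replaces the entry when the old value matches, and
 * xa_insert() fails with -EBUSY on an occupied slot; check both against
 * present, absent and just-erased entries.
 */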
static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	count = 0;
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}

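/*
 * Store runs of adjacent entries with the advanced API (plus one at
 * ULONG_MAX which is immediately overwritten with NULL), then erase the
 * run by storing NULL from inside an xas_for_each() loop, checking that
 * each entry is visited exactly once and in order.
 */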
static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

#ifdef CONFIG_XARRAY_MULTI
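/*
 * A multi-index entry occupies every slot in [min, max); loading from
 * any of those indices returns the same entry, and a single xas_store()
 * at one index replaces the whole range.
 */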
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

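/*
 * Stress multi-index stores: overlapping entries of every order up to
 * the word size, checking node slot counts, value counts, and that a
 * single store of NULL erases an entire range even when only some of
 * its slots are occupied.
 */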
static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}

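/*
 * For an allocating array, the first ID handed out is the array's base
 * (0 or 1); erasing or destroying entries frees their IDs for reuse,
 * and allocating when the limit is exhausted fails with -EBUSY without
 * modifying *id.
 */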
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(xa, i);
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
	xa_erase_index(xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(xa, base);
	xa_alloc_index(xa, base, GFP_KERNEL);

	xa_erase_index(xa, base + 1);
	xa_erase_index(xa, base + 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}

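/*
 * xa_alloc_cyclic() hands out IDs in increasing order, and returns 1
 * rather than 0 on the allocation where the counter wraps back to the
 * bottom of the range.
 */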
static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

static noinline void check_xa_alloc(void)
{
	check_xa_alloc_1(&xa0, 0);
	check_xa_alloc_1(&xa1, 1);
	check_xa_alloc_2(&xa0, 0);
	check_xa_alloc_2(&xa1, 1);
	check_xa_alloc_3(&xa0, 0);
	check_xa_alloc_3(&xa1, 1);
}

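/*
 * Use xas_for_each_conflict() to count the entries a multi-index store
 * would replace, perform the store, and verify the count matched the
 * number of entries known to be present in the range.
 */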
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}

static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

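/*
 * xa_find() from any index inside a multi-index entry returns that
 * entry, while xa_find_after() steps past the whole range rather than
 * returning it again; it must also not return a multi-index entry that
 * begins before the search start.
 */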
static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;

	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(xa, multi);
	xa_erase_index(xa, next);
	xa_erase_index(xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

static noinline void check_multi_find_3(struct xarray *xa)
{
	unsigned int order;

	for (order = 5; order < order_limit; order++) {
		unsigned long index = 1UL << (order - 5);

		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
		xa_erase_index(xa, 0);
	}
}

static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
						XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
						XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}

static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}

static noinline void check_find_4(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry);

	xa_erase_index(xa, ULONG_MAX);
}

static noinline void check_find(struct xarray *xa)
{
	unsigned i;

	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_find_4(xa);

	for (i = 2; i < 10; i++)
		check_multi_find_1(xa, i);
	check_multi_find_2(xa);
	check_multi_find_3(xa);
}

/*
 * Modelled on find_swap_entry() in mm/shmem.c: walk the whole array
 * looking for @item, pausing the iteration every fourth entry to
 * exercise xas_pause().  Returns the index of @item, or -1 if it is
 * not present.
 */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

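/*
 * Check xa_find_entry() against multi-index entries of each order up
 * to 20; searching for a pointer which was never stored must fail.
 */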
static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

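/*
 * Check that calling xas_pause() after every entry does not change
 * what an iteration over mixed-order entries returns.
 */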
static noinline void check_pause(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int order;
	unsigned long index = 1;
	unsigned int count = 0;

	for (order = 0; order < order_limit; order++) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);
}

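/* Check xas_next() and xas_prev() on an empty array and a lone entry at 0. */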
static noinline void check_move_tiny(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_max(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

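/*
 * Step forwards and backwards over two entries (at 0 and at @idx),
 * checking the tracked index at every step, including the wrap-around
 * at ULONG_MAX.
 */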
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}

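/*
 * Walk 1 << 16 entries in both directions, then erase the middle of
 * the range and walk again to check that NULL is seen in the hole.
 */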
static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(xa);
	check_move_max(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}

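/*
 * Use xas_create_range() to populate every index covered by an entry
 * of @order with its own value entry.
 */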
static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
{
	unsigned long i;

	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

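/* xas_create_range() must do nothing if the xa_state carries an error. */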
static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

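/*
 * Check that xas_create_range() preserves an entry which already
 * exists inside the range being created.
 */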
static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

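/*
 * Check that xas_create_range() copes with being called repeatedly
 * over a range already occupied by a multi-index entry.
 */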
static noinline void check_create_range_5(struct xarray *xa,
		unsigned long index, unsigned int order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	for (i = 0; i < order + 10; i++) {
		do {
			xas_lock(&xas);
			xas_create_range(&xas);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));
	}

	xa_destroy(xa);
}

static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);

		check_create_range_5(xa, 0, order);
		check_create_range_5(xa, (1U << order), order);
	}

	check_create_range_3();
}

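/*
 * Store one multi-index entry covering [first, last], check both
 * boundaries, then remove it again.
 */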
static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_store_range(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
			__check_store_range(xa, i, j);
			__check_store_range(xa, 128 + i, 128 + j);
			__check_store_range(xa, 4095 + i, 4095 + j);
			__check_store_range(xa, 4096 + i, 4096 + j);
			__check_store_range(xa, 123456 + i, 123456 + j);
			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
		}
	}
}

#ifdef CONFIG_XARRAY_MULTI
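/*
 * Split a multi-index entry of @order into entries of @new_order and
 * check that every index then loads the value for its sub-range.
 */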
static void check_split_1(struct xarray *xa, unsigned long index,
				unsigned int order, unsigned int new_order)
{
	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i;

	xa_store_order(xa, index, order, xa, GFP_KERNEL);

	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
	xas_lock(&xas);
	xas_split(&xas, xa, order);
	for (i = 0; i < (1 << order); i += (1 << new_order))
		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
	xas_unlock(&xas);

	for (i = 0; i < (1 << order); i++) {
		unsigned int val = index + (i & ~((1 << new_order) - 1));
		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
	}

	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	xa_destroy(xa);
}

static noinline void check_split(struct xarray *xa)
{
	unsigned int order, new_order;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
		for (new_order = 0; new_order < order; new_order++) {
			check_split_1(xa, 0, order, new_order);
			check_split_1(xa, 1UL << order, order, new_order);
			check_split_1(xa, 3UL << order, order, new_order);
		}
	}
}
#else
static void check_split(struct xarray *xa) { }
#endif

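/*
 * Check that pointers which are not 4-byte aligned can be stored and
 * iterated over without being mistaken for error entries.
 */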
static void check_align_1(struct xarray *xa, char *name)
{
	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}

/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_align(struct xarray *xa)
{
	char name[] = "Motorola 68000";

	check_align_1(xa, name);
	check_align_1(xa, name + 1);
	check_align_1(xa, name + 2);
	check_align_1(xa, name + 3);
	check_align_2(xa, name);
}

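/*
 * Mimic the shadow node tracking done by the page cache workingset
 * code: a node whose entries are all values is kept on a list from
 * which it can later be deleted with xa_delete_node().
 */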
static LIST_HEAD(shadow_nodes);

static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}

static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xa_delete_node(node, test_update_node);
	}
	xa_unlock(xa);
}

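/*
 * Check that the update callback moves a node on and off the shadow
 * list as value entries are replaced by pointers and vice versa.
 */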
static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}

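/*
 * xa_get_order() must report the order of a multi-index entry for
 * every index which it covers.
 */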
static noinline void check_get_order(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (i = 0; i < 3; i++)
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xa_store_order(xa, i << order, order,
					xa_mk_index(i << order), GFP_KERNEL);
			for (j = i << order; j < (i + 1) << order; j++)
				XA_BUG_ON(xa, xa_get_order(xa, j) != order);
			xa_erase(xa, i << order);
		}
	}
}

static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static DEFINE_XARRAY(array);

static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_get_order(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_pause(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);
	check_split(&array);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}

static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");