// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for SL[AOU]B/page initialization at alloc/free time.
 */
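/*
 * Note: the "freshly allocated memory is zeroed" checks below are expected
 * to pass on kernels with zero-on-allocation hardening enabled (e.g. booted
 * with init_on_alloc=1/init_on_free=1, or built with the corresponding
 * CONFIG_INIT_ON_ALLOC_DEFAULT_ON/CONFIG_INIT_ON_FREE_DEFAULT_ON options).
 */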
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/*
 * GARBAGE_BYTE is the low byte of GARBAGE_INT; it is used to fill the tail
 * of a buffer that doesn't fit a whole word.
 */
#define GARBAGE_INT (0x09A7BA9E)
#define GARBAGE_BYTE (0x9E)

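/*
 * Print a per-function summary. Expects |failures| and |num_tests| to be
 * local counters in the calling function.
 */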
#define REPORT_FAILURES_IN_FN() \
	do {	\
		if (failures) \
			pr_info("%s failed %d out of %d times\n", \
				__func__, failures, num_tests); \
		else \
			pr_info("all %d tests in %s passed\n", \
				num_tests, __func__); \
	} while (0)

/*
 * Count the nonzero bytes in the buffer; used as a proxy for the amount of
 * uninitialized data.
 */
static int __init count_nonzero_bytes(void *ptr, size_t size)
{
	int i, ret = 0;
	unsigned char *p = (unsigned char *)ptr;

	for (i = 0; i < size; i++)
		if (p[i])
			ret++;
	return ret;
}

/* Fill a buffer with garbage, skipping |skip| first bytes. */
static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip)
{
	unsigned int *p = (unsigned int *)((char *)ptr + skip);
	int i = 0;

	WARN_ON(skip > size);
	size -= skip;

	while (size >= sizeof(*p)) {
		p[i] = GARBAGE_INT;
		i++;
		size -= sizeof(*p);
	}
	if (size)
		memset(&p[i], GARBAGE_BYTE, size);
}

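/* Convenience wrapper that garbage-fills the whole buffer. */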
static void __init fill_with_garbage(void *ptr, size_t size)
{
	fill_with_garbage_skip(ptr, size, 0);
}

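/*
 * Allocate pages of the given order, garbage-fill and free them, then check
 * that a subsequent allocation of the same order comes back zeroed.
 */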
static int __init do_alloc_pages_order(int order, int *total_failures)
{
	struct page *page;
	void *buf;
	size_t size = PAGE_SIZE << order;

	page = alloc_pages(GFP_KERNEL, order);
	buf = page_address(page);
	fill_with_garbage(buf, size);
	__free_pages(page, order);

	page = alloc_pages(GFP_KERNEL, order);
	buf = page_address(page);
	if (count_nonzero_bytes(buf, size))
		(*total_failures)++;
	fill_with_garbage(buf, size);
	__free_pages(page, order);
	return 1;
}

/* Test the page allocator by calling alloc_pages with different orders. */
static int __init test_pages(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i;

	for (i = 0; i < 10; i++)
		num_tests += do_alloc_pages_order(i, &failures);

	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

/* Test kmalloc() with given parameters. */
static int __init do_kmalloc_size(size_t size, int *total_failures)
{
	void *buf;

	buf = kmalloc(size, GFP_KERNEL);
	fill_with_garbage(buf, size);
	kfree(buf);

	buf = kmalloc(size, GFP_KERNEL);
	if (count_nonzero_bytes(buf, size))
		(*total_failures)++;
	fill_with_garbage(buf, size);
	kfree(buf);
	return 1;
}

/* Test vmalloc() with given parameters. */
static int __init do_vmalloc_size(size_t size, int *total_failures)
{
	void *buf;

	buf = vmalloc(size);
	fill_with_garbage(buf, size);
	vfree(buf);

	buf = vmalloc(size);
	if (count_nonzero_bytes(buf, size))
		(*total_failures)++;
	fill_with_garbage(buf, size);
	vfree(buf);
	return 1;
}

/* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
static int __init test_kvmalloc(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i, size;

	for (i = 0; i < 20; i++) {
		size = 1 << i;
		num_tests += do_kmalloc_size(size, &failures);
		num_tests += do_vmalloc_size(size, &failures);
	}

	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

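/*
 * The test constructor writes CTOR_PATTERN into the first CTOR_BYTES of an
 * object; those bytes must still hold the pattern whenever an object is
 * allocated from a cache that uses the constructor.
 */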
#define CTOR_BYTES (sizeof(unsigned int))
#define CTOR_PATTERN (0x41414141)
/* Initialize the first 4 bytes of the object. */
static void test_ctor(void *obj)
{
	*(unsigned int *)obj = CTOR_PATTERN;
}

/*
 * Check the invariants for the buffer allocated from a slab cache.
 * If the cache has a test constructor, the first 4 bytes of the object must
 * always remain equal to CTOR_PATTERN.
 * If the cache isn't an RCU-typesafe one, or if the allocation is done with
 * __GFP_ZERO, then the object contents must be zeroed after allocation.
 * If the cache is an RCU-typesafe one, the object contents must never be
 * zeroed after the first use. This is checked by memcmp() in
 * do_kmem_cache_size().
 */
static bool __init check_buf(void *buf, int size, bool want_ctor,
			     bool want_rcu, bool want_zero)
{
	int bytes;
	bool fail = false;

	bytes = count_nonzero_bytes(buf, size);
	WARN_ON(want_ctor && want_zero);
	if (want_zero)
		return bytes;
	if (want_ctor) {
		if (*(unsigned int *)buf != CTOR_PATTERN)
			fail = 1;
	} else {
		if (bytes)
			fail = !want_rcu;
	}
	return fail;
}

#define BULK_SIZE 100
/* Scratch array shared by the bulk allocation tests. */
static void *bulk_array[BULK_SIZE];

/*
 * Test kmem_cache with given parameters:
 *  want_ctor - use a constructor;
 *  want_rcu - use SLAB_TYPESAFE_BY_RCU;
 *  want_zero - use __GFP_ZERO.
 */
static int __init do_kmem_cache_size(size_t size, bool want_ctor,
				     bool want_rcu, bool want_zero,
				     int *total_failures)
{
	struct kmem_cache *c;
	int iter;
	bool fail = false;
	gfp_t alloc_mask = GFP_KERNEL | (want_zero ? __GFP_ZERO : 0);
	void *buf, *buf_copy;

	c = kmem_cache_create("test_cache", size, 1,
			      want_rcu ? SLAB_TYPESAFE_BY_RCU : 0,
			      want_ctor ? test_ctor : NULL);
	for (iter = 0; iter < 10; iter++) {
		/* Do a test of bulk allocations */
		if (!want_rcu && !want_ctor) {
			int ret;

			ret = kmem_cache_alloc_bulk(c, alloc_mask, BULK_SIZE, bulk_array);
			if (!ret) {
				fail = true;
			} else {
				int i;

				for (i = 0; i < ret; i++)
					fail |= check_buf(bulk_array[i], size, want_ctor, want_rcu, want_zero);
				kmem_cache_free_bulk(c, ret, bulk_array);
			}
		}

		buf = kmem_cache_alloc(c, alloc_mask);
		/* Check that buf is zeroed, if it must be. */
		fail |= check_buf(buf, size, want_ctor, want_rcu, want_zero);
		fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0);

		if (!want_rcu) {
			kmem_cache_free(c, buf);
			continue;
		}

		/*
		 * If this is an RCU cache, use a critical section to ensure we
		 * can touch objects after they're freed.
		 */
		rcu_read_lock();
		/*
		 * Copy the buffer to check that it's not wiped on
		 * free().
		 */
		buf_copy = kmalloc(size, GFP_ATOMIC);
		if (buf_copy)
			memcpy(buf_copy, buf, size);

		kmem_cache_free(c, buf);
		/*
		 * Check that |buf| is intact after kmem_cache_free().
		 * |want_zero| is false, because we wrote garbage to
		 * the buffer already.
		 */
		fail |= check_buf(buf, size, want_ctor, want_rcu,
				  false);
		if (buf_copy) {
			fail |= (bool)memcmp(buf, buf_copy, size);
			kfree(buf_copy);
		}
		rcu_read_unlock();
	}
	kmem_cache_destroy(c);

	*total_failures += fail;
	return 1;
}

/*
 * Check that the data written to an RCU-allocated object survives
 * reallocation.
 */
static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
{
	struct kmem_cache *c;
	void *buf, *buf_contents, *saved_ptr;
	void **used_objects;
	int i, iter, maxiter = 1024;
	bool fail = false;

	c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU,
			      NULL);
	buf = kmem_cache_alloc(c, GFP_KERNEL);
	saved_ptr = buf;
	fill_with_garbage(buf, size);
	buf_contents = kmalloc(size, GFP_KERNEL);
	if (!buf_contents)
		goto out;
	used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL);
	if (!used_objects) {
		kfree(buf_contents);
		goto out;
	}
	memcpy(buf_contents, buf, size);
	kmem_cache_free(c, buf);
	/*
	 * Run for a fixed number of iterations. If we never hit saved_ptr,
	 * assume the test passes.
	 */
	for (iter = 0; iter < maxiter; iter++) {
		buf = kmem_cache_alloc(c, GFP_KERNEL);
		used_objects[iter] = buf;
		if (buf == saved_ptr) {
			fail = memcmp(buf_contents, buf, size);
			for (i = 0; i <= iter; i++)
				kmem_cache_free(c, used_objects[i]);
			goto free_out;
		}
	}

	/*
	 * We never saw saved_ptr again: free everything we allocated so the
	 * cache can be destroyed without leaking objects.
	 */
	for (iter = 0; iter < maxiter; iter++)
		kmem_cache_free(c, used_objects[iter]);

free_out:
	kmem_cache_destroy(c);
	kfree(buf_contents);
	kfree(used_objects);
out:
	*total_failures += fail;
	return 1;
}

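/*
 * Check that objects coming out of kmem_cache_alloc_bulk() are
 * zero-initialized, the same as single allocations.
 */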
static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
{
	struct kmem_cache *c;
	int i, iter, maxiter = 1024;
	int num, bytes;
	bool fail = false;
	void *objects[10];

	c = kmem_cache_create("test_cache", size, size, 0, NULL);
	for (iter = 0; (iter < maxiter) && !fail; iter++) {
		num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
					    objects);
		for (i = 0; i < num; i++) {
			bytes = count_nonzero_bytes(objects[i], size);
			if (bytes)
				fail = true;
			fill_with_garbage(objects[i], size);
		}

		if (num)
			kmem_cache_free_bulk(c, num, objects);
	}
	kmem_cache_destroy(c);
	*total_failures += fail;
	return 1;
}

/*
 * Test kmem_cache allocation by creating caches of different sizes, with and
 * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
 */
static int __init test_kmemcache(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i, flags, size;
	bool ctor, rcu, zero;

	for (i = 0; i < 10; i++) {
		size = 8 << i;
		for (flags = 0; flags < 8; flags++) {
			ctor = flags & 1;
			rcu = flags & 2;
			zero = flags & 4;
			if (ctor && zero)
				continue;
			num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
							&failures);
		}
		num_tests += do_kmem_cache_size_bulk(size, &failures);
	}
	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

/* Test the behavior of SLAB_TYPESAFE_BY_RCU caches of different sizes. */
static int __init test_rcu_persistent(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i, size;

	for (i = 0; i < 10; i++) {
		size = 8 << i;
		num_tests += do_kmem_cache_rcu_persistent(size, &failures);
	}
	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

/*
 * Run the tests. Each test function returns the number of executed tests and
 * updates |failures| with the number of failed tests.
 */
static int __init test_meminit_init(void)
{
	int failures = 0, num_tests = 0;

	num_tests += test_pages(&failures);
	num_tests += test_kvmalloc(&failures);
	num_tests += test_kmemcache(&failures);
	num_tests += test_rcu_persistent(&failures);

	if (failures == 0)
		pr_info("all %d tests passed!\n", num_tests);
	else
		pr_info("failures: %d out of %d\n", failures, num_tests);

	return failures ? -EINVAL : 0;
}
module_init(test_meminit_init);

MODULE_LICENSE("GPL");