// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
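
/*
 * For the tag-based modes, an out-of-bounds access that stays within the same
 * granule as the end of the object carries the object's tag and is therefore
 * not detectable, so such accesses are shifted by OOB_TAG_OFF (one granule)
 * in the tests below. Generic KASAN tracks object bounds byte-precisely and
 * needs no offset.
 */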

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN also allow tag checking to be reenabled for each
 * test, see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	kasan_set_tagging_report_once(false);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_set_tagging_report_once(true);
	kasan_restore_multi_shot(multishot);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the fail_data
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    !kasan_async_mode_enabled())				\
		migrate_disable();					\
	WRITE_ONCE(fail_data.report_expected, true);			\
	WRITE_ONCE(fail_data.report_found, false);			\
	kunit_add_named_resource(test,					\
				NULL,					\
				NULL,					\
				&resource,				\
				"kasan_data", &fail_data);		\
	barrier();							\
	expression;							\
	barrier();							\
	if (kasan_async_mode_enabled())					\
		kasan_force_async_fault();				\
	barrier();							\
	KUNIT_EXPECT_EQ(test,						\
			READ_ONCE(fail_data.report_expected),		\
			READ_ONCE(fail_data.report_found));		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    !kasan_async_mode_enabled()) {				\
		if (READ_ONCE(fail_data.report_found))			\
			kasan_enable_tagging_sync();			\
		migrate_enable();					\
	}								\
} while (0)
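
/*
 * Typical usage, as in the tests below:
 *
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
 *
 * The test fails if evaluating the expression does not produce a KASAN report.
 */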

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " required");	\
		return;							\
	}								\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " enabled");	\
		return;							\
	}								\
} while (0)
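
/*
 * These helpers are placed at the start of a test to skip it when the
 * required kernel configuration is missing (or, for the _OFF variant,
 * present), e.g.:
 *
 *	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
 */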

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN, page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 666;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);

	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

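/*
 * Check that kfree() accepts a pointer recovered through the struct page
 * (page_address() + offset), i.e. that no false-positive invalid-free report
 * is produced for such a pointer.
 */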
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

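/*
 * Similarly, check that kfree() accepts a pointer recovered via a physical
 * address round trip (phys_to_virt(virt_to_phys(ptr))) without a
 * false-positive report.
 */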
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that, ptr needs to point to a zeroed byte.
	 * Skip the metadata that could be stored in the freed object so that
	 * ptr will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Back up the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");