xref: /OK3568_Linux_fs/kernel/drivers/misc/lkdtm/usercopy.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by adding "unconst" to all the const copies,
 * and by making sure "cache_size" isn't optimized into a const.
 */
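/*
 * Note: "unconst" is never assigned, so it is always 0 at runtime; adding
 * it to a pointer or size changes nothing. The volatile qualifier simply
 * keeps the compiler from proving that, forcing the non-const code paths.
 */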
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + unconst;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[128];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	/*
	 * Put the target buffer in the middle of stack allocation
	 * so that we don't step on future stack users regardless
	 * of stack growth direction.
	 */
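	/*
	 * Once this function returns, the address below refers to a dead
	 * stack frame. On architectures that implement frame walking for
	 * hardened usercopy, copies touching it should be rejected.
	 */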
	return trick_compiler(&buf[(128/2)-32]);
}

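/*
 * Exercise usercopy stack checking in two flavors: with "bad_frame", the
 * copy targets a callee's (now dead) stack frame; without it, the copy
 * starts just inside the top of the thread stack so that a 32-byte copy
 * runs past the end of the stack allocation entirely.
 */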
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

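	/*
	 * Report raw addresses with %px (not the hashed %p) so the good
	 * and bad ranges can actually be compared in the kernel log.
	 */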
#ifdef ARCH_HAS_CURRENT_STACK_POINTER
	pr_info("stack     : %px\n", (void *)current_stack_pointer);
#endif
	pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
	pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
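	/*
	 * On failure, vm_mmap() returns a negative errno cast to an
	 * unsigned long, which is why any value at or above TASK_SIZE
	 * indicates an error here (and in the tests below).
	 */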
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There is no safe way to write to this location without
		 * being caught by usercopy: past the end of our stack may
		 * sit another thread's stack. Skip the copy_from_user()
		 * tests for the !bad_frame case.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

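	/*
	 * Start the copies 16 bytes into the object: a copy of size / 2
	 * stays comfortably within the 1024-byte allocation, while a copy
	 * of the full size runs 16 bytes past its end, which the
	 * whole-object size check should catch.
	 */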
	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then do_usercopy_heap_size() tests will pass too.
 */
static void do_usercopy_heap_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer with a whitelisted window in the buffer.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;
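	/*
	 * With the default cache_size of 1024, the whitelisted window
	 * covers bytes [256, 320) of the object. The "bad" copies below
	 * start at offset - 1, so their first byte falls just outside
	 * the window and should trip the whitelist check.
	 */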

	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}

/* Callable tests. */
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
{
	do_usercopy_heap_whitelist(true);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
{
	do_usercopy_heap_whitelist(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
		test_text);
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text: %px\n",
		vm_mmap);
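	/*
	 * In this tree, __va_function() resolves a symbol to its real
	 * text address even when CFI would otherwise hand out a jump
	 * table entry, so the copy below genuinely sources kernel text.
	 */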
	if (copy_to_user((void __user *)user_addr, __va_function(vm_mmap),
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}
	pr_err("FAIL: survived bad copy_to_user()\n");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare cache that lacks SLAB_USERCOPY flag. */
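	/*
	 * Arguments are: name, object size, align, flags, useroffset,
	 * usersize, ctor. The useroffset/usersize pair (cache_size / 4
	 * and cache_size / 16) defines the whitelist window exercised by
	 * do_usercopy_heap_whitelist().
	 */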
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}