xref: /OK3568_Linux_fs/kernel/lib/test_user_copy.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Kernel module for testing copy_to/from_user infrastructure.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright 2013 Google Inc. All Rights Reserved
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Authors:
8*4882a593Smuzhiyun  *      Kees Cook       <keescook@chromium.org>
9*4882a593Smuzhiyun  */
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/mman.h>
14*4882a593Smuzhiyun #include <linux/module.h>
15*4882a593Smuzhiyun #include <linux/sched.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/uaccess.h>
18*4882a593Smuzhiyun #include <linux/vmalloc.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun /*
21*4882a593Smuzhiyun  * Several 32-bit architectures support 64-bit {get,put}_user() calls.
22*4882a593Smuzhiyun  * As there doesn't appear to be anything that can safely determine
23*4882a593Smuzhiyun  * their capability at compile-time, we just have to opt-out certain archs.
24*4882a593Smuzhiyun  */
25*4882a593Smuzhiyun #if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
26*4882a593Smuzhiyun 			    !defined(CONFIG_M68K) &&		\
27*4882a593Smuzhiyun 			    !defined(CONFIG_MICROBLAZE) &&	\
28*4882a593Smuzhiyun 			    !defined(CONFIG_NIOS2) &&		\
29*4882a593Smuzhiyun 			    !defined(CONFIG_PPC32) &&		\
30*4882a593Smuzhiyun 			    !defined(CONFIG_SUPERH))
31*4882a593Smuzhiyun # define TEST_U64
32*4882a593Smuzhiyun #endif
33*4882a593Smuzhiyun 
/*
 * Evaluate @condition; when it is non-zero (i.e. the check failed), emit a
 * pr_warn() tagged with the current source line and the printf-style @msg.
 * The statement expression evaluates to the condition's truth value so
 * callers can OR results into an accumulated failure flag.
 */
#define test(condition, msg, ...)					\
({									\
	int cond = (condition);						\
	if (cond)							\
		pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__);	\
	cond;								\
})
41*4882a593Smuzhiyun 
/* Return true when no non-zero byte exists in the @len bytes at @mem. */
static bool is_zeroed(void *mem, size_t len)
{
	return !memchr_inv(mem, 0x0, len);
}
46*4882a593Smuzhiyun 
/*
 * Verify that check_zeroed_user() agrees with memchr_inv() for every
 * possible [start, end] sub-range of a small window that straddles the
 * first page boundary of the buffers.
 *
 * @kmem: kernel-side scratch buffer, at least 2 * PAGE_SIZE bytes
 * @umem: userspace mapping of the same length, kept mirrored with @kmem
 * @size: length of both buffers in bytes
 *
 * Returns 0 if all sub-tests passed, non-zero otherwise (or -EINVAL when
 * the supplied buffers are too small to span a page boundary).
 */
static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
{
	int ret = 0;
	size_t start, end, i, zero_start, zero_end;

	if (test(size < 2 * PAGE_SIZE, "buffer too small"))
		return -EINVAL;

	/*
	 * We want to cross a page boundary to exercise the code more
	 * effectively. We also don't want to make the size we scan too large,
	 * otherwise the test can take a long time and cause soft lockups. So
	 * scan a 1024 byte region across the page boundary.
	 */
	size = 1024;
	start = PAGE_SIZE - (size / 2);

	/* Re-base both buffers so the window is centered on the boundary. */
	kmem += start;
	umem += start;

	/* Middle half of the window stays zero; outer quarters get 0xff. */
	zero_start = size / 4;
	zero_end = size - zero_start;

	/*
	 * We conduct a series of check_nonzero_user() tests on a block of
	 * memory with the following byte-pattern (trying every possible
	 * [start,end] pair):
	 *
	 *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
	 *
	 * And we verify that check_nonzero_user() acts identically to
	 * memchr_inv().
	 */

	memset(kmem, 0x0, size);
	for (i = 1; i < zero_start; i += 2)
		kmem[i] = 0xff;
	for (i = zero_end; i < size; i += 2)
		kmem[i] = 0xff;

	/* Mirror the pattern into userspace so both sides match. */
	ret |= test(copy_to_user(umem, kmem, size),
		    "legitimate copy_to_user failed");

	/* Exhaustively compare the user-side check against the kernel-side. */
	for (start = 0; start <= size; start++) {
		for (end = start; end <= size; end++) {
			size_t len = end - start;
			int retval = check_zeroed_user(umem + start, len);
			int expected = is_zeroed(kmem + start, len);

			ret |= test(retval != expected,
				    "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
				    retval, expected, start, end);
		}
	}

	return ret;
}
104*4882a593Smuzhiyun 
/*
 * Exercise copy_struct_from_user() in its four defined regimes:
 * usize == ksize, usize < ksize (old userspace; tail must be zeroed),
 * usize > ksize with non-zero trailing bytes (must fail with -E2BIG),
 * and usize > ksize with a zeroed trailing region (must succeed).
 *
 * @kmem: kernel-side destination buffer, @size bytes
 * @umem: userspace source mapping, @size bytes
 * @size: length of both buffers in bytes
 *
 * Returns 0 if every sub-test passed, non-zero otherwise.
 */
static int test_copy_struct_from_user(char *kmem, char __user *umem,
				      size_t size)
{
	int ret = 0;
	char *umem_src = NULL, *expected = NULL;
	size_t ksize, usize;

	umem_src = kmalloc(size, GFP_KERNEL);
	ret = test(umem_src == NULL, "kmalloc failed");
	if (ret)
		goto out_free;

	expected = kmalloc(size, GFP_KERNEL);
	ret = test(expected == NULL, "kmalloc failed");
	if (ret)
		goto out_free;

	/* Fill umem with a fixed byte pattern. */
	memset(umem_src, 0x3e, size);
	ret |= test(copy_to_user(umem, umem_src, size),
		    "legitimate copy_to_user failed");

	/* Check basic case -- (usize == ksize). */
	ksize = size;
	usize = size;

	memcpy(expected, umem_src, ksize);

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize == ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize == ksize) gives unexpected copy");

	/* Old userspace case -- (usize < ksize). */
	ksize = size;
	usize = size / 2;

	/* Only the first usize bytes are copied; the rest must read as zero. */
	memcpy(expected, umem_src, usize);
	memset(expected + usize, 0x0, ksize - usize);

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize < ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize < ksize) gives unexpected copy");

	/*
	 * New userspace (-E2BIG) case -- (usize > ksize). The user buffer
	 * still holds the 0x3e pattern past ksize, so the copy must reject it.
	 */
	ksize = size / 2;
	usize = size;

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
		    "copy_struct_from_user(usize > ksize) didn't give E2BIG");

	/* New userspace (success) case -- (usize > ksize). */
	ksize = size / 2;
	usize = size;

	memcpy(expected, umem_src, ksize);
	/* Zero the user-side tail so the oversized copy is now acceptable. */
	ret |= test(clear_user(umem + ksize, usize - ksize),
		    "legitimate clear_user failed");

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize > ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize > ksize) gives unexpected copy");

out_free:
	/* kfree(NULL) is a no-op, so this is safe on early failure paths. */
	kfree(expected);
	kfree(umem_src);
	return ret;
}
179*4882a593Smuzhiyun 
/*
 * Module entry point: allocate a two-page kernel buffer and a matching
 * anonymous userspace mapping, then run the legitimate-access tests, the
 * check_zeroed_user()/copy_struct_from_user() suites, and the
 * illegal-access tests for the user-copy primitives.
 *
 * Returns 0 if every test passed, -ENOMEM on setup failure, or -EINVAL
 * if any test reported a failure.
 */
static int __init test_user_copy_init(void)
{
	int ret = 0;
	char *kmem;
	char __user *usermem;
	char *bad_usermem;	/* same address as usermem, minus __user */
	unsigned long user_addr;
	u8 val_u8;
	u16 val_u16;
	u32 val_u32;
#ifdef TEST_U64
	u64 val_u64;
#endif

	kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
	if (!kmem)
		return -ENOMEM;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	/* A value outside the user address range signals a vm_mmap failure. */
	if (user_addr >= (unsigned long)(TASK_SIZE)) {
		pr_warn("Failed to allocate user memory\n");
		kfree(kmem);
		return -ENOMEM;
	}

	usermem = (char __user *)user_addr;
	bad_usermem = (char *)user_addr;

	/*
	 * Legitimate usage: none of these copies should fail.
	 */
	memset(kmem, 0x3a, PAGE_SIZE * 2);
	ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
		    "legitimate copy_to_user failed");
	memset(kmem, 0x0, PAGE_SIZE);
	ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
		    "legitimate copy_from_user failed");
	/* Second page still holds 0x3a, so a round-trip must match it. */
	ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE),
		    "legitimate usercopy failed to copy data");

/*
 * Round-trip a value of the given width through put_user()/get_user()
 * against valid user memory and verify it survives intact.
 */
#define test_legit(size, check)						  \
	do {								  \
		val_##size = check;					  \
		ret |= test(put_user(val_##size, (size __user *)usermem), \
		    "legitimate put_user (" #size ") failed");		  \
		val_##size = 0;						  \
		ret |= test(get_user(val_##size, (size __user *)usermem), \
		    "legitimate get_user (" #size ") failed");		  \
		ret |= test(val_##size != check,			  \
		    "legitimate get_user (" #size ") failed to do copy"); \
		if (val_##size != check) {				  \
			pr_info("0x%llx != 0x%llx\n",			  \
				(unsigned long long)val_##size,		  \
				(unsigned long long)check);		  \
		}							  \
	} while (0)

	test_legit(u8,  0x5a);
	test_legit(u16, 0x5a5b);
	test_legit(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_legit(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_legit

	/* Test usage of check_nonzero_user(). */
	ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
	/* Test usage of copy_struct_from_user(). */
	ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);

	/*
	 * Invalid usage: none of these copies should succeed.
	 */

	/* Prepare kernel memory with check values. */
	memset(kmem, 0x5a, PAGE_SIZE);
	memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);

	/* Reject kernel-to-kernel copies through copy_from_user(). */
	ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
				    PAGE_SIZE),
		    "illegal all-kernel copy_from_user passed");

	/* Destination half of buffer should have been zeroed. */
	ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE),
		    "zeroing failure for illegal all-kernel copy_from_user");

#if 0
	/*
	 * When running with SMAP/PAN/etc, this will Oops the kernel
	 * due to the zeroing of userspace memory on failure. This needs
	 * to be tested in LKDTM instead, since this test module does not
	 * expect to explode.
	 */
	ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
				    PAGE_SIZE),
		    "illegal reversed copy_from_user passed");
#endif
	ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
				  PAGE_SIZE),
		    "illegal all-kernel copy_to_user passed");
	ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
				  PAGE_SIZE),
		    "illegal reversed copy_to_user passed");

/*
 * Attempt get_user()/put_user() of the given width against a *kernel*
 * address: both must fail, and the failed get_user() must zero its
 * destination variable.
 */
#define test_illegal(size, check)					    \
	do {								    \
		val_##size = (check);					    \
		ret |= test(!get_user(val_##size, (size __user *)kmem),	    \
		    "illegal get_user (" #size ") passed");		    \
		ret |= test(val_##size != (size)0,			    \
		    "zeroing failure for illegal get_user (" #size ")");    \
		if (val_##size != (size)0) {				    \
			pr_info("0x%llx != 0\n",			    \
				(unsigned long long)val_##size);	    \
		}							    \
		ret |= test(!put_user(val_##size, (size __user *)kmem),	    \
		    "illegal put_user (" #size ") passed");		    \
	} while (0)

	test_illegal(u8,  0x5a);
	test_illegal(u16, 0x5a5b);
	test_illegal(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_illegal

	vm_munmap(user_addr, PAGE_SIZE * 2);
	kfree(kmem);

	if (ret == 0) {
		pr_info("tests passed.\n");
		return 0;
	}

	return -EINVAL;
}
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun module_init(test_user_copy_init);
322*4882a593Smuzhiyun 
/* Module teardown: nothing to release, just report the unload. */
static void __exit test_user_copy_exit(void)
{
	pr_info("unloaded.\n");
}
327*4882a593Smuzhiyun 
module_exit(test_user_copy_exit);

/* Module metadata. */
MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
MODULE_LICENSE("GPL");
332