xref: /OK3568_Linux_fs/kernel/drivers/misc/lkdtm/bugs.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
	struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
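/*
 * Illustrative example (actual values are configuration-dependent): with
 * CONFIG_FRAME_WARN=1024 and THREAD_SIZE=16 KiB, each recursion frame
 * reserves REC_STACK_SIZE = 512 bytes and REC_NUM_DEFAULT = 64, i.e.
 * roughly twice the depth needed to run off the end of the stack.
 */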

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (pr_info())
 * - function has external effects (pr_info())
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
		recur_count);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}

/* If the depth is negative, use the default, otherwise keep the parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

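/* Trigger an immediate, unrecoverable kernel panic. */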
void lkdtm_PANIC(void)
{
	panic("dumptest");
}

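/* Trigger a BUG(), which should oops and kill the calling task. */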
void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

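/*
 * Trigger WARN_ON()/WARN() splats; warn_counter provides a changing value,
 * so WARN_ON() always sees a nonzero condition and WARN() reports the
 * running trigger count.
 */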
void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

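/* Write through a NULL pointer to trigger a page fault and oops. */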
void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

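/* Busy-loop forever in kernel context. */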
void lkdtm_LOOP(void)
{
	for (;;)
		;
}

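/*
 * Recurse until the kernel stack is exhausted; with CONFIG_VMAP_STACK this
 * is expected to fault on the stack guard page rather than silently corrupt
 * adjacent memory.
 */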
void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}

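/*
 * Clobber 64 bytes at the given location in the caller's stack frame;
 * noinline keeps this out-of-bounds write out of the caller's own body so
 * the compiler cannot see (and warn about or elide) it.
 */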
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

/* Same as above but will only get a canary with -fstack-protector-strong */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

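/*
 * Perform a 32-bit load and store through a deliberately misaligned pointer;
 * architectures without efficient unaligned access should fault here.
 */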
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}

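/* Spin with preemption disabled to trip the soft-lockup watchdog. */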
void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

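/* Spin with local interrupts disabled to trip the hard-lockup (NMI) watchdog. */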
void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}

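/* Sleep uninterruptibly without ever waking, to trip the hung-task detector. */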
void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}

volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;

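/*
 * 'huge' and 'ignored' are volatile so the compiler cannot constant-fold
 * the additions below or discard their results; the overflowing signed
 * addition is undefined behaviour, which is what integer-overflow
 * instrumentation (e.g. UBSAN) is expected to catch.
 */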
void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}

void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}

/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[1];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};

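/*
 * With array-bounds instrumentation enabled (e.g. CONFIG_UBSAN_BOUNDS), the
 * fixed-size data[8] member is checked, while the old-style one-element
 * "flexible" array is treated as unbounded and therefore is not.
 */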
void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
	if (!not_checked || !checked) {
		kfree(not_checked);
		kfree(checked);
		return;
	}

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < sizeof(not_checked->data) + 1; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
}

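/*
 * Corrupt the "next" pointer seen by list_add(); list debugging (e.g.
 * CONFIG_DEBUG_LIST) is expected to reject the operation before the
 * redirected write lands.
 */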
void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_add() corruption not detected!\n");
}

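/* As above, but corrupt the "next" pointer seen by list_del() instead. */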
void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_del() corruption not detected!\n");
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}

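/*
 * Test x86-64 CR4 pinning: clearing SMEP through native_write_cr4() should
 * be silently undone, and a direct call into the located mov-to-%cr4
 * instruction should be detected and reverted.
 */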
void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}

void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero.  This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault.  The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window.  This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) {
		/* Reset the keys of current task */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

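/*
 * Changing the kernel PAC keys underneath a live call stack should make an
 * in-flight return address fail authentication and fault on function return.
 */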
noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * The PAC is a hash computed from the keys, the return address, and
	 * the stack pointer. Because the PAC field has only a few bits, a
	 * forged pointer may still authenticate by collision, so iterate a
	 * few times to reduce that probability.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}
493