/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ASM_UACCESS_ASM_H__
#define __ASM_UACCESS_ASM_H__

#include <asm/asm-offsets.h>
#include <asm/domain.h>
#include <asm/memory.h>
#include <asm/thread_info.h>

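	/*
	 * csdb - emit a CSDB (Consistent Speculation Data Barrier).
	 * The raw encodings (Thumb-2 and ARM forms) are emitted via .inst
	 * since older assemblers may not accept the mnemonic.
	 */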
	.macro	csdb
#ifdef CONFIG_THUMB2_KERNEL
	.inst.w	0xf3af8014
#else
	.inst	0xe320f014
#endif
	.endm

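	/*
	 * check_uaccess - verify that the user range [\addr, \addr + \size)
	 * lies below \limit, branching to \bad on failure.  With
	 * CONFIG_CPU_SPECTRE, a failing \addr is also forced to zero and a
	 * csdb issued so speculation cannot run ahead with the out-of-range
	 * pointer.  The check is compiled out when CONFIG_CPU_USE_DOMAINS=y.
	 */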
	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcscc	\tmp, \tmp, \limit
	bcs	\bad
#ifdef CONFIG_CPU_SPECTRE
	movcs	\addr, #0
	csdb
#endif
#endif
	.endm

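	/*
	 * uaccess_mask_range_ptr - Spectre-style pointer sanitisation: if
	 * the range [\addr, \addr + \size) does not fit below \limit, \addr
	 * is replaced with NULL, and the trailing csdb keeps speculation
	 * from using the unmasked pointer.  No-op unless CONFIG_CPU_SPECTRE
	 * is enabled.
	 */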
	.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
#ifdef CONFIG_CPU_SPECTRE
	sub	\tmp, \limit, #1
	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
	subshs	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
	csdb
#endif
	.endm

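	/*
	 * uaccess_disable - with CONFIG_CPU_SW_DOMAIN_PAN, write
	 * DACR_UACCESS_DISABLE to the domain access control register so
	 * kernel code can no longer reach userspace mappings.  Pass isb=0
	 * to omit the trailing instruction barrier.
	 */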
	.macro	uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

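	/*
	 * uaccess_enable - counterpart of uaccess_disable: with
	 * CONFIG_CPU_SW_DOMAIN_PAN, write DACR_UACCESS_ENABLE to the domain
	 * register so the user accessors may run.  A typical caller
	 * (illustrative sketch only, not taken from this file) brackets the
	 * access:
	 *
	 *	uaccess_enable r4
	 *	... user load/store with exception table entry ...
	 *	uaccess_disable r4
	 */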
	.macro	uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_ENABLE
	mcr	p15, 0, \tmp, c3, c0, 0
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

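/*
 * DACR(x...) expands its argument only when the kernel actually manages
 * the domain access control register (CONFIG_CPU_SW_DOMAIN_PAN or
 * CONFIG_CPU_USE_DOMAINS); otherwise the bracketed instructions compile
 * away to nothing.
 */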
#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
#define DACR(x...)	x
#else
#define DACR(x...)
#endif

	/*
	 * Save the address limit on entry to a privileged exception.
	 *
	 * If we are using the DACR for kernel access by the user accessors
	 * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
	 * back to client mode, whether or not \disable is set.
	 *
	 * If we are using SW PAN, set the DACR user domain to no access
	 * if \disable is set.
	 */
	.macro	uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
	ldr	\tmp1, [\tsk, #TI_ADDR_LIMIT]
	mov	\tmp2, #TASK_SIZE
	str	\tmp2, [\tsk, #TI_ADDR_LIMIT]
 DACR(	mrc	p15, 0, \tmp0, c3, c0, 0)
 DACR(	str	\tmp0, [sp, #SVC_DACR])
	str	\tmp1, [sp, #SVC_ADDR_LIMIT]
	.if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
	/* kernel=client, user=no access */
	mov	\tmp2, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp2, c3, c0, 0
	instr_sync
	.elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
	/* kernel=client */
	bic	\tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
	orr	\tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
	mcr	p15, 0, \tmp2, c3, c0, 0
	instr_sync
	.endif
	.endm

	/* Restore the user access state previously saved by uaccess_entry */
	.macro	uaccess_exit, tsk, tmp0, tmp1
	ldr	\tmp1, [sp, #SVC_ADDR_LIMIT]
 DACR(	ldr	\tmp0, [sp, #SVC_DACR])
	str	\tmp1, [\tsk, #TI_ADDR_LIMIT]
 DACR(	mcr	p15, 0, \tmp0, c3, c0, 0)
	.endm

#undef DACR

#endif /* __ASM_UACCESS_ASM_H__ */