/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/uaccess.h
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (eg, via list poison values) which could then
 * be exploited for privilege escalation.
 */
static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
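
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * every userspace access is expected to be bracketed by a
 * save-and-enable/restore pair, mirroring the raw_copy_*() wrappers
 * further down in this file.
 */
#if 0
static unsigned long example_copy_in(void *to, const void __user *from,
				     unsigned long n)
{
	unsigned int ua_flags = uaccess_save_and_enable();

	n = arm_copy_from_user(to, from, n);	/* userspace is now accessible */
	uaccess_restore(ua_flags);		/* re-arm PAN protection */
	return n;
}
#endif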

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000: KERNEL_DS is zero so that
 * the 33-bit __range_ok() arithmetic below treats the limit as 2^32,
 * i.e. the whole 4GiB address space.
 */
#define KERNEL_DS	0x00000000

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	dsb(nsh);
	isb();

	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define uaccess_kernel()	(get_fs() == KERNEL_DS)

/*
 * We use 33-bit arithmetic here.  Success returns zero, failure returns
 * addr_limit.  We take advantage that addr_limit will be zero for KERNEL_DS,
 * so this will always return success in that case.
 */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr);	\
	__asm__(".syntax unified\n" \
		"adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })
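
/*
 * Worked example (assumed values; TASK_SIZE is taken as 0xbf000000 here,
 * the usual 3G/1G split):
 *
 *   addr = 0x40000000, size = 0x100, addr_limit = 0xbf000000:
 *     adds    -> roksum = 0x40000100, carry clear
 *     sbcscc  -> 0x40000100 - 0xbf000000 - 1 borrows, carry stays clear
 *     movcc   -> flag = 0, i.e. success
 *
 *   addr = 0xffffff00, size = 0x200:
 *     adds    -> wraps to 0x00000100 with carry set, so both conditional
 *                instructions are skipped and flag = addr_limit != 0: failure
 */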

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
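/*
 * For example (on 32-bit ARM, where sizeof(0UL) == 4): __inttype(u8) and
 * __inttype(u32) are both unsigned long, while __inttype(u64) is
 * unsigned long long.
 */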

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	.syntax unified\n"
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subshs	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
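
/*
 * Usage sketch (hypothetical caller): pass the masked pointer to a raw
 * access routine so that, under speculation, an out-of-range pointer is
 * forced to NULL rather than dereferencing kernel memory.
 */
#if 0
static unsigned long example_masked_copy(void *dst, const void __user *src,
					 size_t len)
{
	/* access_ok() is assumed to have been checked by the caller */
	return raw_copy_from_user(dst, uaccess_mask_range_ptr(src, len), len);
}
#endif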

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)		        \
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif

#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(*(p)) __user *__p asm("r0") = (p);	\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		int __tmp_e;						\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		__tmp_e = __e;						\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__tmp_e;						\
	})

#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	 })
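
/*
 * Usage sketch (hypothetical caller): get_user() verifies the address
 * against addr_limit itself, so no separate access_ok() is needed.
 */
#if 0
static int example_get(const u32 __user *uptr, u32 *out)
{
	return get_user(*out, uptr);	/* 0 on success, -EFAULT on fault */
}
#endif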

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define uaccess_kernel()	(true)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(addr, size)	(__range_ok(addr, size) == 0)

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : get_fs())

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})
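
/*
 * Usage sketch (hypothetical caller): the non-verifying __get_user()
 * must be preceded by an explicit access_ok() check on the same range.
 */
#if 0
static int example_get_nocheck(const u16 __user *uptr, u16 *out)
{
	if (!access_ok(uptr, sizeof(*uptr)))
		return -EFAULT;
	return __get_user(*out, uptr);
}
#endif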

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#define __get_user_asm_byte(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrb)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrh)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif
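
/*
 * Worked example for the little-endian variant above: loading the
 * halfword 0x1234 yields __b1 = 0x34 and __b2 = 0x12, so
 * (x) = 0x34 | (0x12 << 8) = 0x1234.
 */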

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __get_user_asm_word(x, addr, err)			\
	__get_user_asm(x, addr, err, ldr)
#endif

#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
	__pu_err;							\
})
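
/*
 * Usage sketch (hypothetical caller): like get_user(), put_user()
 * performs its own limit check via the __put_user_N() library routines.
 */
#if 0
static int example_put(u32 __user *uptr, u32 val)
{
	return put_user(val, uptr);	/* 0 on success, -EFAULT on fault */
}
#endif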

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err);	\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strb)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strh)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#endif /* !CONFIG_CPU_SPECTRE */

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __clear_user(to, n);
	return n;
}
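
/*
 * Usage sketch (hypothetical caller): copy_{to,from}_user() are generated
 * from the raw_copy_*() routines above (see the INLINE_COPY_* defines)
 * and, like clear_user(), return the number of bytes NOT transferred.
 */
#if 0
static int example_zero_tail(void __user *ubuf, unsigned long len)
{
	if (clear_user(ubuf, len))
		return -EFAULT;	/* some bytes could not be cleared */
	return 0;
}
#endif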

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */