/* xref: /OK3568_Linux_fs/kernel/arch/powerpc/include/asm/uaccess.h (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX		TASK_SIZE_USER64
#else
#define TASK_SIZE_MAX		TASK_SIZE
#endif

static inline bool __access_ok(unsigned long addr, unsigned long size)
{
	return addr < TASK_SIZE_MAX && size <= TASK_SIZE_MAX - addr;
}

#define access_ok(addr, size)		\
	(__chk_user_ptr(addr),		\
	 __access_ok((unsigned long)(addr), (size)))
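
/*
 * The bound above is written as "size <= TASK_SIZE_MAX - addr" rather
 * than "addr + size <= TASK_SIZE_MAX" to avoid unsigned wraparound:
 * e.g. addr = TASK_SIZE_MAX - 1 with size = ~0UL would wrap and pass
 * the naive form, but is correctly rejected here.
 */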

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __put_user_goto(x, ptr, label) \
	__put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
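
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * get_user()/put_user() validate the pointer themselves and return 0
 * or -EFAULT; the __ variants assume access_ok() was already done on
 * the whole range, which pays off when touching the same user area
 * more than once:
 *
 *	static int read_pair(u32 __user *uptr, u32 *a, u32 *b)
 *	{
 *		if (!access_ok(uptr, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */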

#ifdef CONFIG_PPC64

#define ___get_user_instr(gu_op, dest, ptr)				\
({									\
	long __gui_ret = 0;						\
	unsigned long __gui_ptr = (unsigned long)ptr;			\
	struct ppc_inst __gui_inst;					\
	unsigned int __prefix, __suffix;				\
	__gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr);	\
	if (__gui_ret == 0) {						\
		if ((__prefix >> 26) == OP_PREFIX) {			\
			__gui_ret = gu_op(__suffix,			\
				(unsigned int __user *)__gui_ptr + 1);	\
			__gui_inst = ppc_inst_prefix(__prefix,		\
						     __suffix);		\
		} else {						\
			__gui_inst = ppc_inst(__prefix);		\
		}							\
		if (__gui_ret == 0)					\
			(dest) = __gui_inst;				\
	}								\
	__gui_ret;							\
})

#define get_user_instr(x, ptr) \
	___get_user_instr(get_user, x, ptr)

#define __get_user_instr(x, ptr) \
	___get_user_instr(__get_user, x, ptr)

#define __get_user_instr_inatomic(x, ptr) \
	___get_user_instr(__get_user_inatomic, x, ptr)

#else /* !CONFIG_PPC64 */
#define get_user_instr(x, ptr) \
	get_user((x).val, (u32 __user *)(ptr))

#define __get_user_instr(x, ptr) \
	__get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)

#define __get_user_instr_inatomic(x, ptr) \
	__get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))

#endif /* CONFIG_PPC64 */
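
/*
 * Illustrative sketch (hypothetical trap/emulation path): fetch the
 * instruction at a user PC. On CONFIG_PPC64, if the first word's major
 * opcode is OP_PREFIX the suffix word is fetched too and a prefixed
 * ppc_inst is built; on 32-bit a single word is read into (x).val.
 *
 *	struct ppc_inst insn;
 *
 *	if (get_user_instr(insn, (void __user *)regs->nip))
 *		return -EFAULT;
 */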

extern long __put_user_bad(void);

#define __put_user_size_allowed(x, ptr, size, retval)		\
do {								\
	__label__ __pu_failed;					\
								\
	retval = 0;						\
	__put_user_size_goto(x, ptr, size, __pu_failed);	\
	break;							\
								\
__pu_failed:							\
	retval = -EFAULT;					\
} while (0)

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	allow_write_to_user(ptr, size);				\
	__put_user_size_allowed(x, ptr, size, retval);		\
	prevent_write_to_user(ptr, size);			\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
								\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(size) __pu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(__pu_addr, __pu_size))				\
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
									\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
								\
	__pu_err;						\
})


/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "m"UPD_CONSTR (*addr)		\
		:						\
		: label)

#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label)			\
	__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label)			\
	asm_volatile_goto(					\
		"1:	stw%X1 %0, %1\n"			\
		"2:	stw%X1 %L0, %L1\n"			\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)
#endif /* __powerpc64__ */

#define __put_user_size_goto(x, ptr, size, label)		\
do {								\
	switch (size) {						\
	case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;	\
	case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;	\
	case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;	\
	case 8: __put_user_asm2_goto(x, ptr, label); break;	\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck_goto(x, ptr, size, label)		\
do {								\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size_goto((x), __pu_addr, (size), label);	\
} while (0)


extern long __get_user_bad(void);

/*
 * This does an atomic 128-bit (16-byte aligned) load from userspace.
 * Up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine altivec\n"			\
		"1:	lvx  0,0,%1	# get user\n"	\
		" 	stvx 0,0,%2	# put kernel\n"	\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)			\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op"%U2%X2 %1, %2	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "m"UPD_CONSTR (*addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz%X2 %1, %2\n"			\
		"2:	lwz%X2 %L1, %L2\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval);  break;	\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	allow_read_from_user(ptr, size);			\
	__get_user_size_allowed(x, ptr, size, retval);		\
	prevent_read_from_user(ptr, size);			\
} while (0)

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
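
/*
 * Concretely: on 32-bit, __long_type(u64) is unsigned long long while
 * __long_type(u32) is unsigned long; on 64-bit both pick unsigned long.
 * This lets the __get_user_* macros below declare a temporary wide
 * enough for an 8-byte access without forcing 64-bit locals everywhere.
 */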

#define __get_user_nocheck(x, ptr, size, do_allow)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \
		might_fault();					\
	barrier_nospec();					\
	if (do_allow)								\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	else									\
		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	__long_type(*(ptr)) __gu_val = 0;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(size) __gu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(__gu_addr, __gu_size)) {				\
		barrier_nospec();					\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
									\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true))) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = copy_mc_generic((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}
#endif

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	barrier_nospec();
	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;
	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}
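
/*
 * The raw_copy_* routines here are the arch backend for the generic
 * copy_{from,to}_user() wrappers in include/linux/uaccess.h. The
 * __builtin_constant_p() fast path above turns small constant-sized
 * copies into a single inline load/store instead of a call out to
 * __copy_tofrom_user().
 */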

static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = raw_copy_to_user_allowed(to, from, n);
	prevent_write_to_user(to, n);
	return ret;
}

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;
	might_fault();
	if (likely(access_ok(addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	return ret;
}
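
/*
 * Illustrative sketch (hypothetical caller): zero the uncopied tail of
 * a user buffer, e.g. after a short read. A non-zero return is the
 * number of bytes that could not be cleared.
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */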

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin	user_access_begin
#define user_access_end		prevent_current_access_user
#define user_access_save	prevent_user_access_return
#define user_access_restore	restore_user_access

static __must_check inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_from_user(ptr, len);
	return true;
}
#define user_read_access_begin	user_read_access_begin
#define user_read_access_end		prevent_current_read_from_user

static __must_check inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_write_to_user((void __user *)ptr, len);
	return true;
}
#define user_write_access_begin	user_write_access_begin
#define user_write_access_end		prevent_current_write_to_user

#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)

#define unsafe_copy_to_user(d, s, l, e) \
do {									\
	u8 __user *_dst = (u8 __user *)(d);				\
	const u8 *_src = (const u8 *)(s);				\
	size_t _len = (l);						\
	int _i;								\
									\
	for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long))		\
		__put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\
	if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) {			\
		__put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e);	\
		_i += 4;						\
	}								\
	if (_len & 2) {							\
		__put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e);	\
		_i += 2;						\
	}								\
	if (_len & 1) \
		__put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\
} while (0)
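
/*
 * Illustrative sketch (hypothetical caller): the unsafe_* accessors
 * above are only valid inside a user_access_begin()/user_access_end()
 * window, with a label for the fault path:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(val, uptr, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */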

#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size_allowed(*((type *)(dst)), (__force type __user *)(src),\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size_goto(*((type *)(src)),				\
		(__force type __user *)(dst), sizeof(type), err_label)
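
/*
 * Illustrative sketch (hypothetical caller): with HAVE_GET_KERNEL_NOFAULT
 * defined, these back the generic copy_{from,to}_kernel_nofault()
 * helpers; a direct probe of a possibly-invalid kernel pointer looks
 * like:
 *
 *	long val;
 *
 *	__get_kernel_nofault(&val, kptr, long, Efault);
 *	return val;
 * Efault:
 *	return -EFAULT;
 */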

#endif	/* _ARCH_POWERPC_UACCESS_H */