xref: /OK3568_Linux_fs/kernel/arch/x86/include/asm/uaccess.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
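/*
 * Worked example (illustrative, not part of the original header): with
 * addr = 0xfffffffffffffff0 and size = 0x20 on 64-bit, 'addr += size'
 * wraps around to 0x10, which is smaller than 'size', so the check
 * above reports the range as not ok instead of passing a wrapped
 * address.
 */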

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)					\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, TASK_SIZE_MAX));		\
})
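/*
 * Illustrative usage (a sketch, not from the original header): a
 * hypothetical handler validating a user buffer 'ubuf' of 'len'
 * bytes before doing unchecked accesses:
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 */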

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
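/*
 * Example expansion (illustrative): for a 2-byte value, __inttype(x)
 * resolves to 'unsigned short'; for an 8-byte value every
 * __typefits() arm fails and the chain falls through to 0ULL, i.e.
 * 'unsigned long long'. Picking the smallest type that fits keeps
 * the asm operand the natural width of the access.
 */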

/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
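/*
 * Illustrative usage (a sketch, assuming a hypothetical
 * 'int __user *uaddr' argument):
 *
 *	int val;
 *
 *	if (get_user(val, uaddr))
 *		return -EFAULT;
 */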

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)
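/*
 * Illustrative pairing with access_ok() (a sketch, hypothetical
 * 'u32 __user *uaddr'): a caller that has already validated the
 * range can use this cheaper unchecked form:
 *
 *	u32 val;
 *
 *	if (!access_ok(uaddr, sizeof(*uaddr)))
 *		return -EFAULT;
 *	if (__get_user(val, uaddr))
 *		return -EFAULT;
 */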


#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__chk_user_ptr(ptr);						\
	__ptr_pu = (ptr);						\
	__val_pu = (x);							\
	asm volatile("call __" #fn "_%P[size]"				\
		     : "=c" (__ret_pu),					\
			ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     :"ebx");						\
	__builtin_expect(__ret_pu, 0);					\
})

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
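/*
 * Illustrative usage (a sketch, hypothetical 'int __user *uaddr'):
 *
 *	if (put_user(42, uaddr))
 *		return -EFAULT;
 */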

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;		\
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:	{							\
		unsigned char x_u8__;					\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	}								\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)

#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %[efault],%[errout]\n"		\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       [efault] "i" (-EFAULT), "0" (retval));		\
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	 __get_user_asm(x, ptr, retval, "q", "=r")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b", "=q");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, ltype)			\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %[efault],%[errout]\n"		\
		     "	xorl %k[output],%k[output]\n"			\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : [errout] "=r" (err),				\
		       [output] ltype(x)				\
		     : [umem] "m" (__m(addr)),				\
		       [efault] "i" (-EFAULT), "0" (err))

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
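/*
 * Illustrative pattern (a sketch, hypothetical 'u32 __user *uaddr'):
 * the unsafe accessors must be bracketed by user_access_begin()/
 * user_access_end(), with faults routed to the error label:
 *
 *	u32 val;
 *
 *	if (!user_access_begin(uaddr, sizeof(*uaddr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uaddr, Efault);
 *	unsafe_put_user(val + 1, uaddr, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */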

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)				\
	while (len >= sizeof(type)) {						\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);						\
		src += sizeof(type);						\
		len -= sizeof(type);						\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
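/*
 * Example (illustrative): for _len == 15 the descending-size loops
 * above emit one u64, one u32, one u16 and one u8 store (8+4+2+1
 * bytes), so the copy takes four stores instead of fifteen.
 */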

#define HAVE_GET_KERNEL_NOFAULT

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)
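/*
 * Illustrative usage (a sketch): these helpers back the generic
 * copy_{from,to}_kernel_nofault() machinery. A hypothetical direct
 * read of a kernel long that must not oops on a bad 'kaddr':
 *
 *	long v;
 *
 *	__get_kernel_nofault(&v, kaddr, long, Efault);
 *	return v;
 * Efault:
 *	return -EFAULT;
 */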
#endif /* _ASM_X86_UACCESS_H */