xref: /OK3568_Linux_fs/kernel/arch/mips/include/asm/uaccess.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
#define USER_DS		((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })
#endif

#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)

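/*
 * Illustrative sketch (added commentary, not part of the original header):
 * the classic pattern for temporarily widening the address limit so that
 * the uaccess helpers below accept kernel pointers:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	...access kernel buffers through the uaccess helpers...
 *	set_fs(old_fs);
 */
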
/*
 * eva_kernel_access() - determine whether a memory access should target
 * kernel memory on an EVA system
 *
 * Determines whether memory accesses should be performed to kernel memory
 * on a system using Extended Virtual Addressing (EVA).
 *
 * Return: true if a kernel memory access on an EVA system, else false.
 */
static inline bool eva_kernel_access(void)
{
	if (!IS_ENABLED(CONFIG_EVA))
		return false;

	return uaccess_kernel();
}

/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

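/*
 * Added commentary (not part of the original header): for a positive
 * constant size the ?: above folds to 0 at compile time, e.g.
 *
 *	__ua_size(16)	evaluates to 0
 *	__ua_size(len)	evaluates to (len) for a non-constant len
 *
 * so access_ok() on a constant-sized object only has to range-check the
 * base address.
 */
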
/*
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

static inline int __access_ok(const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;
	return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
}

#define access_ok(addr, size)					\
	likely(__access_ok((addr), (size)))

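/*
 * Illustrative usage sketch (not part of the original header); "ubuf"
 * and "len" are hypothetical:
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 *	...proceed with __copy_*() / __get_user() / __put_user()...
 */
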
/*
 * put_user: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

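/*
 * Illustrative usage (not part of the original header); "uptr" is a
 * hypothetical int __user pointer:
 *
 *	if (put_user(42, uptr))
 *		return -EFAULT;
 */
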
/*
 * get_user: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))

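/*
 * Illustrative usage (not part of the original header); "uptr" is a
 * hypothetical int __user pointer:
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;		...val is zeroed on failure...
 */
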
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

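/*
 * Illustrative sketch (not part of the original header): once a block has
 * been validated with access_ok(), the unchecked variants can be used on
 * its individual fields; "uarg" and struct sample are hypothetical:
 *
 *	struct sample __user *uarg;
 *	int a, b;
 *
 *	if (!access_ok(uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(a, &uarg->a) || __get_user(b, &uarg->b))
 *		return -EFAULT;
 */
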
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA. We need to use normal load instructions
 * to read data from kernel when operating in EVA mode. We use these macros to
 * avoid redefining __get_data_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd			_loadw
#else
#define _loadd(reg, addr)	"ld " reg ", " addr
#endif
#define _loadw(reg, addr)	"lw " reg ", " addr
#define _loadh(reg, addr)	"lh " reg ", " addr
#define _loadb(reg, addr)	"lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, _loadb, ptr); break;		\
	case 2: __get_data_asm(val, _loadh, ptr); break;		\
	case 4: __get_data_asm(val, _loadw, ptr); break;		\
	case 8: __GET_DW(val, _loadd, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, user_lb, ptr); break;		\
	case 2: __get_data_asm(val, user_lh, ptr); break;		\
	case 4: __get_data_asm(val, user_lw, ptr); break;		\
	case 8: __GET_DW(val, user_ld, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	if (eva_kernel_access()) {					\
		__get_kernel_common((x), size, ptr);			\
	} else {							\
		__chk_user_ptr(ptr);					\
		__get_user_common((x), size, ptr);			\
	}								\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(__gu_ptr, size))) {			\
		if (eva_kernel_access())				\
			__get_kernel_common((x), size, __gu_ptr);	\
		else							\
			__get_user_common((x), size, __gu_ptr);		\
	} else								\
		(x) = 0;						\
									\
	__gu_err;							\
})

#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	"insn("%1", "%3")"				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

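/*
 * Added commentary (not part of the original header): the __ex_table
 * entry above pairs the possibly-faulting load at label 1 with the fixup
 * at label 3.  On a fault, the exception handler finds the faulting PC in
 * __ex_table and branches to the fixup, which loads -EFAULT into the
 * error register, zeroes the destination, and resumes at label 2 as if
 * the load had completed.
 */
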
/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn("%1", "(%3)")"				\n"	\
	"2:	" insn("%D1", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA. We need to use normal store instructions
 * to write data to kernel when operating in EVA mode. We use these macros to
 * avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored			_storew
#else
#define _stored(reg, addr)	"sd " reg ", " addr
#endif

#define _storew(reg, addr)	"sw " reg ", " addr
#define _storeh(reg, addr)	"sh " reg ", " addr
#define _storeb(reg, addr)	"sb " reg ", " addr

#define __put_kernel_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(_storeb, ptr); break;			\
	case 2: __put_data_asm(_storeh, ptr); break;			\
	case 4: __put_data_asm(_storew, ptr); break;			\
	case 8: __PUT_DW(_stored, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)
#endif

/*
 * Yuck.  We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(user_sb, ptr); break;			\
	case 2: __put_data_asm(user_sh, ptr); break;			\
	case 4: __put_data_asm(user_sw, ptr); break;			\
	case 8: __PUT_DW(user_sd, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	if (eva_kernel_access()) {					\
		__put_kernel_common(ptr, size);				\
	} else {							\
		__chk_user_ptr(ptr);					\
		__put_user_common(ptr, size);				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(__pu_addr, size))) {			\
		if (eva_kernel_access())				\
			__put_kernel_common(__pu_addr, size);		\
		else							\
			__put_user_common(__pu_addr, size);		\
	}								\
									\
	__pu_err;							\
})

#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
	"2:	"insn("%D2", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unknown(void);

/*
 * We're generating jumps to subroutines which may be outside the range
 * of the jump instruction.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

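/*
 * Added commentary (not part of the original header): jal encodes a
 * 26-bit instruction index and can only reach targets within the current
 * 256 MB region.  Modules are loaded into a separately mapped area, so
 * they materialize the target address in $1 and use jalr instead.
 */
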
#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

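/*
 * Added commentary (not part of the original header): the __invoke_copy_*
 * wrappers below pin their arguments to $4, $5 and $6 (the a0-a2 argument
 * registers) and jump straight into the copy routine, so the clobber
 * lists must name every caller-saved register those routines may touch.
 */
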
#define __invoke_copy_from(func, to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(func)						\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_to(func, to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(func)						\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#ifndef CONFIG_EVA
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_from_user_eva(void *__to, const void *__from,
				   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
				 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

/*
 * Source or destination address is in userland. We need to go through
 * the TLB.
 */
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from(__copy_from_user_eva, to, from, n)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to(__copy_to_user_eva, to, from, n)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from(__copy_in_user_eva, to, from, n)

#endif /* CONFIG_EVA */

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (eva_kernel_access())
		return __invoke_copy_to_kernel(to, from, n);
	else
		return __invoke_copy_to_user(to, from, n);
}

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (eva_kernel_access())
		return __invoke_copy_from_kernel(to, from, n);
	else
		return __invoke_copy_from_user(to, from, n);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

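/*
 * Illustrative sketch (not part of the original header): the generic
 * copy_from_user()/copy_to_user() in <linux/uaccess.h> are built on the
 * raw_copy_* helpers above.  A typical caller, with a hypothetical user
 * pointer "ubuf":
 *
 *	struct req kreq;
 *
 *	if (copy_from_user(&kreq, ubuf, sizeof(kreq)))
 *		return -EFAULT;
 */
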
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (eva_kernel_access())
		return ___invoke_copy_in_kernel(to, from, n);
	else
		return ___invoke_copy_in_user(to, from, n);
}

extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:	  Destination address, in user space.
 * @n:	  Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

#ifdef CONFIG_CPU_MICROMIPS
/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero_kernel)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: bzero_clobbers);
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: bzero_clobbers);
	}

	return res;
}

#define clear_user(addr, n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})

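/*
 * Illustrative usage (not part of the original header); "ubuf" and "len"
 * are hypothetical:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;		...some bytes were left unzeroed...
 */
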
extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

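/*
 * Illustrative usage (not part of the original header); "uname" is a
 * hypothetical user pointer:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;			...-EFAULT...
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;		...source was truncated...
 */
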
extern long __strnlen_kernel_asm(const char __user *s, long n);
extern long __strnlen_user_asm(const char __user *s, long n);

/*
 * strnlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 * @n:	 The maximum valid length.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

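/*
 * Illustrative usage (not part of the original header); "ustr" is a
 * hypothetical user pointer:
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (len == 0)
 *		return -EFAULT;			...access failed...
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;
 */
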
#endif /* _ASM_UACCESS_H */