/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation

#ifndef _ASMANDES_UACCESS_H
#define _ASMANDES_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/types.h>

#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
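/*
 * Editorial note: __asmeq() is a build-time register-name check.  It
 * expands to an assembler .ifnc/.err pair, so assembly fails if the
 * register name the C code hard-codes for an operand (x) differs from
 * the one the operand constraint actually bound (y).
 */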

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
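
/*
 * Illustrative sketch (not part of this header): a fixup_exception()
 * implementation conventionally searches the sorted exception table
 * for the faulting instruction and, on a hit, resumes at the fixup
 * address.  Assuming the generic search_exception_tables() helper and
 * the nds32 ipc (interrupted PC) field of struct pt_regs, its core
 * looks like:
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_exception_tables(instruction_pointer(regs));
 *	if (fixup) {
 *		regs->ipc = fixup->fixup;
 *		return 1;
 *	}
 *	return 0;
 */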

#define KERNEL_DS	((mm_segment_t) { ~0UL })
#define USER_DS		((mm_segment_t) { TASK_SIZE - 1 })

#define get_fs()	(current_thread_info()->addr_limit)
#define user_addr_max	get_fs

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}

#define uaccess_kernel()	(get_fs() == KERNEL_DS)
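
/*
 * Usage sketch (illustrative; this is the historic get_fs()/set_fs()
 * pattern, not an API defined here): code that needs the uaccess
 * helpers to accept kernel addresses widens the limit temporarily and
 * must always restore it:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... perform user accesses on a kernel buffer ...
 *	set_fs(old_fs);
 */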

#define __range_ok(addr, size) ((size) <= get_fs() && (addr) <= (get_fs() - (size)))

#define access_ok(addr, size)	\
	__range_ok((unsigned long)(addr), (unsigned long)(size))
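
/*
 * Why two comparisons (editorial note): a naive "addr + size <= limit"
 * test can wrap and wrongly pass.  With illustrative 32-bit values and
 * a limit of 0x7fffffff:
 *
 *	addr = 0xfffffff0, size = 0x20
 *	addr + size          = 0x00000010                (wraps, would pass)
 *	addr <= limit - size = 0xfffffff0 <= 0x7fffffdf  (correctly fails)
 */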
/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */

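/*
 * Usage sketch (illustrative, hypothetical variables): the checking
 * forms validate the pointer themselves; the "__" forms rely on a
 * prior access_ok() call by the caller:
 *
 *	int val;
 *	int __user *uptr;
 *
 *	if (get_user(val, uptr))		(checks access itself)
 *		return -EFAULT;
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__put_user(val + 1, uptr))		(assumes the check above)
 *		return -EFAULT;
 */
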
#define get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_check((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_check((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	const __typeof__(*(ptr)) __user *__p = (ptr);			\
	__get_user_err((x), __p, (__gu_err));				\
	__gu_err;							\
})

#define __get_user_check(x, ptr, err)					\
({									\
	const __typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__get_user_err((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
})

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("lbi", __gu_val, (ptr), (err));		\
		break;							\
	case 2:								\
		__get_user_asm("lhi", __gu_val, (ptr), (err));		\
		break;							\
	case 4:								\
		__get_user_asm("lwi", __gu_val, (ptr), (err));		\
		break;							\
	case 8:								\
		__get_user_asm_dword(__gu_val, (ptr), (err));		\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user_asm(inst, x, addr, err)				\
	__asm__ __volatile__ (						\
		"1:	"inst"	%1,[%2]\n"				\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"3:	move	%0, %3\n"				\
		"	move	%1, #0\n"				\
		"	b	2b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 3b\n"				\
		"	.previous"					\
		: "+r" (err), "=&r" (x)					\
		: "r" (addr), "i" (-EFAULT)				\
		: "cc")

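/*
 * Note on the operand selectors below (assuming the usual GCC %L/%H
 * inline-asm operand modifiers, which name the low and high registers
 * of a 64-bit register pair): the 8-byte accessors move each half with
 * a separate 32-bit instruction, so which modifier addresses the
 * lower-addressed word depends on CPU endianness; that is what the
 * __NDS32_EB__ conditionals here and before __put_user_asm_dword()
 * select.
 */
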
#ifdef __NDS32_EB__
#define __gu_reg_oper0 "%H1"
#define __gu_reg_oper1 "%L1"
#else
#define __gu_reg_oper0 "%L1"
#define __gu_reg_oper1 "%H1"
#endif

#define __get_user_asm_dword(x, addr, err)				\
	__asm__ __volatile__ (						\
		"\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"			\
		"\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"			\
		"3:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"4:	move	%0, %3\n"				\
		"	move	" __gu_reg_oper0 ", #0\n"		\
		"	move	" __gu_reg_oper1 ", #0\n"		\
		"	b	3b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 4b\n"				\
		"	.long	2b, 4b\n"				\
		"	.previous"					\
		: "+r"(err), "=&r"(x)					\
		: "r"(addr), "i"(-EFAULT)				\
		: "cc")

#define put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_check((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	__put_user_err((x), __p, __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__put_user_err((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
})

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("sbi", __pu_val, (ptr), (err));		\
		break;							\
	case 2:								\
		__put_user_asm("shi", __pu_val, (ptr), (err));		\
		break;							\
	case 4:								\
		__put_user_asm("swi", __pu_val, (ptr), (err));		\
		break;							\
	case 8:								\
		__put_user_asm_dword(__pu_val, (ptr), (err));		\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
} while (0)

#define __put_user_asm(inst, x, addr, err)				\
	__asm__ __volatile__ (						\
		"1:	"inst"	%1,[%2]\n"				\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"3:	move	%0, %3\n"				\
		"	b	2b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 3b\n"				\
		"	.previous"					\
		: "+r" (err)						\
		: "r" (x), "r" (addr), "i" (-EFAULT)			\
		: "cc")

#ifdef __NDS32_EB__
#define __pu_reg_oper0 "%H2"
#define __pu_reg_oper1 "%L2"
#else
#define __pu_reg_oper0 "%L2"
#define __pu_reg_oper1 "%H2"
#endif

#define __put_user_asm_dword(x, addr, err)				\
	__asm__ __volatile__ (						\
		"\n1:\tswi " __pu_reg_oper0 ",[%1]\n"			\
		"\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"			\
		"3:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"4:	move	%0, %3\n"				\
		"	b	3b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 4b\n"				\
		"	.long	2b, 4b\n"				\
		"	.previous"					\
		: "+r"(err)						\
		: "r"(addr), "r"(x), "i"(-EFAULT)			\
		: "cc")

extern unsigned long __arch_clear_user(void __user *addr, unsigned long n);
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
extern unsigned long __arch_copy_from_user(void *to, const void __user *from,
					   unsigned long n);
extern unsigned long __arch_copy_to_user(void __user *to, const void *from,
					 unsigned long n);

#define raw_copy_from_user __arch_copy_from_user
#define raw_copy_to_user __arch_copy_to_user

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

static inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __arch_clear_user(to, n);
	return n;
}

static inline unsigned long __clear_user(void __user *to, unsigned long n)
{
	return __arch_clear_user(to, n);
}
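
/*
 * Usage sketch (illustrative, hypothetical buffers): the generic
 * copy_from_user()/copy_to_user() wrappers built on the raw_copy_*
 * routines above, like clear_user(), return the number of bytes that
 * could NOT be transferred, so zero means success:
 *
 *	char kbuf[64];
 *
 *	if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *	if (clear_user(ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */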

#endif /* _ASMANDES_UACCESS_H */