xref: /OK3568_Linux_fs/kernel/arch/parisc/include/asm/uaccess.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>

#include <linux/bug.h>
#include <linux/string.h>

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS	((mm_segment_t){1})

#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)

#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
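
/*
 * Illustrative sketch (not in the original header): the classic
 * temporary-override pattern used with this era's get_fs()/set_fs()
 * API when a kernel buffer must be handed to code that expects a
 * __user pointer. Identifiers other than get_fs()/set_fs() are
 * hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);		// treat kernel pointers as "user"
 *	err = some_uaccess_helper();	// hypothetical helper using get_user()
 *	set_fs(old_fs);			// always restore the old limit
 */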

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

#define access_ok(uaddr, size)	\
	( (uaddr) == (uaddr) )

#define put_user __put_user
#define get_user __get_user
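
/*
 * Illustrative note (not in the original header): because access_ok()
 * merely compares its argument with itself, it always evaluates to
 * true here; a bad pointer is caught later by the page fault handler,
 * as the comment above explains. So in the usual caller pattern below
 * the check itself never fails. The identifiers uptr/val are
 * hypothetical.
 *
 *	if (!access_ok(uptr, sizeof(u32)))	// always passes on parisc
 *		return -EFAULT;
 *	if (get_user(val, uptr))		// may still fault -> -EFAULT
 *		return -EFAULT;
 */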

#if !defined(CONFIG_64BIT)
#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32-bit values are sufficient even on a 64-bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};
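
/*
 * Illustrative sketch (not in the original header): with relative
 * entries, the absolute addresses are recovered by adding each field's
 * offset to the address of the field itself, along the lines of the
 * generic extable helpers. A minimal sketch, assuming an entry
 * x (a struct exception_table_entry *):
 *
 *	unsigned long insn_addr  = (unsigned long)&x->insn  + x->insn;
 *	unsigned long fixup_addr = (unsigned long)&x->fixup + x->fixup;
 */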

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with the lowest bit set) for which the fault handler in fixup_exception()
 * will load -EFAULT into %r8 for a read or write fault, and zero the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
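
/*
 * Illustrative sketch (not in the original header): on the handler
 * side, fixup_exception() can recognize this kind of entry by testing
 * the low bit of the decoded fixup address, roughly as below. The
 * register name follows the comment above; the surrounding code is
 * hypothetical.
 *
 *	if (fixup_addr & 1) {
 *		regs->gr[8] = -EFAULT;	// %r8 carries the error code
 *		fixup_addr &= ~1UL;	// strip the marker bit
 *	}
 */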

/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() functions have
 * %%sr2 hard-coded to access the requested memory.
 */
#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )
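
/*
 * Illustrative walk-through (not in the original header) of the
 * conditional trick above, based on our reading of PA-RISC
 * nullification semantics:
 *
 *	or,=  %0,%r0,%r0	// compute get_fs() | 0; the ",=" completer
 *				// nullifies the next insn if the result is 0
 *	mfsp  %sr3,%0		// skipped for KERNEL_DS (0): %0 stays 0;
 *				// executed for USER_DS: %0 = %sr3
 *	mtsp  %0,%sr2		// sr2 = 0 (kernel) or sr2 = sr3 (user)
 */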

#define __get_user_internal(val, ptr)			\
({							\
	register long __gu_err __asm__ ("r8") = 0;	\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(val, "ldb", ptr); break;	\
	case 2: __get_user_asm(val, "ldh", ptr); break; \
	case 4: __get_user_asm(val, "ldw", ptr); break; \
	case 8: LDD_USER(val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

#define __get_user(val, ptr)				\
({							\
	load_sr2();					\
	__get_user_internal(val, ptr);			\
})
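
/*
 * Illustrative usage sketch (not in the original header): __get_user()
 * evaluates to 0 on success or -EFAULT on a fault, and the access
 * width is picked from sizeof(*ptr). The identifiers uptr/val/err are
 * hypothetical.
 *
 *	u32 val;
 *	long err = get_user(val, (u32 __user *)uptr);	// 4-byte "ldw" case
 *	if (err)
 *		return err;	// -EFAULT; val was zeroed by the fixup
 */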

#define __get_user_asm(val, ldx, ptr)			\
{							\
	register long __gu_val;				\
							\
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */
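
/*
 * Note (not in the original header): on 32-bit kernels a 64-bit
 * get_user() is split into two "ldw" word loads, so each load gets its
 * own exception table entry; a fault on either word branches to the
 * same fixup at label 9 with %r8 set to -EFAULT.
 */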


#define __put_user_internal(x, ptr)				\
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	switch (sizeof(*(ptr))) {				\
	case 1: __put_user_asm("stb", __x, ptr); break;		\
	case 2: __put_user_asm("sth", __x, ptr); break;		\
	case 4: __put_user_asm("stw", __x, ptr); break;		\
	case 8: STD_USER(__x, ptr); break;			\
	default: BUILD_BUG();					\
	}							\
								\
	__pu_err;						\
})

#define __put_user(x, ptr)					\
({								\
	load_sr2();						\
	__put_user_internal(x, ptr);				\
})
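
/*
 * Illustrative usage sketch (not in the original header), mirroring the
 * get_user() example above; uptr is hypothetical.
 *
 *	if (put_user(0xdeadbeefU, (u32 __user *)uptr))	// 4-byte "stw" case
 *		return -EFAULT;
 */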


/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * r8 is already listed as err.
 */

#define __put_user_asm(stx, x, ptr)			    \
	__asm__ __volatile__ (				    \
		"1: " stx " %2,0(%%sr2,%1)\n"		    \
		"9:\n"					    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
		: "=r"(__pu_err)			    \
		: "r"(ptr), "r"(x), "0"(__pu_err))


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {		    \
	__asm__ __volatile__ (				    \
		"1: stw %2,0(%%sr2,%1)\n"		    \
		"2: stw %R2,4(%%sr2,%1)\n"		    \
		"9:\n"					    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	    \
		: "=r"(__pu_err)			    \
		: "r"(ptr), "r"(__val), "0"(__pu_err));	    \
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
					    unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
					    unsigned long len);
unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
					    unsigned long len);
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
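
/*
 * Illustrative note (not in the original header): per the usual kernel
 * convention, copy_from_user()/copy_to_user() and the raw_copy_*()
 * routines return the number of bytes that could NOT be copied
 * (0 on complete success). A minimal sketch; kbuf/ubuf are
 * hypothetical.
 *
 *	char kbuf[64];
 *
 *	if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;	// some bytes were left uncopied
 */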

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */