/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define get_fs()  (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)

/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size-(size != 0)" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr, size) ({				\
	unsigned long __ao_a = (addr), __ao_b = (size);		\
	unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b;	\
	(get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; })
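/*
 * For illustration: USER_DS.seg is -0x40000000000UL, a mask with bits
 * 42..63 set, so a range passes only when its start, its size, and its
 * last byte all fit below 2^42; with KERNEL_DS.seg == 0 everything
 * passes.  The "- !!__ao_b" term keeps a zero-length range from
 * computing its end as addr - 1.
 */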

#define access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__access_ok(((unsigned long)(addr)), (size));	\
})
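/*
 * Illustrative use only ("uaddr" and "struct foo" are hypothetical):
 * validate the range once, then use the unchecked accessors below.
 *
 *	struct foo __user *uaddr = ...;
 *	if (!access_ok(uaddr, sizeof(*uaddr)))
 *		return -EFAULT;
 */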

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
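/*
 * Sketch of typical use ("uptr" is a hypothetical int __user *); both
 * macros evaluate to 0 on success and -EFAULT on a bad address:
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */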

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */
#define EXC(label,cont,res,err)				\
	".section __ex_table,\"a\"\n"			\
	"	.long "#label"-.\n"			\
	"	lda "#res","#cont"-"#label"("#err")\n"	\
	".previous\n"
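/*
 * Roughly, EXC(1b,2b,%0,%1) emits one table pair: the ".long" locates
 * the faulting instruction at 1b, and the "lda" is never executed; its
 * instruction fields merely encode the register to zero on a fault
 * (%0), the register that receives the error code (%1), and the
 * displacement 2b-1b at which execution resumes after the fixup.
 */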

extern void __get_user_unknown(void);

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err = 0;					\
	unsigned long __gu_val;					\
	__chk_user_ptr(ptr);					\
	switch (size) {						\
	case 1: __get_user_8(ptr); break;			\
	case 2: __get_user_16(ptr); break;			\
	case 4: __get_user_32(ptr); break;			\
	case 8: __get_user_64(ptr); break;			\
	default: __get_user_unknown(); break;			\
	}							\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	if (__access_ok((unsigned long)__gu_addr, size)) {	\
		__gu_err = 0;					\
		switch (size) {					\
		case 1: __get_user_8(__gu_addr); break;		\
		case 2: __get_user_16(__gu_addr); break;	\
		case 4: __get_user_32(__gu_addr); break;	\
		case 8: __get_user_64(__gu_addr); break;	\
		default: __get_user_unknown(); break;		\
		}						\
	}							\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
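/*
 * Casting the address to a 100-longword struct is the usual idiom for
 * telling gcc that an "m" operand may cover a region of memory rather
 * than a single word, so no stale cached values survive the access.
 */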

#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */
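/*
 * Roughly: the two ldq_u loads fetch the aligned quadword(s) that can
 * hold the halfword, extwl/extwh shift its low and high bytes into
 * place, and the "or" merges them.  Unless the halfword straddles a
 * quadword boundary, both loads hit the same quadword and extwh
 * contributes zero.
 */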

#define __get_user_16(addr)						\
{									\
	long __gu_tmp;							\
	__asm__("1: ldq_u %0,0(%3)\n"					\
	"2: ldq_u %1,1(%3)\n"						\
	"	extwl %0,%3,%0\n"					\
	"	extwh %1,%3,%1\n"					\
	"	or %0,%1,%0\n"						\
	"3:\n"								\
	EXC(1b,3b,%0,%2)						\
	EXC(2b,3b,%0,%2)						\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));				\
}

#define __get_user_8(addr)					\
	__asm__("1: ldq_u %0,0(%2)\n"				\
	"	extbl %0,%2,%0\n"				\
	"2:\n"							\
	EXC(1b,2b,%0,%1)					\
		: "=&r"(__gu_val), "=r"(__gu_err)		\
		: "r"(addr), "1"(__gu_err))
#endif

extern void __put_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err = 0;					\
	__chk_user_ptr(ptr);					\
	switch (size) {						\
	case 1: __put_user_8(x, ptr); break;			\
	case 2: __put_user_16(x, ptr); break;			\
	case 4: __put_user_32(x, ptr); break;			\
	case 8: __put_user_64(x, ptr); break;			\
	default: __put_user_unknown(); break;			\
	}							\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (__access_ok((unsigned long)__pu_addr, size)) {	\
		__pu_err = 0;					\
		switch (size) {					\
		case 1: __put_user_8(x, __pu_addr); break;	\
		case 2: __put_user_16(x, __pu_addr); break;	\
		case 4: __put_user_32(x, __pu_addr); break;	\
		case 8: __put_user_64(x, __pu_addr); break;	\
		default: __put_user_unknown(); break;		\
		}						\
	}							\
	__pu_err;						\
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
#define __put_user_64(x, addr)					\
__asm__ __volatile__("1: stq %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr)					\
__asm__ __volatile__("1: stl %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x, addr)					\
__asm__ __volatile__("1: stw %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr)					\
__asm__ __volatile__("1: stb %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */
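/*
 * Roughly the mirror image of the unaligned load: fetch the
 * quadword(s), clear the target bytes with mskwl/mskwh, merge in the
 * new bytes built by inswl/inswh, and write back with stq_u.  The
 * quadword at the higher address is loaded and stored first,
 * presumably so that a fault on it leaves the lower one unmodified.
 */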

#define __put_user_16(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1: ldq_u %2,1(%5)\n"					\
	"2: ldq_u %1,0(%5)\n"					\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3: stq_u %2,1(%5)\n"					\
	"4: stq_u %1,0(%5)\n"					\
	"5:\n"							\
	EXC(1b,5b,$31,%0)					\
	EXC(2b,5b,$31,%0)					\
	EXC(3b,5b,$31,%0)					\
	EXC(4b,5b,$31,%0)					\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1: ldq_u %1,0(%4)\n"					\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2: stq_u %1,0(%4)\n"					\
	"3:\n"							\
	EXC(1b,3b,$31,%0)					\
	EXC(2b,3b,$31,%0)					\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif


/*
 * Complex access routines
 */

extern long __copy_user(void *to, const void *from, long len);
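/*
 * Per the generic contract, both wrappers below return the number of
 * bytes left uncopied (0 on success); range checking is the job of
 * the generic copy_{from,to}_user callers.
 */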

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
	return __copy_user(to, (__force const void *)from, len);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
	return __copy_user((__force void *)to, from, len);
}

extern long __clear_user(void __user *to, long len);
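/*
 * Like the copy routines, clear_user returns the number of bytes that
 * could not be cleared (0 on success).
 */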

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len))
		len = __clear_user(to, len);
	return len;
}

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : TASK_SIZE)

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#include <asm/extable.h>

#endif /* __ALPHA_UACCESS_H */