/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic accessing functions to the user memory
 * space for the kernel, such as get_user(), put_user(), clear_user(),
 * raw_copy_{to,from}_user(), strncpy_from_user() and strnlen_user().
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/prefetch.h>
#include <asm/types.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should
 * be performed or not.  If get_fs() == USER_DS, checking is
 * performed; if get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are
 * grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0 })
#define USER_DS		((mm_segment_t) { 1 })

#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	(current->thread.current_ds = (val))

#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)

#define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \
	(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE - (size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size))

#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
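
/*
 * Illustrative sketch (not part of this header): the classic pattern
 * for temporarily lifting the address-space check so a kernel buffer
 * can be passed to a function that expects a __user pointer.
 * KERNEL_DS makes __kernel_ok true, so the __user_ok() range check
 * above is bypassed; the old value must always be restored.  The
 * callee name is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = some_fn_taking_user_ptr((void __user *)kernel_buf, len);
 *	set_fs(old_fs);
 */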

/*
 * These are the main single-value transfer routines.  They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly.  We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact.  Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))
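
/*
 * Illustrative sketch (not part of this header): typical use of the
 * checking variants.  Both macros return 0 on success and -EFAULT on
 * failure; the handler below is purely hypothetical.
 *
 *	static int example_handler(int __user *uarg)
 *	{
 *		int val;
 *
 *		if (get_user(val, uarg))
 *			return -EFAULT;
 *		val += 1;
 *		return put_user(val, uarg);
 *	}
 */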

/*
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
#define __put_user(x, ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
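
/*
 * Illustrative sketch (not part of this header): one access_ok()
 * call covering several __get_user() accesses to the same user
 * structure.  The struct and names are hypothetical.
 *
 *	struct example_req { u32 a; u32 b; };
 *
 *	static int example_read_req(struct example_req __user *ureq,
 *				    u32 *a, u32 *b)
 *	{
 *		if (!access_ok(ureq, sizeof(*ureq)))
 *			return -EFAULT;
 *		if (__get_user(*a, &ureq->a) || __get_user(*b, &ureq->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */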

extern long __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	if (access_ok(__pu_addr, size))					\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb);  break;	\
	case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break;	\
	case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break;	\
	case 8: {							\
			__typeof__(*ptr) __v64 = x;			\
			retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0; \
			break;						\
		}							\
	default: __put_user_bad();					\
	}								\
} while (0)

/*
 * Consider the case where a single user load/store would cause both
 * an unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable ptr to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses.  We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel.  Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels such as 0f, 1f and 2f, and operands such as
 * %[err], %[mem] and %[efault], must stay in sync.
 */

#define __check_align_1	""

#define __check_align_2				\
	"   _bbci.l %[mem] * 0, 1f	\n"	\
	"   movi    %[err], %[efault]	\n"	\
	"   _j      2f			\n"

#define __check_align_4				\
	"   _bbsi.l %[mem] * 0, 0f	\n"	\
	"   _bbci.l %[mem] * 0 + 1, 1f	\n"	\
	"0: movi    %[err], %[efault]	\n"	\
	"   _j      2f			\n"
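
/*
 * A note on the operand arithmetic above (an educated reading of the
 * macros, not authoritative): an "m" operand such as %[mem] prints as
 * "<reg>, <offset>" on xtensa, and the offset here is always 0, so the
 * trailing arithmetic turns the printed offset into the bit number
 * tested by _bbsi.l/_bbci.l.  For __check_align_4, for instance:
 *
 *	"   _bbsi.l %[mem] * 0, 0f"      ->  _bbsi.l a3, 0 * 0, 0f
 *	"   _bbci.l %[mem] * 0 + 1, 1f"  ->  _bbci.l a3, 0 * 0 + 1, 1f
 *
 * i.e. take the fault path if either of the two low address bits is
 * set, and fall through to label 1 (the access) only when both are
 * clear.
 */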

/*
 * The [mem] operand below tells gcc exactly which memory location the
 * instruction writes, so there are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __put_user_asm(x_, addr_, err_, align, insn, cb)	\
__asm__ __volatile__(						\
	__check_align_##align					\
	"1: "insn"  %[x], %[mem]	\n"			\
	"2:				\n"			\
	"   .section  .fixup,\"ax\"	\n"			\
	"   .align 4			\n"			\
	"   .literal_position		\n"			\
	"5:				\n"			\
	"   movi   %[tmp], 2b		\n"			\
	"   movi   %[err], %[efault]	\n"			\
	"   jx     %[tmp]		\n"			\
	"   .previous			\n"			\
	"   .section  __ex_table,\"a\"	\n"			\
	"   .long   1b, 5b		\n"			\
	"   .previous"						\
	:[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_))	\
	:[x] "r"(x_), [efault] "i"(-EFAULT))
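
/*
 * Illustrative sketch (not part of this header): roughly what
 * __put_user_asm(x, ptr, err, 4, "s32i", cb) assembles to, with
 * register names chosen arbitrarily.  A misaligned address sets
 * -EFAULT and skips the store; a store that faults at label 1 is
 * redirected by the __ex_table entry (.long 1b, 5b) to the fixup at
 * label 5, which sets -EFAULT and resumes at label 2.
 *
 *	    _bbsi.l a3, 0, 0f		# low address bit set?
 *	    _bbci.l a3, 1, 1f		# bit 1 clear -> aligned, do store
 *	0:  movi    a5, -14		# %[err] = -EFAULT
 *	    _j      2f
 *	1:  s32i    a4, a3, 0		# the user store; may fault
 *	2:  ...				# continue, a5 holds 0 or -EFAULT
 */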

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	__get_user_size((x), (ptr), (size), __gu_err);		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (access_ok(__gu_addr, size))					\
		__get_user_size((x), __gu_addr, (size), __gu_err);	\
	else								\
		(x) = (__typeof__(*(ptr)))0;				\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
	case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
	case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
	case 8: {							\
		u64 __x;						\
		if (unlikely(__copy_from_user(&__x, ptr, 8))) {		\
			retval = -EFAULT;				\
			(x) = (__typeof__(*(ptr)))0;			\
		} else {						\
			(x) = *(__force __typeof__(*(ptr)) *)&__x;	\
		}							\
		break;							\
	}								\
	default:							\
		(x) = (__typeof__(*(ptr)))0;				\
		__get_user_bad();					\
	}								\
} while (0)

/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x_, addr_, err_, align, insn, cb)	\
do {								\
	u32 __x = 0;						\
	__asm__ __volatile__(					\
		__check_align_##align				\
		"1: "insn"  %[x], %[mem]	\n"		\
		"2:				\n"		\
		"   .section  .fixup,\"ax\"	\n"		\
		"   .align 4			\n"		\
		"   .literal_position		\n"		\
		"5:				\n"		\
		"   movi   %[tmp], 2b		\n"		\
		"   movi   %[err], %[efault]	\n"		\
		"   jx     %[tmp]		\n"		\
		"   .previous			\n"		\
		"   .section  __ex_table,\"a\"	\n"		\
		"   .long   1b, 5b		\n"		\
		"   .previous"					\
		:[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
		:[mem] "m"(*(addr_)), [efault] "i"(-EFAULT));	\
	(x_) = (__force __typeof__(*(addr_)))__x;		\
} while (0)

/*
 * Copy to/from user space
 */

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	prefetchw(to);
	return __xtensa_copy_user(to, (__force const void *)from, n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	prefetch(from);
	return __xtensa_copy_user((__force void *)to, from, n);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
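
/*
 * Illustrative sketch (not part of this header): these two routines
 * are the arch backends behind the generic copy_{to,from}_user(),
 * which add the access_ok() check; with INLINE_COPY_{TO,FROM}_USER
 * defined, those generic wrappers are emitted inline.  They return
 * the number of bytes NOT copied, so the usual pattern is:
 *
 *	struct example_args kargs;	// names hypothetical
 *
 *	if (copy_from_user(&kargs, uargs, sizeof(kargs)))
 *		return -EFAULT;
 */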

/*
 * We need to return the number of bytes not cleared.  Our __memset()
 * returns NULL if a fault occurs while accessing user-space memory;
 * in that event, report that no memory was cleared (i.e., return
 * size).  Otherwise, return zero for success.
 */

static inline unsigned long
__xtensa_clear_user(void __user *addr, unsigned long size)
{
	if (!__memset((void __force *)addr, 0, size))
		return size;
	return 0;
}

static inline unsigned long
clear_user(void __user *addr, unsigned long size)
{
	if (access_ok(addr, size))
		return __xtensa_clear_user(addr, size);
	return size ? -EFAULT : 0;
}

#define __clear_user __xtensa_clear_user
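
/*
 * Illustrative sketch (not part of this header): zeroing a buffer in
 * user space.  As with the copy routines, a nonzero return means not
 * everything was cleared:
 *
 *	if (clear_user(ubuf, len))	// ubuf, len hypothetical
 *		return -EFAULT;
 */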

#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER

extern long __strncpy_user(char *dst, const char __user *src, long count);

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (access_ok(src, 1))
		return __strncpy_user(dst, src, count);
	return -EFAULT;
}
#else
long strncpy_from_user(char *dst, const char __user *src, long count);
#endif
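
/*
 * Illustrative sketch (not part of this header): copying a
 * NUL-terminated string from user space.  Either variant returns a
 * negative value on a fault, so callers typically check the sign
 * (names hypothetical):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 */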

/*
 * Return the size of a string (including the terminating NUL!)
 */
extern long __strnlen_user(const char __user *str, long len);

static inline long strnlen_user(const char __user *str, long len)
{
	if (!access_ok(str, 1))
		return 0;
	return __strnlen_user(str, len);
}
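
/*
 * Illustrative sketch (not part of this header): because this
 * strnlen_user() returns 0 on a bad address and otherwise counts the
 * terminating NUL, a caller might size a copy like so (names
 * hypothetical):
 *
 *	long n = strnlen_user(ustr, EXAMPLE_MAX);
 *
 *	if (n == 0)
 *		return -EFAULT;		// bad user pointer
 *	// n bytes, including the trailing NUL, suffice for a copy
 */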

#endif	/* _XTENSA_UACCESS_H */