/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This file was copied from include/asm-generic/uaccess.h
 */

#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H

#include <asm/pgtable.h>		/* for TASK_SIZE */

/*
 * User space memory access functions
 */
#ifdef CONFIG_MMU
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
#include <asm/extable.h>
#include <asm/asm.h>

#define __enable_user_access()						\
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access()						\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
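
/*
 * Example (an illustrative sketch): each accessor below brackets its
 * faulting user load/store with this pair, so the SUM bit in sstatus
 * permits S-mode access to U-mode pages only for the duration of the
 * access:
 *
 *	__enable_user_access();
 *	...faulting access, covered by an __ex_table entry...
 *	__disable_user_access();
 */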

/**
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(addr, size) ({					\
	__chk_user_ptr(addr);						\
	likely(__access_ok((unsigned long __force)(addr), (size)));	\
})

/*
 * Ensure that the range [addr, addr+size) is within the process's
 * address space
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return size <= TASK_SIZE && addr <= TASK_SIZE - size;
}
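
/*
 * Example usage of access_ok() (an illustrative sketch; the function and
 * argument names are hypothetical).  Even after a successful check, the
 * access itself may still fault and return -EFAULT:
 *
 *	static int example_read_u32(u32 __user *uarg, u32 *out)
 *	{
 *		if (!access_ok(uarg, sizeof(*uarg)))
 *			return -EFAULT;
 *		return __get_user(*out, uarg);
 *	}
 */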

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

#define __LSW	0
#define __MSW	1

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 */

#define __get_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(x) __x;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %1, %3\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	li %1, 0\n"				\
		"	jump 2b, %2\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__x), "=r" (__tmp)		\
		: "m" (*(ptr)), "i" (-EFAULT));			\
	(x) = __x;						\
} while (0)

#ifdef CONFIG_64BIT
#define __get_user_8(x, ptr, err) \
	__get_user_asm("ld", x, ptr, err)
#else /* !CONFIG_64BIT */
#define __get_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	uintptr_t __tmp;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	lw %1, %4\n"				\
		"2:\n"						\
		"	lw %2, %5\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	li %1, 0\n"				\
		"	li %2, 0\n"				\
		"	jump 3b, %3\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__lo), "=r" (__hi),	\
			"=r" (__tmp)				\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]),	\
			"i" (-EFAULT));				\
	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif /* CONFIG_64BIT */

#define __get_user_nocheck(x, __gu_ptr, __gu_err)		\
do {								\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__get_user_asm("lb", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 2:							\
		__get_user_asm("lh", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 4:							\
		__get_user_asm("lw", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 8:							\
		__get_user_8((x), __gu_ptr, __gu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);	\
	long __gu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__get_user_nocheck(x, __gu_ptr, __gu_err);		\
	__disable_user_access();				\
								\
	__gu_err;						\
})
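
/*
 * Example usage of __get_user() (an illustrative sketch; names are
 * hypothetical).  The access_ok() check is the caller's job here; note
 * that on rv32 an 8-byte fetch is split into the two lw accesses of
 * __get_user_8() above:
 *
 *	static long example_fetch_timeout(u64 __user *uptr, u64 *out)
 *	{
 *		if (!access_ok(uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		return __get_user(*out, uptr);
 *	}
 */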

/**
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__p = (ptr);		\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__get_user((x), __p) :				\
		((x) = 0, -EFAULT);				\
})
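
/*
 * Example usage of get_user() (an illustrative sketch; names are
 * hypothetical).  No separate access_ok() call is needed:
 *
 *	static long example_get_flags(unsigned long __user *uptr)
 *	{
 *		unsigned long flags;
 *
 *		if (get_user(flags, uptr))
 *			return -EFAULT;
 *		return flags;
 *	}
 */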

#define __put_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(*(ptr)) __x = x;				\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %z3, %2\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	jump 2b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp), "=m" (*(ptr))	\
		: "rJ" (__x), "i" (-EFAULT));			\
} while (0)

#ifdef CONFIG_64BIT
#define __put_user_8(x, ptr, err) \
	__put_user_asm("sd", x, ptr, err)
#else /* !CONFIG_64BIT */
#define __put_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
	uintptr_t __tmp;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	sw %z4, %2\n"				\
		"2:\n"						\
		"	sw %z5, %3\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	jump 3b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp),			\
			"=m" (__ptr[__LSW]),			\
			"=m" (__ptr[__MSW])			\
		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));	\
} while (0)
#endif /* CONFIG_64BIT */

#define __put_user_nocheck(x, __gu_ptr, __pu_err)		\
do {								\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__put_user_asm("sb", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 2:							\
		__put_user_asm("sh", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 4:							\
		__put_user_asm("sw", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 8:							\
		__put_user_8((x), __gu_ptr, __pu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr. The value of @x is copied to avoid
 * re-ordering where @x is evaluated inside the block that enables user-space
 * access (thus bypassing user space protection if @x is a function).
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
	__typeof__(*__gu_ptr) __val = (x);			\
	long __pu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__put_user_nocheck(__val, __gu_ptr, __pu_err);		\
	__disable_user_access();				\
								\
	__pu_err;						\
})
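
/*
 * Example usage of __put_user() (an illustrative sketch; names are
 * hypothetical).  As with __get_user(), the caller checks the pointer:
 *
 *	static long example_store_result(int __user *uptr, int val)
 *	{
 *		if (!access_ok(uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		return __put_user(val, uptr);
 *	}
 */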

/**
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__put_user((x), __p) :				\
		-EFAULT;					\
})
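
/*
 * Example usage of put_user() (an illustrative sketch; names are
 * hypothetical).  The check and the store are combined, so the result
 * is simply zero or -EFAULT:
 *
 *	static long example_report_count(u64 __user *uptr, u64 count)
 *	{
 *		return put_user(count, uptr);
 *	}
 */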

unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __asm_copy_from_user(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __asm_copy_to_user(to, from, n);
}
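
/*
 * Example usage of the raw copy routines (an illustrative sketch; the
 * struct and function names are hypothetical).  Both routines return the
 * number of bytes NOT copied, so zero means success, and neither one
 * performs an access_ok() check itself:
 *
 *	static int example_pull_request(struct example_req *req,
 *					const void __user *ubuf)
 *	{
 *		if (!access_ok(ubuf, sizeof(*req)))
 *			return -EFAULT;
 *		if (raw_copy_from_user(req, ubuf, sizeof(*req)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */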

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strlen_user(const char __user *str);
extern long __must_check strnlen_user(const char __user *str, long n);

extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline
unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return access_ok(to, n) ?
		__clear_user(to, n) : n;
}
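
/*
 * Example usage of clear_user() (an illustrative sketch; names are
 * hypothetical).  Like the copy routines, it returns the number of bytes
 * left unzeroed:
 *
 *	static int example_zero_tail(void __user *ubuf, size_t len)
 *	{
 *		return clear_user(ubuf, len) ? -EFAULT : 0;
 *	}
 */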

/*
 * Atomic compare-and-exchange, but with a fixup for userspace faults. Faults
 * will set "err" to -EFAULT, while successful accesses return the previous
 * value.
 */
#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)	\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(*(ptr)) __old = (old);			\
	__typeof__(*(ptr)) __new = (new);			\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(err) __err = 0;				\
	register unsigned int __rc;				\
	__enable_user_access();					\
	switch (size) {						\
	case 4:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		"	lr.w" #scb " %[ret], %[ptr]\n"		\
		"	bne %[ret], %z[old], 1f\n"		\
		"	sc.w" #lrb " %[rc], %z[new], %[ptr]\n"	\
		"	bnez %[rc], 0b\n"			\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		"	li %[err], %[efault]\n"			\
		"	jump 1b, %[rc]\n"			\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
			: [ret] "=&r" (__ret),			\
			  [rc] "=&r" (__rc),			\
			  [ptr] "+A" (*__ptr),			\
			  [err] "=&r" (__err)			\
			: [old] "rJ" (__old),			\
			  [new] "rJ" (__new),			\
			  [efault] "i" (-EFAULT));		\
		break;						\
	case 8:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		"	lr.d" #scb " %[ret], %[ptr]\n"		\
		"	bne %[ret], %z[old], 1f\n"		\
		"	sc.d" #lrb " %[rc], %z[new], %[ptr]\n"	\
		"	bnez %[rc], 0b\n"			\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		"	li %[err], %[efault]\n"			\
		"	jump 1b, %[rc]\n"			\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
			: [ret] "=&r" (__ret),			\
			  [rc] "=&r" (__rc),			\
			  [ptr] "+A" (*__ptr),			\
			  [err] "=&r" (__err)			\
			: [old] "rJ" (__old),			\
			  [new] "rJ" (__new),			\
			  [efault] "i" (-EFAULT));		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__disable_user_access();				\
	(err) = __err;						\
	__ret;							\
})
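
/*
 * Example usage of __cmpxchg_user() (an illustrative sketch; names are
 * hypothetical).  The last two arguments are pasted onto the sc and lr
 * instructions respectively, so passing .rl and .aq below yields
 * sc.w.rl and lr.w.aq:
 *
 *	static int example_user_cmpxchg32(u32 __user *uaddr, u32 old,
 *					  u32 new, u32 *prev)
 *	{
 *		int err = 0;
 *
 *		*prev = __cmpxchg_user(uaddr, old, new, err, 4, .rl, .aq);
 *		return err;
 *	}
 */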

#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	long __kr_err;							\
									\
	__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err);	\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	long __kr_err;							\
									\
	__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err);	\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
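
/*
 * Example usage of the *_kernel_nofault() helpers (an illustrative
 * sketch; names are hypothetical).  These access kernel addresses that
 * may legitimately fault, branching to a local label instead of
 * returning an error code:
 *
 *	static int example_peek_kernel(unsigned long *addr,
 *				       unsigned long *val)
 *	{
 *		__get_kernel_nofault(val, addr, unsigned long, fault);
 *		return 0;
 *	fault:
 *		return -EFAULT;
 *	}
 */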

#else /* CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_UACCESS_H */