/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * User memory access support for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #ifndef _ASM_UACCESS_H
9*4882a593Smuzhiyun #define _ASM_UACCESS_H
10*4882a593Smuzhiyun /*
11*4882a593Smuzhiyun * User space memory access functions
12*4882a593Smuzhiyun */
13*4882a593Smuzhiyun #include <asm/sections.h>
14*4882a593Smuzhiyun
/*
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block *may* be valid, false (zero)
 * if it is definitely invalid.
 *
 * User address space in Hexagon, like x86, goes to 0xbfffffff, so the
 * simple MSB-based tests used by MIPS won't work. Some further
 * optimization is probably possible here, but for now, keep it
 * reasonably simple and not *too* slow. After all, we've got the
 * MMU for backup.
 */
34*4882a593Smuzhiyun
/*
 * __access_ok() - test whether [addr, addr + size) lies within the
 * current segment limit.
 * @addr: start of the user block (pointer or integer expression)
 * @size: length of the block in bytes
 *
 * True when the segment is KERNEL_DS (no limit applies), or when the
 * block starts below the limit and its length fits in the remaining
 * space; the subtraction form avoids overflow of addr + size.
 *
 * Arguments are parenthesized so that an expression argument such as
 * `p + 1` is cast as a whole — (unsigned long)(p + 1) — rather than
 * having the cast bind only to `p`.
 */
#define __access_ok(addr, size) \
	((get_fs().seg == KERNEL_DS.seg) || \
	(((unsigned long)(addr) < get_fs().seg) && \
	  (unsigned long)(size) < (get_fs().seg - (unsigned long)(addr))))
39*4882a593Smuzhiyun
/*
 * When a kernel-mode page fault is taken, the faulting instruction
 * address is checked against a table of exception_table_entries.
 * Each entry is a tuple of the address of an instruction that may
 * be authorized to fault, and the address at which execution should
 * be resumed instead of the faulting instruction, so as to effect
 * a workaround.
 */
48*4882a593Smuzhiyun
/* Assembly somewhat optimized copy routines */
/* Both return the number of bytes NOT copied (0 on full success). */
unsigned long raw_copy_from_user(void *to, const void __user *from,
				 unsigned long n);
unsigned long raw_copy_to_user(void __user *to, const void *from,
			       unsigned long n);
/* Have asm-generic/uaccess.h emit inline copy_{from,to}_user wrappers. */
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

/* Clear @count bytes of user memory; returns the number NOT cleared. */
__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
#define __clear_user(a, s) __clear_user_hexagon((a), (s))

/* Route the generic __strncpy_from_user hook to our helper below. */
#define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n)

/* get around the ifndef in asm-generic/uaccess.h */
#define __strnlen_user __strnlen_user

/*
 * Returns 0 on a faulting access; otherwise the string length
 * including the terminating NUL (see the res - 1 return in
 * hexagon_strncpy_from_user below).
 */
extern long __strnlen_user(const char __user *src, long n);

/* Forward declaration: definition follows the asm-generic include. */
static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
					     long n);

#include <asm-generic/uaccess.h>
71*4882a593Smuzhiyun
/* Todo: an actual accelerated version of this. */
/*
 * Copy a NUL-terminated string from user space into @dst, copying at
 * most @n bytes.  Returns the length of the copied string (excluding
 * the NUL), @n if the source string was truncated to fit, or -EFAULT
 * on a faulting user access.
 */
static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
					     long n)
{
	long res = __strnlen_user(src, n);

	/* __strnlen_user() returns 0 when the user pointer faults. */
	if (unlikely(!res))
		return -EFAULT;

	if (res > n) {
		/*
		 * Source is longer than the buffer: copy exactly n bytes
		 * (result is truncated and NOT NUL-terminated) and report n.
		 */
		long left = raw_copy_from_user(dst, src, n);
		/* On a partial copy, zero the tail that was not written. */
		if (unlikely(left))
			memset(dst + (n - left), 0, left);
		return n;
	} else {
		/*
		 * res includes the terminating NUL, so copying res bytes
		 * terminates dst; the string length is res - 1.
		 */
		long left = raw_copy_from_user(dst, src, res);
		if (unlikely(left))
			memset(dst + (res - left), 0, left);
		return res-1;
	}
}
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun #endif
95