/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2011 Texas Instruments Incorporated
 * Author: Mark Salter <msalter@redhat.com>
 */
#ifndef _ASM_C6X_UACCESS_H
#define _ASM_C6X_UACCESS_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/string.h>

/*
 * C6X supports unaligned 32- and 64-bit loads and stores, so constant-size
 * 4- and 8-byte user copies can be done with single non-aligned load/store
 * instructions instead of a byte-by-byte memcpy().
 */
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	u32 tmp32;
	u64 tmp64;

	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1:
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 4:
			/* Non-aligned word load/store; "nop 4" fills the
			 * four load delay slots before the value is used. */
			asm volatile ("ldnw .d1t1 *%2,%0\n"
				      "nop 4\n"
				      "stnw .d1t1 %0,*%1\n"
				      : "=&a"(tmp32)
				      : "A"(to), "a"(from)
				      : "memory");
			return 0;
		case 8:
			/* Non-aligned double-word load/store. */
			asm volatile ("ldndw .d1t1 *%2,%0\n"
				      "nop 4\n"
				      "stndw .d1t1 %0,*%1\n"
				      : "=&a"(tmp64)
				      : "a"(to), "a"(from)
				      : "memory");
			return 0;
		default:
			break;
		}
	}

	/* Fall back to a plain memcpy() for non-constant or other sizes. */
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	u32 tmp32;
	u64 tmp64;

	if (__builtin_constant_p(n)) {
		/* Same constant-size fast paths as raw_copy_from_user(),
		 * copying in the opposite direction. */
		switch (n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 4:
			asm volatile ("ldnw .d1t1 *%2,%0\n"
				      "nop 4\n"
				      "stnw .d1t1 %0,*%1\n"
				      : "=&a"(tmp32)
				      : "a"(to), "a"(from)
				      : "memory");
			return 0;
		case 8:
			asm volatile ("ldndw .d1t1 *%2,%0\n"
				      "nop 4\n"
				      "stndw .d1t1 %0,*%1\n"
				      : "=&a"(tmp64)
				      : "a"(to), "a"(from)
				      : "memory");
			return 0;
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

/* When CONFIG_ACCESS_CHECK is enabled, override the generic access_ok()
 * range check with the C6X-specific _access_ok(). */
extern int _access_ok(unsigned long addr, unsigned long size);
#ifdef CONFIG_ACCESS_CHECK
#define __access_ok _access_ok
#endif

#include <asm-generic/uaccess.h>

#endif /* _ASM_C6X_UACCESS_H */
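
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): with INLINE_COPY_FROM_USER/INLINE_COPY_TO_USER defined above,
 * the generic uaccess code builds copy_from_user()/copy_to_user() as inline
 * wrappers around the raw helpers in this file, so a constant 8-byte copy
 * like the one below can reach the ldndw/stndw fast path.  The names
 * example_cmd and example_write are hypothetical.
 */
#if 0	/* example only, not built */
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct example_cmd {
	u32 opcode;
	u32 arg;
};

static ssize_t example_write(struct file *filp, const char __user *buf,
			     size_t len, loff_t *ppos)
{
	struct example_cmd cmd;

	if (len < sizeof(cmd))
		return -EINVAL;

	/* sizeof(cmd) == 8 is a compile-time constant, so
	 * raw_copy_from_user() can use its double-word fast path. */
	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	/* ... act on cmd.opcode / cmd.arg ... */

	return sizeof(cmd);
}
#endif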