xref: /OK3568_Linux_fs/kernel/arch/x86/include/asm/uaccess_64.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 *
	 * All three variants share one register convention: the destination,
	 * source and length are pinned to RDI, RSI and RDX by the "D", "S"
	 * and "d" constraints below, and the count of bytes left uncopied
	 * comes back in RAX ("a").
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

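/*
 * Illustrative sketch only, not part of the original header: if the boot-time
 * code patching done by alternative_call_2() above were written as ordinary
 * run-time C, it would behave roughly like the function below.
 * static_cpu_has() is the real kernel feature-test helper; the function name
 * copy_user_dispatch_sketch is hypothetical. Guarded by #if 0 so it is never
 * compiled.
 */
#if 0
static unsigned long
copy_user_dispatch_sketch(void *to, const void *from, unsigned len)
{
	if (static_cpu_has(X86_FEATURE_ERMS))
		return copy_user_enhanced_fast_string(to, from, len);
	if (static_cpu_has(X86_FEATURE_REP_GOOD))
		return copy_user_generic_string(to, from, len);
	return copy_user_generic_unrolled(to, from, len);
}
#endif
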
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}

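/*
 * Sketch, not part of the original header: the raw_copy_*() helpers above do
 * no access_ok() checking themselves. The generic wrappers in
 * include/linux/uaccess.h add that check, roughly as below (simplified; the
 * real wrappers also do object-size checks and instrumentation, and zero only
 * the uncopied tail of the buffer). Guarded by #if 0 so it is never compiled.
 */
#if 0
static unsigned long
copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (access_ok(from, n))
		res = raw_copy_from_user(to, from, n);
	if (res)
		memset(to + (n - res), 0, res);	/* don't leak stale kernel data */
	return res;				/* bytes left uncopied, 0 on success */
}
#endif
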
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
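
/*
 * Sketch, not part of the original header: the _nocache/_flushcache variants
 * exist for callers whose destination should not linger in the CPU cache,
 * such as write paths into persistent memory. A hypothetical caller might
 * look like this (write_user_data_to_pmem, pmem_buf and len are illustrative
 * names only). Guarded by #if 0 so it is never compiled.
 */
#if 0
static int write_user_data_to_pmem(void *pmem_buf, const void __user *src,
				   unsigned len)
{
	/*
	 * Copies the user data and flushes the cache lines covering
	 * pmem_buf, so the data reaches the persistence domain.
	 */
	if (__copy_from_user_flushcache(pmem_buf, src, len))
		return -EFAULT;
	return 0;
}
#endif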
#endif /* _ASM_X86_UACCESS_64_H */