// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	stac();
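	/*
	 * Zero in two passes: label 0 stores eight bytes at a time
	 * (size/8 iterations counted in %rcx), then label 1 clears the
	 * remaining size%8 bytes one at a time. If a store faults, the
	 * fixup at label 3 recomputes the bytes left as
	 * size1 + size8 * 8, so the return value reports how much was
	 * not cleared.
	 */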
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"	.align 16\n"
		"0:	movq $0,(%[dst])\n"
		"	addq   $8,%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   $0,(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE_UA(0b, 3b)
		_ASM_EXTABLE_UA(1b, 2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);

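/*
 * clear_user - zero a block of memory in user space
 * @to: destination address, in user space
 * @n:  number of bytes to zero
 *
 * Returns the number of bytes that could not be cleared; zero on
 * success. If the range fails access_ok(), nothing is cleared and
 * @n is returned.
 */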
unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

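	/*
	 * Align the start down to a cache-line boundary, then CLWB
	 * each line through the end of the range.
	 */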
	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
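		/*
		 * An unaligned head was copied with cached stores, so
		 * write back the first cache line and treat everything
		 * up to the next line boundary as already flushed.
		 */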
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

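		/*
		 * A tail that is not a whole number of qwords was also
		 * copied with cached stores; write back the cache line
		 * holding the final byte.
		 */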
		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}

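/*
 * __memcpy_flushcache - copy @size bytes using non-temporal stores,
 * falling back to cached copies (followed by an explicit CLWB write
 * back via clean_cache_range()) for the unaligned head and the
 * sub-4-byte tail, so the whole destination range is written back
 * to memory.
 */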
void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

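	/*
	 * The loops below use movnti, a non-temporal store that avoids
	 * the cache, so the ranges they write need no explicit write
	 * back.
	 */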
	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);

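/*
 * memcpy_page_flushcache - copy @len bytes at @offset within @page to
 * @to with flush-to-memory semantics; the page is mapped with
 * kmap_atomic() for the duration of the copy.
 */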
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif