// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v4wb.c
 *
 *  Copyright (C) 1995-1999 Russell King
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * ARMv4 optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
static void v4wb_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;

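	/*
	 * %0 advances through the destination (kto), %1 through the
	 * source (kfrom), and %2 counts 64-byte blocks (PAGE_SIZE / 64
	 * iterations).  Each 32-byte destination cache line is
	 * invalidated just before it is overwritten, and the final
	 * conditional "ldmiane" starts loading the next block while
	 * the loop branch is still pending.
	 */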
	asm volatile ("\
	.syntax unified\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4+1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmiane	%1!, {r3, r4, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, %1, c7, c10, 4		@ 1   drain WB"
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r3", "r4", "ip", "lr");
}

void v4wb_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
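	/*
	 * Flush the user-space alias of the source page first so that
	 * the kernel-side mapping used by the copy below observes its
	 * most recent data; the VIVT cache may still hold dirty lines
	 * under the user address.
	 */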
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	v4wb_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}

/*
 * ARMv4 optimised clear_user_page
 *
 * Same story as above.
 */
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
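	/*
	 * r1 counts 64-byte blocks; r2, r3, ip and lr hold the zero
	 * pattern.  As in the copy routine, each 32-byte cache line is
	 * invalidated before being overwritten.  r1 has reached zero
	 * when the loop falls through, so it is reused as the
	 * should-be-zero operand of the final drain-write-buffer mcr.
	 */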
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

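/*
 * Referenced from the processor's proc_info record; the function
 * pointers are copied into the generic cpu_user structure during
 * early boot, which is why this table can live in __initdata.
 */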
struct cpu_user_fns v4wb_user_fns __initdata = {
	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
	.cpu_copy_user_highpage	= v4wb_copy_user_highpage,
};