// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-xsc3.S
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */

/*
 * XSC3 optimised copy_user_highpage
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
        int tmp;

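        /*
         * Copy the page 64 bytes per iteration.  The source is prefetched
         * ahead, and each destination cache line is invalidated before it
         * is overwritten so the stores do not hit (and keep) cached copies
         * of the destination page - see the write-allocate note above.
         * The final pass branches back to 2: so no prefetch is issued past
         * the end of the source page.
         */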
        asm volatile ("\
        pld     [%1, #0]                        \n\
        pld     [%1, #32]                       \n\
1:      pld     [%1, #64]                       \n\
        pld     [%1, #96]                       \n\
                                                \n\
2:      ldrd    r2, r3, [%1], #8                \n\
        ldrd    r4, r5, [%1], #8                \n\
        mcr     p15, 0, %0, c7, c6, 1           @ invalidate\n\
        strd    r2, r3, [%0], #8                \n\
        ldrd    r2, r3, [%1], #8                \n\
        strd    r4, r5, [%0], #8                \n\
        ldrd    r4, r5, [%1], #8                \n\
        strd    r2, r3, [%0], #8                \n\
        strd    r4, r5, [%0], #8                \n\
        ldrd    r2, r3, [%1], #8                \n\
        ldrd    r4, r5, [%1], #8                \n\
        mcr     p15, 0, %0, c7, c6, 1           @ invalidate\n\
        strd    r2, r3, [%0], #8                \n\
        ldrd    r2, r3, [%1], #8                \n\
        subs    %2, %2, #1                      \n\
        strd    r4, r5, [%0], #8                \n\
        ldrd    r4, r5, [%1], #8                \n\
        strd    r2, r3, [%0], #8                \n\
        strd    r4, r5, [%0], #8                \n\
        bgt     1b                              \n\
        beq     2b                              "
        : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
        : "2" (PAGE_SIZE / 64 - 1)
        : "r2", "r3", "r4", "r5");
}

void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *kto, *kfrom;

        kto = kmap_atomic(to);
        kfrom = kmap_atomic(from);
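        /*
         * Make sure any dirty data at the user mapping of the source page
         * is visible before we read it through the kernel mapping.
         */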
        flush_cache_page(vma, vaddr, page_to_pfn(from));
        xsc3_mc_copy_user_page(kto, kfrom);
        kunmap_atomic(kfrom);
        kunmap_atomic(kto);
}

/*
 * XScale optimised clear_user_page
 */
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *ptr, *kaddr = kmap_atomic(page);

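        /*
         * Zero the page 32 bytes (one cache line) per iteration, invalidating
         * each destination line before the stores for the same reason as in
         * the copy routine above.
         */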
        asm volatile ("\
        mov     r1, %2                          \n\
        mov     r2, #0                          \n\
        mov     r3, #0                          \n\
1:      mcr     p15, 0, %0, c7, c6, 1           @ invalidate line\n\
        strd    r2, r3, [%0], #8                \n\
        strd    r2, r3, [%0], #8                \n\
        strd    r2, r3, [%0], #8                \n\
        strd    r2, r3, [%0], #8                \n\
        subs    r1, r1, #1                      \n\
        bne     1b"
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 32)
        : "r1", "r2", "r3");
        kunmap_atomic(kaddr);
}

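/*
 * XSC3 user page operations; copied into the global cpu_user structure
 * during boot, hence __initdata.
 */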
struct cpu_user_fns xsc3_mc_user_fns __initdata = {
        .cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
        .cpu_copy_user_highpage  = xsc3_mc_copy_user_highpage,
};