// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/copypage-armv4mc.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us from
 * thrashing the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_RAW_SPINLOCK(minicache_lock);
/*
 * ARMv4 mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
static void mc_copy_user_page(void *from, void *to)
{
	int tmp;

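	/*
	 * Each pass through the loop stores 64 bytes to the
	 * destination: four stmias of r2, r3, ip and lr (16 bytes
	 * each), so %2 is primed with PAGE_SIZE / 64.  Each
	 * "invalidate D line" mcr discards the destination line
	 * (32 bytes on these CPUs) just before the two stmias that
	 * completely overwrite it, so the copy never read-allocates
	 * destination data into the main Dcache.  The final ldmiane
	 * only pre-loads the next 16 source bytes when another
	 * iteration remains, hence the ".syntax unified" marker.
	 */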
	asm volatile ("\
	.syntax unified\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmiane	%0!, {r2, r3, ip, lr}		@ 4\n\
	bne	1b				@ 1"
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r2", "r3", "ip", "lr");
}

void v4_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

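	/*
	 * COPYPAGE_MINICACHE is a single shared kernel mapping, so
	 * serialise all users of the window.
	 */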
	raw_spin_lock(&minicache_lock);

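	/*
	 * Map the source page at the minicache window using
	 * L_PTE_MT_MINICACHE, so the copy loop's reads are allocated
	 * in the mini data cache rather than the main Dcache (see the
	 * comment at the top of this file).
	 */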
	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * ARMv4 optimised clear_user_page
 */
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
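	/*
	 * r1 counts PAGE_SIZE / 64 iterations; r2, r3, ip and lr are
	 * zeroed once and then stored four times per iteration (64
	 * bytes of zeroes), with each destination cache line
	 * invalidated just before it is overwritten, mirroring the
	 * copy loop above.
	 */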
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

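/*
 * These hooks become the CPU's user-page operations: on multi-CPU
 * builds the processor support code points cpu_user at this structure
 * at boot, so copy_user_highpage()/clear_user_highpage() resolve to
 * the minicache-aware versions above on SA11x0/XScale-class CPUs.
 */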
struct cpu_user_fns v4_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
};