/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	asm volatile("ptesync" ::: "memory");
}
#define flush_cache_vmap flush_cache_vmap
#endif /* CONFIG_PPC_BOOK3S_64 */
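/*
 * Illustrative sketch (not part of this header's API) of the window the
 * ptesync above closes: a caller maps pages and touches the mapping right
 * away. vmap() ends up calling flush_cache_vmap(), so without the ptesync
 * the first access below could take a spurious fault on Book3s.
 *
 *	void *va = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
 *	memset(va, 0, npages << PAGE_SHIFT);	// access right after the pte is set
 */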
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
27*4882a593Smuzhiyun extern void flush_dcache_page(struct page *page);
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun void flush_icache_range(unsigned long start, unsigned long stop);
30*4882a593Smuzhiyun #define flush_icache_range flush_icache_range
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
33*4882a593Smuzhiyun unsigned long addr, int len);
34*4882a593Smuzhiyun #define flush_icache_user_page flush_icache_user_page
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun void flush_dcache_icache_page(struct page *page);
37*4882a593Smuzhiyun void __flush_dcache_icache(void *page);
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun /**
40*4882a593Smuzhiyun * flush_dcache_range(): Write any modified data cache blocks out to memory and
41*4882a593Smuzhiyun * invalidate them. Does not invalidate the corresponding instruction cache
42*4882a593Smuzhiyun * blocks.
43*4882a593Smuzhiyun *
44*4882a593Smuzhiyun * @start: the start address
45*4882a593Smuzhiyun * @stop: the stop address (exclusive)
46*4882a593Smuzhiyun */
flush_dcache_range(unsigned long start,unsigned long stop)47*4882a593Smuzhiyun static inline void flush_dcache_range(unsigned long start, unsigned long stop)
48*4882a593Smuzhiyun {
49*4882a593Smuzhiyun unsigned long shift = l1_dcache_shift();
50*4882a593Smuzhiyun unsigned long bytes = l1_dcache_bytes();
51*4882a593Smuzhiyun void *addr = (void *)(start & ~(bytes - 1));
52*4882a593Smuzhiyun unsigned long size = stop - (unsigned long)addr + (bytes - 1);
53*4882a593Smuzhiyun unsigned long i;
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun if (IS_ENABLED(CONFIG_PPC64))
56*4882a593Smuzhiyun mb(); /* sync */
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun for (i = 0; i < size >> shift; i++, addr += bytes)
59*4882a593Smuzhiyun dcbf(addr);
60*4882a593Smuzhiyun mb(); /* sync */
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun }
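/*
 * Example usage (illustrative; "desc" is a made-up descriptor, not part of
 * this API): push freshly written data out of the D-cache and invalidate it
 * before handing the memory to a non-snooping device.
 *
 *	desc->len = len;
 *	flush_dcache_range((unsigned long)desc,
 *			   (unsigned long)desc + sizeof(*desc));
 */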

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbst(addr);
	mb();	/* sync */
}
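/*
 * Example usage (illustrative; "buf" and "len" are placeholders): a clean
 * is sufficient when the data only needs to reach memory and the cached
 * copy may stay valid, e.g. before a DMA engine reads a buffer the CPU
 * will not modify again.
 *
 *	clean_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 */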

/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbi(addr);
	mb();	/* sync */
}
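/*
 * Example usage (illustrative; "buf", "len" and process_rx_data() are
 * hypothetical): after a device has DMAed into a buffer, discard any stale
 * cached copies before the CPU reads it. Note that start/stop should be
 * cache-line aligned, since dcbi discards whole lines and an unaligned
 * range would clobber adjacent data.
 *
 *	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *	process_rx_data(buf, len);
 */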

#ifdef CONFIG_4xx
static inline void flush_instruction_cache(void)
{
	iccci((void *)KERNELBASE);
	isync();
}
#else
void flush_instruction_cache(void);
#endif

#include <asm-generic/cacheflush.h>

#endif /* _ASM_POWERPC_CACHEFLUSH_H */