// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

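/*
 * Heavyweight memory barrier: sync any outer (L2) cache, then invoke the
 * SoC-specific barrier hook if one has been registered via soc_mb.
 */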
void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

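/*
 * On an aliasing VIPT D-cache, clean and invalidate the cache lines a user
 * mapping of @pfn at @vaddr would use: map the page at a kernel alias with
 * the same cache colour, then clean+invalidate that range and drain the
 * write buffer (the two CP15 operations below).
 */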
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

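/*
 * As above, but for the instruction cache: map @pfn at a congruent kernel
 * alias and run flush_icache_range() over the affected bytes.
 */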
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

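/*
 * Flush the whole cache for @mm.  VIVT caches flush by the current VM view;
 * aliasing VIPT caches clean+invalidate the entire D-cache (CP15 c7, c14, 0)
 * and drain the write buffer (c7, c10, 4); non-aliasing VIPT caches need
 * nothing here.
 */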
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

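/*
 * Flags for __flush_ptrace_access(): whether the target page belongs to an
 * executable VMA, and whether the current CPU is in the target mm's cpumask
 * (so a VIVT cache on this CPU may hold that mapping's data).
 */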
#define FLAG_PA_IS_EXEC			1
#define FLAG_PA_CORE_IN_MM		2

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

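/*
 * Make a kernel-side write to a user page visible to the user mapping.
 * VIVT: flush the kernel range, but only if this CPU may hold the mm's data.
 * Aliasing VIPT: flush a congruent alias of the page plus the whole I-cache.
 * Non-aliasing VIPT: only executable mappings need work, on the I-cache
 * side; broadcast to the other CPUs when cache maintenance is not broadcast
 * in hardware.
 */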
static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

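/*
 * Wrapper used by copy_to_user_page() below: derive the FLAG_PA_* bits from
 * the VMA and the current CPU, then defer to __flush_ptrace_access().
 */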
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

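/*
 * Uprobes XOL (execute-out-of-line) slots are written by the kernel and then
 * executed in userspace, so treat the write as an executable access made on
 * this CPU.
 */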
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
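/*
 * A typical caller (one path, not the only one): ptrace() writes through
 * access_process_vm(), which kmaps the target page and calls
 * copy_to_user_page() so the new data or instructions become visible to the
 * traced task's mapping.
 */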
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

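/*
 * Write back the kernel mapping of @page (every sub-page of a compound
 * page), and for an aliasing VIPT cache also flush the page-cache colour
 * derived from page->index.
 */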
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), page_size(page));
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < compound_nr(page); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < compound_nr(page); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_SHIFT);
}

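/*
 * Flush every user mapping of @page in the current mm by walking the
 * address_space's i_mmap interval tree; needed for VIVT caches, where each
 * user alias must be handled individually.
 */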
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
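/*
 * Called when a new user PTE is installed (see set_pte_at()): if
 * flush_dcache_page() deferred the flush by leaving PG_dcache_clean clear,
 * do it now, and invalidate the I-cache for executable mappings.
 */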
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping_file(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &page->flags))
			clear_bit(PG_dcache_clean, &page->flags);
		return;
	}

	mapping = page_mapping_file(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapcount(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Ensure cache coherency for the kernel mapping of this page.  We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping_file(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}