// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

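/*
 * Make data written through a kernel mapping visible to the I-cache. With an
 * aliasing (VIPT) I-cache, clean the D-cache to the PoU and invalidate the
 * whole I-cache; otherwise a ranged clean and invalidate via
 * __flush_icache_range() is sufficient.
 */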
void sync_icache_aliases(void *kaddr, unsigned long len)
{
	unsigned long addr = (unsigned long)kaddr;

	if (icache_is_aliasing()) {
		__clean_dcache_area_pou(kaddr, len);
		__flush_icache_all();
	} else {
		/*
		 * Don't issue kick_all_cpus_sync() after I-cache invalidation
		 * for user mappings.
		 */
		__flush_icache_range(addr, addr + len);
	}
}

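/*
 * Called after the kernel has written into a user page (e.g. on the ptrace
 * access path); if the mapping is executable, make the new contents visible
 * to the I-cache.
 */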
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len)
{
	if (vma->vm_flags & VM_EXEC)
		sync_icache_aliases(kaddr, len);
}

/*
 * Copy user data from/to a page which is mapped into a different process's
 * address space. Really, we want to allow our "user space" model to handle
 * this.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
}

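/*
 * Called when a page is about to be mapped executable into user space. If
 * the page has been written to since the last sync (PG_dcache_clean is
 * clear), bring the I-cache back in line with the D-cache and mark the page
 * clean so the work is not repeated.
 */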
void __sync_icache_dcache(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_bit(PG_dcache_clean, &page->flags)) {
		sync_icache_aliases(page_address(page), page_size(page));
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);

/*
 * This function is called when a page has been modified by the kernel. Mark
 * it as dirty for later flushing when mapped in user space (if executable,
 * see __sync_icache_dcache).
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Additional functions defined in assembly.
 */
EXPORT_SYMBOL(__flush_icache_range);

#ifdef CONFIG_ARCH_HAS_PMEM_API
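/*
 * Write back (clean) any dirty cache lines covering a persistent-memory
 * range so that the data reaches the point of persistence.
 */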
void arch_wb_cache_pmem(void *addr, size_t size)
{
	/* Ensure order against any prior non-cacheable writes */
	dmb(osh);
	__clean_dcache_area_pop(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

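/*
 * Invalidate any cached copies of a persistent-memory range so that
 * subsequent reads observe the data in the backing medium.
 */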
void arch_invalidate_pmem(void *addr, size_t size)
{
	__inval_dcache_area(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif