/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: May 2011: for a non-aliasing VIPT D-cache the following can be NOPs
 *  -flush_cache_dup_mm (fork)
 *  -likewise for flush_cache_mm (exit/execve)
 *  -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
 *
 * vineetg: April 2008
 *  -Added a critical cacheline flush to copy_to_user_page() which
 *   was causing gdbserver to not set up breakpoints consistently
 */

#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/shmparam.h>

/*
 * Semantically we need this because the icache doesn't snoop the dcache/DMA.
 * However an ARC cache flush requires paddr as well as vaddr, and the latter
 * is not available in the flush_icache_page() API. So we no-op it here and
 * do the equivalent work in update_mmu_cache().
 */
#define flush_icache_page(vma, page)
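
/*
 * A hedged sketch (not part of this header) of the deferred work: in
 * update_mmu_cache() both paddr and vaddr are known, so the I$/D$ sync
 * can happen there. The names below are illustrative assumptions, not
 * the exact arch/arc/mm implementation:
 *
 *	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
 *	unsigned long vaddr = addr & PAGE_MASK;
 *
 *	if (vma->vm_flags & VM_EXEC)
 *		__sync_icache_dcache(paddr, vaddr, PAGE_SIZE);
 */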

void flush_cache_all(void);

void flush_icache_range(unsigned long kstart, unsigned long kend);
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

void flush_dcache_page(struct page *page);

void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
void dma_cache_inv(phys_addr_t start, unsigned long sz);
void dma_cache_wback(phys_addr_t start, unsigned long sz);
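
/*
 * A hedged usage sketch: the conventional pairing of these helpers with
 * DMA direction (the actual call sites live in the arch DMA-mapping code):
 *
 *	dma_cache_wback(paddr, sz);	   CPU wrote buffer, device will read
 *	dma_cache_inv(paddr, sz);	   device wrote buffer, CPU will read
 *	dma_cache_wback_inv(paddr, sz);	   bidirectional transfer
 */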

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/* TBD: optimize this */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define flush_cache_dup_mm(mm)			/* called on fork (VIVT only) */

#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING

#define flush_cache_mm(mm)			/* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn)	/* PF handling/COW-break */

#else	/* VIPT aliasing dcache */

/* To clear out stale userspace mappings */
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct *vma,
	unsigned long user_addr, unsigned long page);

/*
 * To make sure that the userspace mapping is flushed to memory before
 * get_user_pages() uses a kernel mapping to access the page
 */
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long u_vaddr);

#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */

/*
 * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default.
 * This works around some PIO based drivers which don't call flush_dcache_page()
 * to record that they dirtied the dcache.
 */
#define PG_dc_clean	PG_arch_1
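
/*
 * Typical usage pattern (a sketch, assuming the standard page-flag
 * helpers; the exact sites are in arch/arc/mm/cache.c): flush paths test
 * the bit and only do the expensive op for a dirty page, e.g.:
 *
 *	if (!test_bit(PG_dc_clean, &page->flags)) {
 *		__flush_dcache_page(paddr, vaddr);
 *		set_bit(PG_dc_clean, &page->flags);
 *	}
 */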

#define CACHE_COLORS_NUM	4
#define CACHE_COLORS_MSK	(CACHE_COLORS_NUM - 1)
#define CACHE_COLOR(addr)	(((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
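
/*
 * Worked example (assuming the default ARC 8K pages, PAGE_SHIFT == 13):
 * with 4 colors the color is bits [14:13] of the address, so
 *
 *	CACHE_COLOR(0x0000) == 0
 *	CACHE_COLOR(0x2000) == 1	(0x2000 >> 13 == 1)
 *	CACHE_COLOR(0xa000) == 1	((0xa000 >> 13) & 3 == 5 & 3 == 1)
 *
 * i.e. two virtual mappings of one page index the same cache set only
 * when their colors match.
 */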

/*
 * Simple wrapper over config option
 * Bootup code ensures that hardware matches kernel configuration
 */
static inline int cache_is_vipt_aliasing(void)
{
	return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
}

/*
 * checks if two addresses (after page aligning) index into same cache set
 */
#define addr_not_cache_congruent(addr1, addr2)				\
({									\
	cache_is_vipt_aliasing() ?					\
		(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0;		\
})
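
/*
 * Illustrative use (a sketch modeled on the copy_user_highpage() style
 * of check; the names here are assumptions): only do the dcache flush
 * when the kernel and user views of the page can sit in different sets:
 *
 *	if (addr_not_cache_congruent(kvaddr, u_vaddr))
 *		__flush_dcache_page(paddr, kvaddr);
 */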

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
do {									\
	memcpy(dst, src, len);						\
	if (vma->vm_flags & VM_EXEC)					\
		__sync_icache_dcache((unsigned long)(dst), vaddr, len);	\
} while (0)
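
/*
 * This is the "critical cacheline flush" from the changelog above: when
 * ptrace (e.g. gdbserver) patches a breakpoint into an executable page,
 * the icache must be resynced with the freshly written dcache line or
 * the inferior keeps executing the stale instruction. A hedged sketch of
 * the generic caller's shape (cf. access_process_vm(); names here are
 * illustrative):
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 */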

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	memcpy(dst, src, len)

#endif	/* _ASM_CACHEFLUSH_H */