xref: /OK3568_Linux_fs/kernel/arch/csky/abiv1/inc/abi/cacheflush.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/string.h>
#include <asm/cache.h>

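/*
 * abiv1 cores use a virtually indexed cache that can alias, so most of
 * the hooks in this header conservatively write back and invalidate the
 * whole cache instead of maintaining individual lines or pages.
 */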
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

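/*
 * No fine-grained maintenance is available for these hooks: flush the
 * whole dcache when an mm is torn down, and both caches for per-page
 * and fork-time flushes.
 */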
#define flush_cache_mm(mm)			dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
#define flush_cache_dup_mm(mm)			cache_wbinv_all()

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

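/*
 * Serialize flush_dcache_page() against page-cache updates by taking
 * the mapping's i_pages xarray lock with interrupts disabled.
 */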
#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

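/*
 * The vmap range helpers ignore addr/size: the whole dcache is written
 * back and invalidated around I/O to vmalloc/vmap areas.
 */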
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}

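/*
 * Before the kernel touches an anonymous page through its kernel
 * mapping, flush both caches so no stale user-space alias survives.
 */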
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		cache_wbinv_all();
}

/*
 * If current->mm != vma->vm_mm, cache_wbinv_range(start, end) would
 * operate on the wrong address space. Use cache_wbinv_all() for now;
 * this should be improved in the future.
 */
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end)		cache_wbinv_all()
#define flush_cache_vunmap(start, end)		cache_wbinv_all()

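/*
 * Icache maintenance works on ranges via a combined writeback and
 * invalidate of both caches; the per-page and deferred hooks expand
 * to nothing.
 */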
#define flush_icache_page(vma, page)		do {} while (0)
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)
#define flush_icache_mm_range(mm, start, end)	cache_wbinv_range(start, end)
#define flush_icache_deferred(mm)		do {} while (0)

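/*
 * Copying from a user page into a kernel buffer needs no maintenance
 * here; copying into a user page must flush afterwards so the user
 * mapping (and any code it contains) sees the new data.
 */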
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#endif /* __ABI_CSKY_CACHEFLUSH_H */