xref: /OK3568_Linux_fs/kernel/arch/nios2/include/asm/cacheflush.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (C) 2003 Microtronix Datacom Ltd.
 * Copyright (C) 2000-2002 Greg Ungerer <gerg@snapgear.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #ifndef _ASM_NIOS2_CACHEFLUSH_H
11*4882a593Smuzhiyun #define _ASM_NIOS2_CACHEFLUSH_H
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/mm_types.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun /*
16*4882a593Smuzhiyun  * This flag is used to indicate that the page pointed to by a pte is clean
17*4882a593Smuzhiyun  * and does not require cleaning before returning it to the user.
18*4882a593Smuzhiyun  */
19*4882a593Smuzhiyun #define PG_dcache_clean PG_arch_1
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun struct mm_struct;
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun extern void flush_cache_all(void);
24*4882a593Smuzhiyun extern void flush_cache_mm(struct mm_struct *mm);
25*4882a593Smuzhiyun extern void flush_cache_dup_mm(struct mm_struct *mm);
26*4882a593Smuzhiyun extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
27*4882a593Smuzhiyun 	unsigned long end);
28*4882a593Smuzhiyun extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
29*4882a593Smuzhiyun 	unsigned long pfn);
30*4882a593Smuzhiyun #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
31*4882a593Smuzhiyun extern void flush_dcache_page(struct page *page);
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun extern void flush_icache_range(unsigned long start, unsigned long end);
34*4882a593Smuzhiyun extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #define flush_cache_vmap(start, end)		flush_dcache_range(start, end)
37*4882a593Smuzhiyun #define flush_cache_vunmap(start, end)		flush_dcache_range(start, end)
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
40*4882a593Smuzhiyun 				unsigned long user_vaddr,
41*4882a593Smuzhiyun 				void *dst, void *src, int len);
42*4882a593Smuzhiyun extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
43*4882a593Smuzhiyun 				unsigned long user_vaddr,
44*4882a593Smuzhiyun 				void *dst, void *src, int len);
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun extern void flush_dcache_range(unsigned long start, unsigned long end);
47*4882a593Smuzhiyun extern void invalidate_dcache_range(unsigned long start, unsigned long end);
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun #define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
50*4882a593Smuzhiyun #define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun #endif /* _ASM_NIOS2_CACHEFLUSH_H */
53