/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>
#include <asm/cpu-features.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (wback & invalidates) a page for dcache
 *
 * MIPS specific flush operations:
 *
 *  - flush_icache_all() flushes the entire instruction cache
 *  - flush_data_cache_page() flushes a page from the data cache
 *  - __flush_icache_user_range(start, end) flushes a range of user instructions
 */
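
/*
 * A minimal usage sketch, not part of this header: after the kernel
 * writes instructions to memory (module loading, kprobes and the like),
 * the freshly written range must be written back from the D-cache and
 * invalidated in the I-cache before it may be executed.  The names
 * below (patch_text_example, dst, insns) are illustrative only.
 */
#if 0	/* example only, never compiled */
static void patch_text_example(void *dst, const void *insns, size_t size)
{
	memcpy(dst, insns, size);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + size);
}
#endif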

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty			PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)
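
/*
 * A hedged sketch of the deferred-flush handshake these macros support;
 * the fault-path hook below is illustrative, not a declaration from
 * this header.  flush_dcache_page() may only mark the page dirty, and
 * the real writeback happens later, once the page is actually mapped.
 */
#if 0	/* example only, never compiled */
static void update_cache_example(unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		flush_data_cache_page(address & PAGE_MASK);
		ClearPageDcacheDirty(page);
	}
}
#endif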

extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma,
	unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	if (cpu_has_dc_aliases)
		__flush_dcache_page(page);
	else if (!cpu_has_ic_fills_f_dc)
		SetPageDcacheDirty(page);
}
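
/*
 * Typical use, sketched here for illustration only: a driver or
 * filesystem that writes to a page through its kernel mapping calls
 * flush_dcache_page() so the data becomes visible through any aliased
 * user mapping of the same page.
 */
#if 0	/* example only, never compiled */
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr, src, len);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
#endif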

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vmaddr)
{
	if (cpu_has_dc_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}

static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
}

extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_icache_user_range)(unsigned long start,
	unsigned long end);
extern void (*__local_flush_icache_user_range)(unsigned long start,
	unsigned long end);

extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vmap();
}

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vunmap();
}

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);
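
/*
 * These helpers keep caches coherent when the kernel reads or writes
 * another process's memory, e.g. on the ptrace() access path.  A hedged
 * sketch (the surrounding page lookup and locking are omitted, and the
 * variable names are illustrative):
 */
#if 0	/* example only, never compiled */
	/* with `page` mapped at `maddr` and `vma` covering `addr`: */
	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
	set_page_dirty_lock(page);
#endif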

extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void *addr);
extern void (*flush_data_cache_page)(unsigned long addr);

/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);
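
/*
 * An illustrative call, assuming a probe routine of the caller's own
 * making (probe_scache_example is not a real symbol): the function is
 * run with its code fetched uncached, so it can size or test the
 * caches without its own execution disturbing their contents.
 */
#if 0	/* example only, never compiled */
	unsigned long res = run_uncached(probe_scache_example);
#endif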

extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
	kunmap_coherent();
}
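
/*
 * Sketch of the intended pattern (names illustrative): map the page at
 * a kernel address that does not alias `vaddr` in the cache, copy, and
 * unmap.  On aliasing D-caches this avoids the extra writeback that a
 * copy through an ordinary kernel mapping would require.
 */
#if 0	/* example only, never compiled */
	void *vto = kmap_coherent(page, vaddr);

	memcpy(vto, src, len);
	kunmap_coherent();
#endif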

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
	flush_dcache_page(page);
}

/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}
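
/*
 * A hedged usage sketch for I/O done through a vmap()ed alias of
 * page-cache pages: flush before the device reads the buffer, and
 * invalidate before the CPU reads data the device has written.
 */
#if 0	/* example only, never compiled */
	flush_kernel_vmap_range(vaddr, size);	   /* CPU stores -> device */
	/* ... device accesses the buffer ... */
	invalidate_kernel_vmap_range(vaddr, size); /* device stores -> CPU */
#endif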

#endif /* _ASM_CACHEFLUSH_H */