/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/asm-eva.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <asm/unroll.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

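/*
 * Illustrative sketch (not part of this header): on a 64-bit kernel
 * CKSEG0 is already a sign-extended constant (0xffffffff80000000), so
 * an indexed op on a set/way selector "index" can simply be handed to
 * the cache_op() helper below as
 *
 *	cache_op(Index_Writeback_Inv_D, INDEX_BASE + index);
 *
 * with no explicit sign-extension fixups in 64-bit code.
 */
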
#define _cache_op(insn, op, addr)					\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	" insn("%0", "%1") "				\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

#define cache_op(op, addr)						\
	_cache_op(kernel_cache, op, addr)

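/*
 * Usage sketch (illustrative; "vaddr" is a hypothetical kernel virtual
 * address).  Writing back and invalidating the D-cache line holding
 * vaddr is a single CACHE instruction:
 *
 *	cache_op(Hit_Writeback_Inv_D, vaddr);
 *
 * The "R" constraint passes the addressed byte as a memory operand so
 * the assembler can form the offset(base) addressing mode for CACHE.
 */
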
static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
}

static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

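/*
 * Usage sketch (illustrative; "uaddr" is a hypothetical user address).
 * The protected variants evaluate to 0 on success or -EFAULT if the
 * CACHE instruction took an address error / TLB fault, so callers can
 * propagate failure:
 *
 *	if (protected_cache_op(Hit_Writeback_Inv_D, uaddr))
 *		return -EFAULT;
 *
 * The .fixup stanza loads -EFAULT into %0 and the __ex_table entry
 * redirects a fault at label 1 there.  protected_cachee_op below is
 * the EVA flavour, using CACHEE to reach the user address space.
 */
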
#define protected_cachee_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

/*
 * The next routines are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
#ifdef CONFIG_EVA
		return protected_cachee_op(Hit_Invalidate_I, addr);
#else
		return protected_cache_op(Hit_Invalidate_I, addr);
#endif
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  We're talking about one cacheline unnecessarily getting invalidated
 * here so the penalty isn't overly hard.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

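/*
 * Sketch of a typical caller (hypothetical, but mirrors how signal
 * trampoline setup uses these helpers): write the new instructions
 * back from the D-cache, then kill stale I-cache lines, and bail out
 * with -EFAULT if the page was unmapped:
 *
 *	if (protected_writeback_dcache_line(addr) ||
 *	    protected_flush_icache_line(addr))
 *		return -EFAULT;
 */
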
/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#define cache_unroll(times, insn, op, addr, lsize) do {			\
	int i = 0;							\
	unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize)));	\
} while (0)

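/*
 * For example (illustrative expansion), cache_unroll(32, kernel_cache,
 * Hit_Writeback_Inv_D, addr, 32) emits 32 back-to-back CACHE
 * instructions touching addr, addr + 32, ..., addr + 31 * 32, i.e. a
 * fully unrolled kilobyte of 32-byte lines per invocation.
 */
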
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, kernel_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}

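/*
 * The instantiations below generate, e.g., blast_dcache32() (write
 * back and invalidate the whole D-cache by index), blast_dcache32_page()
 * (hit ops over one page) and blast_dcache32_page_indexed() (the
 * indexed equivalent).  A hypothetical caller:
 *
 *	blast_dcache32_page((unsigned long)page_address(page));
 */
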
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, user_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}

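/*
 * Same loop as the _page flavours above, but built on user_cache,
 * which (per asm-eva.h) becomes the CACHEE instruction on EVA kernels
 * so the user mapping of the page is targeted.  Hypothetical use,
 * where "user_vaddr" is a user virtual address:
 *
 *	blast_dcache32_user_page(user_vaddr);
 */
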
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
}

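/*
 * This generates, e.g., blast_dcache_range(start, end) and
 * protected_blast_dcache_range(start, end).  Rounding "end - 1" down
 * to a line boundary makes the loop inclusive of the final line; note
 * it always issues at least one op, so callers must pass start < end.
 * Hypothetical use on a KSEG0 buffer of "size" bytes at "vaddr":
 *
 *	blast_inv_dcache_range(vaddr, vaddr + size);
 */
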
#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	if (!uaccess_kernel()) {					\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
}

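/*
 * On an EVA kernel the plain CACHE opcode is interpreted against the
 * kernel address space, so the !uaccess_kernel() branch above uses
 * protected_cachee_op() to issue CACHEE at user addresses instead.
 */
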
__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
static inline void blast_##pfx##cache##lsize##_node(long node)		\
{									\
	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}

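/*
 * E.g. blast_scache32_node(nid) (hypothetical call) writes back and
 * invalidates the whole per-node S-cache of NUMA node "nid" by
 * pointing the indexed ops at that node's address window via
 * nid_to_addrbase().
 */
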
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

#endif /* _ASM_R4KCACHE_H */