/*
 * arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
 *
 * Copyright (C) 2008 Marvell Semiconductor
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * References:
 * - Unified Layer 2 Cache for Feroceon CPU Cores,
 *   Document ID MV-S104858-00, Rev. A, October 23 2007.
 */

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/hardware/cache-feroceon-l2.h>

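/*
 * Bit 4 of the control register mapped by the "marvell,kirkwood-cache"
 * DT node forces the L2 into write-through mode; see feroceon_of_init()
 * below.
 */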
#define L2_WRITETHROUGH_KIRKWOOD	BIT(4)

/*
 * Low-level cache maintenance operations.
 *
 * As well as the regular 'clean/invalidate/flush L2 cache line by
 * MVA' instructions, the Feroceon L2 cache controller also features
 * 'clean/invalidate L2 range by MVA' operations.
 *
 * Cache range operations are initiated by writing the start and
 * end addresses to successive cp15 registers, and process every
 * cache line whose first byte address lies in the inclusive range
 * [start:end].
 *
 * The cache range operations stall the CPU pipeline until completion.
 *
 * The range operations require two successive cp15 writes, in
 * between which we don't want to be preempted.
 */

static inline unsigned long l2_get_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Because range ops can't be done on physical addresses,
	 * we install a temporary virtual mapping purely so the TLB
	 * lookup on the start address can succeed.  The mapping is
	 * never actually accessed, so there is no need to flush it
	 * afterwards (note: a cache flush may still happen in some
	 * circumstances, depending on the path taken in kunmap_atomic).
	 */
	void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
	return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
#else
	return __phys_to_virt(paddr);
#endif
}

static inline void l2_put_va(unsigned long vaddr)
{
#ifdef CONFIG_HIGHMEM
	kunmap_atomic((void *)vaddr);
#endif
}

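/*
 * The single-line operations below take a physical address directly;
 * only the range operations need a virtual address, since the hardware
 * performs a TLB lookup on the start address (see above).
 */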
static inline void l2_clean_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
}

static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
{
	unsigned long va_start, va_end, flags;

	/*
	 * Make sure 'start' and 'end' reference the same page, as
	 * L2 is PIPT and range operations only do a TLB lookup on
	 * the start address.
	 */
	BUG_ON((start ^ end) >> PAGE_SHIFT);

	va_start = l2_get_va(start);
	va_end = va_start + (end - start);
	raw_local_irq_save(flags);
	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
		"mcr p15, 1, %1, c15, c9, 5"
		: : "r" (va_start), "r" (va_end));
	raw_local_irq_restore(flags);
	l2_put_va(va_start);
}

static inline void l2_clean_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
}

static inline void l2_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
}

static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
{
	unsigned long va_start, va_end, flags;

	/*
	 * Make sure 'start' and 'end' reference the same page, as
	 * L2 is PIPT and range operations only do a TLB lookup on
	 * the start address.
	 */
	BUG_ON((start ^ end) >> PAGE_SHIFT);

	va_start = l2_get_va(start);
	va_end = va_start + (end - start);
	raw_local_irq_save(flags);
	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
		"mcr p15, 1, %1, c15, c11, 5"
		: : "r" (va_start), "r" (va_end));
	raw_local_irq_restore(flags);
	l2_put_va(va_start);
}

static inline void l2_inv_all(void)
{
	__asm__("mcr p15, 1, %0, c15, c11, 0" : : "r" (0));
}

/*
 * Linux primitives.
 *
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
#define CACHE_LINE_SIZE		32
#define MAX_RANGE_SIZE		1024
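/*
 * MAX_RANGE_SIZE bounds each hardware range operation to 1024 bytes
 * (32 cache lines), which in turn bounds how long a single operation
 * can stall the CPU pipeline.
 */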

static int l2_wt_override;

static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	unsigned long range_end;

	BUG_ON(start & (CACHE_LINE_SIZE - 1));
	BUG_ON(end & (CACHE_LINE_SIZE - 1));

	/*
	 * Try to process all cache lines between 'start' and 'end'.
	 */
	range_end = end;

	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (range_end > start + MAX_RANGE_SIZE)
		range_end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (range_end > (start | (PAGE_SIZE - 1)) + 1)
		range_end = (start | (PAGE_SIZE - 1)) + 1;

	return range_end;
}
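/*
 * For example, with 4 KiB pages: calc_range_end(0x1f80, 0x3000)
 * returns 0x2000 (clamped to the page boundary), while
 * calc_range_end(0x2000, 0x3000) returns 0x2400 (clamped to
 * MAX_RANGE_SIZE), so the callers below iterate over the range
 * in page- and size-bounded chunks.
 */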

static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
{
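	/*
	 * Partial lines at either end of the range are cleaned as
	 * well as invalidated, so that data outside the range that
	 * happens to share a cache line is written back rather than
	 * discarded.
	 */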
	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		l2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end && end & (CACHE_LINE_SIZE - 1)) {
		l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
		end &= ~(CACHE_LINE_SIZE - 1);
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
		start = range_end;
	}

	dsb();
}

static void feroceon_l2_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
			start = range_end;
		}
	}

	dsb();
}

static void feroceon_l2_flush_range(unsigned long start, unsigned long end)
{
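	/*
	 * Flush is clean followed by invalidate.  In WT override mode
	 * the clean step is skipped, since a write-through L2 never
	 * holds dirty lines.
	 */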
	start &= ~(CACHE_LINE_SIZE - 1);
	end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		if (!l2_wt_override)
			l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
		l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
		start = range_end;
	}

	dsb();
}


/*
 * Routines to disable and re-enable the D-cache and I-cache at run
 * time.  These are necessary because the L2 cache can only be enabled
 * or disabled while the L1 Dcache and Icache are both disabled.
 */
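/*
 * Returns nonzero if the D-cache was enabled on entry (and has now
 * been flushed and disabled), so that the caller knows whether to
 * re-enable it afterwards; see enable_l2().
 */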
static int __init flush_and_disable_dcache(void)
{
	u32 cr;

	cr = get_cr();
	if (cr & CR_C) {
		unsigned long flags;

		raw_local_irq_save(flags);
		flush_cache_all();
		set_cr(cr & ~CR_C);
		raw_local_irq_restore(flags);
		return 1;
	}
	return 0;
}

static void __init enable_dcache(void)
{
	u32 cr;

	cr = get_cr();
	set_cr(cr | CR_C);
}

static void __init __invalidate_icache(void)
{
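	/* CP15 c7, c5, 0 with Rd == 0: invalidate the entire I-cache. */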
	__asm__("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
}

static int __init invalidate_and_disable_icache(void)
{
	u32 cr;

	cr = get_cr();
	if (cr & CR_I) {
		set_cr(cr & ~CR_I);
		__invalidate_icache();
		return 1;
	}
	return 0;
}

static void __init enable_icache(void)
{
	u32 cr;

	cr = get_cr();
	set_cr(cr | CR_I);
}

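/*
 * The Marvell-specific CPU Extra Features register (cp15 c15, c1, 0)
 * holds, among other things, the L2 enable and L2 prefetch disable
 * bits manipulated below.
 */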
static inline u32 read_extra_features(void)
{
	u32 u;

	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));

	return u;
}

static inline void write_extra_features(u32 u)
{
	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}

static void __init disable_l2_prefetch(void)
{
	u32 u;

	/*
	 * Read the CPU Extra Features register and make sure that the
	 * Disable L2 Prefetch bit is set, setting it ourselves if the
	 * bootloader left it clear.
	 */
	u = read_extra_features();
	if (!(u & 0x01000000)) {
		pr_info("Feroceon L2: Disabling L2 prefetch.\n");
		write_extra_features(u | 0x01000000);
	}
}

static void __init enable_l2(void)
{
	u32 u;

	u = read_extra_features();
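	/*
	 * Bit 22 (0x00400000) of the Extra Features register enables
	 * the L2; bit 24 (0x01000000), tested in disable_l2_prefetch()
	 * above, disables L2 prefetch.
	 */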
	if (!(u & 0x00400000)) {
		int i, d;

		pr_info("Feroceon L2: Enabling L2\n");

		d = flush_and_disable_dcache();
		i = invalidate_and_disable_icache();
		l2_inv_all();
		write_extra_features(u | 0x00400000);
		if (i)
			enable_icache();
		if (d)
			enable_dcache();
	} else {
		pr_err(FW_BUG
		       "Feroceon L2: bootloader left the L2 cache on!\n");
	}
}

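/*
 * Main entry point: register the Feroceon range operations with the
 * outer cache interface and turn the L2 on.  A nonzero argument
 * selects write-through override mode, in which the L2 is assumed
 * write-through and clean operations are skipped (see
 * feroceon_l2_clean_range() above).
 */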
void __init feroceon_l2_init(int __l2_wt_override)
{
	l2_wt_override = __l2_wt_override;

	disable_l2_prefetch();

	outer_cache.inv_range = feroceon_l2_inv_range;
	outer_cache.clean_range = feroceon_l2_clean_range;
	outer_cache.flush_range = feroceon_l2_flush_range;

	enable_l2();

	pr_info("Feroceon L2: Cache support initialised%s.\n",
		l2_wt_override ? ", in WT override mode" : "");
}

#ifdef CONFIG_OF
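/*
 * DT entry points.  Both the plain Feroceon L2 and the Kirkwood
 * variant are matched; the Kirkwood node additionally maps a control
 * register through which the write-through setting is programmed.
 */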
static const struct of_device_id feroceon_ids[] __initconst = {
	{ .compatible = "marvell,kirkwood-cache"},
	{ .compatible = "marvell,feroceon-cache"},
	{}
};

int __init feroceon_of_init(void)
{
	struct device_node *node;
	void __iomem *base;
	bool l2_wt_override = false;

#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	l2_wt_override = true;
#endif

	node = of_find_matching_node(NULL, feroceon_ids);
	if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
		base = of_iomap(node, 0);
		if (!base)
			return -ENOMEM;

		if (l2_wt_override)
			writel(readl(base) | L2_WRITETHROUGH_KIRKWOOD, base);
		else
			writel(readl(base) & ~L2_WRITETHROUGH_KIRKWOOD, base);
	}

	feroceon_l2_init(l2_wt_override);

	return 0;
}
#endif