xref: /OK3568_Linux_fs/kernel/arch/mips/mm/c-octeon.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);

/**
 * octeon_flush_data_cache_page - flush a page from the data cache
 * @addr:   address of the page to flush
 *
 * Octeon automatically flushes the dcache on TLB changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Nothing to do */
}

static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}
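
/*
 * "synci 0($0)" issues SYNCI for address 0 (base register $0, offset
 * 0). On Octeon this is assumed to invalidate the entire local
 * icache, which is why a single instruction suffices here and in the
 * range flush below.
 */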

/*
 * Flush local I-cache for the specified range. The range is ignored:
 * one synci invalidates the whole local icache, so flushing any
 * range is equivalent to flushing everything.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}

/**
 * octeon_flush_icache_all_cores - flush caches as necessary for all
 * cores affected by a vma. If no vma is supplied, all cores are flushed.
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
	cpumask_clear_cpu(cpu, &mask);
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}
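
/*
 * Design note on octeon_flush_icache_all_cores() above: the local
 * core is flushed directly (mb() orders prior stores first), then
 * cleared from the target mask so the IPI loop only interrupts
 * remote cores; preempt_disable() keeps the thread pinned while the
 * mask is built and the IPIs are sent.
 */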


/**
 * octeon_flush_icache_all - flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}


/**
 * octeon_flush_cache_mm - flush all memory associated with a memory context
 * @mm:	    Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here
	 */
}


/**
 * octeon_flush_icache_range - flush a range of kernel addresses out
 * of the icache
 * @start:  start of the range (ignored; the whole icache is flushed)
 * @end:    end of the range (ignored)
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}


/**
 * octeon_flush_cache_range - flush a range out of a vma
 * @vma:    VMA to flush
 * @start:  start of the range to flush
 * @end:    end of the range to flush
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}


/**
 * octeon_flush_cache_page - flush a specific page of a vma
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:    Page frame number of the page
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page, unsigned long pfn)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}
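
/*
 * Presumably the BUG() above asserts that this path is never taken
 * on Octeon: with a coherent, alias-free dcache there should be
 * nothing to flush for a kernel vmap range.
 */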

/**
 * probe_octeon - populate current_cpu_data with Octeon cache geometry
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;
	int cputype = current_cpu_type();

	config1 = read_c0_config1();
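
	/*
	 * Per the MIPS architecture, CP0 Config1 describes the icache
	 * geometry as decoded below: IS (bits 24:22) gives sets per way
	 * as 64 << IS, IL (bits 21:19) gives the line size as 2 << IL
	 * bytes, and IA (bits 18:16) gives associativity as IA + 1.
	 */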
	switch (cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		c->dcache.linesz = 128;
		if (cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;
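		/*
		 * Worked size for the case above: CN5XXX dcache =
		 * 2 sets * 64 ways * 128 B = 16 KB; CN3XXX = 8 KB.
		 */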

	case CPU_CAVIUM_OCTEON2:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;
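		/*
		 * Worked size for the case above, assuming Config1
		 * reports the usual 128-byte icache line: icache =
		 * 8 * 37 * 128 B = 37 KB; dcache = 8 * 32 * 128 B = 32 KB.
		 */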

	case CPU_CAVIUM_OCTEON3:
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 39;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;
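		/*
		 * Worked size for the case above: icache =
		 * 16 * 39 * 128 B = 78 KB; dcache = 8 * 32 * 128 B = 32 KB.
		 */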

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	if (smp_processor_id() == 0) {
		pr_info("Primary instruction cache %ldkB, %s, %d way, %d sets, linesize %d bytes.\n",
			icache_size >> 10,
			cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_info("Primary data cache %ldkB, %d-way, %d sets, linesize %d bytes.\n",
			dcache_size >> 10, c->dcache.ways,
			c->dcache.sets, c->dcache.linesz);
	}
}

static void octeon_cache_error_setup(void)
{
	extern char except_vec2_octeon;
	set_handler(0x100, &except_vec2_octeon, 0x80);
}
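
/*
 * 0x100 is the architectural offset of the MIPS cache error vector
 * from the exception vector base; set_handler() copies 0x80 bytes of
 * except_vec2_octeon (the low-level handler) to that offset.
 */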

/**
 * octeon_cache_init - set up the Octeon cache flush routines
 */
void octeon_cache_init(void)
{
	probe_octeon();

	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all			= octeon_flush_icache_all;
	__flush_cache_all		= octeon_flush_icache_all;
	flush_cache_mm			= octeon_flush_cache_mm;
	flush_cache_page		= octeon_flush_cache_page;
	flush_cache_range		= octeon_flush_cache_range;
	flush_icache_all		= octeon_flush_icache_all;
	flush_data_cache_page		= octeon_flush_data_cache_page;
	flush_icache_range		= octeon_flush_icache_range;
	local_flush_icache_range	= local_octeon_flush_icache_range;
	__flush_icache_user_range	= octeon_flush_icache_range;
	__local_flush_icache_user_range	= local_octeon_flush_icache_range;

	__flush_kernel_vmap_range	= octeon_flush_kernel_vmap_range;

	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Handle a cache error exception
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
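
/*
 * Hypothetical usage sketch (not part of this file; my_cb and my_nb
 * are invented names): a driver interested in cache errors could do
 *
 *	static int my_cb(struct notifier_block *nb,
 *			 unsigned long unrecoverable, void *data)
 *	{
 *		pr_warn("octeon cache error, fatal=%lu\n", unrecoverable);
 *		return NOTIFY_OK;	// suppress the default dump
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_cb };
 *
 *	register_co_cache_error_notifier(&my_nb);
 *
 * A result of NOTIFY_OK tells co_cache_error_call_notifiers() below
 * to skip its default error dump; on the non-recoverable path, val is
 * non-zero and the dcache error status is taken from cache_err_dcache[]
 * rather than read directly from CP0.
 */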

static void co_cache_error_call_notifiers(unsigned long val)
{
	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);

	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
		u64 dcache_err;
		unsigned long coreid = cvmx_get_core_num();
		u64 icache_err = read_octeon_c0_icacheerr();

		if (val) {
			dcache_err = cache_err_dcache[coreid];
			cache_err_dcache[coreid] = 0;
		} else {
			dcache_err = read_octeon_c0_dcacheerr();
		}

		pr_err("Core%lu: Cache error exception:\n", coreid);
		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
		if (icache_err & 1) {
			pr_err("CacheErr (Icache) == %llx\n",
			       (unsigned long long)icache_err);
			write_octeon_c0_icacheerr(0);
		}
		if (dcache_err & 1) {
			pr_err("CacheErr (Dcache) == %llx\n",
			       (unsigned long long)dcache_err);
		}
	}
}

/*
 * Called when the exception is recoverable
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	co_cache_error_call_notifiers(0);
}

/*
 * Called when the exception is not recoverable
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}