// SPDX-License-Identifier: GPL-2.0
/*
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io-unit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/bitext.h>
#include <asm/vaddrs.h>
#include <asm/cache.h>
#include <asm/traps.h>
#include <asm/oplib.h>
#include <asm/mbus.h>
#include <asm/page.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/io.h>

/* Now the cpu specific definitions. */
#include <asm/turbosparc.h>
#include <asm/tsunami.h>
#include <asm/viking.h>
#include <asm/swift.h>
#include <asm/leon.h>
#include <asm/mxcc.h>
#include <asm/ross.h>

#include "mm_32.h"

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
EXPORT_SYMBOL(vac_cache_size);
int vac_line_size;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
EXPORT_SYMBOL(sparc32_cachetlb_ops);

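/*
 * FLUSH_BEGIN/FLUSH_END: on UP the whole flush is skipped when the mm
 * has never been given a hardware context; on SMP the operations must
 * always run (they may be broadcast), so the guard compiles away.
 */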
#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif

int flush_page_for_dma_global = 1;

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
static struct bit_map srmmu_nocache_map;

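/* A PMD is empty when its page-table-pointer field (low 28 bits) is clear. */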
static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	pte_t pte;

	pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
	set_pte((pte_t *)ctxp, pte);
}

/*
 * Locations of MSI Registers.
 */
#define MSI_MBUS_ARBEN	0xe0001008	/* MBus Arbiter Enable register */

/*
 * Useful bits in the MSI Registers.
 */
#define MSI_ASYNC_MODE  0x80000000	/* Operate the MSI asynchronously */

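/*
 * Read the MBus arbiter enable register through ASI_M_CTL, clear the
 * async-mode bit and write it back, forcing the MSI to operate
 * synchronously.
 */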
static void msi_set_sync(void)
{
	__asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
			      "andn %%g3, %2, %%g3\n\t"
			      "sta %%g3, [%0] %1\n\t" : :
			      "r" (MSI_MBUS_ARBEN),
			      "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
}

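/*
 * SRMMU table descriptors hold the physical table address shifted
 * right by 4 in the PTP field, tagged with the ET_PTD entry type.
 */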
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp = __nocache_pa(ptep) >> 4;
	set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: required alignment, in bytes.
 * Returns the virtual address of the allocated area.
 */
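/*
 * E.g. get_pgd_fast() below requests SRMMU_PGD_TABLE_SIZE bytes aligned
 * to SRMMU_PGD_TABLE_SIZE to get a naturally aligned page directory.
 */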
static void *__srmmu_get_nocache(int size, int align)
{
	int offset, minsz = 1 << SRMMU_NOCACHE_BITMAP_SHIFT;
	unsigned long addr;

	if (size < minsz) {
		printk(KERN_ERR "Size 0x%x too small for nocache request\n",
		       size);
		size = minsz;
	}
	if (size & (minsz - 1)) {
		printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
		       size);
		size += minsz - 1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return NULL;
	}

	addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
	return (void *)addr;
}

void *srmmu_get_nocache(int size, int align)
{
	void *tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset(tmp, 0, size);

	return tmp;
}

void srmmu_free_nocache(void *addr, int size)
{
	unsigned long vaddr;
	int offset;

	vaddr = (unsigned long)addr;
	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr + size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		    vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size - 1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

/* Return how much physical memory we have.  */
static unsigned long __init probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void __init srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
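	/*
	 * E.g. with 64 MB of RAM this works out to 256 pages (1 MB) of
	 * nocache, assuming the usual SRMMU_NOCACHE_ALCRATIO of 64.
	 */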

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}

static void __init srmmu_nocache_init(void)
{
	void *srmmu_nocache_bitmap;
	unsigned int bitmap_bits;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
					    SRMMU_NOCACHE_ALIGN_MAX);
	if (!srmmu_nocache_pool)
		panic("%s: Failed to allocate %lu bytes align=0x%x\n",
		      __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap =
		memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
			       SMP_CACHE_BYTES);
	if (!srmmu_nocache_bitmap)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      BITS_TO_LONGS(bitmap_bits) * sizeof(long));
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		pmd = pmd_offset(__nocache_fix(pud), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
						(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
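/*
 * Several PTE tables can share one backing page; the pgtable constructor
 * is run only for the first table placed in a page (refcount 1 -> 2).
 */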
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	pte_t *ptep;
	struct page *page;

	if ((ptep = pte_alloc_one_kernel(mm)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
	spin_lock(&mm->page_table_lock);
	if (page_ref_inc_return(page) == 2 && !pgtable_pte_page_ctor(page)) {
		page_ref_dec(page);
		ptep = NULL;
	}
	spin_unlock(&mm->page_table_lock);

	return ptep;
}

void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
	struct page *page;

	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
	spin_lock(&mm->page_table_lock);
	if (page_ref_dec_return(page) == 1)
		pgtable_pte_page_dtor(page);
	spin_unlock(&mm->page_table_lock);

	srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
}

/* context handling - a dynamically sized pool is used */
#define NO_CONTEXT	-1

struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

static struct ctx_list *ctx_list_pool;
static struct ctx_list ctx_free;
static struct ctx_list ctx_used;

/* At boot time we determine the number of contexts */
static int num_contexts;

static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)

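/*
 * Grab a context off the free list if one is available; otherwise steal
 * the oldest entry on the used list (skipping old_mm), flushing its
 * cache and TLB before handing the context to the new mm.
 */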
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}

static void __init sparc_context_init(int numctx)
{
	int ctx;
	unsigned long size;

	size = numctx * sizeof(struct ctx_list);
	ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
	if (!ctx_list_pool)
		panic("%s: Failed to allocate %lu bytes\n", __func__, size);

	for (ctx = 0; ctx < numctx; ctx++) {
		struct ctx_list *clist;

		clist = (ctx_list_pool + ctx);
		clist->ctx_number = ctx;
		clist->ctx_mm = NULL;
	}
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;
	for (ctx = 0; ctx < numctx; ctx++)
		add_to_free_ctxlist(ctx_list_pool + ctx);
}

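/*
 * Allocate a hardware context on first use and point the context table
 * entry at this mm's page tables before switching to it.
 */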
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	unsigned long flags;

	if (mm->context == NO_CONTEXT) {
		spin_lock_irqsave(&srmmu_context_spinlock, flags);
		alloc_context(old_mm, mm);
		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	p4dp = p4d_offset(pgdp, virt_addr);
	pudp = pud_offset(p4dp, virt_addr);
	pmdp = pmd_offset(pudp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	p4dp = p4d_offset(pgdp, virt_addr);
	pudp = pud_offset(p4dp, virt_addr);
	pmdp = pmd_offset(pudp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0  /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			 /* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		p4dp = p4d_offset(pgdp, start);
		pudp = pud_offset(p4dp, start);
		if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
			pmdp = __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pud_set(__nocache_fix(pudp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pudp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		p4dp = p4d_offset(pgdp, start);
		pudp = pud_offset(p4dp, start);
		if (pud_none(*pudp)) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
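			/* pgd/p4d/pud are folded on sparc32, so pgdp aliases pudp here */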
			pud_set((pud_t *)pgdp, pmdp);
		}
		pmdp = pmd_offset(pudp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/* These flush types are not available on all chips... */
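/*
 * A probe through ASI_M_FLUSH_PROBE with type "entire" (the 0x400 in the
 * probe address) returns the PTE/PTD the MMU would use for vaddr, or 0
 * if the address is unmapped.
 */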
static inline unsigned long srmmu_probe(unsigned long vaddr)
{
	unsigned long retval;

	if (sparc_cpu_model != sparc_leon) {

		vaddr &= PAGE_MASK;
		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
				     "=r" (retval) :
				     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
	} else {
		retval = leon_swprobe(vaddr, NULL);
	}
	return retval;
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	unsigned long probed;
	unsigned long addr;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */

	while (start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if (start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		probed = srmmu_probe(start);
		if (!probed) {
			/* continue probing until we find an entry */
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;
		addr = start - PAGE_SIZE;

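		/*
		 * Probe the last page of the enclosing region: if it returns
		 * the same value, the PROM mapped the whole region with a
		 * single large PTE at that level.
		 */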
		if (!(start & ~(PMD_MASK))) {
			if (srmmu_probe(addr + PMD_SIZE) == probed)
				what = 1;
		}

		if (!(start & ~(PGDIR_MASK))) {
			if (srmmu_probe(addr + PGDIR_SIZE) == probed)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		p4dp = p4d_offset(pgdp, start);
		pudp = pud_offset(p4dp, start);
		if (what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
			start += PGDIR_SIZE;
			continue;
		}
		if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
						   SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pud_set(__nocache_fix(pudp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (what == 1) {
			*(pmd_t *)__nocache_fix(pmdp) = __pmd(probed);
			start += PMD_SIZE;
			continue;
		}
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(probed);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & PGDIR_MASK);
	unsigned long vstart = (vbase & PGDIR_MASK);
	unsigned long vend = PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while (vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += PGDIR_SIZE; pstart += PGDIR_SIZE;
	}
	return vstart;
}

static void __init map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

void (*poke_srmmu)(void) = NULL;

void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	init_mm.context = (unsigned long) NO_CONTEXT;
	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while (cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if (!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);

	for (i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_ops->tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	p4d = p4d_offset(pgd, PKMAP_BASE);
	pud = pud_offset(p4d, PKMAP_BASE);
	pmd = pmd_offset(pud, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

		max_zone_pfn[ZONE_DMA] = max_low_pfn;
		max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
		max_zone_pfn[ZONE_HIGHMEM] = highend_pfn;

		free_area_init(max_zone_pfn);
	}
}

void mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	if (mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock_irqsave(&srmmu_context_spinlock, flags);
		free_context(mm->context);
		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

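/*
 * Determine the virtually-addressed cache geometry from the PROM.
 * On SMP the largest cache size and smallest line size seen across
 * all cpus are kept, so flushes stay safe on every cpu.
 */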
static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while ((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if (!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if (vac_cache_size > max_size)
				max_size = vac_cache_size;
			if (vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if (nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

static void poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
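	/* reading the fault address/status registers clears any pending fault */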
1100*4882a593Smuzhiyun 	clear = srmmu_get_faddr();
1101*4882a593Smuzhiyun 	clear = srmmu_get_fstatus();
1102*4882a593Smuzhiyun }
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun static const struct sparc32_cachetlb_ops hypersparc_ops = {
1105*4882a593Smuzhiyun 	.cache_all	= hypersparc_flush_cache_all,
1106*4882a593Smuzhiyun 	.cache_mm	= hypersparc_flush_cache_mm,
1107*4882a593Smuzhiyun 	.cache_page	= hypersparc_flush_cache_page,
1108*4882a593Smuzhiyun 	.cache_range	= hypersparc_flush_cache_range,
1109*4882a593Smuzhiyun 	.tlb_all	= hypersparc_flush_tlb_all,
1110*4882a593Smuzhiyun 	.tlb_mm		= hypersparc_flush_tlb_mm,
1111*4882a593Smuzhiyun 	.tlb_page	= hypersparc_flush_tlb_page,
1112*4882a593Smuzhiyun 	.tlb_range	= hypersparc_flush_tlb_range,
1113*4882a593Smuzhiyun 	.page_to_ram	= hypersparc_flush_page_to_ram,
1114*4882a593Smuzhiyun 	.sig_insns	= hypersparc_flush_sig_insns,
1115*4882a593Smuzhiyun 	.page_for_dma	= hypersparc_flush_page_for_dma,
1116*4882a593Smuzhiyun };
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun static void __init init_hypersparc(void)
1119*4882a593Smuzhiyun {
1120*4882a593Smuzhiyun 	srmmu_name = "ROSS HyperSparc";
1121*4882a593Smuzhiyun 	srmmu_modtype = HyperSparc;
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	init_vac_layout();
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	is_hypersparc = 1;
1126*4882a593Smuzhiyun 	sparc32_cachetlb_ops = &hypersparc_ops;
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	poke_srmmu = poke_hypersparc;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	hypersparc_setup_blockops();
1131*4882a593Smuzhiyun }
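/*
 * Editor's note: generic code reaches these routines only through the
 * sparc32_cachetlb_ops pointer, roughly (a sketch, not the verbatim
 * wrappers from the sparc32 headers):
 *
 *	#define flush_cache_mm(mm)  sparc32_cachetlb_ops->cache_mm(mm)
 *	#define flush_tlb_mm(mm)    sparc32_cachetlb_ops->tlb_mm(mm)
 *
 * so the single assignment in init_hypersparc() above is enough to
 * route every flush to the HyperSparc implementations.
 */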
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun static void poke_swift(void)
1134*4882a593Smuzhiyun {
1135*4882a593Smuzhiyun 	unsigned long mreg;
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	/* Clear any crap from the cache or else... */
1138*4882a593Smuzhiyun 	swift_flush_cache_all();
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	/* Enable I & D caches */
1141*4882a593Smuzhiyun 	mreg = srmmu_get_mmureg();
1142*4882a593Smuzhiyun 	mreg |= (SWIFT_IE | SWIFT_DE);
1143*4882a593Smuzhiyun 	/*
1144*4882a593Smuzhiyun 	 * The Swift branch folding logic is completely broken.  At
1145*4882a593Smuzhiyun 	 * trap time, if things are just right, it can mistakenly
1146*4882a593Smuzhiyun 	 * think that a trap is coming from kernel mode when in fact
1147*4882a593Smuzhiyun 	 * it is coming from user mode (it mis-executes the branch in
1148*4882a593Smuzhiyun 	 * the trap code).  So you see things like crashme completely
1149*4882a593Smuzhiyun 	 * hosing your machine which is completely unacceptable.  Turn
1150*4882a593Smuzhiyun 	 * this shit off... nice job Fujitsu.
1151*4882a593Smuzhiyun 	 */
1152*4882a593Smuzhiyun 	mreg &= ~(SWIFT_BF);
1153*4882a593Smuzhiyun 	srmmu_set_mmureg(mreg);
1154*4882a593Smuzhiyun }
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun static const struct sparc32_cachetlb_ops swift_ops = {
1157*4882a593Smuzhiyun 	.cache_all	= swift_flush_cache_all,
1158*4882a593Smuzhiyun 	.cache_mm	= swift_flush_cache_mm,
1159*4882a593Smuzhiyun 	.cache_page	= swift_flush_cache_page,
1160*4882a593Smuzhiyun 	.cache_range	= swift_flush_cache_range,
1161*4882a593Smuzhiyun 	.tlb_all	= swift_flush_tlb_all,
1162*4882a593Smuzhiyun 	.tlb_mm		= swift_flush_tlb_mm,
1163*4882a593Smuzhiyun 	.tlb_page	= swift_flush_tlb_page,
1164*4882a593Smuzhiyun 	.tlb_range	= swift_flush_tlb_range,
1165*4882a593Smuzhiyun 	.page_to_ram	= swift_flush_page_to_ram,
1166*4882a593Smuzhiyun 	.sig_insns	= swift_flush_sig_insns,
1167*4882a593Smuzhiyun 	.page_for_dma	= swift_flush_page_for_dma,
1168*4882a593Smuzhiyun };
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun #define SWIFT_MASKID_ADDR  0x10003018
1171*4882a593Smuzhiyun static void __init init_swift(void)
1172*4882a593Smuzhiyun {
1173*4882a593Smuzhiyun 	unsigned long swift_rev;
1174*4882a593Smuzhiyun 
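	/* Read the mask ID register through the MMU-bypass ASI; the
	 * module revision sits in bits 31:24, hence the shift by 0x18
	 * below.
	 */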
1175*4882a593Smuzhiyun 	__asm__ __volatile__("lda [%1] %2, %0\n\t"
1176*4882a593Smuzhiyun 			     "srl %0, 0x18, %0\n\t" :
1177*4882a593Smuzhiyun 			     "=r" (swift_rev) :
1178*4882a593Smuzhiyun 			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
1179*4882a593Smuzhiyun 	srmmu_name = "Fujitsu Swift";
1180*4882a593Smuzhiyun 	switch (swift_rev) {
1181*4882a593Smuzhiyun 	case 0x11:
1182*4882a593Smuzhiyun 	case 0x20:
1183*4882a593Smuzhiyun 	case 0x23:
1184*4882a593Smuzhiyun 	case 0x30:
1185*4882a593Smuzhiyun 		srmmu_modtype = Swift_lots_o_bugs;
1186*4882a593Smuzhiyun 		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
1187*4882a593Smuzhiyun 		/*
1188*4882a593Smuzhiyun 		 * Gee george, I wonder why Sun is so hush hush about
1189*4882a593Smuzhiyun 		 * this hardware bug... really braindamaged stuff going
1190*4882a593Smuzhiyun 		 * on here.  However I think we can find a way to avoid
1191*4882a593Smuzhiyun 		 * all of the workaround overhead under Linux.  Basically,
1192*4882a593Smuzhiyun 		 * any page fault can cause kernel pages to become user
1193*4882a593Smuzhiyun 		 * accessible (the mmu gets confused and clears some of
1194*4882a593Smuzhiyun 		 * the ACC bits in kernel ptes).  Aha, sounds pretty
1195*4882a593Smuzhiyun 		 * horrible eh?  But wait, after extensive testing it appears
1196*4882a593Smuzhiyun 		 * that if you use pgd_t level large kernel pte's (like the
1197*4882a593Smuzhiyun 		 * 4MB pages on the Pentium) the bug does not get tripped
1198*4882a593Smuzhiyun 		 * at all.  This avoids almost all of the major overhead.
1199*4882a593Smuzhiyun 		 * Welcome to a world where your vendor tells you to,
1200*4882a593Smuzhiyun 		 * "apply this kernel patch" instead of "sorry for the
1201*4882a593Smuzhiyun 		 * broken hardware, send it back and we'll give you
1202*4882a593Smuzhiyun 		 * properly functioning parts"
1203*4882a593Smuzhiyun 		 */
1204*4882a593Smuzhiyun 		break;
1205*4882a593Smuzhiyun 	case 0x25:
1206*4882a593Smuzhiyun 	case 0x31:
1207*4882a593Smuzhiyun 		srmmu_modtype = Swift_bad_c;
1208*4882a593Smuzhiyun 		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
1209*4882a593Smuzhiyun 		/*
1210*4882a593Smuzhiyun 		 * You see Sun allude to this hardware bug but never
1211*4882a593Smuzhiyun 		 * admit things directly, they'll say things like,
1212*4882a593Smuzhiyun 		 * "the Swift chip cache problems" or similar.
1213*4882a593Smuzhiyun 		 */
1214*4882a593Smuzhiyun 		break;
1215*4882a593Smuzhiyun 	default:
1216*4882a593Smuzhiyun 		srmmu_modtype = Swift_ok;
1217*4882a593Smuzhiyun 		break;
1218*4882a593Smuzhiyun 	}
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	sparc32_cachetlb_ops = &swift_ops;
1221*4882a593Smuzhiyun 	flush_page_for_dma_global = 0;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	/*
1224*4882a593Smuzhiyun 	 * Are you now convinced that the Swift is one of the
1225*4882a593Smuzhiyun 	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
1226*4882a593Smuzhiyun 	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
1227*4882a593Smuzhiyun 	 * you examined the microcode of the Swift you'd find
1228*4882a593Smuzhiyun 	 * XXX's all over the place.
1229*4882a593Smuzhiyun 	 */
1230*4882a593Smuzhiyun 	poke_srmmu = poke_swift;
1231*4882a593Smuzhiyun }
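
/*
 * Editor's sketch (not part of the original file): the workaround
 * alluded to in the Swift_lots_o_bugs comment above -- mapping kernel
 * memory with pgd-level PTEs so the ACC-clearing bug is never tripped.
 * srmmu_build_large_pte() is a hypothetical helper for illustration
 * only; the flag names come from <asm/pgtsrmmu.h>.
 */
#if 0
static unsigned long srmmu_build_large_pte(unsigned long paddr)
{
	/* An SRMMU PTE stores the physical page number in bits 31:8,
	 * i.e. paddr >> 4.  An entry with ET=PTE placed at pgd level
	 * maps a whole 16MB region at once, so the MMU never walks
	 * down to (and never corrupts) pte-level kernel entries.
	 */
	return (paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV |
	       SRMMU_CACHE | SRMMU_REF | SRMMU_DIRTY;
}
#endif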
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun static void turbosparc_flush_cache_all(void)
1234*4882a593Smuzhiyun {
1235*4882a593Smuzhiyun 	flush_user_windows();
1236*4882a593Smuzhiyun 	turbosparc_idflash_clear();
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun static void turbosparc_flush_cache_mm(struct mm_struct *mm)
1240*4882a593Smuzhiyun {
1241*4882a593Smuzhiyun 	FLUSH_BEGIN(mm)
1242*4882a593Smuzhiyun 	flush_user_windows();
1243*4882a593Smuzhiyun 	turbosparc_idflash_clear();
1244*4882a593Smuzhiyun 	FLUSH_END
1245*4882a593Smuzhiyun }
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1248*4882a593Smuzhiyun {
1249*4882a593Smuzhiyun 	FLUSH_BEGIN(vma->vm_mm)
1250*4882a593Smuzhiyun 	flush_user_windows();
1251*4882a593Smuzhiyun 	turbosparc_idflash_clear();
1252*4882a593Smuzhiyun 	FLUSH_END
1253*4882a593Smuzhiyun }
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1256*4882a593Smuzhiyun {
1257*4882a593Smuzhiyun 	FLUSH_BEGIN(vma->vm_mm)
1258*4882a593Smuzhiyun 	flush_user_windows();
1259*4882a593Smuzhiyun 	if (vma->vm_flags & VM_EXEC)
1260*4882a593Smuzhiyun 		turbosparc_flush_icache();
1261*4882a593Smuzhiyun 	turbosparc_flush_dcache();
1262*4882a593Smuzhiyun 	FLUSH_END
1263*4882a593Smuzhiyun }
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun /* TurboSparc is copy-back if we enable that mode, but doing so does not work. */
1266*4882a593Smuzhiyun static void turbosparc_flush_page_to_ram(unsigned long page)
1267*4882a593Smuzhiyun {
1268*4882a593Smuzhiyun #ifdef TURBOSPARC_WRITEBACK
1269*4882a593Smuzhiyun 	volatile unsigned long clear;
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	if (srmmu_probe(page))
1272*4882a593Smuzhiyun 		turbosparc_flush_page_cache(page);
1273*4882a593Smuzhiyun 	clear = srmmu_get_fstatus();
1274*4882a593Smuzhiyun #endif
1275*4882a593Smuzhiyun }
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1278*4882a593Smuzhiyun {
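	/* Nothing to do here: poke_turbosparc() enables I-cache
	 * snooping (TURBOSPARC_ICSNOOP), which is presumably why new
	 * signal trampolines need no explicit flush on this chip.
	 */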
1279*4882a593Smuzhiyun }
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun static void turbosparc_flush_page_for_dma(unsigned long page)
1282*4882a593Smuzhiyun {
1283*4882a593Smuzhiyun 	turbosparc_flush_dcache();
1284*4882a593Smuzhiyun }
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun static void turbosparc_flush_tlb_all(void)
1287*4882a593Smuzhiyun {
1288*4882a593Smuzhiyun 	srmmu_flush_whole_tlb();
1289*4882a593Smuzhiyun }
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
1292*4882a593Smuzhiyun {
1293*4882a593Smuzhiyun 	FLUSH_BEGIN(mm)
1294*4882a593Smuzhiyun 	srmmu_flush_whole_tlb();
1295*4882a593Smuzhiyun 	FLUSH_END
1296*4882a593Smuzhiyun }
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1299*4882a593Smuzhiyun {
1300*4882a593Smuzhiyun 	FLUSH_BEGIN(vma->vm_mm)
1301*4882a593Smuzhiyun 	srmmu_flush_whole_tlb();
1302*4882a593Smuzhiyun 	FLUSH_END
1303*4882a593Smuzhiyun }
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1306*4882a593Smuzhiyun {
1307*4882a593Smuzhiyun 	FLUSH_BEGIN(vma->vm_mm)
1308*4882a593Smuzhiyun 	srmmu_flush_whole_tlb();
1309*4882a593Smuzhiyun 	FLUSH_END
1310*4882a593Smuzhiyun }
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun static void poke_turbosparc(void)
1314*4882a593Smuzhiyun {
1315*4882a593Smuzhiyun 	unsigned long mreg = srmmu_get_mmureg();
1316*4882a593Smuzhiyun 	unsigned long ccreg;
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	/* Clear any crap from the cache or else... */
1319*4882a593Smuzhiyun 	turbosparc_flush_cache_all();
1320*4882a593Smuzhiyun 	/* Temporarily disable I & D caches */
1321*4882a593Smuzhiyun 	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
1322*4882a593Smuzhiyun 	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
1323*4882a593Smuzhiyun 	srmmu_set_mmureg(mreg);
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	ccreg = turbosparc_get_ccreg();
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun #ifdef TURBOSPARC_WRITEBACK
1328*4882a593Smuzhiyun 	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
1329*4882a593Smuzhiyun 	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
1330*4882a593Smuzhiyun 			/* Write-back D-cache, emulate VLSI
1331*4882a593Smuzhiyun 			 * abortion number three, not number one */
1332*4882a593Smuzhiyun #else
1333*4882a593Smuzhiyun 	/* For now let's play safe, optimize later */
1334*4882a593Smuzhiyun 	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
1335*4882a593Smuzhiyun 			/* Do DVMA snooping in Dcache, Write-thru D-cache */
1336*4882a593Smuzhiyun 	ccreg &= ~(TURBOSPARC_uS2);
1337*4882a593Smuzhiyun 			/* Emulate VLSI abortion number three, not number one */
1338*4882a593Smuzhiyun #endif
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	switch (ccreg & 7) {
1341*4882a593Smuzhiyun 	case 0: /* No SE cache */
1342*4882a593Smuzhiyun 	case 7: /* Test mode */
1343*4882a593Smuzhiyun 		break;
1344*4882a593Smuzhiyun 	default:
1345*4882a593Smuzhiyun 		ccreg |= (TURBOSPARC_SCENABLE);
1346*4882a593Smuzhiyun 	}
1347*4882a593Smuzhiyun 	turbosparc_set_ccreg(ccreg);
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
1350*4882a593Smuzhiyun 	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
1351*4882a593Smuzhiyun 	srmmu_set_mmureg(mreg);
1352*4882a593Smuzhiyun }
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun static const struct sparc32_cachetlb_ops turbosparc_ops = {
1355*4882a593Smuzhiyun 	.cache_all	= turbosparc_flush_cache_all,
1356*4882a593Smuzhiyun 	.cache_mm	= turbosparc_flush_cache_mm,
1357*4882a593Smuzhiyun 	.cache_page	= turbosparc_flush_cache_page,
1358*4882a593Smuzhiyun 	.cache_range	= turbosparc_flush_cache_range,
1359*4882a593Smuzhiyun 	.tlb_all	= turbosparc_flush_tlb_all,
1360*4882a593Smuzhiyun 	.tlb_mm		= turbosparc_flush_tlb_mm,
1361*4882a593Smuzhiyun 	.tlb_page	= turbosparc_flush_tlb_page,
1362*4882a593Smuzhiyun 	.tlb_range	= turbosparc_flush_tlb_range,
1363*4882a593Smuzhiyun 	.page_to_ram	= turbosparc_flush_page_to_ram,
1364*4882a593Smuzhiyun 	.sig_insns	= turbosparc_flush_sig_insns,
1365*4882a593Smuzhiyun 	.page_for_dma	= turbosparc_flush_page_for_dma,
1366*4882a593Smuzhiyun };
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun static void __init init_turbosparc(void)
1369*4882a593Smuzhiyun {
1370*4882a593Smuzhiyun 	srmmu_name = "Fujitsu TurboSparc";
1371*4882a593Smuzhiyun 	srmmu_modtype = TurboSparc;
1372*4882a593Smuzhiyun 	sparc32_cachetlb_ops = &turbosparc_ops;
1373*4882a593Smuzhiyun 	poke_srmmu = poke_turbosparc;
1374*4882a593Smuzhiyun }
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun static void poke_tsunami(void)
1377*4882a593Smuzhiyun {
1378*4882a593Smuzhiyun 	unsigned long mreg = srmmu_get_mmureg();
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	tsunami_flush_icache();
1381*4882a593Smuzhiyun 	tsunami_flush_dcache();
1382*4882a593Smuzhiyun 	mreg &= ~TSUNAMI_ITD;
1383*4882a593Smuzhiyun 	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
1384*4882a593Smuzhiyun 	srmmu_set_mmureg(mreg);
1385*4882a593Smuzhiyun }
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun static const struct sparc32_cachetlb_ops tsunami_ops = {
1388*4882a593Smuzhiyun 	.cache_all	= tsunami_flush_cache_all,
1389*4882a593Smuzhiyun 	.cache_mm	= tsunami_flush_cache_mm,
1390*4882a593Smuzhiyun 	.cache_page	= tsunami_flush_cache_page,
1391*4882a593Smuzhiyun 	.cache_range	= tsunami_flush_cache_range,
1392*4882a593Smuzhiyun 	.tlb_all	= tsunami_flush_tlb_all,
1393*4882a593Smuzhiyun 	.tlb_mm		= tsunami_flush_tlb_mm,
1394*4882a593Smuzhiyun 	.tlb_page	= tsunami_flush_tlb_page,
1395*4882a593Smuzhiyun 	.tlb_range	= tsunami_flush_tlb_range,
1396*4882a593Smuzhiyun 	.page_to_ram	= tsunami_flush_page_to_ram,
1397*4882a593Smuzhiyun 	.sig_insns	= tsunami_flush_sig_insns,
1398*4882a593Smuzhiyun 	.page_for_dma	= tsunami_flush_page_for_dma,
1399*4882a593Smuzhiyun };
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun static void __init init_tsunami(void)
1402*4882a593Smuzhiyun {
1403*4882a593Smuzhiyun 	/*
1404*4882a593Smuzhiyun 	 * Tsunami's pretty sane, Sun and TI actually got it
1405*4882a593Smuzhiyun 	 * somewhat right this time.  Fujitsu should have
1406*4882a593Smuzhiyun 	 * taken some lessons from them.
1407*4882a593Smuzhiyun 	 */
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	srmmu_name = "TI Tsunami";
1410*4882a593Smuzhiyun 	srmmu_modtype = Tsunami;
1411*4882a593Smuzhiyun 	sparc32_cachetlb_ops = &tsunami_ops;
1412*4882a593Smuzhiyun 	poke_srmmu = poke_tsunami;
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	tsunami_setup_blockops();
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun static void poke_viking(void)
1418*4882a593Smuzhiyun {
1419*4882a593Smuzhiyun 	unsigned long mreg = srmmu_get_mmureg();
1420*4882a593Smuzhiyun 	static int smp_catch;
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 	if (viking_mxcc_present) {
1423*4882a593Smuzhiyun 		unsigned long mxcc_control = mxcc_get_creg();
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
1426*4882a593Smuzhiyun 		mxcc_control &= ~(MXCC_CTL_RRC);
1427*4882a593Smuzhiyun 		mxcc_set_creg(mxcc_control);
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 		/*
1430*4882a593Smuzhiyun 		 * We don't need memory parity checks.
1431*4882a593Smuzhiyun 		 * XXX This is a mess, have to dig out later. ecd.
1432*4882a593Smuzhiyun 		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
1433*4882a593Smuzhiyun 		 */
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 		/* We do cache ptables on MXCC. */
1436*4882a593Smuzhiyun 		mreg |= VIKING_TCENABLE;
1437*4882a593Smuzhiyun 	} else {
1438*4882a593Smuzhiyun 		unsigned long bpreg;
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 		mreg &= ~(VIKING_TCENABLE);
1441*4882a593Smuzhiyun 		if (smp_catch++) {
1442*4882a593Smuzhiyun 			/* Must disable mixed-cmd mode here for other cpus. */
1443*4882a593Smuzhiyun 			bpreg = viking_get_bpreg();
1444*4882a593Smuzhiyun 			bpreg &= ~(VIKING_ACTION_MIX);
1445*4882a593Smuzhiyun 			viking_set_bpreg(bpreg);
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 			/* Just in case PROM does something funny. */
1448*4882a593Smuzhiyun 			msi_set_sync();
1449*4882a593Smuzhiyun 		}
1450*4882a593Smuzhiyun 	}
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	mreg |= VIKING_SPENABLE;
1453*4882a593Smuzhiyun 	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
1454*4882a593Smuzhiyun 	mreg |= VIKING_SBENABLE;
1455*4882a593Smuzhiyun 	mreg &= ~(VIKING_ACENABLE);
1456*4882a593Smuzhiyun 	srmmu_set_mmureg(mreg);
1457*4882a593Smuzhiyun }
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun static struct sparc32_cachetlb_ops viking_ops __ro_after_init = {
1460*4882a593Smuzhiyun 	.cache_all	= viking_flush_cache_all,
1461*4882a593Smuzhiyun 	.cache_mm	= viking_flush_cache_mm,
1462*4882a593Smuzhiyun 	.cache_page	= viking_flush_cache_page,
1463*4882a593Smuzhiyun 	.cache_range	= viking_flush_cache_range,
1464*4882a593Smuzhiyun 	.tlb_all	= viking_flush_tlb_all,
1465*4882a593Smuzhiyun 	.tlb_mm		= viking_flush_tlb_mm,
1466*4882a593Smuzhiyun 	.tlb_page	= viking_flush_tlb_page,
1467*4882a593Smuzhiyun 	.tlb_range	= viking_flush_tlb_range,
1468*4882a593Smuzhiyun 	.page_to_ram	= viking_flush_page_to_ram,
1469*4882a593Smuzhiyun 	.sig_insns	= viking_flush_sig_insns,
1470*4882a593Smuzhiyun 	.page_for_dma	= viking_flush_page_for_dma,
1471*4882a593Smuzhiyun };
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun #ifdef CONFIG_SMP
1474*4882a593Smuzhiyun /* On sun4d the cpu broadcasts local TLB flushes, so we can just
1475*4882a593Smuzhiyun  * perform the local TLB flush and all the other cpus will see it.
1476*4882a593Smuzhiyun  * But, unfortunately, there is a bug in the sun4d XBUS backplane
1477*4882a593Smuzhiyun  * that requires that we add some synchronization to these flushes.
1478*4882a593Smuzhiyun  *
1479*4882a593Smuzhiyun  * The bug is that the fifo which keeps track of all the pending TLB
1480*4882a593Smuzhiyun  * broadcasts in the system is an entry or two too small, so if we
1481*4882a593Smuzhiyun  * have too many going at once we'll overflow that fifo and lose a TLB
1482*4882a593Smuzhiyun  * flush resulting in corruption.
1483*4882a593Smuzhiyun  *
1484*4882a593Smuzhiyun  * Our workaround is to take a global spinlock around the TLB flushes,
1485*4882a593Smuzhiyun  * which guarantees we won't ever have too many pending.  It's a big
1486*4882a593Smuzhiyun  * hammer, but a semaphore-like system to make sure we only have N TLB
1487*4882a593Smuzhiyun  * flushes going at once would require SMP locking anyway, so there's
1488*4882a593Smuzhiyun  * no real value in trying any harder than this.
1489*4882a593Smuzhiyun  */
1490*4882a593Smuzhiyun static struct sparc32_cachetlb_ops viking_sun4d_smp_ops __ro_after_init = {
1491*4882a593Smuzhiyun 	.cache_all	= viking_flush_cache_all,
1492*4882a593Smuzhiyun 	.cache_mm	= viking_flush_cache_mm,
1493*4882a593Smuzhiyun 	.cache_page	= viking_flush_cache_page,
1494*4882a593Smuzhiyun 	.cache_range	= viking_flush_cache_range,
1495*4882a593Smuzhiyun 	.tlb_all	= sun4dsmp_flush_tlb_all,
1496*4882a593Smuzhiyun 	.tlb_mm		= sun4dsmp_flush_tlb_mm,
1497*4882a593Smuzhiyun 	.tlb_page	= sun4dsmp_flush_tlb_page,
1498*4882a593Smuzhiyun 	.tlb_range	= sun4dsmp_flush_tlb_range,
1499*4882a593Smuzhiyun 	.page_to_ram	= viking_flush_page_to_ram,
1500*4882a593Smuzhiyun 	.sig_insns	= viking_flush_sig_insns,
1501*4882a593Smuzhiyun 	.page_for_dma	= viking_flush_page_for_dma,
1502*4882a593Smuzhiyun };
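
/*
 * Editor's sketch (not from this file): the "big hammer" described
 * above.  The real sun4dsmp_flush_tlb_*() routines live elsewhere in
 * the sparc32 tree; this only illustrates the idea, and
 * sun4d_tlbflush_lock is a hypothetical name.
 */
#if 0
static DEFINE_SPINLOCK(sun4d_tlbflush_lock);

static void sun4dsmp_flush_tlb_all_sketch(void)
{
	unsigned long flags;

	/* Serialize broadcast TLB flushes so the XBUS fifo of
	 * pending broadcasts can never overflow.
	 */
	spin_lock_irqsave(&sun4d_tlbflush_lock, flags);
	srmmu_flush_whole_tlb();	/* broadcast to all cpus on sun4d */
	spin_unlock_irqrestore(&sun4d_tlbflush_lock, flags);
}
#endif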
1503*4882a593Smuzhiyun #endif
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun static void __init init_viking(void)
1506*4882a593Smuzhiyun {
1507*4882a593Smuzhiyun 	unsigned long mreg = srmmu_get_mmureg();
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
1510*4882a593Smuzhiyun 	if (mreg & VIKING_MMODE) {
1511*4882a593Smuzhiyun 		srmmu_name = "TI Viking";
1512*4882a593Smuzhiyun 		viking_mxcc_present = 0;
1513*4882a593Smuzhiyun 		msi_set_sync();
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 		/*
1516*4882a593Smuzhiyun 		 * We need this to make sure old viking takes no hits
1517*4882a593Smuzhiyun 		 * on its cache for dma snoops, to work around the
1518*4882a593Smuzhiyun 		 * "load from non-cacheable memory" interrupt bug.
1519*4882a593Smuzhiyun 		 * This is only necessary because of the new way in
1520*4882a593Smuzhiyun 		 * which we use the IOMMU.
1521*4882a593Smuzhiyun 		 */
1522*4882a593Smuzhiyun 		viking_ops.page_for_dma = viking_flush_page;
1523*4882a593Smuzhiyun #ifdef CONFIG_SMP
1524*4882a593Smuzhiyun 		viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
1525*4882a593Smuzhiyun #endif
1526*4882a593Smuzhiyun 		flush_page_for_dma_global = 0;
1527*4882a593Smuzhiyun 	} else {
1528*4882a593Smuzhiyun 		srmmu_name = "TI Viking/MXCC";
1529*4882a593Smuzhiyun 		viking_mxcc_present = 1;
1530*4882a593Smuzhiyun 		srmmu_cache_pagetables = 1;
1531*4882a593Smuzhiyun 	}
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1534*4882a593Smuzhiyun 		&viking_ops;
1535*4882a593Smuzhiyun #ifdef CONFIG_SMP
1536*4882a593Smuzhiyun 	if (sparc_cpu_model == sun4d)
1537*4882a593Smuzhiyun 		sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1538*4882a593Smuzhiyun 			&viking_sun4d_smp_ops;
1539*4882a593Smuzhiyun #endif
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	poke_srmmu = poke_viking;
1542*4882a593Smuzhiyun }
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun /* Probe for the srmmu chip version. */
1545*4882a593Smuzhiyun static void __init get_srmmu_type(void)
1546*4882a593Smuzhiyun {
1547*4882a593Smuzhiyun 	unsigned long mreg, psr;
1548*4882a593Smuzhiyun 	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	srmmu_modtype = SRMMU_INVAL_MOD;
1551*4882a593Smuzhiyun 	hwbug_bitmask = 0;
1552*4882a593Smuzhiyun 
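	/* IMPL and VER occupy the top two nibbles of both the MMU
	 * control register and the PSR; together they identify the
	 * SRMMU module below.
	 */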
1553*4882a593Smuzhiyun 	mreg = srmmu_get_mmureg(); psr = get_psr();
1554*4882a593Smuzhiyun 	mod_typ = (mreg & 0xf0000000) >> 28;
1555*4882a593Smuzhiyun 	mod_rev = (mreg & 0x0f000000) >> 24;
1556*4882a593Smuzhiyun 	psr_typ = (psr >> 28) & 0xf;
1557*4882a593Smuzhiyun 	psr_vers = (psr >> 24) & 0xf;
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	/* First, check for sparc-leon. */
1560*4882a593Smuzhiyun 	if (sparc_cpu_model == sparc_leon) {
1561*4882a593Smuzhiyun 		init_leon();
1562*4882a593Smuzhiyun 		return;
1563*4882a593Smuzhiyun 	}
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun 	/* Second, check for HyperSparc or Cypress. */
1566*4882a593Smuzhiyun 	if (mod_typ == 1) {
1567*4882a593Smuzhiyun 		switch (mod_rev) {
1568*4882a593Smuzhiyun 		case 7:
1569*4882a593Smuzhiyun 			/* UP or MP Hypersparc */
1570*4882a593Smuzhiyun 			init_hypersparc();
1571*4882a593Smuzhiyun 			break;
1572*4882a593Smuzhiyun 		case 0:
1573*4882a593Smuzhiyun 		case 2:
1574*4882a593Smuzhiyun 		case 10:
1575*4882a593Smuzhiyun 		case 11:
1576*4882a593Smuzhiyun 		case 12:
1577*4882a593Smuzhiyun 		case 13:
1578*4882a593Smuzhiyun 		case 14:
1579*4882a593Smuzhiyun 		case 15:
1580*4882a593Smuzhiyun 		default:
1581*4882a593Smuzhiyun 			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
1582*4882a593Smuzhiyun 			prom_halt();
1583*4882a593Smuzhiyun 			break;
1584*4882a593Smuzhiyun 		}
1585*4882a593Smuzhiyun 		return;
1586*4882a593Smuzhiyun 	}
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	/* Now Fujitsu TurboSparc.  It may be running in Swift
1589*4882a593Smuzhiyun 	 * emulation mode, so we will check for that later...
1590*4882a593Smuzhiyun 	 */
1591*4882a593Smuzhiyun 	if (psr_typ == 0 && psr_vers == 5) {
1592*4882a593Smuzhiyun 		init_turbosparc();
1593*4882a593Smuzhiyun 		return;
1594*4882a593Smuzhiyun 	}
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	/* Next check for Fujitsu Swift. */
1597*4882a593Smuzhiyun 	if (psr_typ == 0 && psr_vers == 4) {
1598*4882a593Smuzhiyun 		phandle cpunode;
1599*4882a593Smuzhiyun 		char node_str[128];
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 		/* Check whether it is really a TurboSparc emulating Swift... */
1602*4882a593Smuzhiyun 		cpunode = prom_getchild(prom_root_node);
1603*4882a593Smuzhiyun 		while ((cpunode = prom_getsibling(cpunode)) != 0) {
1604*4882a593Smuzhiyun 			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1605*4882a593Smuzhiyun 			if (!strcmp(node_str, "cpu")) {
1606*4882a593Smuzhiyun 				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
1607*4882a593Smuzhiyun 				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
1608*4882a593Smuzhiyun 					init_turbosparc();
1609*4882a593Smuzhiyun 					return;
1610*4882a593Smuzhiyun 				}
1611*4882a593Smuzhiyun 				break;
1612*4882a593Smuzhiyun 			}
1613*4882a593Smuzhiyun 		}
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 		init_swift();
1616*4882a593Smuzhiyun 		return;
1617*4882a593Smuzhiyun 	}
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun 	/* Now the Viking family of srmmu. */
1620*4882a593Smuzhiyun 	if (psr_typ == 4 &&
1621*4882a593Smuzhiyun 	   ((psr_vers == 0) ||
1622*4882a593Smuzhiyun 	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
1623*4882a593Smuzhiyun 		init_viking();
1624*4882a593Smuzhiyun 		return;
1625*4882a593Smuzhiyun 	}
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 	/* Finally the Tsunami. */
1628*4882a593Smuzhiyun 	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
1629*4882a593Smuzhiyun 		init_tsunami();
1630*4882a593Smuzhiyun 		return;
1631*4882a593Smuzhiyun 	}
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	/* Oh well */
1634*4882a593Smuzhiyun 	srmmu_is_bad();
1635*4882a593Smuzhiyun }
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun #ifdef CONFIG_SMP
1638*4882a593Smuzhiyun /* Local cross-calls. */
1639*4882a593Smuzhiyun static void smp_flush_page_for_dma(unsigned long page)
1640*4882a593Smuzhiyun {
1641*4882a593Smuzhiyun 	xc1((smpfunc_t) local_ops->page_for_dma, page);
1642*4882a593Smuzhiyun 	local_ops->page_for_dma(page);
1643*4882a593Smuzhiyun }
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun static void smp_flush_cache_all(void)
1646*4882a593Smuzhiyun {
1647*4882a593Smuzhiyun 	xc0((smpfunc_t) local_ops->cache_all);
1648*4882a593Smuzhiyun 	local_ops->cache_all();
1649*4882a593Smuzhiyun }
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun static void smp_flush_tlb_all(void)
1652*4882a593Smuzhiyun {
1653*4882a593Smuzhiyun 	xc0((smpfunc_t) local_ops->tlb_all);
1654*4882a593Smuzhiyun 	local_ops->tlb_all();
1655*4882a593Smuzhiyun }
1656*4882a593Smuzhiyun 
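/*
 * The mm-scoped flushes below all follow the same pattern: copy the
 * mm's cpumask, drop the local cpu from it, cross-call any remaining
 * cpus, then run the local flush directly.
 */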
1657*4882a593Smuzhiyun static void smp_flush_cache_mm(struct mm_struct *mm)
1658*4882a593Smuzhiyun {
1659*4882a593Smuzhiyun 	if (mm->context != NO_CONTEXT) {
1660*4882a593Smuzhiyun 		cpumask_t cpu_mask;
1661*4882a593Smuzhiyun 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
1662*4882a593Smuzhiyun 		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1663*4882a593Smuzhiyun 		if (!cpumask_empty(&cpu_mask))
1664*4882a593Smuzhiyun 			xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
1665*4882a593Smuzhiyun 		local_ops->cache_mm(mm);
1666*4882a593Smuzhiyun 	}
1667*4882a593Smuzhiyun }
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun static void smp_flush_tlb_mm(struct mm_struct *mm)
1670*4882a593Smuzhiyun {
1671*4882a593Smuzhiyun 	if (mm->context != NO_CONTEXT) {
1672*4882a593Smuzhiyun 		cpumask_t cpu_mask;
1673*4882a593Smuzhiyun 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
1674*4882a593Smuzhiyun 		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1675*4882a593Smuzhiyun 		if (!cpumask_empty(&cpu_mask)) {
1676*4882a593Smuzhiyun 			xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
1677*4882a593Smuzhiyun 			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
1678*4882a593Smuzhiyun 				cpumask_copy(mm_cpumask(mm),
1679*4882a593Smuzhiyun 					     cpumask_of(smp_processor_id()));
1680*4882a593Smuzhiyun 		}
1681*4882a593Smuzhiyun 		local_ops->tlb_mm(mm);
1682*4882a593Smuzhiyun 	}
1683*4882a593Smuzhiyun }
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun static void smp_flush_cache_range(struct vm_area_struct *vma,
1686*4882a593Smuzhiyun 				  unsigned long start,
1687*4882a593Smuzhiyun 				  unsigned long end)
1688*4882a593Smuzhiyun {
1689*4882a593Smuzhiyun 	struct mm_struct *mm = vma->vm_mm;
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	if (mm->context != NO_CONTEXT) {
1692*4882a593Smuzhiyun 		cpumask_t cpu_mask;
1693*4882a593Smuzhiyun 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
1694*4882a593Smuzhiyun 		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1695*4882a593Smuzhiyun 		if (!cpumask_empty(&cpu_mask))
1696*4882a593Smuzhiyun 			xc3((smpfunc_t) local_ops->cache_range,
1697*4882a593Smuzhiyun 			    (unsigned long) vma, start, end);
1698*4882a593Smuzhiyun 		local_ops->cache_range(vma, start, end);
1699*4882a593Smuzhiyun 	}
1700*4882a593Smuzhiyun }
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun static void smp_flush_tlb_range(struct vm_area_struct *vma,
1703*4882a593Smuzhiyun 				unsigned long start,
1704*4882a593Smuzhiyun 				unsigned long end)
1705*4882a593Smuzhiyun {
1706*4882a593Smuzhiyun 	struct mm_struct *mm = vma->vm_mm;
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 	if (mm->context != NO_CONTEXT) {
1709*4882a593Smuzhiyun 		cpumask_t cpu_mask;
1710*4882a593Smuzhiyun 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
1711*4882a593Smuzhiyun 		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1712*4882a593Smuzhiyun 		if (!cpumask_empty(&cpu_mask))
1713*4882a593Smuzhiyun 			xc3((smpfunc_t) local_ops->tlb_range,
1714*4882a593Smuzhiyun 			    (unsigned long) vma, start, end);
1715*4882a593Smuzhiyun 		local_ops->tlb_range(vma, start, end);
1716*4882a593Smuzhiyun 	}
1717*4882a593Smuzhiyun }
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1720*4882a593Smuzhiyun {
1721*4882a593Smuzhiyun 	struct mm_struct *mm = vma->vm_mm;
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun 	if (mm->context != NO_CONTEXT) {
1724*4882a593Smuzhiyun 		cpumask_t cpu_mask;
1725*4882a593Smuzhiyun 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
1726*4882a593Smuzhiyun 		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1727*4882a593Smuzhiyun 		if (!cpumask_empty(&cpu_mask))
1728*4882a593Smuzhiyun 			xc2((smpfunc_t) local_ops->cache_page,
1729*4882a593Smuzhiyun 			    (unsigned long) vma, page);
1730*4882a593Smuzhiyun 		local_ops->cache_page(vma, page);
1731*4882a593Smuzhiyun 	}
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1735*4882a593Smuzhiyun {
1736*4882a593Smuzhiyun 	struct mm_struct *mm = vma->vm_mm;
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun 	if (mm->context != NO_CONTEXT) {
1739*4882a593Smuzhiyun 		cpumask_t cpu_mask;
1740*4882a593Smuzhiyun 		cpumask_copy(&cpu_mask, mm_cpumask(mm));
1741*4882a593Smuzhiyun 		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1742*4882a593Smuzhiyun 		if (!cpumask_empty(&cpu_mask))
1743*4882a593Smuzhiyun 			xc2((smpfunc_t) local_ops->tlb_page,
1744*4882a593Smuzhiyun 			    (unsigned long) vma, page);
1745*4882a593Smuzhiyun 		local_ops->tlb_page(vma, page);
1746*4882a593Smuzhiyun 	}
1747*4882a593Smuzhiyun }
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun static void smp_flush_page_to_ram(unsigned long page)
1750*4882a593Smuzhiyun {
1751*4882a593Smuzhiyun 	/* Current theory is that those who call this are the ones
1752*4882a593Smuzhiyun 	 * who have just dirtied their cache with the page's contents
1753*4882a593Smuzhiyun 	 * in kernel space, therefore we only run this on the local cpu.
1754*4882a593Smuzhiyun 	 *
1755*4882a593Smuzhiyun 	 * XXX This experiment failed, research further... -DaveM
1756*4882a593Smuzhiyun 	 */
1757*4882a593Smuzhiyun #if 1
1758*4882a593Smuzhiyun 	xc1((smpfunc_t) local_ops->page_to_ram, page);
1759*4882a593Smuzhiyun #endif
1760*4882a593Smuzhiyun 	local_ops->page_to_ram(page);
1761*4882a593Smuzhiyun }
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1764*4882a593Smuzhiyun {
1765*4882a593Smuzhiyun 	cpumask_t cpu_mask;
1766*4882a593Smuzhiyun 	cpumask_copy(&cpu_mask, mm_cpumask(mm));
1767*4882a593Smuzhiyun 	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1768*4882a593Smuzhiyun 	if (!cpumask_empty(&cpu_mask))
1769*4882a593Smuzhiyun 		xc2((smpfunc_t) local_ops->sig_insns,
1770*4882a593Smuzhiyun 		    (unsigned long) mm, insn_addr);
1771*4882a593Smuzhiyun 	local_ops->sig_insns(mm, insn_addr);
1772*4882a593Smuzhiyun }
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun static struct sparc32_cachetlb_ops smp_cachetlb_ops __ro_after_init = {
1775*4882a593Smuzhiyun 	.cache_all	= smp_flush_cache_all,
1776*4882a593Smuzhiyun 	.cache_mm	= smp_flush_cache_mm,
1777*4882a593Smuzhiyun 	.cache_page	= smp_flush_cache_page,
1778*4882a593Smuzhiyun 	.cache_range	= smp_flush_cache_range,
1779*4882a593Smuzhiyun 	.tlb_all	= smp_flush_tlb_all,
1780*4882a593Smuzhiyun 	.tlb_mm		= smp_flush_tlb_mm,
1781*4882a593Smuzhiyun 	.tlb_page	= smp_flush_tlb_page,
1782*4882a593Smuzhiyun 	.tlb_range	= smp_flush_tlb_range,
1783*4882a593Smuzhiyun 	.page_to_ram	= smp_flush_page_to_ram,
1784*4882a593Smuzhiyun 	.sig_insns	= smp_flush_sig_insns,
1785*4882a593Smuzhiyun 	.page_for_dma	= smp_flush_page_for_dma,
1786*4882a593Smuzhiyun };
1787*4882a593Smuzhiyun #endif
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun /* Load up routines and constants for sun4m and sun4d mmu */
1790*4882a593Smuzhiyun void __init load_mmu(void)
1791*4882a593Smuzhiyun {
1792*4882a593Smuzhiyun 	/* Functions */
1793*4882a593Smuzhiyun 	get_srmmu_type();
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun #ifdef CONFIG_SMP
1796*4882a593Smuzhiyun 	/* El switcheroo... */
1797*4882a593Smuzhiyun 	local_ops = sparc32_cachetlb_ops;
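	/* The smp_* wrappers above cross-call through local_ops and
	 * then invoke it locally, so remember the per-cpu ops here
	 * before switching the global pointer over.
	 */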
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun 	if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
1800*4882a593Smuzhiyun 		smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
1801*4882a593Smuzhiyun 		smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
1802*4882a593Smuzhiyun 		smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
1803*4882a593Smuzhiyun 		smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
1804*4882a593Smuzhiyun 	}
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	if (poke_srmmu == poke_viking) {
1807*4882a593Smuzhiyun 		/* Avoid unnecessary cross calls. */
1808*4882a593Smuzhiyun 		smp_cachetlb_ops.cache_all = local_ops->cache_all;
1809*4882a593Smuzhiyun 		smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
1810*4882a593Smuzhiyun 		smp_cachetlb_ops.cache_range = local_ops->cache_range;
1811*4882a593Smuzhiyun 		smp_cachetlb_ops.cache_page = local_ops->cache_page;
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 		smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
1814*4882a593Smuzhiyun 		smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
1815*4882a593Smuzhiyun 		smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
1816*4882a593Smuzhiyun 	}
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun 	/* It really is const after this point. */
1819*4882a593Smuzhiyun 	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1820*4882a593Smuzhiyun 		&smp_cachetlb_ops;
1821*4882a593Smuzhiyun #endif
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 	if (sparc_cpu_model != sun4d)
1824*4882a593Smuzhiyun 		ld_mmu_iommu();
1825*4882a593Smuzhiyun #ifdef CONFIG_SMP
1826*4882a593Smuzhiyun 	if (sparc_cpu_model == sun4d)
1827*4882a593Smuzhiyun 		sun4d_init_smp();
1828*4882a593Smuzhiyun 	else if (sparc_cpu_model == sparc_leon)
1829*4882a593Smuzhiyun 		leon_init_smp();
1830*4882a593Smuzhiyun 	else
1831*4882a593Smuzhiyun 		sun4m_init_smp();
1832*4882a593Smuzhiyun #endif
1833*4882a593Smuzhiyun }
1834