// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Modifications by Matt Porter (mporter@mvista.com) to support
 * PPC44x Book E processors.
 *
 * This file contains the routines for initializing the MMU
 * on the 4xx series of chips.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/memblock.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>

#include <mm/mmu_decl.h>

/* Used by the 44x TLB replacement exception handler.
 * It just needs to be declared somewhere.
 */
unsigned int tlb_44x_index; /* = 0 */
unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
int icache_44x_need_flush;

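/*
 * Bitmap of TLB indexes holding bolted (pinned) entries on 47x, filled
 * in from MMUBE0/MMUBE1 by ppc47x_update_boltmap() so that bolted
 * entries are not picked as replacement victims.
 */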
unsigned long tlb_47x_boltmap[1024/8];

static void ppc44x_update_tlb_hwater(void)
{
	/* The TLB miss handlers hard-code the watermark in a cmpli
	 * instruction to improve performance, rather than loading it
	 * from the global variable. Thus, we patch the instructions
	 * in the two TLB miss handlers when updating the value.
	 */
	modify_instruction_site(&patch__tlb_44x_hwater_D, 0xffff, tlb_44x_hwater);
	modify_instruction_site(&patch__tlb_44x_hwater_I, 0xffff, tlb_44x_hwater);
}

/*
 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
 */
static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
{
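	/*
	 * Take the entry at the current high-water mark and lower the mark
	 * by one: the TLB miss handlers never write entries above the
	 * watermark, so this slot is effectively pinned.
	 */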
	unsigned int entry = tlb_44x_hwater--;

	ppc44x_update_tlb_hwater();

	mtspr(SPRN_MMUCR, 0);

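	/*
	 * A 44x TLB entry is written one word at a time: word 0 (PAGEID)
	 * carries the effective address, validity and page size, word 1
	 * (XLAT) the physical address, and word 2 (ATTRIB) the access
	 * permissions and storage attributes.
	 */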
	__asm__ __volatile__(
		"tlbwe	%2,%3,%4\n"
		"tlbwe	%1,%3,%5\n"
		"tlbwe	%0,%3,%6\n"
	:
	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
	  "r" (phys),
	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
	  "r" (entry),
	  "i" (PPC44x_TLB_PAGEID),
	  "i" (PPC44x_TLB_XLAT),
	  "i" (PPC44x_TLB_ATTRIB));
}

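/*
 * Return the first free bolted-entry slot (0-5) advertised by the
 * MMUBE0/MMUBE1 registers, or -1 if all six slots are already in use.
 */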
static int __init ppc47x_find_free_bolted(void)
{
	unsigned int mmube0 = mfspr(SPRN_MMUBE0);
	unsigned int mmube1 = mfspr(SPRN_MMUBE1);

	if (!(mmube0 & MMUBE0_VBE0))
		return 0;
	if (!(mmube0 & MMUBE0_VBE1))
		return 1;
	if (!(mmube0 & MMUBE0_VBE2))
		return 2;
	if (!(mmube1 & MMUBE1_VBE3))
		return 3;
	if (!(mmube1 & MMUBE1_VBE4))
		return 4;
	if (!(mmube1 & MMUBE1_VBE5))
		return 5;
	return -1;
}

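/*
 * Record the TLB indexes of the already-valid bolted entries from
 * MMUBE0/MMUBE1 in tlb_47x_boltmap so they are not reused.
 */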
static void __init ppc47x_update_boltmap(void)
{
	unsigned int mmube0 = mfspr(SPRN_MMUBE0);
	unsigned int mmube1 = mfspr(SPRN_MMUBE1);

	if (mmube0 & MMUBE0_VBE0)
		__set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube0 & MMUBE0_VBE1)
		__set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube0 & MMUBE0_VBE2)
		__set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube1 & MMUBE1_VBE3)
		__set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube1 & MMUBE1_VBE4)
		__set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube1 & MMUBE1_VBE5)
		__set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
			  tlb_47x_boltmap);
}

/*
 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
 */
static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
	unsigned int rA;
	int bolted;

	/* Base rA is HW way select, way 0, bolted bit set */
	rA = 0x88000000;

	/* Look for a bolted entry slot */
	bolted = ppc47x_find_free_bolted();
	BUG_ON(bolted < 0);

	/* Insert bolted slot number */
	rA |= bolted << 24;

	pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
		 virt, phys, bolted);

	mtspr(SPRN_MMUCR, 0);

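	/*
	 * Write the three words of the entry: word 0 holds the effective
	 * address, validity and page size, word 1 the physical address,
	 * and word 2 the permissions (plus memory coherence on SMP). rA
	 * selects the hardware way and carries the bolted slot number.
	 */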
	__asm__ __volatile__(
		"tlbwe	%2,%3,0\n"
		"tlbwe	%1,%3,1\n"
		"tlbwe	%0,%3,2\n"
		:
		: "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
		       PPC47x_TLB2_SX
#ifdef CONFIG_SMP
		       | PPC47x_TLB2_M
#endif
		       ),
		  "r" (phys),
		  "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
		  "r" (rA));
}

void __init MMU_init_hw(void)
{
	/* This is not useful on 47x but won't hurt either */
	ppc44x_update_tlb_hwater();

	flush_instruction_cache();
}

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long addr;
	unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);

	/* Pin in enough TLBs to cover any lowmem not covered by the
	 * initial 256M mapping established in head_44x.S */
	for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
	     addr += PPC_PIN_SIZE) {
		if (mmu_has_feature(MMU_FTR_TYPE_47x))
			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
		else
			ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
	}
	if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		ppc47x_update_boltmap();

#ifdef DEBUG
		{
			int i;

			printk(KERN_DEBUG "bolted entries: ");
			for (i = 0; i < 255; i++) {
				if (test_bit(i, tlb_47x_boltmap))
					printk("%d ", i);
			}
			printk("\n");
		}
#endif /* DEBUG */
	}
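	/* All of lowmem is now covered by pinned 256M entries */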
	return total_lowmem;
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	u64 size;

#ifndef CONFIG_NONSTATIC_KERNEL
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);
#endif

	/* 44x has a 256M TLB entry pinned at boot; limit early memblock
	 * allocations to the region covered by that mapping.
	 */
	size = (min_t(u64, first_memblock_size, PPC_PIN_SIZE));
	memblock_set_current_limit(first_memblock_base + size);
}

#ifdef CONFIG_SMP
void __init mmu_init_secondary(int cpu)
{
	unsigned long addr;
	unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);

	/* Pin in enough TLBs to cover any lowmem not covered by the
	 * initial 256M mapping established in head_44x.S
	 *
	 * WARNING: This is called with only the first 256M of the
	 * linear mapping in the TLB and we can't take faults yet
	 * so beware of what this code uses. It runs off a temporary
	 * stack. current (r2) isn't initialized, smp_processor_id()
	 * will not work, current thread info isn't accessible, ...
	 */
	for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
	     addr += PPC_PIN_SIZE) {
		if (mmu_has_feature(MMU_FTR_TYPE_47x))
			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
		else
			ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
	}
}
#endif /* CONFIG_SMP */