// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 *  Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <linux/hugetlb.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

#include <mm/mmu_decl.h>

#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

extern int __map_without_ltlbs;

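/* Top of RAM covered by block mappings, as set by mmu_mapin_ram(). */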
static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise, returns 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (__map_without_ltlbs)
		return 0;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return VA for a given PA mapped with LTLBs or fixmap
 * Return 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (__map_without_ltlbs)
		return 0;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}

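/*
 * Early allocation of the huge page directory used for 8M kernel pages.
 * An 8M page spans two consecutive PMD entries, so both are populated
 * with the same huge PTE. Returns the huge PTE for @va, or NULL if the
 * memblock allocation fails.
 */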
static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
	if (hpd_val(*pmdp) == 0) {
		pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);

		if (!ptep)
			return NULL;

		hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
		hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
	}
	return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}

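/*
 * Map a single 512k or 8M kernel huge page at @va to @pa with @prot.
 * When @new is true the page tables are allocated from memblock, so this
 * must run before slab is available; otherwise the existing entry is
 * looked up and updated in place.
 */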
static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
					     pgprot_t prot, int psize, bool new)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep;

	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
		return -EINVAL;

	if (new) {
		if (WARN_ON(slab_is_available()))
			return -EINVAL;

		if (psize == MMU_PAGE_512K)
			ptep = early_pte_alloc_kernel(pmdp, va);
		else
			ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
	} else {
		if (psize == MMU_PAGE_512K)
			ptep = pte_offset_kernel(pmdp, va);
		else
			ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
	}

	if (WARN_ON(!ptep))
		return -ENOMEM;

	/* The PTE should never be already present */
	if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
		return -EINVAL;

	set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));

	return 0;
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
}

static bool immr_is_mapped __initdata;

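/*
 * Map the IMMR area (internal memory-mapped registers) with a single
 * non-cached guarded 512k huge page. Only done once.
 */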
void __init mmu_mapin_immr(void)
{
	if (immr_is_mapped)
		return;

	immr_is_mapped = true;

	__early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
				    PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}

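/*
 * Map the range [@offset, @top) of RAM at PAGE_OFFSET: 512k pages up to
 * the first 8M boundary, 8M pages in the middle, then 512k pages for the
 * tail. When updating existing mappings (@new is false), the affected
 * TLB range is flushed afterwards.
 */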
static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
				pgprot_t prot, bool new)
{
	unsigned long v = PAGE_OFFSET + offset;
	unsigned long p = offset;

	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));

	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);

	/*
	 * v already includes PAGE_OFFSET and has been advanced past the
	 * range by the loops above, so flush from the start offset.
	 */
	if (!new)
		flush_tlb_kernel_range(PAGE_OFFSET + offset, PAGE_OFFSET + top);
}

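/*
 * Set up the initial block mappings of RAM. Text is mapped up to a
 * boundary that depends on STRICT_KERNEL_RWX and DEBUG_PAGEALLOC, the
 * remainder as data. Returns the amount of RAM covered by block
 * mappings, which is also recorded in block_mapped_ram.
 */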
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled();
	unsigned long boundary = strict_boundary ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	WARN_ON(top < einittext8);

	mmu_mapin_immr();

	if (__map_without_ltlbs)
		return 0;

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
	if (debug_pagealloc_enabled()) {
		top = boundary;
	} else {
		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
		mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
	}

	if (top > SZ_32M)
		memblock_set_current_limit(top);

	block_mapped_ram = top;

	return top;
}

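/*
 * Remap the init text region with non-executable protection once it is
 * no longer needed, and re-pin the text TLB entries if
 * CONFIG_PIN_TLB_TEXT is enabled.
 */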
void mmu_mark_initmem_nx(void)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
	mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

	if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
		mmu_pin_tlb(block_mapped_ram, false);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
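/*
 * Remap kernel text up to _sinittext as read-only and executable, and
 * re-pin the data TLB entries if CONFIG_PIN_TLB_DATA is enabled.
 */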
void mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);

	mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
		mmu_pin_tlb(block_mapped_ram, true);
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}

/*
 * Set up to use a given MMU context.
 * id is context number, pgd is PGD pointer.
 *
 * We place the physical address of the new task page directory loaded
 * into the MMU base register, and set the ASID compare register with
 * the new "context."
 */
void set_context(unsigned long id, pgd_t *pgd)
{
	s16 offset = (s16)(__pa(swapper_pg_dir));

	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = pgd;

	/* Register M_TWB will contain base address of level 1 table minus the
	 * lower part of the kernel PGDIR base address, so that all accesses to
	 * level 1 table are done relative to lower part of kernel PGDIR base
	 * address.
	 */
	mtspr(SPRN_M_TWB, __pa(pgd) - offset);

	/* Update context */
	mtspr(SPRN_M_CASID, id - 1);
	/* sync */
	mb();
}

#ifdef CONFIG_PPC_KUEP
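/*
 * Kernel Userspace Execution Prevention: program the instruction MMU
 * access protection groups so the kernel cannot execute from user pages.
 */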
void __init setup_kuep(bool disabled)
{
	if (disabled)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");

	mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif

#ifdef CONFIG_PPC_KUAP
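/*
 * Kernel Userspace Access Protection: program the data MMU access
 * protection groups so the kernel cannot access user pages by default.
 */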
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");

	mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif