// SPDX-License-Identifier: GPL-2.0
/*
 * xtensa mmu stuff
 *
 * Extracted from init.c
 */
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/cache.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/initialize_mmu.h>
#include <asm/io.h>

#if defined(CONFIG_HIGHMEM)
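/*
 * Allocate a contiguous, zeroed block of PTEs covering @n_pages pages
 * starting at @vaddr and install it into the corresponding kernel PMD
 * entries. Returns a pointer to the first PTE of the block.
 */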
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
	pmd_t *pmd = pmd_off_k(vaddr);
	pte_t *pte;
	unsigned long i;

	n_pages = ALIGN(n_pages, PTRS_PER_PTE);

	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
		 __func__, vaddr, n_pages);

	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	for (i = 0; i < n_pages; ++i)
		pte_clear(NULL, 0, pte + i);

	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
		pte_t *cur_pte = pte + i;

		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
			 __func__, pmd, cur_pte);
	}
	return pte;
}

static void __init fixedrange_init(void)
{
	init_pmd(__fix_to_virt(0), __end_of_fixed_addresses);
}
#endif

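/*
 * With CONFIG_HIGHMEM, set up the page tables backing the fixmap and the
 * persistent kmap (pkmap) areas; without it there is nothing to do here.
 */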
void __init paging_init(void)
{
#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
	kmap_init();
#endif
}

/*
 * Flush the MMU and reset associated registers to their default values.
 */
void init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/*
	 * Writing zeros to the instruction and data TLBCFG special
	 * registers ensures that valid values exist in the registers.
	 *
	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array. For nonexistent PGSZID<w> fields,
	 * zero is the best value to write. Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
#endif
	init_kio();
	local_flush_tlb_all();

	/* Set the RASID register to a known value. */

	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

	/* Set the PTEVADDR special register to the start of the page
	 * table, which is in kernel-mappable space (i.e. not
	 * statically mapped). This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
}

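/*
 * (Re)install the fixed TLB entries mapping the KIO region, in both its
 * cached and bypass (uncached) variants. The "+ 6" in the entry specifier
 * selects the TLB way used for these large mappings (way 6, matching the
 * initial setup in <asm/initialize_mmu.h>).
 */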
void init_kio(void)
{
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
	/*
	 * Update the IO area mapping in case xtensa_kio_paddr has changed
	 */
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
#endif
}