// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/cp15.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#include "mm.h"
18*4882a593Smuzhiyun
#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE the first-level table holds only PTRS_PER_PGD 64-bit
 * entries, so a small kmalloc'd array is sufficient.
 */
#define __pgd_alloc()	kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd)	kfree(pgd)
#else
/*
 * Classic ARM needs a 16K (order-2 pages), naturally aligned
 * first-level table.
 */
#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
#endif
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun /*
28*4882a593Smuzhiyun * need to get a 16k page for level 1
29*4882a593Smuzhiyun */
pgd_alloc(struct mm_struct * mm)30*4882a593Smuzhiyun pgd_t *pgd_alloc(struct mm_struct *mm)
31*4882a593Smuzhiyun {
32*4882a593Smuzhiyun pgd_t *new_pgd, *init_pgd;
33*4882a593Smuzhiyun p4d_t *new_p4d, *init_p4d;
34*4882a593Smuzhiyun pud_t *new_pud, *init_pud;
35*4882a593Smuzhiyun pmd_t *new_pmd, *init_pmd;
36*4882a593Smuzhiyun pte_t *new_pte, *init_pte;
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun new_pgd = __pgd_alloc();
39*4882a593Smuzhiyun if (!new_pgd)
40*4882a593Smuzhiyun goto no_pgd;
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun /*
45*4882a593Smuzhiyun * Copy over the kernel and IO PGD entries
46*4882a593Smuzhiyun */
47*4882a593Smuzhiyun init_pgd = pgd_offset_k(0);
48*4882a593Smuzhiyun memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
49*4882a593Smuzhiyun (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun #ifdef CONFIG_ARM_LPAE
54*4882a593Smuzhiyun /*
55*4882a593Smuzhiyun * Allocate PMD table for modules and pkmap mappings.
56*4882a593Smuzhiyun */
57*4882a593Smuzhiyun new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
58*4882a593Smuzhiyun MODULES_VADDR);
59*4882a593Smuzhiyun if (!new_p4d)
60*4882a593Smuzhiyun goto no_p4d;
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun new_pud = pud_alloc(mm, new_p4d, MODULES_VADDR);
63*4882a593Smuzhiyun if (!new_pud)
64*4882a593Smuzhiyun goto no_pud;
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun new_pmd = pmd_alloc(mm, new_pud, 0);
67*4882a593Smuzhiyun if (!new_pmd)
68*4882a593Smuzhiyun goto no_pmd;
69*4882a593Smuzhiyun #endif
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun if (!vectors_high()) {
72*4882a593Smuzhiyun /*
73*4882a593Smuzhiyun * On ARM, first page must always be allocated since it
74*4882a593Smuzhiyun * contains the machine vectors. The vectors are always high
75*4882a593Smuzhiyun * with LPAE.
76*4882a593Smuzhiyun */
77*4882a593Smuzhiyun new_p4d = p4d_alloc(mm, new_pgd, 0);
78*4882a593Smuzhiyun if (!new_p4d)
79*4882a593Smuzhiyun goto no_p4d;
80*4882a593Smuzhiyun
81*4882a593Smuzhiyun new_pud = pud_alloc(mm, new_p4d, 0);
82*4882a593Smuzhiyun if (!new_pud)
83*4882a593Smuzhiyun goto no_pud;
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun new_pmd = pmd_alloc(mm, new_pud, 0);
86*4882a593Smuzhiyun if (!new_pmd)
87*4882a593Smuzhiyun goto no_pmd;
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun new_pte = pte_alloc_map(mm, new_pmd, 0);
90*4882a593Smuzhiyun if (!new_pte)
91*4882a593Smuzhiyun goto no_pte;
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun #ifndef CONFIG_ARM_LPAE
94*4882a593Smuzhiyun /*
95*4882a593Smuzhiyun * Modify the PTE pointer to have the correct domain. This
96*4882a593Smuzhiyun * needs to be the vectors domain to avoid the low vectors
97*4882a593Smuzhiyun * being unmapped.
98*4882a593Smuzhiyun */
99*4882a593Smuzhiyun pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
100*4882a593Smuzhiyun pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
101*4882a593Smuzhiyun #endif
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun init_p4d = p4d_offset(init_pgd, 0);
104*4882a593Smuzhiyun init_pud = pud_offset(init_p4d, 0);
105*4882a593Smuzhiyun init_pmd = pmd_offset(init_pud, 0);
106*4882a593Smuzhiyun init_pte = pte_offset_map(init_pmd, 0);
107*4882a593Smuzhiyun set_pte_ext(new_pte + 0, init_pte[0], 0);
108*4882a593Smuzhiyun set_pte_ext(new_pte + 1, init_pte[1], 0);
109*4882a593Smuzhiyun pte_unmap(init_pte);
110*4882a593Smuzhiyun pte_unmap(new_pte);
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun return new_pgd;
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun no_pte:
116*4882a593Smuzhiyun pmd_free(mm, new_pmd);
117*4882a593Smuzhiyun mm_dec_nr_pmds(mm);
118*4882a593Smuzhiyun no_pmd:
119*4882a593Smuzhiyun pud_free(mm, new_pud);
120*4882a593Smuzhiyun no_pud:
121*4882a593Smuzhiyun p4d_free(mm, new_p4d);
122*4882a593Smuzhiyun no_p4d:
123*4882a593Smuzhiyun __pgd_free(new_pgd);
124*4882a593Smuzhiyun no_pgd:
125*4882a593Smuzhiyun return NULL;
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun
/*
 * pgd_free() - free a first-level page table previously returned by
 * pgd_alloc(), together with any lower-level tables it references.
 *
 * The entry at index 0 (vectors page tables on !LPAE) is torn down
 * explicitly; on LPAE the remaining entries are scanned and any table
 * not inherited from swapper (L_PGD_SWAPPER clear) is freed as well.
 * A NULL @pgd_base is a no-op.
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	/* Walk down the tables covering address 0, bailing out at the
	 * first level that was never populated. */
	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	p4d = p4d_offset(pgd, 0);
	if (p4d_none_or_clear_bad(p4d))
		goto no_p4d;

	pud = pud_offset(p4d, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	/* Clear each entry before freeing the table it pointed at. */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	p4d_clear(p4d);
	pud_free(mm, pud);
no_p4d:
	pgd_clear(pgd);
	p4d_free(mm, p4d);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* Entries copied from swapper are shared, not ours to free. */
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		p4d = p4d_offset(pgd, 0);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pud = pud_offset(p4d, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		p4d_clear(p4d);
		pud_free(mm, pud);
		mm_dec_nr_puds(mm);
		pgd_clear(pgd);
		p4d_free(mm, p4d);
	}
#endif
	__pgd_free(pgd_base);
}