/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

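/*
 * Tell <asm-generic/pgalloc.h> that x86 provides its own pte_alloc_one()
 * and pgd_free() rather than using the generic implementations.
 */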
#define __HAVE_ARCH_PTE_ALLOC_ONE
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>

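/*
 * Page-table allocation hooks.  With CONFIG_PARAVIRT_XXL they come from
 * <asm/paravirt.h> so a paravirtualized hypervisor can track page-table
 * pages; otherwise they compile away to the empty stubs below.
 */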
static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif

/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
 * both 8k in size and 8k-aligned.  That lets us just flip bit 12
 * in a pointer to swap between the two 4k halves.
 */
#define PGD_ALLOCATION_ORDER 1
#else
#define PGD_ALLOCATION_ORDER 0
#endif
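/*
 * Illustrative sketch (not from this header): with an 8k-aligned, order-1
 * PGD the two 4k halves differ only in bit 12 of their address, so
 * switching from one half to the other is just
 *
 *	other_half = (pgd_t *)((unsigned long)pgdp ^ PAGE_SIZE);
 *
 * i.e. flipping bit PAGE_SHIFT in the pointer.
 */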

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pgtable_t pte_alloc_one(struct mm_struct *);

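/*
 * ___pte_free_tlb() hands the pte page to the mmu_gather so it is only
 * freed once the corresponding TLB entries have been flushed; the extra
 * address argument of __pte_free_tlb() is unused on x86.
 */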
extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}

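/*
 * Install a pte page into a pmd entry with _PAGE_TABLE permissions.
 * The _kernel variants take the kernel-virtual address of the pte page,
 * pmd_populate() takes its struct page.  The _safe variant warns (via
 * set_pmd_safe()) instead of silently overwriting an already-present
 * entry that maps something else.
 */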
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
					    pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

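/* The pgtable_t (struct page) backing a populated pmd entry. */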
#define pmd_pgtable(pmd) pmd_page(pmd)

#if CONFIG_PGTABLE_LEVELS > 2
extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}

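/*
 * Under PAE the top-level (PDPT) entries have a restricted format and
 * updating them needs a TLB flush, so pud_populate() is implemented out
 * of line; on other configurations it is a plain inline store.
 */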
#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}

static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */

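/*
 * pud/p4d level helpers, present only on 4- and 5-level (64-bit) builds;
 * they mirror the pmd helpers one level up.
 */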
#if CONFIG_PGTABLE_LEVELS > 3
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}

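/*
 * With CONFIG_PGTABLE_LEVELS > 4 the p4d level exists at compile time
 * but is folded into the pgd at runtime unless pgtable_l5_enabled();
 * hence the early returns and no-op frees below on 4-level hardware.
 */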
#if CONFIG_PGTABLE_LEVELS > 4
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

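/*
 * User page tables are charged to the memory cgroup (GFP_KERNEL_ACCOUNT);
 * allocations for init_mm (kernel mappings) drop __GFP_ACCOUNT so they
 * are never charged.
 */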
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (p4d_t *)get_zeroed_page(gfp);
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;

	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long address)
{
	if (pgtable_l5_enabled())
		___p4d_free_tlb(tlb, p4d);
}

#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#endif /* _ASM_X86_PGALLOC_H */