xref: /OK3568_Linux_fs/kernel/include/asm-generic/pgalloc.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H

#ifdef CONFIG_MMU

#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
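
/*
 * Page tables are always allocated zeroed. User page tables additionally
 * carry __GFP_ACCOUNT so the pages are charged to the memory cgroup of
 * the allocating task.
 */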

/**
 * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
}
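
/*
 * Example (sketch, not from this file): an architecture that defines
 * __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL can still build on the helper above
 * and only add its own post-processing; my_arch_sync_pte() is a
 * hypothetical hook, shown purely for illustration:
 *
 *	static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 *	{
 *		pte_t *pte = __pte_alloc_one_kernel(mm);
 *
 *		if (pte)
 *			my_arch_sync_pte(pte);
 *		return pte;
 *	}
 */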

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
 * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return __pte_alloc_one_kernel(mm);
}
#endif

/**
 * pte_free_kernel - free PTE-level kernel page table page
 * @mm: the mm_struct of the current context
 * @pte: pointer to the memory containing the page table
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

/**
 * __pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 * @gfp: GFP flags to use for the allocation
 *
 * Allocates a page and runs the pgtable_pte_page_ctor().
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation or must have custom GFP flags.
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
	struct page *pte;

	pte = alloc_page(gfp);
	if (!pte)
		return NULL;
	if (!pgtable_pte_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}

	return pte;
}
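
/*
 * Example (sketch, not from this file): an architecture that defines
 * __HAVE_ARCH_PTE_ALLOC_ONE can pass custom GFP flags, e.g. to allow
 * user PTE pages to come from highmem on 32-bit configurations:
 *
 *	pgtable_t pte_alloc_one(struct mm_struct *mm)
 *	{
 *		return __pte_alloc_one(mm, GFP_PGTABLE_USER | __GFP_HIGHMEM);
 *	}
 */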

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
 * pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page and runs the pgtable_pte_page_ctor().
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return __pte_alloc_one(mm, GFP_PGTABLE_USER);
}
#endif

/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */

/**
 * pte_free - free PTE-level user page table page
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` representing the page table
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	pgtable_pte_page_dtor(pte_page);
	__free_page(pte_page);
}
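
/*
 * Note: pte_free() must only be used on pages that were set up with
 * pgtable_pte_page_ctor(); the dtor tears down the split PTE lock and
 * page-table accounting before the page is returned to the allocator.
 */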

#if CONFIG_PGTABLE_LEVELS > 2

#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
 * pmd_alloc_one - allocate a page for PMD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page and runs the pgtable_pmd_page_ctor().
 * Allocations use %GFP_PGTABLE_USER in user context and
 * %GFP_PGTABLE_KERNEL in kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	page = alloc_pages(gfp, 0);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}
#endif
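
/*
 * Example (sketch, not from this file): an architecture that defines
 * __HAVE_ARCH_PMD_ALLOC_ONE might take PMD tables from a dedicated slab
 * cache instead, e.g. when its tables are smaller than a page; pmd_cache
 * is a hypothetical kmem_cache shown for illustration. Such an
 * architecture has to cover pgtable_pmd_page_ctor()'s job (e.g. the
 * split PMD lock) itself:
 *
 *	pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 *	{
 *		return kmem_cache_zalloc(pmd_cache, GFP_KERNEL);
 *	}
 */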

#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate a page for PUD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page using %GFP_PGTABLE_USER for user context and
 * %GFP_PGTABLE_KERNEL for kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	return (pud_t *)get_zeroed_page(gfp);
}
#endif
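
/*
 * Note: unlike PTE and PMD tables, PUD tables have no ctor/dtor pair;
 * there is no split page-table lock at this level, so a zeroed page is
 * all the setup that is required.
 */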

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
#endif
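
/*
 * Note: there is no generic pgd_alloc() in this header; allocating the
 * top-level table is left to the architecture, which typically also has
 * to populate the kernel part of the address space in each new pgd.
 */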

#endif /* CONFIG_MMU */

#endif /* __ASM_GENERIC_PGALLOC_H */