/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>

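/*
 * Each vmemmap_backing entry records one page backing part of the virtual
 * memmap; vmemmap_list links the entries so the backing pages can be found
 * again when the vmemmap is torn down.
 */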
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
extern void __tlb_remove_table(void *_table);
void pte_frag_destroy(void *pte_frag);

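/*
 * The radix PGD is 64K: a single page with 64K base pages, an order-4
 * allocation with 4K base pages (hence __GFP_RETRY_MAYFAIL below).
 */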
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;
	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
				4);
	if (!page)
		return NULL;
	return (pgd_t *) page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}

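/*
 * On radix the PGD comes straight from the page allocator; on hash it comes
 * from the PGD slab cache.
 */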
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	if (unlikely(!pgd))
		return pgd;

	/*
	 * Don't scan the PGD for pointers, it contains references to PUDs but
	 * those references are not full pointers and so can't be recognised by
	 * kmemleak.
	 */
	kmemleak_no_scan(pgd);

	/*
	 * With hugetlb, we don't clear the second half of the page table.
	 * If we share the same slab cache with the pmd or pud level table,
	 * we need to make sure we zero out the full table on alloc.
	 * With 4K pages we don't store the slot in the second half, hence
	 * we don't need to do this for 4K.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
	memset(pgd, 0, PGD_TABLE_SIZE);
#endif
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

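/* Install a PUD table in the top-level (p4d) entry. */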
static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
{
	*pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;

	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Tell kmemleak to ignore the PUD, that means don't scan it for
	 * pointers and don't consider it a leak. PUDs are typically only
	 * referred to by their PGD, but kmemleak is not able to recognise those
	 * as pointers, leading to false leak reports.
	 */
	kmemleak_ignore(pud);

	return pud;
}

static inline void __pud_free(pud_t *pud)
{
	struct page *page = virt_to_page(pud);

	/*
	 * Early pud pages allocated via memblock allocator
	 * can't be directly freed to slab
	 */
	if (PageReserved(page))
		free_reserved_page(page);
	else
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	return __pud_free(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	*pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

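/*
 * Page-table pages are freed through the mmu_gather so that they are not
 * reused before the corresponding TLB entries have been flushed.
 */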
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

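/*
 * PMD tables are carved out of a page as fragments (pmd_fragment_alloc()),
 * so several PMD tables can share a single backing page.
 */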
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

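/*
 * pgtable_t is a PTE-table pointer here, so pmd_populate() installs the PTE
 * page exactly like pmd_populate_kernel() does.
 */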
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	*pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	*pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, table, PTE_INDEX);
}

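/*
 * Per page-size accounting of pages in the kernel's linear (direct) mapping;
 * only maintained when CONFIG_PROC_FS is enabled, where it feeds the
 * direct-map statistics exported via procfs.
 */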
extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
static inline void update_page_count(int psize, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[psize]);
}

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */