/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

bool hugetlb_disabled = false;

#define hugepd_none(hpd) (hpd_val(hpd) == 0)

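/*
 * PTE_T_ORDER is log2(sizeof(pte_basic_t) / sizeof(void *)); both sizes are
 * powers of two, so subtracting the __builtin_ffs() values yields the shift
 * between them.  Page table caches are sized in units of sizeof(void *), so
 * PGT_CACHE(PTE_T_ORDER) hands out objects the size of a single pte_basic_t:
 * the lone PTE shared by all the hugepd entries mapping one huge page.
 */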
#define PTE_T_ORDER	(__builtin_ffs(sizeof(pte_basic_t)) - \
			 __builtin_ffs(sizeof(void *)))

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	/*
	 * Only called for hugetlbfs pages, hence can ignore THP and the
	 * irq disabled walk.
	 */
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int pdshift,
			   unsigned int pshift, spinlock_t *ptl)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

	if (pshift >= pdshift) {
		cachep = PGT_CACHE(PTE_T_ORDER);
		num_hugepd = 1 << (pshift - pdshift);
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}
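	/*
	 * pshift >= pdshift: the huge page is at least as big as what one
	 * directory entry covers, so 2^(pshift - pdshift) consecutive
	 * entries will all point at the same single PTE.  Otherwise one
	 * entry points at a hugepd table of 2^(pdshift - pshift) huge PTEs.
	 */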

	if (!cachep) {
		WARN_ONCE(1, "No page table cache created for hugetlb tables");
		return -ENOMEM;
	}

	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(ptl);
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		hugepd_populate(hpdp, new, pshift);
	}
	/*
	 * If we bailed out of the loop early, an entry was already populated
	 * (we lost a race): back out the ones we filled in and free our
	 * allocation.
	 */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
	} else {
		kmemleak_ignore(new);
	}
	spin_unlock(ptl);
	return 0;
}

/*
 * At this point we do the placement change only for BOOK3S 64. This could
 * possibly work on other subarchitectures as well.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	p4d_t *p4;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;
	spinlock_t *ptl;

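	/*
	 * pshift is log2 of the huge page size; pdshift tracks the shift of
	 * the directory level the entry will hang off.  The walk below stops
	 * at the level whose coverage matches the huge page size (a normal
	 * leaf entry) or at the first level above it (a hugepd table).
	 */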
	addr &= ~(sz - 1);
	pg = pgd_offset(mm, addr);
	p4 = p4d_offset(pg, addr);

#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *)p4;
	else if (pshift > PUD_SHIFT) {
		/*
		 * We need to use a hugepd table
		 */
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)p4;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, p4, addr);
		if (!pu)
			return NULL;
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else {
				ptl = pmd_lockptr(mm, pm);
				hpdp = (hugepd_t *)pm;
			}
		}
	}
#else
	if (pshift >= PGDIR_SHIFT) {
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)p4;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, p4, addr);
		if (!pu)
			return NULL;
		if (pshift >= PUD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			ptl = pmd_lockptr(mm, pm);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif
	if (!hpdp)
		return NULL;

	if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT)
		return pte_alloc_map(mm, (pmd_t *)hpdp, addr);

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
						  pdshift, pshift, ptl))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
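
/*
 * Typical use, mirroring the generic hugetlb fault path (a sketch; exact
 * call sites vary across kernel versions):
 *
 *	pte_t *ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
 *	if (!ptep)
 *		return VM_FAULT_OOM;
 */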

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES 1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES] __initdata;
static unsigned nr_gpages __initdata;

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

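/*
 * Pop one gigantic page off the boot-time free array (LIFO) and queue it on
 * huge_boot_pages for the generic hugetlb boot code to turn into a proper
 * huge page later.
 */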
int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return pseries_alloc_bootmem_huge_page(h);
#endif
	return __alloc_bootmem_huge_page(h);
}

#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

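/*
 * Batches are allocated one page at a time in hugepd_free(); the sizing
 * above makes the header plus HUGEPD_FREELIST_SIZE entries fit in that page.
 */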
struct hugepd_freelist {
	struct rcu_head rcu;
	unsigned int index;
	void *ptes[];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

	free_page((unsigned long)batch);
}

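/*
 * Hugepage tables may be walked locklessly (e.g. by the irq-disabled page
 * table walkers), so they cannot be freed while another CPU might still see
 * them: defer the free through RCU unless the mm is known to be local to
 * this CPU or has a single user.
 */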
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    mm_is_thread_local(tlb->mm)) {
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

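	/*
	 * Only tear this table down if the region it maps lies entirely
	 * within [floor, ceiling): otherwise part of it is still needed by
	 * a neighbouring mapping.
	 */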
	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		*hpdp = __hugepd(0);

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(pdshift - shift));
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	unsigned long start = addr;
	pgtable_t token = pmd_pgtable(*pmd);

	start &= PMD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			if (pmd_none_or_clear_bad(pmd))
				continue;

			/*
			 * If it is not a hugepd pointer, we should have
			 * already found it cleared.
			 */
			WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx));

			hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling);

			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(p4d, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		p4d = p4d_offset(pgd, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (p4d_none_or_clear_bad(p4d))
				continue;
			hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
{
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page = NULL;
	unsigned long mask;
	int shift = hugepd_shift(hpd);
	struct mm_struct *mm = vma->vm_mm;

retry:
	/*
	 * hugepage directory entries are protected by mm->page_table_lock
	 * Use this instead of huge_pte_lockptr
	 */
	ptl = &mm->page_table_lock;
	spin_lock(ptl);

	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		mask = (1UL << shift) - 1;
		page = pte_page(*ptep);
		page += ((address & mask) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(*ptep)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			goto retry;
		}
	}
	spin_unlock(ptl);
	return page;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
							pgoff, flags);
#endif
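	/* The final argument selects the topdown slice allocator. */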
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	/* With radix we don't use slices, so derive it from the VMA */
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

		return 1UL << mmu_psize_to_shift(psize);
	}
	return vma_kernel_pagesize(vma);
}

bool __init arch_hugetlb_valid_size(unsigned long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/*
	 * Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits.
	 */
	if (size <= PAGE_SIZE || !is_power_of_2(size))
		return false;

	mmu_psize = check_and_get_huge_psize(shift);
	if (mmu_psize < 0)
		return false;

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	return true;
}

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);

	if (!arch_hugetlb_valid_size((unsigned long)size))
		return -EINVAL;

	hugetlb_add_hstate(shift - PAGE_SHIFT);
	return 0;
}

static int __init hugetlbpage_init(void)
{
	bool configured = false;
	int psize;

	if (hugetlb_disabled) {
		pr_info("HugeTLB support is disabled!\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
	    !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
		if (shift > PGDIR_SHIFT)
			continue;
		else if (shift > PUD_SHIFT)
			pdshift = PGDIR_SHIFT;
		else if (shift > PMD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PMD_SHIFT;
#else
		if (shift < PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PGDIR_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
#endif
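
		/*
		 * pdshift is now the shift of the directory level the huge
		 * page hangs off.  When pdshift > shift a hugepd table sits
		 * between the two and needs a dedicated pgtable cache, set
		 * up below.
		 */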

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;
		/*
		 * If pdshift and shift are the same, we don't use a
		 * pgtable cache for the hugepd.
		 */
		if (pdshift > shift) {
			if (!IS_ENABLED(CONFIG_PPC_8xx))
				pgtable_cache_add(pdshift - shift);
		} else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
			   IS_ENABLED(CONFIG_PPC_8xx)) {
			pgtable_cache_add(PTE_T_ORDER);
		}

		configured = true;
	}

	if (configured) {
		if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
			hugetlbpage_init_default();
	} else {
		pr_info("Failed to initialize. Disabling HugeTLB\n");
	}

	return 0;
}

arch_initcall(hugetlbpage_init);

void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < compound_nr(page); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page + i));
		} else {
			start = kmap_atomic(page + i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}

void __init gigantic_hugetlb_cma_reserve(void)
{
	unsigned long order = 0;

	if (radix_enabled())
		order = PUD_SHIFT - PAGE_SHIFT;
	else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift)
		/*
		 * On pseries, 16G pages are instead reserved via the
		 * ibm,expected#pages device tree property, so only do the
		 * CMA reservation when not running under an LPAR.
		 */
		order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;

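	/*
	 * Gigantic pages are larger than the buddy allocator can provide
	 * (order >= MAX_ORDER), hence the dedicated CMA area.
	 */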
	if (order) {
		VM_WARN_ON(order < MAX_ORDER);
		hugetlb_cma_reserve(order);
	}
}