// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>

#include <mm/mmu_decl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system,
 * including holes and bad memory (hence sparse). These virtual struct
 * pages are stored in sequence in this virtual address space irrespective
 * of whether the corresponding PFN is valid or not. This achieves a
 * constant relationship between the address of a struct page and its PFN.
 *
 * During boot, or during a memory hotplug operation when a new memory
 * section is added, physical memory allocation (including hash table
 * bolting) is performed for the set of struct pages which are part of
 * the memory section. This saves memory by not allocating struct pages
 * for PFNs which are not valid.
 *
 * -----------------------------------------------
 * | PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES |
 * -----------------------------------------------
 *
 *         f000000000000000                  c000000000000000
 * vmemmap +--------------+                  +--------------+
 *  +      |  page struct | +--------------> |  page struct |
 *  |      +--------------+                  +--------------+
 *  |      |  page struct | +--------------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | +       +------> |  page struct |
 *  |      +--------------+ |       |        +--------------+
 *  |      |  page struct | |       |   +--> |  page struct |
 *  |      +--------------+ |       |   |    +--------------+
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | +-------+   |
 *  |      +--------------+             |
 *  |      |  page struct | +-----------+
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  v      +--------------+
 *
 * ------------------------------------------
 * | RELATION BETWEEN STRUCT PAGES AND PFNS |
 * ------------------------------------------
 *
 * vmemmap +--------------+                 +---------------+
 *  +      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  v      +--------------+                 +---------------+
 */
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
					   unsigned long page_size,
					   unsigned long phys)
{
	int rc;

	if ((start + page_size) >= H_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	rc = htab_bolt_mapping(start, start + page_size, phys,
			       pgprot_val(PAGE_KERNEL),
			       mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
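/*
 * Tear down the vmemmap mapping for a memory section that is going away:
 * remove the bolted hash table entries covering [start, start + page_size).
 */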
void hash__vmemmap_remove_mapping(unsigned long start,
				  unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page is currently only called by __ioremap.
 * It adds an entry to the ioremap page table and adds an entry to the
 * HPT, possibly bolting it.
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		p4dp = p4d_offset(pgdp, ea);
		pudp = pud_alloc(&init_mm, p4dp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping. Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
	}

	smp_wmb();
	return 0;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

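/*
 * Atomically clear and set bits in a huge page PMD, spinning while
 * H_PAGE_BUSY is set. If the old PMD had been hashed, the corresponding
 * hash table entries are flushed. Returns the old PMD value.
 */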
unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					pmd_t *pmdp, unsigned long clr,
					unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}

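/*
 * Called during THP collapse: clear the PMD covering @address, wait for any
 * concurrent hash_page() lookups to finish, and flush the hash table entries
 * for the underlying small pages before the range is copied into a huge page.
 */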
pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_lock. But we could very well be in a
	 * hash_page with local ptep pointer value. Such a hash page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_lock and
	 * hence wait for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}

/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes.
 */
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));
	/*
	 * We store the pgtable in the second half of the PMD.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * Expose the deposited pgtable to other cpus before we set
	 * the hugepage PTE at the pmd level: the hash fault code looks
	 * at the deposited pgtable to store hash index values.
	 */
	smp_wmb();
}

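/*
 * Withdraw the pgtable deposited in the second half of the PMD and clear
 * the stale hash index information it was used to cache.
 */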
pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * Zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	if (mm_is_thread_local(mm))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

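/*
 * Clear a huge page PMD and return its old value. The deposited pgtable
 * caches hash slot information for the base pages, so it is zeroed here
 * as well; this is safe because the pmd is now none and page_table_lock
 * is held.
 */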
pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid and hash index details so that the
	 * hash fault code won't look at stale values.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return old_pmd;
}

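/*
 * Report whether the hash MMU configuration can support transparent
 * hugepages: 16MB pages must be available and must have a hash page
 * encoding for the configured base page size.
 */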
int hash__has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */
	/*
	 * If we have 64K HPTE, we will be using that by default.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok, we only have 4K HPTE.
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX
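/*
 * Update the protection bits of the bolted hash entries backing the linear
 * mapping of [start, end), after rounding the range out to the linear
 * mapping page size. Returns false if the rounded range is empty.
 */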
static bool hash__change_memory_range(unsigned long start, unsigned long end,
				      unsigned long newpp)
{
	unsigned long idx;
	unsigned int step, shift;

	shift = mmu_psize_defs[mmu_linear_psize].shift;
	step = 1 << shift;

	start = ALIGN_DOWN(start, step);
	end = ALIGN(end, step); // aligns up

	if (start >= end)
		return false;

	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
		 start, end, newpp, step);

	for (idx = start; idx < end; idx += step)
		/* Not sure if we can do much with the return value */
		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
						 mmu_kernel_ssize);

	return true;
}

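/*
 * Make the kernel text/rodata region (from _stext to __init_begin)
 * read-only in the bolted hash mappings.
 */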
void hash__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
}

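/*
 * Mark the init section (from __init_begin to __init_end) as regular
 * kernel memory (PAGE_KERNEL), i.e. no longer executable.
 */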
void hash__mark_initmem_nx(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)__init_begin;
	end = (unsigned long)__init_end;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));

	WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif