// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/debugfs.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}
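/*
 * Test and clear the "accessed"/young state of a huge pmd entry, via the
 * common __pmdp_test_and_clear_young() helper.
 */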
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry; that should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_large(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{

}
/*
 * Serialize against find_current_mm_pte, which does a lock-less
 * lookup in page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t, we want to prevent a transition from a pmd pointing to a page table
 * to a pmd pointing to a huge page (and back) while interrupts are disabled.
 * We clear the pmd to possibly replace it with a page table pointer in
 * different code paths, so make sure we wait for the parallel
 * find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return __pmd(old_pmd);
}

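/*
 * Clear a huge pmd entry and return the old value. Unless this is a
 * full-mm teardown, the covered range must also be flushed from the TLB
 * (see the comment below).
 */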
pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp, int full)
{
	pmd_t pmd;
	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
	/*
	 * If it is not a fullmm flush, then we can possibly end up converting
	 * this PMD pte entry to a regular level 0 PTE by a parallel page fault.
	 * Make sure we flush the tlb in this case.
	 */
	if (!full)
		flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
	return pmd;
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

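/*
 * Build a huge pmd entry for the given pfn with the requested protection
 * bits applied.
 */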
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

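/*
 * Change the protection of a huge pmd entry: keep only the bits covered by
 * _HPAGE_CHG_MASK and apply the new protection on top.
 */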
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();

	reset_sprs();
}

#ifdef CONFIG_MEMORY_HOTPLUG
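/*
 * Memory hotplug: dispatch creation/removal of the kernel mapping for a
 * hot-plugged section to the radix or hash implementation.
 */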
int __meminit create_section_mapping(unsigned long start, unsigned long end,
				     int nid, pgprot_t prot)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid, prot);

	return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

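/*
 * Allocate the partition table at boot and point the hardware at it by
 * programming the partition table control register (PTCR), including the
 * nest MMU copy on PowerNV.
 */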
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	/* Initialize the Partition Table with no entries */
	partition_tb = memblock_alloc(patb_size, patb_size);
	if (!partition_tb)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, patb_size, patb_size);

	/*
	 * Update the partition table control register,
	 * 64 K size.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	set_ptcr_when_no_uv(ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

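/*
 * Flush the translation caches for a partition ID after its partition
 * table entry has changed. The flavour of flush depends on whether the
 * previous entry was radix or hash.
 */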
static void flush_partition(unsigned int lpid, bool radix)
{
	if (radix) {
		radix__flush_all_lpid(lpid);
		radix__flush_all_lpid_guest(lpid);
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		/* do we need fixup here ? */
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1, bool flush)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	/*
	 * When the ultravisor is enabled, the partition table is stored in
	 * secure memory and can only be accessed by doing an ultravisor call.
	 * However, we maintain a copy of the partition table in normal memory
	 * to allow Nest MMU translations to occur (for normal VMs).
	 *
	 * Therefore, here we always update partition_tb, regardless of whether
	 * we are running under an ultravisor or not.
	 */
	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * If the ultravisor is enabled, we do an ultravisor call to register
	 * the partition table entry (PATE), which also does a global flush of
	 * TLBs and partition table caches for the lpid. Otherwise, just do the
	 * flush. The type of flush (hash or radix) depends on what the previous
	 * use of the partition ID was, not the new use.
	 */
	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
		uv_register_pate(lpid, dw0, dw1);
		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
			dw0, dw1);
	} else if (flush) {
		/*
		 * Boot does not need to flush, because the MMU is off and each
		 * CPU does a tlbiel_all() before switching the MMU on, which
		 * flushes everything.
		 */
		flush_partition(lpid, (old & PATB_HR));
	}
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

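/*
 * Hand out the next unused PMD fragment cached in the mm context, if any.
 * Returns NULL when fragments are not in use or the cache is empty, in
 * which case the caller allocates a fresh page.
 */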
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the cached
		 * pointer NULL.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

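/*
 * Allocate a fresh page to back PMD fragments. The first fragment is
 * returned to the caller; if nobody else populated the cache in the
 * meantime, the remaining fragments are stashed in the mm context.
 */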
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If we find the fragment cache already populated, return the
	 * allocated page with a single fragment count. Otherwise stash
	 * the remaining fragments in the mm context.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

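/*
 * Allocate a PMD page table: try the per-mm fragment cache first, then
 * fall back to allocating a new page.
 */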
pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

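/*
 * Drop a reference on the page backing a PMD fragment and free the page
 * once its last fragment is gone.
 */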
void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	if (PageReserved(page))
		return free_reserved_page(page);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

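/*
 * Free a page table page of the given type; called from the deferred
 * free path via __tlb_remove_table() below.
 */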
static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		__pud_free(table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
	/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
	/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
	/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

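/*
 * Queue a page table page for freeing via the TLB gather. The table type
 * is encoded in the low bits of the pointer so that __tlb_remove_table()
 * can pick the right destructor later.
 */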
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

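/*
 * Deferred-free callback for tlb_remove_table(): decode the table type
 * from the low bits of the pointer and free the table accordingly.
 */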
void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps the memory with one size, mmu_linear_psize,
	 * so don't bother to print these on hash.
	 */
	if (!radix_enabled())
		return;
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

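/*
 * Begin a protection update on a pte: make the entry invalid to hardware
 * while keeping it logically present, so no parallel hardware update can
 * race with the modification (see the comment below).
 */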
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear _PAGE_PRESENT so that no hardware parallel update is
	 * possible. Also keep the pte_present true so that we don't take
	 * a wrong fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information and it is stored at a PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we always use the deposited table, irrespective of whether the
 * mapping is anonymous or not. With radix we use the deposited table only
 * for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = true;
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = true;

static int __init setup_disable_tlbie(char *str)
{
	if (!radix_enabled()) {
		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
		return 1;
	}

	tlbie_capable = false;
	tlbie_enabled = false;

	return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
	if (!tlbie_capable)
		return 0;

	/*
	 * There is no locking vs tlb flushing when changing this value.
	 * The tlb flushers will see one value or another, and use either
	 * tlbie or tlbiel with IPIs. In both cases the TLBs will be
	 * invalidated as expected.
	 */
	debugfs_create_bool("tlbie_enabled", 0600,
			    powerpc_debugfs_root,
			    &tlbie_enabled);

	return 0;
}
arch_initcall(pgtable_debugfs_setup);