xref: /OK3568_Linux_fs/kernel/arch/parisc/mm/hugetlbpage.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * PARISC64 Huge TLB page support.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * This parisc implementation is heavily based on the SPARC and x86 code.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Copyright (C) 2015 Helge Deller <deller@gmx.de>
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/fs.h>
11*4882a593Smuzhiyun #include <linux/mm.h>
12*4882a593Smuzhiyun #include <linux/sched/mm.h>
13*4882a593Smuzhiyun #include <linux/hugetlb.h>
14*4882a593Smuzhiyun #include <linux/pagemap.h>
15*4882a593Smuzhiyun #include <linux/sysctl.h>
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include <asm/mman.h>
18*4882a593Smuzhiyun #include <asm/tlb.h>
19*4882a593Smuzhiyun #include <asm/tlbflush.h>
20*4882a593Smuzhiyun #include <asm/cacheflush.h>
21*4882a593Smuzhiyun #include <asm/mmu_context.h>
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun unsigned long
hugetlb_get_unmapped_area(struct file * file,unsigned long addr,unsigned long len,unsigned long pgoff,unsigned long flags)25*4882a593Smuzhiyun hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26*4882a593Smuzhiyun 		unsigned long len, unsigned long pgoff, unsigned long flags)
27*4882a593Smuzhiyun {
28*4882a593Smuzhiyun 	struct hstate *h = hstate_file(file);
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun 	if (len & ~huge_page_mask(h))
31*4882a593Smuzhiyun 		return -EINVAL;
32*4882a593Smuzhiyun 	if (len > TASK_SIZE)
33*4882a593Smuzhiyun 		return -ENOMEM;
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun 	if (flags & MAP_FIXED)
36*4882a593Smuzhiyun 		if (prepare_hugepage_range(file, addr, len))
37*4882a593Smuzhiyun 			return -EINVAL;
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun 	if (addr)
40*4882a593Smuzhiyun 		addr = ALIGN(addr, huge_page_size(h));
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	/* we need to make sure the colouring is OK */
43*4882a593Smuzhiyun 	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
44*4882a593Smuzhiyun }
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun 
huge_pte_alloc(struct mm_struct * mm,struct vm_area_struct * vma,unsigned long addr,unsigned long sz)47*4882a593Smuzhiyun pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
48*4882a593Smuzhiyun 			unsigned long addr, unsigned long sz)
49*4882a593Smuzhiyun {
50*4882a593Smuzhiyun 	pgd_t *pgd;
51*4882a593Smuzhiyun 	p4d_t *p4d;
52*4882a593Smuzhiyun 	pud_t *pud;
53*4882a593Smuzhiyun 	pmd_t *pmd;
54*4882a593Smuzhiyun 	pte_t *pte = NULL;
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 	/* We must align the address, because our caller will run
57*4882a593Smuzhiyun 	 * set_huge_pte_at() on whatever we return, which writes out
58*4882a593Smuzhiyun 	 * all of the sub-ptes for the hugepage range.  So we have
59*4882a593Smuzhiyun 	 * to give it the first such sub-pte.
60*4882a593Smuzhiyun 	 */
61*4882a593Smuzhiyun 	addr &= HPAGE_MASK;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	pgd = pgd_offset(mm, addr);
64*4882a593Smuzhiyun 	p4d = p4d_offset(pgd, addr);
65*4882a593Smuzhiyun 	pud = pud_alloc(mm, p4d, addr);
66*4882a593Smuzhiyun 	if (pud) {
67*4882a593Smuzhiyun 		pmd = pmd_alloc(mm, pud, addr);
68*4882a593Smuzhiyun 		if (pmd)
69*4882a593Smuzhiyun 			pte = pte_alloc_map(mm, pmd, addr);
70*4882a593Smuzhiyun 	}
71*4882a593Smuzhiyun 	return pte;
72*4882a593Smuzhiyun }
73*4882a593Smuzhiyun 
huge_pte_offset(struct mm_struct * mm,unsigned long addr,unsigned long sz)74*4882a593Smuzhiyun pte_t *huge_pte_offset(struct mm_struct *mm,
75*4882a593Smuzhiyun 		       unsigned long addr, unsigned long sz)
76*4882a593Smuzhiyun {
77*4882a593Smuzhiyun 	pgd_t *pgd;
78*4882a593Smuzhiyun 	p4d_t *p4d;
79*4882a593Smuzhiyun 	pud_t *pud;
80*4882a593Smuzhiyun 	pmd_t *pmd;
81*4882a593Smuzhiyun 	pte_t *pte = NULL;
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	addr &= HPAGE_MASK;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	pgd = pgd_offset(mm, addr);
86*4882a593Smuzhiyun 	if (!pgd_none(*pgd)) {
87*4882a593Smuzhiyun 		p4d = p4d_offset(pgd, addr);
88*4882a593Smuzhiyun 		if (!p4d_none(*p4d)) {
89*4882a593Smuzhiyun 			pud = pud_offset(p4d, addr);
90*4882a593Smuzhiyun 			if (!pud_none(*pud)) {
91*4882a593Smuzhiyun 				pmd = pmd_offset(pud, addr);
92*4882a593Smuzhiyun 				if (!pmd_none(*pmd))
93*4882a593Smuzhiyun 					pte = pte_offset_map(pmd, addr);
94*4882a593Smuzhiyun 			}
95*4882a593Smuzhiyun 		}
96*4882a593Smuzhiyun 	}
97*4882a593Smuzhiyun 	return pte;
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun /* Purge data and instruction TLB entries.  Must be called holding
101*4882a593Smuzhiyun  * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
102*4882a593Smuzhiyun  * machines since the purge must be broadcast to all CPUs.
103*4882a593Smuzhiyun  */
purge_tlb_entries_huge(struct mm_struct * mm,unsigned long addr)104*4882a593Smuzhiyun static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun 	int i;
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
109*4882a593Smuzhiyun 	 * Linux standard huge pages (e.g. 2 MB) */
110*4882a593Smuzhiyun 	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	addr &= HPAGE_MASK;
113*4882a593Smuzhiyun 	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun 	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
116*4882a593Smuzhiyun 		purge_tlb_entries(mm, addr);
117*4882a593Smuzhiyun 		addr += (1UL << REAL_HPAGE_SHIFT);
118*4882a593Smuzhiyun 	}
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun /* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
__set_huge_pte_at(struct mm_struct * mm,unsigned long addr,pte_t * ptep,pte_t entry)122*4882a593Smuzhiyun static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
123*4882a593Smuzhiyun 		     pte_t *ptep, pte_t entry)
124*4882a593Smuzhiyun {
125*4882a593Smuzhiyun 	unsigned long addr_start;
126*4882a593Smuzhiyun 	int i;
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 	addr &= HPAGE_MASK;
129*4882a593Smuzhiyun 	addr_start = addr;
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
132*4882a593Smuzhiyun 		set_pte(ptep, entry);
133*4882a593Smuzhiyun 		ptep++;
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun 		addr += PAGE_SIZE;
136*4882a593Smuzhiyun 		pte_val(entry) += PAGE_SIZE;
137*4882a593Smuzhiyun 	}
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun 	purge_tlb_entries_huge(mm, addr_start);
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun 
/*
 * Arch hook used by generic hugetlb code to install a huge page
 * mapping; simply delegates to __set_huge_pte_at(), which writes all
 * sub-ptes for the range and purges the TLB.
 *
 * NOTE(review): __set_huge_pte_at() is documented above as requiring
 * the pa_tlb_lock, but no lock is taken here — confirm whether the
 * locking comment is stale or a lock is expected from the caller.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	/* Snapshot the old value before tearing the mapping down. */
	pte_t old_pte = *ptep;

	/* Zero every sub-pte of the hugepage and purge its TLB entries. */
	__set_huge_pte_at(mm, addr, ptep, __pte(0));

	return old_pte;
}
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 
void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	/* Re-install the current mapping with the write bit cleared;
	 * __set_huge_pte_at() rewrites all sub-ptes and flushes the TLB. */
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}
169*4882a593Smuzhiyun 
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	/* Nothing to do (and no TLB flush needed) if the pte is unchanged. */
	if (pte_same(*ptep, pte))
		return 0;

	/* Rewrite the full hugepage mapping with the updated flags. */
	__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	return 1;
}
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 
/*
 * Always 0: on this port huge pages are represented as runs of
 * ordinary ptes (see __set_huge_pte_at()), so a pmd entry itself is
 * never a huge page mapping.
 */
int pmd_huge(pmd_t pmd)
{
	return 0;
}
189*4882a593Smuzhiyun 
/*
 * Always 0: as with pmd_huge(), no huge page mapping is ever placed
 * directly at the pud level on this port.
 */
int pud_huge(pud_t pud)
{
	return 0;
}
194