/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/pgtable-3level.h
 *
 * Copyright (C) 2011 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */
#ifndef _ASM_PGTABLE_3LEVEL_H
#define _ASM_PGTABLE_3LEVEL_H

/*
 * With LPAE, there are 3 levels of page tables. Each level has 512 entries of
 * 8 bytes each, occupying a 4K page. The first level table covers a range of
 * 512GB, each entry representing 1GB. Since we are limited to 4GB input
 * address range, only 4 entries in the PGD are used.
 *
 * There are enough spare bits in a page table entry for the kernel specific
 * state.
 */
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		512
#define PTRS_PER_PGD		4

#define PTE_HWTABLE_PTRS	(0)
#define PTE_HWTABLE_OFF		(0)
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))

#define MAX_POSSIBLE_PHYSMEM_BITS	40

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map.
 */
#define PGDIR_SHIFT		30

/*
 * PMD_SHIFT determines the size a middle-level page table entry can map.
 */
#define PMD_SHIFT		21

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~((1 << PMD_SHIFT) - 1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~((1 << PGDIR_SHIFT) - 1))
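
/*
 * As an illustration of the layout above (derived from the shift values,
 * assuming the usual 4K pages, i.e. PAGE_SHIFT == 12), a 32-bit virtual
 * address decomposes as:
 *
 *	VA[31:30]  pgd index    (4 entries   x 1GB = 4GB)
 *	VA[29:21]  pmd index    (512 entries x 2MB = 1GB)
 *	VA[20:12]  pte index    (512 entries x 4KB = 2MB)
 *	VA[11:0]   page offset
 *
 * so PGDIR_SIZE is 1GB and PMD_SIZE is 2MB.
 */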

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		21
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~((1 << SECTION_SHIFT) - 1))

#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)

/*
 * Hugetlb definitions.
 */
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
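
/*
 * With HPAGE_SHIFT == PMD_SHIFT == 21, a huge page is a single 2MB section:
 * HPAGE_SIZE = 1 << 21 = 2MB, and with 4K base pages HUGETLB_PAGE_ORDER is
 * 21 - 12 = 9, i.e. one huge page spans 512 base pages.
 */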

/*
 * "Linux" PTE definitions for LPAE.
 *
 * These bits overlap with the hardware bits but the naming is preserved for
 * consistency with the classic page table format.
 */
#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
#define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
#define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
#define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 58)	/* READ ONLY */

#define L_PMD_SECT_VALID	(_AT(pmdval_t, 1) << 0)
#define L_PMD_SECT_DIRTY	(_AT(pmdval_t, 1) << 55)
#define L_PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)
#define L_PMD_SECT_RDONLY	(_AT(pmdval_t, 1) << 58)

/*
 * To be used in assembly code with the upper page attributes.
 */
#define L_PTE_XN_HIGH		(1 << (54 - 32))
#define L_PTE_DIRTY_HIGH	(1 << (55 - 32))

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0) << 2)	/* strongly ordered */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 2) << 2)	/* normal inner write-through */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 7) << 2)	/* normal inner write-alloc */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_MASK		(_AT(pteval_t, 7) << 2)
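
/*
 * The three-bit index above selects one of the eight Attr<n> fields held in
 * MAIR0 (Attr0-3) and MAIR1 (Attr4-7); for example L_PTE_MT_WRITEALLOC
 * (index 7) selects Attr7 in MAIR1. The memory-attribute encodings
 * themselves are programmed by the processor setup code, not here.
 */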

/*
 * Software PGD flags.
 */
#define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */

#ifndef __ASSEMBLY__

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))
#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
						 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
						 PMD_TYPE_SECT)
#define pmd_large(pmd)		pmd_sect(pmd)
#define pmd_leaf(pmd)		pmd_sect(pmd)

#define pud_clear(pudp)			\
	do {				\
		*pudp = __pud(0);	\
		clean_pmd_entry(pudp);	\
	} while (0)

#define set_pud(pudp, pud)		\
	do {				\
		*pudp = pud;		\
		flush_pmd_entry(pudp);	\
	} while (0)

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		*pmdpd = *pmdps;	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		*pmdp = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

/*
 * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
 * that are written to a page table but not for ptes created with mk_pte.
 *
 * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
 * hugetlb_cow, where it is compared with an entry in a page table.
 * This comparison test fails erroneously leading ultimately to a memory leak.
 *
 * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
 * present before running the comparison.
 */
#define __HAVE_ARCH_PTE_SAME
#define pte_same(pte_a,pte_b)	((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG	\
					: pte_val(pte_a))				\
				== (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG	\
					: pte_val(pte_b)))
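
/*
 * Concretely: a present user pte that has already been written into a page
 * table carries PTE_EXT_NG, while the same pte rebuilt with mk_pte() does
 * not; masking the bit off above lets the two still compare equal.
 */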

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))

#define pte_huge(pte)		(pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

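/*
 * pmd_isset() returns the raw masked value when the flag fits in 32 bits,
 * but normalises the result with !! for flags above bit 31, which would
 * otherwise truncate to zero when the expression is used as an int.
 */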
#define pmd_isset(pmd, val)	((u32)(val) == (val) ? pmd_val(pmd) & (val)	\
						: !!(pmd_val(pmd) & (val)))
#define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))

#define pmd_present(pmd)	(pmd_isset((pmd), L_PMD_SECT_VALID))
#define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
#define pte_special(pte)	(pte_isset((pte), L_PTE_SPECIAL))
static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= L_PTE_SPECIAL;
	return pte;
}

#define pmd_write(pmd)		(pmd_isclear((pmd), L_PMD_SECT_RDONLY))
#define pmd_dirty(pmd)		(pmd_isset((pmd), L_PMD_SECT_DIRTY))
#define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
#define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))

#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !pmd_table(pmd))
#endif

#define PMD_BIT_FUNC(fn,op) \
static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }

PMD_BIT_FUNC(wrprotect,	|= L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
PMD_BIT_FUNC(mkwrite,	&= ~L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty,	|= L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkclean,	&= ~L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung,	|= PMD_SECT_AF);
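
/*
 * Each PMD_BIT_FUNC() invocation above expands to a small accessor, e.g.
 *
 *	static inline pmd_t pmd_wrprotect(pmd_t pmd)
 *	{
 *		pmd_val(pmd) |= L_PMD_SECT_RDONLY;
 *		return pmd;
 *	}
 */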

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

/* No hardware dirty/accessed bits -- generic_pmdp_establish() fits */
#define pmdp_establish generic_pmdp_establish

/* represent a not present pmd by a faulting entry, this is used by pmdp_invalidate */
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
				L_PMD_SECT_VALID | L_PMD_SECT_NONE;
	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
	return pmd;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	BUG_ON(addr >= TASK_SIZE);

	/* create a faulting entry if PROT_NONE protected */
	if (pmd_val(pmd) & L_PMD_SECT_NONE)
		pmd_val(pmd) &= ~L_PMD_SECT_VALID;

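	/*
	 * AP[2] is the hardware read-only bit. It is cleared only for entries
	 * that are both writable and software-dirty, so a clean but writable
	 * pmd stays read-only in hardware: the first write faults and the
	 * fault handler can then set the software dirty bit.
	 */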
	if (pmd_write(pmd) && pmd_dirty(pmd))
		pmd_val(pmd) &= ~PMD_SECT_AP2;
	else
		pmd_val(pmd) |= PMD_SECT_AP2;

	*pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
	flush_pmd_entry(pmdp);
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_PGTABLE_3LEVEL_H */