/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

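/*
 * Illustrative sketch (not new functionality): with the offset above,
 * pfn_to_page() on a vmemmap kernel reduces to pointer arithmetic into
 * the vmemmap array, so the first PFN of RAM (memstart_addr >> PAGE_SHIFT)
 * maps to the struct page at VMEMMAP_START:
 *
 *	page = vmemmap + pfn;
 *	pfn  = page - vmemmap;
 */
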
#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

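/*
 * Illustrative sketch, assuming a single 2MB THP mapped at 'addr': the PMD
 * variant above issues one invalidation per PMD_SIZE stride and hints that
 * the leaf entry sits at level 2 of the walk:
 *
 *	flush_pmd_tlb_range(vma, addr, addr + PMD_SIZE);
 *	// expands to __flush_tlb_range(vma, addr, addr + PMD_SIZE,
 *	//			       PMD_SIZE, false, 2)
 */
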
/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << 36);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif

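/*
 * Worked example for the 52-bit case (a sketch of the bit movement, not
 * new functionality; 52-bit PAs imply 64K pages, so bits [15:12] of the
 * pte are free): PA bits [51:48] cannot live in pte bits [51:48], so
 * they are folded down into pte bits [15:12], i.e. shifted right by 36:
 *
 *	phys = 0x000f000050000000		(PA[51:48] == 0xf)
 *	__phys_to_pte_val(phys)
 *	     == 0x000000005000f000		(high bits now at [15:12])
 *	__pte_to_phys(__pte(0x000000005000f000)) == phys
 */
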
#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

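/*
 * A small sketch of the clamp above: the "- 1" on both sides keeps the
 * comparison correct when 'end' wraps to 0 at the top of the address
 * space. E.g. with 4K pages, CONT_PTE_SIZE == 64K:
 *
 *	pte_cont_addr_end(0x11000, 0x90000) == 0x20000	(next 64K boundary)
 *	pte_cont_addr_end(0x11000, 0x18000) == 0x18000	(clamped to end)
 */
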
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
 * set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */

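/*
 * A sketch of how the table above is consumed (illustration only): a pte
 * that hardware has just written through has PTE_WRITE set and PTE_RDONLY
 * clear, so it reads as dirty even though PTE_DIRTY was never set:
 *
 *	pte_hw_dirty(pte)	== true		(PTE_WRITE && !PTE_RDONLY)
 *	pte_sw_dirty(pte)	== false	(PTE_DIRTY clear)
 *	pte_dirty(pte)		== true
 *
 * This is why pte_wrprotect() above folds the hardware state into
 * PTE_DIRTY before it sets PTE_RDONLY.
 */
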
static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	if (system_supports_mte() &&
	    pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
		mte_sync_tags(ptep, pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

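/*
 * For orientation (values assume 4K pages; they differ for 16K/64K
 * configurations): PMD_SHIFT is 21, so the default huge page is one PMD
 * block covering 512 base pages:
 *
 *	HPAGE_SIZE         == 1UL << 21 == 0x200000	(2MB)
 *	HUGETLB_PAGE_ORDER == 21 - 12   == 9
 */
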
static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present_invalid(pmd)     (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
#define set_pud_at(mm, addr, pudp, pud)	set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud))

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

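/*
 * A sketch of how the helper above composes the pgprot_*() variants below:
 * clear the masked fields, then OR in the new attributes. E.g.
 * pgprot_writecombine() swaps the MAIR memory-type index for Normal-NC and
 * forbids execution:
 *
 *	prot = __pgprot_modify(prot, PTE_ATTRINDX_MASK,
 *			       PTE_ATTRINDX(MT_NORMAL_NC) |
 *			       PTE_PXN | PTE_UXN);
 */
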
#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

/*
 * Mark the prot value as outer cacheable and inner non-cacheable. Non-coherent
 * devices on a system with support for a system or last level cache use these
 * attributes to cache allocations in the system cache.
 */
#define pgprot_syscached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_iNC_oWB) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd)		(!pmd_table(pmd))

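/*
 * Background for the checks above (architectural encoding, not new logic):
 * bits [1:0] of a descriptor at the levels where blocks are permitted
 * select its type, which is what PMD_TYPE_MASK extracts:
 *
 *	0b11	table	(points to a next-level table)
 *	0b01	block	(section/huge mapping)
 *	0bx0	invalid
 */
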
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

#ifdef CONFIG_MEMORY_HOTPLUG
extern int populate_range_driver_managed(u64 start, u64 size,
		const char *resource_name);
extern int depopulate_range_driver_managed(u64 start, u64 size,
		const char *resource_name);
#endif

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!pud_table(pud))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_page_paddr(p4d));
}

/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

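/*
 * Round-trip sketch for the encoding above (hypothetical values): the type
 * lands in bits [7:2] and the offset in bits [57:8], leaving the valid and
 * PROT_NONE bits zero so the pte always looks non-present:
 *
 *	swp_entry_t e = __swp_entry(3, 0x1234);
 *	__swp_type(e)	== 3
 *	__swp_offset(e)	== 0x1234
 */
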
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
{
	if (system_supports_mte() && mte_restore_tags(entry, page))
		set_bit(PG_mte_tagged, &page->flags);
}

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

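/*
 * Same folding trick as __phys_to_pte_val(), sketched for the TTBR case
 * (52-bit configurations only): PA bits [51:48] move down into TTBR bits
 * [5:2], hence the >> 46:
 *
 *	addr = 0x000f000050000000
 *	phys_to_ttbr(addr) == 0x000000005000003c
 */
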
/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with zeroed
 * page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
static inline bool arch_faults_on_old_pte(void)
{
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte		arch_faults_on_old_pte

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
static inline bool arch_wants_old_prefaulted_pte(void)
{
	return !arch_faults_on_old_pte();
}
#define arch_wants_old_prefaulted_pte	arch_wants_old_prefaulted_pte

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */