/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
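
/*
 * Illustrative sketch (assumed caller, not part of this header): a driver
 * mapping MMIO registers would typically apply pgprot_noncached() before
 * installing the mapping, e.g.:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * On anything newer than a 386 (x86 > 3) this ORs in the UC- cache mode;
 * a 386 has no cache-control page bits, so the value is returned unchanged.
 */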

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))
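
/*
 * Illustrative sketch (assumed caller): with SME active, __sme_set() and
 * __sme_clr() toggle the C-bit (sme_me_mask) in the protection value, so a
 * buffer that must be visible to a device in the clear could be mapped with:
 *
 *	prot = pgprot_decrypted(PAGE_KERNEL);
 *
 * When SME is not active, sme_me_mask is 0 and both macros are no-ops.
 */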

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/api.h>
#include <asm-generic/pgtable_uffd.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
				   bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#define debug_checkwx_user()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)
#endif	/* CONFIG_PARAVIRT_XXL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}


static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return rdpkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	struct pkru_state *pk;

	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return;

	pk = get_xsave_addr(&current->thread.fpu.state.xsave, XFEATURE_PKRU);

	/*
	 * The PKRU value in xstate needs to be in sync with the value that is
	 * written to the CPU. The FPU restore on return to userland would
	 * otherwise load the previous value again.
	 */
	fpregs_lock();
	if (pk)
		pk->pkru = pkru;
	__write_pkru(pkru);
	fpregs_unlock();
}
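
/*
 * Illustrative sketch (assumed constants and shift arithmetic): a caller
 * revoking all access to protection key 1, while leaving other keys alone,
 * might do something like:
 *
 *	u32 pkru = read_pkru();
 *	pkru |= (PKRU_AD_BIT | PKRU_WD_BIT) << (1 * PKRU_BITS_PER_PKEY);
 *	write_pkru(pkru);
 *
 * The fpregs_lock()/fpregs_unlock() pair above keeps the xsave copy and the
 * live PKRU register consistent across preemption.
 */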

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}
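
/*
 * Background note: the XOR with protnone_mask() undoes the L1TF mitigation,
 * under which the PFN bits of a PROT_NONE (non-present) entry are stored
 * inverted so the stale entry points at non-existent memory.  For a present
 * entry the mask is 0 and the XOR is a no-op; for an inverted entry the mask
 * is all ones and the XOR flips the stored PFN bits back to the real PFN.
 */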

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define p4d_leaf	p4d_large
static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* NOTE: when testing for a huge page, consider also pmd_devmap(), or use pmd_large() */
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
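
/*
 * Illustrative sketch (handle_huge_pmd() is a hypothetical helper): a
 * huge-page walker following the NOTE above tests both predicates, since
 * pmd_trans_huge() deliberately reports false for _PAGE_DEVMAP mappings
 * even though they are PSE mappings too:
 *
 *	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
 *		return handle_huge_pmd(pmd);
 */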

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_UFFD_WP;
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_UFFD_WP);
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pmd_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_UFFD_WP;
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_UFFD_WP);
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h can not be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}
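
/*
 * Illustrative sketch (assumed values): on a CPU without NX support,
 * __supported_pte_mask has _PAGE_NX cleared, so a present pgprot that
 * requests NX is silently narrowed by massage_pgprot():
 *
 *	pgprotval_t v = massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_NX));
 *	// v == _PAGE_PRESENT when NX is unsupported
 *
 * check_pgprot() does the same narrowing but, under CONFIG_DEBUG_VM, also
 * warns once about the dropped bits.
 */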

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		      __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}
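
/*
 * Illustrative sketch (assumed caller): this is the primitive behind
 * mprotect()-style permission changes; the PFN and the bits covered by
 * _PAGE_CHG_MASK survive while the protection bits are swapped:
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);
 *	pte = pte_modify(pte, vma->vm_page_prot);
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * flip_protnone_guard() then re-applies the L1TF PFN inversion if the entry
 * changed to or from PROT_NONE.
 */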

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/*
 * mprotect needs to preserve PAT and encryption bits when updating
 * vm_page_prot
 */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	return __pgprot(preservebits | addbits);
}
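
/*
 * Illustrative sketch (assumed values): because the PAT and encryption bits
 * live inside _PAGE_CHG_MASK, they are taken from oldprot, while permission
 * bits such as _PAGE_RW come from newprot:
 *
 *	pgprot_t p = pgprot_modify(pgprot_writecombine(PAGE_KERNEL),
 *				   PAGE_KERNEL_RO);
 *	// p keeps the write-combine cache attribute but drops _PAGE_RW
 */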

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return canon_pgprot(prot);
}

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
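
/*
 * Illustrative sketch (assumed caller): a mapper that asked for
 * write-combine but whose range is already tracked as write-back must not
 * proceed, since the weaker ordering it expects is unavailable:
 *
 *	if (!is_new_memtype_allowed(paddr, size,
 *				    _PAGE_CACHE_MODE_WC,
 *				    _PAGE_CACHE_MODE_WB))
 *		return -EINVAL;	// conflicting cache attribute
 */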

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user page tables and returns the resulting PGD that must
 * be set in the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else   /* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_PAGE_TABLE_ISOLATION */
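
/*
 * Illustrative sketch (assumed call site): pgd-setting paths funnel writes
 * through this helper so the user copy of the page tables stays in sync
 * when PTI is on, and degenerate to a plain assignment otherwise:
 *
 *	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 *
 * On CPUs without X86_FEATURE_PTI the static branch makes this a no-op
 * wrapper around the pgd value.
 */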
738*4882a593Smuzhiyun 
739*4882a593Smuzhiyun #endif	/* __ASSEMBLY__ */
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun 
742*4882a593Smuzhiyun #ifdef CONFIG_X86_32
743*4882a593Smuzhiyun # include <asm/pgtable_32.h>
744*4882a593Smuzhiyun #else
745*4882a593Smuzhiyun # include <asm/pgtable_64.h>
746*4882a593Smuzhiyun #endif
747*4882a593Smuzhiyun 
748*4882a593Smuzhiyun #ifndef __ASSEMBLY__
749*4882a593Smuzhiyun #include <linux/mm_types.h>
750*4882a593Smuzhiyun #include <linux/mmdebug.h>
751*4882a593Smuzhiyun #include <linux/log2.h>
752*4882a593Smuzhiyun #include <asm/fixmap.h>
753*4882a593Smuzhiyun 
pte_none(pte_t pte)754*4882a593Smuzhiyun static inline int pte_none(pte_t pte)
755*4882a593Smuzhiyun {
756*4882a593Smuzhiyun 	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
757*4882a593Smuzhiyun }
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun #define __HAVE_ARCH_PTE_SAME
pte_same(pte_t a,pte_t b)760*4882a593Smuzhiyun static inline int pte_same(pte_t a, pte_t b)
761*4882a593Smuzhiyun {
762*4882a593Smuzhiyun 	return a.pte == b.pte;
763*4882a593Smuzhiyun }
764*4882a593Smuzhiyun 
pte_present(pte_t a)765*4882a593Smuzhiyun static inline int pte_present(pte_t a)
766*4882a593Smuzhiyun {
767*4882a593Smuzhiyun 	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
768*4882a593Smuzhiyun }
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
pte_devmap(pte_t a)771*4882a593Smuzhiyun static inline int pte_devmap(pte_t a)
772*4882a593Smuzhiyun {
773*4882a593Smuzhiyun 	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
774*4882a593Smuzhiyun }
775*4882a593Smuzhiyun #endif
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun #define pte_accessible pte_accessible
pte_accessible(struct mm_struct * mm,pte_t a)778*4882a593Smuzhiyun static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
779*4882a593Smuzhiyun {
780*4882a593Smuzhiyun 	if (pte_flags(a) & _PAGE_PRESENT)
781*4882a593Smuzhiyun 		return true;
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 	if ((pte_flags(a) & _PAGE_PROTNONE) &&
784*4882a593Smuzhiyun 			mm_tlb_flush_pending(mm))
785*4882a593Smuzhiyun 		return true;
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 	return false;
788*4882a593Smuzhiyun }
789*4882a593Smuzhiyun 
pmd_present(pmd_t pmd)790*4882a593Smuzhiyun static inline int pmd_present(pmd_t pmd)
791*4882a593Smuzhiyun {
792*4882a593Smuzhiyun 	/*
793*4882a593Smuzhiyun 	 * Checking for _PAGE_PSE is needed too because
794*4882a593Smuzhiyun 	 * split_huge_page will temporarily clear the present bit (but
795*4882a593Smuzhiyun 	 * the _PAGE_PSE flag will remain set at all times while the
796*4882a593Smuzhiyun 	 * _PAGE_PRESENT bit is clear).
797*4882a593Smuzhiyun 	 */
798*4882a593Smuzhiyun 	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
799*4882a593Smuzhiyun }
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun #ifdef CONFIG_NUMA_BALANCING
802*4882a593Smuzhiyun /*
803*4882a593Smuzhiyun  * These work without NUMA balancing but the kernel does not care. See the
804*4882a593Smuzhiyun  * comment in include/linux/pgtable.h
805*4882a593Smuzhiyun  */
pte_protnone(pte_t pte)806*4882a593Smuzhiyun static inline int pte_protnone(pte_t pte)
807*4882a593Smuzhiyun {
808*4882a593Smuzhiyun 	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
809*4882a593Smuzhiyun 		== _PAGE_PROTNONE;
810*4882a593Smuzhiyun }
811*4882a593Smuzhiyun 
pmd_protnone(pmd_t pmd)812*4882a593Smuzhiyun static inline int pmd_protnone(pmd_t pmd)
813*4882a593Smuzhiyun {
814*4882a593Smuzhiyun 	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
815*4882a593Smuzhiyun 		== _PAGE_PROTNONE;
816*4882a593Smuzhiyun }
817*4882a593Smuzhiyun #endif /* CONFIG_NUMA_BALANCING */
818*4882a593Smuzhiyun 
pmd_none(pmd_t pmd)819*4882a593Smuzhiyun static inline int pmd_none(pmd_t pmd)
820*4882a593Smuzhiyun {
821*4882a593Smuzhiyun 	/* Only check low word on 32-bit platforms, since it might be
822*4882a593Smuzhiyun 	   out of sync with upper half. */
823*4882a593Smuzhiyun 	unsigned long val = native_pmd_val(pmd);
824*4882a593Smuzhiyun 	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
825*4882a593Smuzhiyun }
826*4882a593Smuzhiyun 
pmd_page_vaddr(pmd_t pmd)827*4882a593Smuzhiyun static inline unsigned long pmd_page_vaddr(pmd_t pmd)
828*4882a593Smuzhiyun {
829*4882a593Smuzhiyun 	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
830*4882a593Smuzhiyun }
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun /*
833*4882a593Smuzhiyun  * Currently stuck as a macro due to indirect forward reference to
834*4882a593Smuzhiyun  * linux/mmzone.h's __section_mem_map_addr() definition:
835*4882a593Smuzhiyun  */
836*4882a593Smuzhiyun #define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun /*
839*4882a593Smuzhiyun  * Conversion functions: convert a page and protection to a page entry,
840*4882a593Smuzhiyun  * and a page entry and page directory to the page they refer to.
841*4882a593Smuzhiyun  *
842*4882a593Smuzhiyun  * (Currently stuck as a macro because of indirect forward reference
843*4882a593Smuzhiyun  * to linux/mm.h:page_to_nid())
844*4882a593Smuzhiyun  */
845*4882a593Smuzhiyun #define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
846*4882a593Smuzhiyun 
pmd_bad(pmd_t pmd)847*4882a593Smuzhiyun static inline int pmd_bad(pmd_t pmd)
848*4882a593Smuzhiyun {
849*4882a593Smuzhiyun 	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
850*4882a593Smuzhiyun }
851*4882a593Smuzhiyun 
pages_to_mb(unsigned long npg)852*4882a593Smuzhiyun static inline unsigned long pages_to_mb(unsigned long npg)
853*4882a593Smuzhiyun {
854*4882a593Smuzhiyun 	return npg >> (20 - PAGE_SHIFT);
855*4882a593Smuzhiyun }
856*4882a593Smuzhiyun 
857*4882a593Smuzhiyun #if CONFIG_PGTABLE_LEVELS > 2
pud_none(pud_t pud)858*4882a593Smuzhiyun static inline int pud_none(pud_t pud)
859*4882a593Smuzhiyun {
860*4882a593Smuzhiyun 	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
861*4882a593Smuzhiyun }
862*4882a593Smuzhiyun 
pud_present(pud_t pud)863*4882a593Smuzhiyun static inline int pud_present(pud_t pud)
864*4882a593Smuzhiyun {
865*4882a593Smuzhiyun 	return pud_flags(pud) & _PAGE_PRESENT;
866*4882a593Smuzhiyun }
867*4882a593Smuzhiyun 
pud_page_vaddr(pud_t pud)868*4882a593Smuzhiyun static inline unsigned long pud_page_vaddr(pud_t pud)
869*4882a593Smuzhiyun {
870*4882a593Smuzhiyun 	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
871*4882a593Smuzhiyun }
872*4882a593Smuzhiyun 
873*4882a593Smuzhiyun /*
874*4882a593Smuzhiyun  * Currently stuck as a macro due to indirect forward reference to
875*4882a593Smuzhiyun  * linux/mmzone.h's __section_mem_map_addr() definition:
876*4882a593Smuzhiyun  */
877*4882a593Smuzhiyun #define pud_page(pud)	pfn_to_page(pud_pfn(pud))
878*4882a593Smuzhiyun 
879*4882a593Smuzhiyun #define pud_leaf	pud_large
pud_large(pud_t pud)880*4882a593Smuzhiyun static inline int pud_large(pud_t pud)
881*4882a593Smuzhiyun {
882*4882a593Smuzhiyun 	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
883*4882a593Smuzhiyun 		(_PAGE_PSE | _PAGE_PRESENT);
884*4882a593Smuzhiyun }
885*4882a593Smuzhiyun 
pud_bad(pud_t pud)886*4882a593Smuzhiyun static inline int pud_bad(pud_t pud)
887*4882a593Smuzhiyun {
888*4882a593Smuzhiyun 	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
889*4882a593Smuzhiyun }
890*4882a593Smuzhiyun #else
891*4882a593Smuzhiyun #define pud_leaf	pud_large
pud_large(pud_t pud)892*4882a593Smuzhiyun static inline int pud_large(pud_t pud)
893*4882a593Smuzhiyun {
894*4882a593Smuzhiyun 	return 0;
895*4882a593Smuzhiyun }
896*4882a593Smuzhiyun #endif	/* CONFIG_PGTABLE_LEVELS > 2 */
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun #if CONFIG_PGTABLE_LEVELS > 3
p4d_none(p4d_t p4d)899*4882a593Smuzhiyun static inline int p4d_none(p4d_t p4d)
900*4882a593Smuzhiyun {
901*4882a593Smuzhiyun 	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
902*4882a593Smuzhiyun }
903*4882a593Smuzhiyun 
p4d_present(p4d_t p4d)904*4882a593Smuzhiyun static inline int p4d_present(p4d_t p4d)
905*4882a593Smuzhiyun {
906*4882a593Smuzhiyun 	return p4d_flags(p4d) & _PAGE_PRESENT;
907*4882a593Smuzhiyun }
908*4882a593Smuzhiyun 
p4d_page_vaddr(p4d_t p4d)909*4882a593Smuzhiyun static inline unsigned long p4d_page_vaddr(p4d_t p4d)
910*4882a593Smuzhiyun {
911*4882a593Smuzhiyun 	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
912*4882a593Smuzhiyun }
913*4882a593Smuzhiyun 
914*4882a593Smuzhiyun /*
915*4882a593Smuzhiyun  * Currently stuck as a macro due to indirect forward reference to
916*4882a593Smuzhiyun  * linux/mmzone.h's __section_mem_map_addr() definition:
917*4882a593Smuzhiyun  */
918*4882a593Smuzhiyun #define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))
919*4882a593Smuzhiyun 
p4d_bad(p4d_t p4d)920*4882a593Smuzhiyun static inline int p4d_bad(p4d_t p4d)
921*4882a593Smuzhiyun {
922*4882a593Smuzhiyun 	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
925*4882a593Smuzhiyun 		ignore_flags |= _PAGE_NX;
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun 	return (p4d_flags(p4d) & ~ignore_flags) != 0;
928*4882a593Smuzhiyun }
929*4882a593Smuzhiyun #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
930*4882a593Smuzhiyun 
p4d_index(unsigned long address)931*4882a593Smuzhiyun static inline unsigned long p4d_index(unsigned long address)
932*4882a593Smuzhiyun {
933*4882a593Smuzhiyun 	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
934*4882a593Smuzhiyun }
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun #if CONFIG_PGTABLE_LEVELS > 4
pgd_present(pgd_t pgd)937*4882a593Smuzhiyun static inline int pgd_present(pgd_t pgd)
938*4882a593Smuzhiyun {
939*4882a593Smuzhiyun 	if (!pgtable_l5_enabled())
940*4882a593Smuzhiyun 		return 1;
941*4882a593Smuzhiyun 	return pgd_flags(pgd) & _PAGE_PRESENT;
942*4882a593Smuzhiyun }
943*4882a593Smuzhiyun 
pgd_page_vaddr(pgd_t pgd)944*4882a593Smuzhiyun static inline unsigned long pgd_page_vaddr(pgd_t pgd)
945*4882a593Smuzhiyun {
946*4882a593Smuzhiyun 	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
947*4882a593Smuzhiyun }
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun /*
950*4882a593Smuzhiyun  * Currently stuck as a macro due to indirect forward reference to
951*4882a593Smuzhiyun  * linux/mmzone.h's __section_mem_map_addr() definition:
952*4882a593Smuzhiyun  */
953*4882a593Smuzhiyun #define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))
954*4882a593Smuzhiyun 
955*4882a593Smuzhiyun /* to find an entry in a page-table-directory. */
p4d_offset(pgd_t * pgd,unsigned long address)956*4882a593Smuzhiyun static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
957*4882a593Smuzhiyun {
958*4882a593Smuzhiyun 	if (!pgtable_l5_enabled())
959*4882a593Smuzhiyun 		return (p4d_t *)pgd;
960*4882a593Smuzhiyun 	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
961*4882a593Smuzhiyun }
962*4882a593Smuzhiyun 
pgd_bad(pgd_t pgd)963*4882a593Smuzhiyun static inline int pgd_bad(pgd_t pgd)
964*4882a593Smuzhiyun {
965*4882a593Smuzhiyun 	unsigned long ignore_flags = _PAGE_USER;
966*4882a593Smuzhiyun 
967*4882a593Smuzhiyun 	if (!pgtable_l5_enabled())
968*4882a593Smuzhiyun 		return 0;
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
971*4882a593Smuzhiyun 		ignore_flags |= _PAGE_NX;
972*4882a593Smuzhiyun 
973*4882a593Smuzhiyun 	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
974*4882a593Smuzhiyun }
975*4882a593Smuzhiyun 
pgd_none(pgd_t pgd)976*4882a593Smuzhiyun static inline int pgd_none(pgd_t pgd)
977*4882a593Smuzhiyun {
978*4882a593Smuzhiyun 	if (!pgtable_l5_enabled())
979*4882a593Smuzhiyun 		return 0;
980*4882a593Smuzhiyun 	/*
981*4882a593Smuzhiyun 	 * There is no need to do a workaround for the KNL stray
982*4882a593Smuzhiyun 	 * A/D bit erratum here.  PGDs only point to page tables
983*4882a593Smuzhiyun 	 * except on 32-bit non-PAE which is not supported on
984*4882a593Smuzhiyun 	 * KNL.
985*4882a593Smuzhiyun 	 */
986*4882a593Smuzhiyun 	return !native_pgd_val(pgd);
987*4882a593Smuzhiyun }
988*4882a593Smuzhiyun #endif	/* CONFIG_PGTABLE_LEVELS > 4 */
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun #endif	/* __ASSEMBLY__ */
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun #define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
993*4882a593Smuzhiyun #define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
994*4882a593Smuzhiyun 
995*4882a593Smuzhiyun #ifndef __ASSEMBLY__
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun extern int direct_gbpages;
998*4882a593Smuzhiyun void init_mem_mapping(void);
999*4882a593Smuzhiyun void early_alloc_pgt_buf(void);
1000*4882a593Smuzhiyun extern void memblock_find_dma_reserve(void);
1001*4882a593Smuzhiyun void __init poking_init(void);
1002*4882a593Smuzhiyun unsigned long init_memory_mapping(unsigned long start,
1003*4882a593Smuzhiyun 				  unsigned long end, pgprot_t prot);
1004*4882a593Smuzhiyun 
1005*4882a593Smuzhiyun #ifdef CONFIG_X86_64
1006*4882a593Smuzhiyun extern pgd_t trampoline_pgd_entry;
1007*4882a593Smuzhiyun #endif
1008*4882a593Smuzhiyun 
1009*4882a593Smuzhiyun /* local pte updates need not use xchg for locking */
native_local_ptep_get_and_clear(pte_t * ptep)1010*4882a593Smuzhiyun static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
1011*4882a593Smuzhiyun {
1012*4882a593Smuzhiyun 	pte_t res = *ptep;
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun 	/* Pure native function needs no input for mm, addr */
1015*4882a593Smuzhiyun 	native_pte_clear(NULL, 0, ptep);
1016*4882a593Smuzhiyun 	return res;
1017*4882a593Smuzhiyun }
1018*4882a593Smuzhiyun 
native_local_pmdp_get_and_clear(pmd_t * pmdp)1019*4882a593Smuzhiyun static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
1020*4882a593Smuzhiyun {
1021*4882a593Smuzhiyun 	pmd_t res = *pmdp;
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun 	native_pmd_clear(pmdp);
1024*4882a593Smuzhiyun 	return res;
1025*4882a593Smuzhiyun }
1026*4882a593Smuzhiyun 
native_local_pudp_get_and_clear(pud_t * pudp)1027*4882a593Smuzhiyun static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
1028*4882a593Smuzhiyun {
1029*4882a593Smuzhiyun 	pud_t res = *pudp;
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun 	native_pud_clear(pudp);
1032*4882a593Smuzhiyun 	return res;
1033*4882a593Smuzhiyun }
1034*4882a593Smuzhiyun 
set_pte_at(struct mm_struct * mm,unsigned long addr,pte_t * ptep,pte_t pte)1035*4882a593Smuzhiyun static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1036*4882a593Smuzhiyun 			      pte_t *ptep, pte_t pte)
1037*4882a593Smuzhiyun {
1038*4882a593Smuzhiyun 	set_pte(ptep, pte);
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun 
set_pmd_at(struct mm_struct * mm,unsigned long addr,pmd_t * pmdp,pmd_t pmd)1041*4882a593Smuzhiyun static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1042*4882a593Smuzhiyun 			      pmd_t *pmdp, pmd_t pmd)
1043*4882a593Smuzhiyun {
1044*4882a593Smuzhiyun 	set_pmd(pmdp, pmd);
1045*4882a593Smuzhiyun }
1046*4882a593Smuzhiyun 
set_pud_at(struct mm_struct * mm,unsigned long addr,pud_t * pudp,pud_t pud)1047*4882a593Smuzhiyun static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
1048*4882a593Smuzhiyun 			      pud_t *pudp, pud_t pud)
1049*4882a593Smuzhiyun {
1050*4882a593Smuzhiyun 	native_set_pud(pudp, pud);
1051*4882a593Smuzhiyun }
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun /*
1054*4882a593Smuzhiyun  * We only update the dirty/accessed state if we set
1055*4882a593Smuzhiyun  * the dirty bit by hand in the kernel, since the hardware
1056*4882a593Smuzhiyun  * will do the accessed bit for us, and we don't want to
1057*4882a593Smuzhiyun  * race with other CPU's that might be updating the dirty
1058*4882a593Smuzhiyun  * bit at the same time.
1059*4882a593Smuzhiyun  */
1060*4882a593Smuzhiyun struct vm_area_struct;
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun #define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1063*4882a593Smuzhiyun extern int ptep_set_access_flags(struct vm_area_struct *vma,
1064*4882a593Smuzhiyun 				 unsigned long address, pte_t *ptep,
1065*4882a593Smuzhiyun 				 pte_t entry, int dirty);
1066*4882a593Smuzhiyun 
1067*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1068*4882a593Smuzhiyun extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
1069*4882a593Smuzhiyun 				     unsigned long addr, pte_t *ptep);
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1072*4882a593Smuzhiyun extern int ptep_clear_flush_young(struct vm_area_struct *vma,
1073*4882a593Smuzhiyun 				  unsigned long address, pte_t *ptep);
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
ptep_get_and_clear(struct mm_struct * mm,unsigned long addr,pte_t * ptep)1076*4882a593Smuzhiyun static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
1077*4882a593Smuzhiyun 				       pte_t *ptep)
1078*4882a593Smuzhiyun {
1079*4882a593Smuzhiyun 	pte_t pte = native_ptep_get_and_clear(ptep);
1080*4882a593Smuzhiyun 	return pte;
1081*4882a593Smuzhiyun }
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
ptep_get_and_clear_full(struct mm_struct * mm,unsigned long addr,pte_t * ptep,int full)1084*4882a593Smuzhiyun static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1085*4882a593Smuzhiyun 					    unsigned long addr, pte_t *ptep,
1086*4882a593Smuzhiyun 					    int full)
1087*4882a593Smuzhiyun {
1088*4882a593Smuzhiyun 	pte_t pte;
1089*4882a593Smuzhiyun 	if (full) {
1090*4882a593Smuzhiyun 		/*
1091*4882a593Smuzhiyun 		 * Full address destruction in progress; paravirt does not
1092*4882a593Smuzhiyun 		 * care about updates and native needs no locking
1093*4882a593Smuzhiyun 		 */
1094*4882a593Smuzhiyun 		pte = native_local_ptep_get_and_clear(ptep);
1095*4882a593Smuzhiyun 	} else {
1096*4882a593Smuzhiyun 		pte = ptep_get_and_clear(mm, addr, ptep);
1097*4882a593Smuzhiyun 	}
1098*4882a593Smuzhiyun 	return pte;
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_SET_WRPROTECT
ptep_set_wrprotect(struct mm_struct * mm,unsigned long addr,pte_t * ptep)1102*4882a593Smuzhiyun static inline void ptep_set_wrprotect(struct mm_struct *mm,
1103*4882a593Smuzhiyun 				      unsigned long addr, pte_t *ptep)
1104*4882a593Smuzhiyun {
1105*4882a593Smuzhiyun 	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
1106*4882a593Smuzhiyun }
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun #define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun #define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun #define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1113*4882a593Smuzhiyun extern int pmdp_set_access_flags(struct vm_area_struct *vma,
1114*4882a593Smuzhiyun 				 unsigned long address, pmd_t *pmdp,
1115*4882a593Smuzhiyun 				 pmd_t entry, int dirty);
1116*4882a593Smuzhiyun extern int pudp_set_access_flags(struct vm_area_struct *vma,
1117*4882a593Smuzhiyun 				 unsigned long address, pud_t *pudp,
1118*4882a593Smuzhiyun 				 pud_t entry, int dirty);
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1121*4882a593Smuzhiyun extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1122*4882a593Smuzhiyun 				     unsigned long addr, pmd_t *pmdp);
1123*4882a593Smuzhiyun extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
1124*4882a593Smuzhiyun 				     unsigned long addr, pud_t *pudp);
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
1127*4882a593Smuzhiyun extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
1128*4882a593Smuzhiyun 				  unsigned long address, pmd_t *pmdp);
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun #define pmd_write pmd_write
pmd_write(pmd_t pmd)1132*4882a593Smuzhiyun static inline int pmd_write(pmd_t pmd)
1133*4882a593Smuzhiyun {
1134*4882a593Smuzhiyun 	return pmd_flags(pmd) & _PAGE_RW;
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
pmdp_huge_get_and_clear(struct mm_struct * mm,unsigned long addr,pmd_t * pmdp)1138*4882a593Smuzhiyun static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
1139*4882a593Smuzhiyun 				       pmd_t *pmdp)
1140*4882a593Smuzhiyun {
1141*4882a593Smuzhiyun 	return native_pmdp_get_and_clear(pmdp);
1142*4882a593Smuzhiyun }
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun #define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
pudp_huge_get_and_clear(struct mm_struct * mm,unsigned long addr,pud_t * pudp)1145*4882a593Smuzhiyun static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
1146*4882a593Smuzhiyun 					unsigned long addr, pud_t *pudp)
1147*4882a593Smuzhiyun {
1148*4882a593Smuzhiyun 	return native_pudp_get_and_clear(pudp);
1149*4882a593Smuzhiyun }
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun #define __HAVE_ARCH_PMDP_SET_WRPROTECT
pmdp_set_wrprotect(struct mm_struct * mm,unsigned long addr,pmd_t * pmdp)1152*4882a593Smuzhiyun static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1153*4882a593Smuzhiyun 				      unsigned long addr, pmd_t *pmdp)
1154*4882a593Smuzhiyun {
1155*4882a593Smuzhiyun 	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
1156*4882a593Smuzhiyun }
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun #define pud_write pud_write
pud_write(pud_t pud)1159*4882a593Smuzhiyun static inline int pud_write(pud_t pud)
1160*4882a593Smuzhiyun {
1161*4882a593Smuzhiyun 	return pud_flags(pud) & _PAGE_RW;
1162*4882a593Smuzhiyun }
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun #ifndef pmdp_establish
1165*4882a593Smuzhiyun #define pmdp_establish pmdp_establish
1166*4882a593Smuzhiyun static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
1167*4882a593Smuzhiyun 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
1168*4882a593Smuzhiyun {
1169*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_SMP)) {
1170*4882a593Smuzhiyun 		return xchg(pmdp, pmd);
1171*4882a593Smuzhiyun 	} else {
1172*4882a593Smuzhiyun 		pmd_t old = *pmdp;
1173*4882a593Smuzhiyun 		WRITE_ONCE(*pmdp, pmd);
1174*4882a593Smuzhiyun 		return old;
1175*4882a593Smuzhiyun 	}
1176*4882a593Smuzhiyun }
1177*4882a593Smuzhiyun #endif
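/*
 * Illustrative use of pmdp_establish() (a simplified sketch; real
 * callers hold the page table lock): THP code swaps a huge pmd in
 * place and needs the old value back, e.g. to preserve dirty and
 * accessed bits:
 *
 *	pmd_t old = pmdp_establish(vma, addr, pmdp,
 *				   pmd_mknotpresent(*pmdp));
 *	// inspect pmd_dirty(old) / pmd_young(old) as needed
 *
 * On SMP the xchg() makes the swap atomic against concurrent
 * hardware A/D-bit updates; on UP a plain WRITE_ONCE() suffices.
 */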
1178*4882a593Smuzhiyun /*
1179*4882a593Smuzhiyun  * Page table pages are page-aligned.  The lower half of the top
1180*4882a593Smuzhiyun  * level is used for userspace and the top half for the kernel.
1181*4882a593Smuzhiyun  *
1182*4882a593Smuzhiyun  * Returns true for parts of the PGD that map userspace and
1183*4882a593Smuzhiyun  * false for the parts that map the kernel.
1184*4882a593Smuzhiyun  */
1185*4882a593Smuzhiyun static inline bool pgdp_maps_userspace(void *__ptr)
1186*4882a593Smuzhiyun {
1187*4882a593Smuzhiyun 	unsigned long ptr = (unsigned long)__ptr;
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
1190*4882a593Smuzhiyun }
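/*
 * Worked example (assuming 4K pages, 8-byte pgd_t and
 * PGD_KERNEL_START == 256, i.e. half of PTRS_PER_PGD): entry 255
 * sits at page offset 0x7f8 and maps userspace, entry 256 at offset
 * 0x800 is the first kernel entry:
 *
 *	pgdp_maps_userspace(pgd + 255);	// true  (0x7f8 / 8 == 255 < 256)
 *	pgdp_maps_userspace(pgd + 256);	// false (0x800 / 8 == 256)
 */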
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun #define pgd_leaf	pgd_large
1193*4882a593Smuzhiyun static inline int pgd_large(pgd_t pgd) { return 0; }
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun #ifdef CONFIG_PAGE_TABLE_ISOLATION
1196*4882a593Smuzhiyun /*
1197*4882a593Smuzhiyun  * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
1198*4882a593Smuzhiyun  * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
1199*4882a593Smuzhiyun  * the user one is in the last 4k.  To switch between them, you
1200*4882a593Smuzhiyun  * just need to flip the 12th bit in their addresses.
1201*4882a593Smuzhiyun  */
1202*4882a593Smuzhiyun #define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT
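/*
 * Worked example (4K pages, so PTI_PGTABLE_SWITCH_BIT == 12; the
 * address below is hypothetical): because the order-1 allocation is
 * 8k-aligned, bit 12 of the kernel copy's address is always clear,
 * so setting it yields the user copy one page higher:
 *
 *	pgd_t *kpgd = (pgd_t *)0xffff888001234000;
 *	pgd_t *upgd = kernel_to_user_pgdp(kpgd);
 *	// upgd == (pgd_t *)0xffff888001235000
 */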
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun /*
1205*4882a593Smuzhiyun  * This generates better code than the inline assembly in
1206*4882a593Smuzhiyun  * __set_bit().
1207*4882a593Smuzhiyun  */
1208*4882a593Smuzhiyun static inline void *ptr_set_bit(void *ptr, int bit)
1209*4882a593Smuzhiyun {
1210*4882a593Smuzhiyun 	unsigned long __ptr = (unsigned long)ptr;
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun 	__ptr |= BIT(bit);
1213*4882a593Smuzhiyun 	return (void *)__ptr;
1214*4882a593Smuzhiyun }
1215*4882a593Smuzhiyun static inline void *ptr_clear_bit(void *ptr, int bit)
1216*4882a593Smuzhiyun {
1217*4882a593Smuzhiyun 	unsigned long __ptr = (unsigned long)ptr;
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	__ptr &= ~BIT(bit);
1220*4882a593Smuzhiyun 	return (void *)__ptr;
1221*4882a593Smuzhiyun }
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
1224*4882a593Smuzhiyun {
1225*4882a593Smuzhiyun 	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
1226*4882a593Smuzhiyun }
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
1229*4882a593Smuzhiyun {
1230*4882a593Smuzhiyun 	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
1231*4882a593Smuzhiyun }
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
1234*4882a593Smuzhiyun {
1235*4882a593Smuzhiyun 	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
1236*4882a593Smuzhiyun }
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
1239*4882a593Smuzhiyun {
1240*4882a593Smuzhiyun 	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
1241*4882a593Smuzhiyun }
1242*4882a593Smuzhiyun #endif /* CONFIG_PAGE_TABLE_ISOLATION */
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun /*
1245*4882a593Smuzhiyun  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
1246*4882a593Smuzhiyun  *
1247*4882a593Smuzhiyun  *  dst - pointer to pgd range anywhere on a pgd page
1248*4882a593Smuzhiyun  *  src - ""
1249*4882a593Smuzhiyun  *  count - the number of pgds to copy.
1250*4882a593Smuzhiyun  *
1251*4882a593Smuzhiyun  * dst and src can be on the same page, but the range must not overlap,
1252*4882a593Smuzhiyun  * and must not cross a page boundary.
1253*4882a593Smuzhiyun  */
1254*4882a593Smuzhiyun static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
1255*4882a593Smuzhiyun {
1256*4882a593Smuzhiyun 	memcpy(dst, src, count * sizeof(pgd_t));
1257*4882a593Smuzhiyun #ifdef CONFIG_PAGE_TABLE_ISOLATION
1258*4882a593Smuzhiyun 	if (!static_cpu_has(X86_FEATURE_PTI))
1259*4882a593Smuzhiyun 		return;
1260*4882a593Smuzhiyun 	/* Clone the user space pgd as well */
1261*4882a593Smuzhiyun 	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
1262*4882a593Smuzhiyun 	       count * sizeof(pgd_t));
1263*4882a593Smuzhiyun #endif
1264*4882a593Smuzhiyun }
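/*
 * Typical use (a sketch, modeled on pgd_ctor()-style code): copy the
 * kernel half of the reference page tables into a freshly allocated
 * pgd, with KERNEL_PGD_BOUNDARY/KERNEL_PGD_PTRS delimiting the
 * kernel entries:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */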
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun #define PTE_SHIFT ilog2(PTRS_PER_PTE)
1267*4882a593Smuzhiyun static inline int page_level_shift(enum pg_level level)
1268*4882a593Smuzhiyun {
1269*4882a593Smuzhiyun 	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun static inline unsigned long page_level_size(enum pg_level level)
1272*4882a593Smuzhiyun {
1273*4882a593Smuzhiyun 	return 1UL << page_level_shift(level);
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun static inline unsigned long page_level_mask(enum pg_level level)
1276*4882a593Smuzhiyun {
1277*4882a593Smuzhiyun 	return ~(page_level_size(level) - 1);
1278*4882a593Smuzhiyun }
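/*
 * Worked example (x86-64, where PTRS_PER_PTE == 512 so PTE_SHIFT == 9,
 * and PG_LEVEL_4K/2M/1G have the values 1/2/3):
 *
 *	page_level_shift(PG_LEVEL_2M);	// (12 - 9) + 2 * 9 == 21
 *	page_level_size(PG_LEVEL_2M);	// 1UL << 21 == 2 MiB
 *	page_level_mask(PG_LEVEL_2M);	// ~0x1fffffUL
 */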
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun /*
1281*4882a593Smuzhiyun  * The x86 doesn't have any external MMU info: the kernel page
1282*4882a593Smuzhiyun  * tables contain all the necessary information.
1283*4882a593Smuzhiyun  */
1284*4882a593Smuzhiyun static inline void update_mmu_cache(struct vm_area_struct *vma,
1285*4882a593Smuzhiyun 		unsigned long addr, pte_t *ptep)
1286*4882a593Smuzhiyun {
1287*4882a593Smuzhiyun }
1288*4882a593Smuzhiyun static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
1289*4882a593Smuzhiyun 		unsigned long addr, pmd_t *pmd)
1290*4882a593Smuzhiyun {
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
1293*4882a593Smuzhiyun 		unsigned long addr, pud_t *pud)
1294*4882a593Smuzhiyun {
1295*4882a593Smuzhiyun }
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
1298*4882a593Smuzhiyun static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
1299*4882a593Smuzhiyun {
1300*4882a593Smuzhiyun 	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1301*4882a593Smuzhiyun }
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun static inline int pte_swp_soft_dirty(pte_t pte)
1304*4882a593Smuzhiyun {
1305*4882a593Smuzhiyun 	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
1306*4882a593Smuzhiyun }
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
1309*4882a593Smuzhiyun {
1310*4882a593Smuzhiyun 	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1311*4882a593Smuzhiyun }
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1314*4882a593Smuzhiyun static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1315*4882a593Smuzhiyun {
1316*4882a593Smuzhiyun 	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1317*4882a593Smuzhiyun }
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun static inline int pmd_swp_soft_dirty(pmd_t pmd)
1320*4882a593Smuzhiyun {
1321*4882a593Smuzhiyun 	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
1322*4882a593Smuzhiyun }
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1325*4882a593Smuzhiyun {
1326*4882a593Smuzhiyun 	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1327*4882a593Smuzhiyun }
1328*4882a593Smuzhiyun #endif
1329*4882a593Smuzhiyun #endif
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
1332*4882a593Smuzhiyun static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
1333*4882a593Smuzhiyun {
1334*4882a593Smuzhiyun 	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
1335*4882a593Smuzhiyun }
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun static inline int pte_swp_uffd_wp(pte_t pte)
1338*4882a593Smuzhiyun {
1339*4882a593Smuzhiyun 	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
1340*4882a593Smuzhiyun }
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
1343*4882a593Smuzhiyun {
1344*4882a593Smuzhiyun 	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
1345*4882a593Smuzhiyun }
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
1348*4882a593Smuzhiyun {
1349*4882a593Smuzhiyun 	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
1350*4882a593Smuzhiyun }
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun static inline int pmd_swp_uffd_wp(pmd_t pmd)
1353*4882a593Smuzhiyun {
1354*4882a593Smuzhiyun 	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
1355*4882a593Smuzhiyun }
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
1358*4882a593Smuzhiyun {
1359*4882a593Smuzhiyun 	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
1360*4882a593Smuzhiyun }
1361*4882a593Smuzhiyun #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun #define PKRU_AD_BIT 0x1u
1364*4882a593Smuzhiyun #define PKRU_WD_BIT 0x2u
1365*4882a593Smuzhiyun #define PKRU_BITS_PER_PKEY 2
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
1368*4882a593Smuzhiyun extern u32 init_pkru_value;
1369*4882a593Smuzhiyun #else
1370*4882a593Smuzhiyun #define init_pkru_value	0
1371*4882a593Smuzhiyun #endif
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
1374*4882a593Smuzhiyun {
1375*4882a593Smuzhiyun 	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
1376*4882a593Smuzhiyun 	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
1377*4882a593Smuzhiyun }
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
1380*4882a593Smuzhiyun {
1381*4882a593Smuzhiyun 	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
1382*4882a593Smuzhiyun 	/*
1383*4882a593Smuzhiyun 	 * Access-disable disables writes too so we need to check
1384*4882a593Smuzhiyun 	 * both bits here.
1385*4882a593Smuzhiyun 	 */
1386*4882a593Smuzhiyun 	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
1387*4882a593Smuzhiyun }
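/*
 * Worked example: each pkey owns two adjacent PKRU bits, so for
 * pkey 2 the AD/WD bits sit at positions 4 and 5:
 *
 *	__pkru_allows_read(0x10, 2);	// false: AD set for pkey 2
 *	__pkru_allows_read(0x20, 2);	// true:  only WD is set
 *	__pkru_allows_write(0x20, 2);	// false: WD denies writes
 */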
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun static inline u16 pte_flags_pkey(unsigned long pte_flags)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
1392*4882a593Smuzhiyun 	/* ifdef to avoid doing 59-bit shift on 32-bit values */
1393*4882a593Smuzhiyun 	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
1394*4882a593Smuzhiyun #else
1395*4882a593Smuzhiyun 	return 0;
1396*4882a593Smuzhiyun #endif
1397*4882a593Smuzhiyun }
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun static inline bool __pkru_allows_pkey(u16 pkey, bool write)
1400*4882a593Smuzhiyun {
1401*4882a593Smuzhiyun 	u32 pkru = read_pkru();
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	if (!__pkru_allows_read(pkru, pkey))
1404*4882a593Smuzhiyun 		return false;
1405*4882a593Smuzhiyun 	if (write && !__pkru_allows_write(pkru, pkey))
1406*4882a593Smuzhiyun 		return false;
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	return true;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun /*
1412*4882a593Smuzhiyun  * 'pteval' can come from a PTE, PMD or PUD.  We only check
1413*4882a593Smuzhiyun  * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
1414*4882a593Smuzhiyun  * same value on all 3 types.
1415*4882a593Smuzhiyun  */
1416*4882a593Smuzhiyun static inline bool __pte_access_permitted(unsigned long pteval, bool write)
1417*4882a593Smuzhiyun {
1418*4882a593Smuzhiyun 	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	if (write)
1421*4882a593Smuzhiyun 		need_pte_bits |= _PAGE_RW;
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	if ((pteval & need_pte_bits) != need_pte_bits)
1424*4882a593Smuzhiyun 		return false;
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
1427*4882a593Smuzhiyun }
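/*
 * For instance, with a present userspace entry that lacks _PAGE_RW,
 * a write check fails on the flag test alone, while a read check
 * falls through to the pkey lookup:
 *
 *	unsigned long v = _PAGE_PRESENT | _PAGE_USER;
 *	__pte_access_permitted(v, true);	// false: _PAGE_RW missing
 *	__pte_access_permitted(v, false);	// result of pkey 0 lookup
 */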
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun #define pte_access_permitted pte_access_permitted
1430*4882a593Smuzhiyun static inline bool pte_access_permitted(pte_t pte, bool write)
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun 	return __pte_access_permitted(pte_val(pte), write);
1433*4882a593Smuzhiyun }
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun #define pmd_access_permitted pmd_access_permitted
1436*4882a593Smuzhiyun static inline bool pmd_access_permitted(pmd_t pmd, bool write)
1437*4882a593Smuzhiyun {
1438*4882a593Smuzhiyun 	return __pte_access_permitted(pmd_val(pmd), write);
1439*4882a593Smuzhiyun }
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun #define pud_access_permitted pud_access_permitted
1442*4882a593Smuzhiyun static inline bool pud_access_permitted(pud_t pud, bool write)
1443*4882a593Smuzhiyun {
1444*4882a593Smuzhiyun 	return __pte_access_permitted(pud_val(pud), write);
1445*4882a593Smuzhiyun }
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun #define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
1448*4882a593Smuzhiyun extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun static inline bool arch_has_pfn_modify_check(void)
1451*4882a593Smuzhiyun {
1452*4882a593Smuzhiyun 	return boot_cpu_has_bug(X86_BUG_L1TF);
1453*4882a593Smuzhiyun }
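/*
 * pfn_modify_allowed() is only worth consulting on CPUs affected by
 * L1TF, where the PFN bits of a non-present PTE can still be
 * speculatively dereferenced; unaffected CPUs skip the check.
 */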
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun #define arch_faults_on_old_pte arch_faults_on_old_pte
1456*4882a593Smuzhiyun static inline bool arch_faults_on_old_pte(void)
1457*4882a593Smuzhiyun {
1458*4882a593Smuzhiyun 	return false;
1459*4882a593Smuzhiyun }
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun #endif	/* __ASSEMBLY__ */
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun #endif /* _ASM_X86_PGTABLE_H */
1464