xref: /OK3568_Linux_fs/kernel/arch/x86/include/asm/pgtable-3level.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

#include <asm/atomic64_32.h>

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e)							\
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n",			\
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e)							\
	pr_err("%s:%d: bad pmd %p(%016Lx)\n",				\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e)							\
	pr_err("%s:%d: bad pgd %p(%016Lx)\n",				\
	       __FILE__, __LINE__, &(e), pgd_val(e))

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
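
/*
 * Illustrative sketch (editor-added, not part of the original header):
 * one way the rule above is typically followed when a pte may still be
 * live in hardware -- atomically clear it first, then rewrite both
 * halves with set_pte(). The function and variable names here are
 * hypothetical; only ptep_get_and_clear() and set_pte() are real APIs.
 */
#if 0
static void example_update_live_pte(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t newpte)
{
	/* once the pte is cleared the MMU can no longer set A/D bits in it */
	pte_t old = ptep_get_and_clear(mm, addr, ptep);

	/* both 32-bit halves can now be written without racing the hardware */
	set_pte(ptep, newpte);
	(void)old;
}
#endif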

#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
 * a "*pmdp" dereference done by GCC. Problem is, in certain places
 * where pte_offset_map_lock() is called, concurrent page faults are
 * allowed, if the mmap_lock is held for reading. An example is mincore
 * vs page faults vs MADV_DONTNEED. On the page fault side
 * pmd_populate() rightfully does a set_64bit(), but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
 * because GCC will not read the 64-bit value of the pmd atomically.
 *
 * To fix this, all places running pte_offset_map_lock() while holding the
 * mmap_lock in read mode shall read the pmdp pointer using this
 * function to know if the pmd is null or not, and in turn to know if
 * they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd
 * operations.
 *
 * Without THP, if the mmap_lock is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic() runs. So
 * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_lock is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic(). We could read it truly
 * atomically here with an atomic64_read() for the THP enabled case (and
 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
 * only return an atomic pmdval if the low part of the pmdval is later
 * found to be stable (i.e. pointing to a pte). We are also returning a
 * 'none' (zero) pmdval if the low part of the pmd is zero.
 *
 * In some cases the high and low parts of the pmdval returned may not be
 * consistent if THP is enabled (the low part may point to a previously
 * mapped hugepage, while the high part may point to a more recently
 * mapped hugepage), but pmd_none_or_trans_huge_or_clear_bad() only
 * needs the low part of the pmd to be read atomically to decide if the
 * pmd is unstable or not, with the only exception when the low part
 * of the pmd is zero, in which case we return a 'none' pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * If the low part is null, we must not read the high part
		 * or we can end up with a partial pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}
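
/*
 * Illustrative sketch (editor-added): how a lockless reader such as
 * mincore is expected to consume pmd_read_atomic() while holding
 * mmap_lock for reading, per the comment above. example_classify_pmd()
 * is a hypothetical name used only for illustration.
 */
#if 0
static int example_classify_pmd(pmd_t *pmdp)
{
	pmd_t pmdval = pmd_read_atomic(pmdp);

	/* keep the compiler from re-reading *pmdp after the checks below */
	barrier();

	if (pmd_none(pmdval))
		return 0;	/* nothing mapped, nothing to walk */
	if (pmd_trans_huge(pmdval))
		return 1;	/* huge pmd, do not descend to ptes */

	/* low half points to a pte page: pte_offset_map_lock() is safe */
	return 2;
}
#endif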

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
#endif
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to the Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called are either
	 * followed by flush_tlb_mm() or don't need a TLB flush (x86_64
	 * code or pud_clear_bad()), so we don't need a TLB flush here.
	 */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};

#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	/*
	 * If the pmd has the present bit cleared we can get away without an
	 * expensive cmpxchg64: we can update pmdp half-by-half without
	 * racing with anybody.
	 */
	if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
		union split_pmd old, new, *ptr;

		ptr = (union split_pmd *)pmdp;

		new.pmd = pmd;

		/* xchg acts as a barrier before setting of the high bits */
		old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
		old.pmd_high = ptr->pmd_high;
		ptr->pmd_high = new.pmd_high;
		return old.pmd;
	}

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);

	return old;
}
#endif
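
/*
 * Illustrative sketch (editor-added, hypothetical names): a typical
 * pmdp_establish() caller installs a new value into a possibly-live huge
 * pmd, then flushes using the returned old value. This is only a sketch
 * of the calling convention, not code from this header.
 */
#if 0
static void example_change_huge_pmd(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp,
				    pmd_t newpmd)
{
	pmd_t old = pmdp_establish(vma, addr, pmdp, newpmd);

	/* the old entry may still be cached in TLBs; the caller flushes */
	flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
	(void)old;
}
#endif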

#ifdef CONFIG_SMP
union split_pud {
	struct {
		u32 pud_low;
		u32 pud_high;
	};
	pud_t pud;
};

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
	union split_pud res, *orig = (union split_pud *)pudp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
#endif

	/* xchg acts as a barrier before setting of the high bits */
	res.pud_low = xchg(&orig->pud_low, 0);
	res.pud_high = orig->pud_high;
	orig->pud_high = 0;

	return res.pud;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})

/*
 * Normally, __swp_entry() converts from the arch-independent representation
 * to the arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the
 * result to the pte. But here we have a 32-bit swp_entry_t and a 64-bit pte,
 * and need to use the whole 64 bits. Thus, we shift the "real" arch-dependent
 * conversion to __swp_entry_to_pte() through the following helper macro based
 * on the 64-bit __swp_entry().
 */
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })

#define __swp_entry_to_pte(x)	((pte_t){ .pte = \
		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
/*
 * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
 * swp_entry_t, but also has to convert it from 64-bit to the 32-bit
 * intermediate representation, using the following macros based on the
 * 64-bit __swp_type() and __swp_offset().
 */
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))

#define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
					     __pteval_swp_offset(pte)))
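
/*
 * Illustrative sketch (editor-added): round-tripping a swap entry through
 * the 64-bit pte encoding above. Note the '~' in __swp_pteval_entry() and
 * __pteval_swp_offset(): the offset bits are stored inverted as part of
 * the L1TF mitigation, so encode and decode must go through these helpers
 * as a pair. The function name is hypothetical.
 */
#if 0
static void example_swp_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);	/* type 3, offset 0x1234 */
	pte_t pte = __swp_entry_to_pte(entry);
	swp_entry_t back = __pte_to_swp_entry(pte);

	/* back.val == entry.val: type and offset survive the 64-bit detour */
	(void)back;
}
#endif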

#include <asm/pgtable-invert.h>

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */