// SPDX-License-Identifier: GPL-2.0-only

#ifndef KVM_X86_MMU_SPTE_H
#define KVM_X86_MMU_SPTE_H

#include "mmu_internal.h"

#define PT_FIRST_AVAIL_BITS_SHIFT 10
#define PT64_SECOND_AVAIL_BITS_SHIFT 54

/*
 * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
 * Access Tracking SPTEs.
 */
#define SPTE_SPECIAL_MASK	(3ULL << 52)
#define SPTE_AD_ENABLED_MASK	(0ULL << 52)
#define SPTE_AD_DISABLED_MASK	(1ULL << 52)
#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
#define SPTE_MMIO_MASK		(3ULL << 52)
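
/*
 * Bits 52:53 thus act as a two-bit field: 00b = A/D bits enabled,
 * 01b = A/D bits disabled, 10b = A/D bits disabled and write-protection
 * required for dirty tracking, 11b = MMIO.  The predicates below
 * (is_mmio_spte(), spte_ad_enabled(), etc.) decode this field.
 */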

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
#else
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)

#define ACC_EXEC_MASK	1
#define ACC_WRITE_MASK	PT_WRITABLE_MASK
#define ACC_USER_MASK	PT_USER_MASK
#define ACC_ALL		(ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/* The mask for the R/X bits in EPT PTEs */
#define PT64_EPT_READABLE_MASK		0x1ull
#define PT64_EPT_EXECUTABLE_MASK	0x4ull

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
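
/*
 * Example, assuming 4KiB pages (PAGE_SHIFT == 12): PT64_LEVEL_SHIFT(2) is
 * 12 + (2 - 1) * 9 == 21, so PT64_INDEX(addr, 2) extracts bits 21:29 of the
 * address, i.e. the index into the second-level (PMD-sized) page table.
 */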


#define SPTE_HOST_WRITEABLE	(1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SPTE_MMU_WRITEABLE	(1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
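
/*
 * SPTE_HOST_WRITEABLE tracks whether the host allows writes to the backing
 * page; SPTE_MMU_WRITEABLE tracks whether KVM itself may make the SPTE
 * writable.  Only when both are set can a write fault be fixed locklessly,
 * see spte_can_locklessly_be_made_writable() below.
 */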

/*
 * Due to limited space in PTEs, the MMIO generation is an 18-bit subset of
 * the memslots generation and is derived as follows:
 *
 * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
 * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62
 *
 * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
 * the MMIO generation number, as doing so would require stealing a bit from
 * the "real" generation number and thus effectively halve the maximum number
 * of MMIO generations that can be handled before encountering a wrap (which
 * requires a full MMU zap).  The flag is instead explicitly queried when
 * checking for MMIO spte cache hits.
 */

#define MMIO_SPTE_GEN_LOW_START		3
#define MMIO_SPTE_GEN_LOW_END		11

#define MMIO_SPTE_GEN_HIGH_START	PT64_SECOND_AVAIL_BITS_SHIFT
#define MMIO_SPTE_GEN_HIGH_END		62

#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
						    MMIO_SPTE_GEN_LOW_START)
#define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
						    MMIO_SPTE_GEN_HIGH_START)

#define MMIO_SPTE_GEN_LOW_BITS		(MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
#define MMIO_SPTE_GEN_HIGH_BITS		(MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)

/* remember to adjust the comment above as well if you change these */
static_assert(MMIO_SPTE_GEN_LOW_BITS == 9 && MMIO_SPTE_GEN_HIGH_BITS == 9);

#define MMIO_SPTE_GEN_LOW_SHIFT		(MMIO_SPTE_GEN_LOW_START - 0)
#define MMIO_SPTE_GEN_HIGH_SHIFT	(MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)

#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
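
/*
 * For example, generation bits 0:8 are shifted left by
 * MMIO_SPTE_GEN_LOW_SHIFT (3) into spte bits 3:11, and generation bits 9:17
 * are shifted left by MMIO_SPTE_GEN_HIGH_SHIFT (54 - 9 == 45) into spte
 * bits 54:62; get_mmio_spte_generation() below performs the inverse.
 */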

extern u64 __read_mostly shadow_nx_mask;
extern u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
extern u64 __read_mostly shadow_user_mask;
extern u64 __read_mostly shadow_accessed_mask;
extern u64 __read_mostly shadow_dirty_mask;
extern u64 __read_mostly shadow_mmio_value;
extern u64 __read_mostly shadow_mmio_access_mask;
extern u64 __read_mostly shadow_present_mask;
extern u64 __read_mostly shadow_me_mask;

/*
 * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK;
 * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
 * pages.
 */
extern u64 __read_mostly shadow_acc_track_mask;

/*
 * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
 * to guard against L1TF attacks.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

/*
 * The number of high-order 1 bits to use in the mask above.
 */
#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5

/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes. We do not save the W bit as the
 * PTEs being access tracked also need to be dirty tracked, so the W bit will be
 * restored only when a write is attempted to the page.
 */
#define SHADOW_ACC_TRACK_SAVED_BITS_MASK (PT64_EPT_READABLE_MASK | \
					  PT64_EPT_EXECUTABLE_MASK)
#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT PT64_SECOND_AVAIL_BITS_SHIFT
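
/*
 * E.g. mark_spte_for_access_track() (declared below) copies the EPT R/X bits
 * into bits 54 and 56 (the saved-bits mask shifted left by 54) and clears the
 * bits covered by shadow_acc_track_mask, so the next access faults and the
 * saved bits can be restored.
 */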

/*
 * In some cases, we need to preserve the GFN of a non-present or reserved
 * SPTE when we usurp the upper five bits of the physical address space to
 * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
 * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
 * left into the reserved bits, i.e. the GFN in the SPTE will be split into
 * high and low parts.  This mask covers the lower bits of the GFN.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
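
/*
 * get_mmio_spte_gfn() in mmu.c reverses this split: it shifts the high part
 * back down by SHADOW_NONPRESENT_OR_RSVD_MASK_LEN and ORs in the low bits
 * covered by this mask.
 */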

/*
 * The number of non-reserved physical address bits irrespective of features
 * that repurpose legal bits, e.g. MKTME.
 */
extern u8 __read_mostly shadow_phys_bits;
142*4882a593Smuzhiyun
is_mmio_spte(u64 spte)143*4882a593Smuzhiyun static inline bool is_mmio_spte(u64 spte)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun
sp_ad_disabled(struct kvm_mmu_page * sp)148*4882a593Smuzhiyun static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
149*4882a593Smuzhiyun {
150*4882a593Smuzhiyun return sp->role.ad_disabled;
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun
spte_ad_enabled(u64 spte)153*4882a593Smuzhiyun static inline bool spte_ad_enabled(u64 spte)
154*4882a593Smuzhiyun {
155*4882a593Smuzhiyun MMU_WARN_ON(is_mmio_spte(spte));
156*4882a593Smuzhiyun return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun
spte_ad_need_write_protect(u64 spte)159*4882a593Smuzhiyun static inline bool spte_ad_need_write_protect(u64 spte)
160*4882a593Smuzhiyun {
161*4882a593Smuzhiyun MMU_WARN_ON(is_mmio_spte(spte));
162*4882a593Smuzhiyun return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun
spte_shadow_accessed_mask(u64 spte)165*4882a593Smuzhiyun static inline u64 spte_shadow_accessed_mask(u64 spte)
166*4882a593Smuzhiyun {
167*4882a593Smuzhiyun MMU_WARN_ON(is_mmio_spte(spte));
168*4882a593Smuzhiyun return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun
spte_shadow_dirty_mask(u64 spte)171*4882a593Smuzhiyun static inline u64 spte_shadow_dirty_mask(u64 spte)
172*4882a593Smuzhiyun {
173*4882a593Smuzhiyun MMU_WARN_ON(is_mmio_spte(spte));
174*4882a593Smuzhiyun return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun
is_access_track_spte(u64 spte)177*4882a593Smuzhiyun static inline bool is_access_track_spte(u64 spte)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun
is_shadow_present_pte(u64 pte)182*4882a593Smuzhiyun static inline int is_shadow_present_pte(u64 pte)
183*4882a593Smuzhiyun {
184*4882a593Smuzhiyun return (pte != 0) && !is_mmio_spte(pte);
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun
is_large_pte(u64 pte)187*4882a593Smuzhiyun static inline int is_large_pte(u64 pte)
188*4882a593Smuzhiyun {
189*4882a593Smuzhiyun return pte & PT_PAGE_SIZE_MASK;
190*4882a593Smuzhiyun }
191*4882a593Smuzhiyun
is_last_spte(u64 pte,int level)192*4882a593Smuzhiyun static inline int is_last_spte(u64 pte, int level)
193*4882a593Smuzhiyun {
194*4882a593Smuzhiyun if (level == PG_LEVEL_4K)
195*4882a593Smuzhiyun return 1;
196*4882a593Smuzhiyun if (is_large_pte(pte))
197*4882a593Smuzhiyun return 1;
198*4882a593Smuzhiyun return 0;
199*4882a593Smuzhiyun }
200*4882a593Smuzhiyun
is_executable_pte(u64 spte)201*4882a593Smuzhiyun static inline bool is_executable_pte(u64 spte)
202*4882a593Smuzhiyun {
203*4882a593Smuzhiyun return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun
spte_to_pfn(u64 pte)206*4882a593Smuzhiyun static inline kvm_pfn_t spte_to_pfn(u64 pte)
207*4882a593Smuzhiyun {
208*4882a593Smuzhiyun return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun
is_accessed_spte(u64 spte)211*4882a593Smuzhiyun static inline bool is_accessed_spte(u64 spte)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun u64 accessed_mask = spte_shadow_accessed_mask(spte);
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun return accessed_mask ? spte & accessed_mask
216*4882a593Smuzhiyun : !is_access_track_spte(spte);
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun
is_dirty_spte(u64 spte)219*4882a593Smuzhiyun static inline bool is_dirty_spte(u64 spte)
220*4882a593Smuzhiyun {
221*4882a593Smuzhiyun u64 dirty_mask = spte_shadow_dirty_mask(spte);
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun
spte_can_locklessly_be_made_writable(u64 spte)226*4882a593Smuzhiyun static inline bool spte_can_locklessly_be_made_writable(u64 spte)
227*4882a593Smuzhiyun {
228*4882a593Smuzhiyun return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
229*4882a593Smuzhiyun (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun
get_mmio_spte_generation(u64 spte)232*4882a593Smuzhiyun static inline u64 get_mmio_spte_generation(u64 spte)
233*4882a593Smuzhiyun {
234*4882a593Smuzhiyun u64 gen;
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT;
237*4882a593Smuzhiyun gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
238*4882a593Smuzhiyun return gen;
239*4882a593Smuzhiyun }
240*4882a593Smuzhiyun
241*4882a593Smuzhiyun /* Bits which may be returned by set_spte() */
242*4882a593Smuzhiyun #define SET_SPTE_WRITE_PROTECTED_PT BIT(0)
243*4882a593Smuzhiyun #define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
244*4882a593Smuzhiyun #define SET_SPTE_SPURIOUS BIT(2)
245*4882a593Smuzhiyun
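/*
 * make_spte() writes the computed SPTE through @new_spte; its int return
 * value, like set_spte()'s in mmu.c, is a mask drawn from the SET_SPTE_*
 * bits above, e.g. telling the caller whether a remote TLB flush is needed.
 */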
int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
	      gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
	      bool can_unsync, bool host_writable, bool ad_disabled,
	      u64 *new_spte);
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
u64 mark_spte_for_access_track(u64 spte);
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);

void kvm_mmu_reset_all_pte_masks(void);

#endif