/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay   <yaniv@qumranet.com>
 *   Avi Kivity    <avi@qumranet.com>
 */

/*
 * The MMU needs to be able to access/walk 32-bit and 64-bit guest page
 * tables, as well as guest EPT tables, so the code in this file is
 * compiled once per guest PTE type (see the PTTYPE blocks below).
 */

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
	#define CMPXCHG "cmpxchgq"
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#define CMPXCHG "cmpxchgl"
#elif PTTYPE == PTTYPE_EPT
	#define pt_element_t u64
	#define guest_walker guest_walkerEPT
	#define FNAME(name) ept_##name
	#define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT 9
	#define PT_GUEST_ACCESSED_SHIFT 8
	#define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
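	/*
	 * Unlike legacy paging, EPT accessed/dirty bits are optional;
	 * ept_ad is expected to reflect whether the active EPTP enables
	 * them, which is why the walker always checks
	 * PT_HAVE_ACCESSED_DIRTY() before touching these bits.
	 */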
	#ifdef CONFIG_X86_64
	#define CMPXCHG "cmpxchgq"
	#endif
	#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
	#error Invalid PTTYPE value
#endif

#define PT_GUEST_DIRTY_MASK    (1 << PT_GUEST_DIRTY_SHIFT)
#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT)

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)
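/*
 * As an illustration of the template above: with PTTYPE == 64 the FNAME()
 * wrapper turns FNAME(walk_addr) into paging64_walk_addr, PTTYPE == 32
 * produces paging32_walk_addr, and PTTYPE == PTTYPE_EPT produces
 * ept_walk_addr.  Each inclusion of this header therefore emits one
 * complete, independently named guest-walker implementation.
 */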

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	unsigned max_level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	bool pte_writable[PT_MAX_FULL_LEVELS];
	unsigned int pt_access[PT_MAX_FULL_LEVELS];
	unsigned int pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
					     unsigned gpte)
{
	unsigned mask;

	/* dirty bit is not supported, so no need to track it */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return;

	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

	mask = (unsigned)~ACC_WRITE_MASK;
	/* Allow write access to dirty gptes */
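	/*
	 * PT_GUEST_DIRTY_SHIFT is always greater than PT_WRITABLE_SHIFT, so
	 * the shift below moves the gpte's dirty bit into the writable bit
	 * position; ORing it into the mask (and ANDing the mask into
	 * *access) keeps ACC_WRITE_MASK only for gptes that are already
	 * dirty.
	 */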
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	*access &= mask;
}

static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
	return pte & PT_PRESENT_MASK;
#else
	return pte & 7;
#endif
}

static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
{
#if PTTYPE != PTTYPE_EPT
	return false;
#else
	return __is_bad_mt_xwr(rsvd_check, gpte);
#endif
}

static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
{
	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
	       FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
}

static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int r = -EFAULT;

	if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
		return -EFAULT;

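	/*
	 * CMPXCHG is left undefined only when pt_element_t is 64 bits wide
	 * but the host is 32-bit (see the PTTYPE blocks at the top of this
	 * file); in that case the gpte is swapped with cmpxchg8b, which
	 * takes the new value in ecx:ebx and the old one in edx:eax.
	 */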
#ifdef CMPXCHG
	asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
		     "mov $0, %[r]\n"
		     "setnz %b[r]\n"
		     "2:"
		     _ASM_EXTABLE_UA(1b, 2b)
		     : [ptr] "+m" (*ptep_user),
		       [old] "+a" (orig_pte),
		       [r] "+q" (r)
		     : [new] "r" (new_pte)
		     : "memory");
#else
	asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
		     "movl $0, %[r]\n"
		     "jz 2f\n"
		     "incl %[r]\n"
		     "2:"
		     _ASM_EXTABLE_UA(1b, 2b)
		     : [ptr] "+m" (*ptep_user),
		       [old] "+A" (orig_pte),
		       [r] "+rm" (r)
		     : [new_lo] "b" ((u32)new_pte),
		       [new_hi] "c" ((u32)(new_pte >> 32))
		     : "memory");
#endif

	user_access_end();
	return r;
}

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp, u64 *spte,
					 u64 gpte)
{
	if (!FNAME(is_present_gpte)(gpte))
		goto no_present;

	/* if the accessed bit is not supported, prefetch non-accessed gptes too */
	if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
	    !(gpte & PT_GUEST_ACCESSED_MASK))
		goto no_present;

	if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}

/*
 * For PTTYPE_EPT, a page table can be executable but not readable
 * on supported processors. Therefore, set_spte does not automatically
 * set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK
 * to signify readability since it isn't used in the EPT case.
 */
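/*
 * For example, an execute-only EPT gpte (R=0, W=0, X=1) maps to
 * ACC_EXEC_MASK alone below, leaving ACC_USER_MASK (readability) clear,
 * whereas an ordinary readable+executable gpte yields
 * ACC_USER_MASK | ACC_EXEC_MASK.
 */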
static inline unsigned FNAME(gpte_access)(u64 gpte)
{
	unsigned access;
#if PTTYPE == PTTYPE_EPT
	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
		((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
#else
	BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
	BUILD_BUG_ON(ACC_EXEC_MASK != 1);
	access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
	/* Combine NX with P (which is set here) to get ACC_EXEC_MASK. */
	access ^= (gpte >> PT64_NX_SHIFT);
#endif

	return access;
}

static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     gpa_t addr, int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	int ret;

	/* dirty/accessed bits are not supported, so no need to update them */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return 0;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_GUEST_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault &&
		    !(pte & PT_GUEST_DIRTY_MASK)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
			if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))
				return -EINVAL;
#endif
			pte |= PT_GUEST_DIRTY_MASK;
		}
		if (pte == orig_pte)
			continue;

		/*
		 * If the slot is read-only, simply do not process the accessed
		 * and dirty bits.  This is the correct thing to do if the slot
		 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
		 * are only supported if the accessed and dirty bits are already
		 * set in the ROM (so that MMIO writes are never needed).
		 *
		 * Note that NPT does not allow this at all and faults, since
		 * it always wants nested page table entries for the guest
		 * page tables to be writable.  And EPT works but will simply
		 * overwrite the read-only memory to set the accessed and dirty
		 * bits.
		 */
		if (unlikely(!walker->pte_writable[level - 1]))
			continue;

		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
		if (ret)
			return ret;

		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
		walker->ptes[level - 1] = pte;
	}
	return 0;
}

static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned pkeys = 0;
#if PTTYPE == 64
	pte_t pte = {.pte = gpte};

	pkeys = pte_flags_pkey(pte_flags(pte));
#endif
	return pkeys;
}

/*
 * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gpa_t addr, u32 access)
{
	int ret;
	pt_element_t pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	u64 pt_access, pte_access;
	unsigned index, accessed_dirty, pte_pkey;
	unsigned nested_access;
	gpa_t pte_gpa;
	bool have_ad;
	int offset;
	u64 walk_nx_mask = 0;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;
	gpa_t real_gpa;
	gfn_t gfn;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->root_level;
	pte = mmu->get_guest_pgd(vcpu);
	have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);

#if PTTYPE == 64
	walk_nx_mask = 1ULL << PT64_NX_SHIFT;
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!FNAME(is_present_gpte)(pte))
			goto error;
		--walker->level;
	}
#endif
	walker->max_level = walker->level;
	ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));

	/*
	 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
	 * by the MOV to CR instruction are treated as reads and do not cause the
	 * processor to set the dirty flag in any EPT paging-structure entry.
	 */
	nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;

	pte_access = ~0;
	++walker->level;

	do {
		unsigned long host_addr;

		pt_access = pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);
		table_gfn = gpte_to_gfn(pte);
		offset = index * sizeof(pt_element_t);
		pte_gpa = gfn_to_gpa(table_gfn) + offset;

		BUG_ON(walker->level < 1);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      nested_access,
					      &walker->fault);

		/*
		 * FIXME: This can happen if emulation (of an INS/OUTS
		 * instruction) triggers a nested page fault.  The exit
		 * qualification / exit info field will incorrectly have
		 * "guest page access" as the nested page fault's cause,
		 * instead of "guest page structure access".  To fix this,
		 * the x86_exception struct should be augmented with enough
		 * information to fix the exit_qualification or exit_info_1
		 * fields.
		 */
		if (unlikely(real_gpa == UNMAPPED_GVA))
			return 0;

		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
					&walker->pte_writable[walker->level - 1]);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__get_user(pte, ptep_user)))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		/*
		 * Inverting the NX bit lets us AND it like the other
		 * permission bits.
		 */
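		/*
		 * e.g. with 64-bit gptes, walk_nx_mask is bit 63: a gpte
		 * with NX set has that bit cleared after the XOR, so the
		 * running AND across levels leaves "executable" set only
		 * when no level of the walk sets NX.
		 */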
		pte_access = pt_access & (pte ^ walk_nx_mask);

		if (unlikely(!FNAME(is_present_gpte)(pte)))
			goto error;

		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		walker->ptes[walker->level - 1] = pte;

		/* Convert to ACC_*_MASK flags for struct guest_walker.  */
		walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
	} while (!is_last_gpte(mmu, walker->level, pte));

	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
	accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;

	/* Convert to ACC_*_MASK flags for struct guest_walker.  */
	walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
	if (unlikely(errcode))
		goto error;

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	if (PTTYPE == 32 && walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
		FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty.
		 * For modes without A/D bits support accessed_dirty will be
		 * always clear.
		 */
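		/*
		 * The shift moves the gpte's dirty bit into the accessed bit
		 * position, so accessed_dirty stays non-zero only when the
		 * gpte is both accessed and dirty; otherwise the slower
		 * update_accessed_dirty_bits() path below runs.
		 */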
		accessed_dirty &= pte >>
			(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
							addr, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, walker->pte_access,
		 walker->pt_access[walker->level - 1]);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx || mmu->mmu_role.ext.cr4_smep))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
	/*
	 * Use PFERR_RSVD_MASK in error_code to tell if an EPT
	 * misconfiguration needs to be injected.  The detection is
	 * done by is_rsvd_bits_set() above.
	 *
	 * We set up the value of exit_qualification to inject:
	 * [2:0] - Derived from the access bits.  The exit_qualification
	 *         might be out of date if it is serving an EPT misconfiguration.
	 * [5:3] - Calculated by the page walk of the guest EPT page tables
	 * [7:8] - Derived from [7:8] of real exit_qualification
	 *
	 * The other bits are set to 0.
	 */
	if (!(errcode & PFERR_RSVD_MASK)) {
		vcpu->arch.exit_qualification &= 0x180;
		if (write_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
		if (user_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
		if (fetch_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
		vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
	}
#endif
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
					access);
}

#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}
#endif

static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
	unsigned pte_access;
	gfn_t gfn;
	kvm_pfn_t pfn;

	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return false;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))
		return false;

	/*
	 * we call mmu_set_spte() with host_writable = true because
	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
	 */
	mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn,
		     true, true);

	kvm_release_pfn_clean(pfn);
	return true;
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte = *(const pt_element_t *)pte;

	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PG_LEVEL_4K) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
				&curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = sptep_to_sp(sptep);

	if (sp->role.level > PG_LEVEL_4K)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;
	}
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation, return 1 to indicate this case.
 */
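/*
 * Structure note: the first loop in FNAME(fetch) builds indirect shadow
 * pages for the guest levels above gw->level; the second loop continues
 * with direct shadow pages down to the mapping level chosen by
 * kvm_mmu_hugepage_adjust(), where the final spte is installed.
 */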
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
			struct guest_walker *gw, u32 error_code,
			int max_level, kvm_pfn_t pfn, bool map_writable,
			bool prefault)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write_fault = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned int direct_access, access;
	int top_level, level, req_level, ret;
	gfn_t base_gfn = gw->gfn;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu->root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			access = gw->pt_access[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(vcpu, it.sptep, sp);
	}

	level = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);

	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
		clear_sp_write_flooding_count(it.sptep);

		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(*it.sptep, gw->gfn, it.level,
						   &pfn, &level);

		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
			break;

		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (!is_shadow_present_pte(*it.sptep)) {
			sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
					      it.level - 1, true, direct_access);
			link_shadow_page(vcpu, it.sptep, sp);
			if (huge_page_disallowed && req_level >= it.level)
				account_huge_nx_page(vcpu->kvm, sp);
		}
	}

	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
			   it.level, base_gfn, pfn, prefault, map_writable);
	if (ret == RET_PF_SPURIOUS)
		return ret;

	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;

out_gpte_changed:
	return RET_PF_RETRY;
}

/*
 * Check whether the mapped gfn can write its own page table under the
 * current mapping.
 *
 * This is a helper for FNAME(page_fault).  When the guest uses a large page
 * to map a writable gfn that is itself in use as a page table, force kvm to
 * map it with small pages: the new shadow page created when kvm shadows that
 * page table would stop kvm from using the large page anyway.  Doing this
 * early avoids unnecessary #PF and emulation.
 *
 * @write_fault_to_shadow_pgtable will return true if the faulting gfn is
 * currently used as its page table.
 *
 * Note: the PDPT page table is not checked for PAE 32-bit guests.  That is
 * fine because the PDPT is always shadowed, which means a gfn used as a PDPT
 * can never be mapped with a large page.
 */
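/*
 * Implementation note: the mask below covers the gfn range of the large page
 * backing walker->gfn, so (gfn ^ table_gfn) & mask is zero exactly when a
 * page-table page of the walk lies inside that large page, i.e. the guest is
 * modifying its own paging structures through this mapping.
 */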
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, bool user_fault,
			      bool *write_fault_to_shadow_pgtable)
{
	int level;
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;

	if (!(walker->pte_access & ACC_WRITE_MASK ||
	      (!is_write_protection(vcpu) && !user_fault)))
		return false;

	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
	}

	return self_changed;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
			     bool prefault)
{
	bool write_fault = error_code & PFERR_WRITE_MASK;
	bool user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	int r;
	kvm_pfn_t pfn;
	unsigned long mmu_seq;
	bool map_writable, is_self_change_mapping;
	int max_level;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	/*
	 * If PFEC.RSVD is set, this is a shadow page fault.
	 * The bit needs to be cleared before walking guest page tables.
	 */
	error_code &= ~PFERR_RSVD_MASK;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault)
			kvm_inject_emulated_page_fault(vcpu, &walker.fault);

		return RET_PF_RETRY;
	}

	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
		shadow_page_table_clear_flood(vcpu, addr);
		return RET_PF_EMULATE;
	}

	r = mmu_topup_memory_caches(vcpu, true);
	if (r)
		return r;

	vcpu->arch.write_fault_to_shadow_pgtable = false;

	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

	if (is_self_change_mapping)
		max_level = PG_LEVEL_4K;
	else
		max_level = walker.level;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return RET_PF_RETRY;

	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
		return r;

	/*
	 * Do not change pte_access if the pfn is a mmio page, otherwise
	 * we will cache the incorrect access into mmio spte.
	 */
	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
	    !is_write_protection(vcpu) && !user_fault &&
	    !is_noslot_pfn(pfn)) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;

		/*
		 * If we converted a user page to a kernel page,
		 * so that the kernel can write to it when cr0.wp=0,
		 * then we should prevent the kernel from executing it
		 * if SMEP is enabled.
		 */
		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
			walker.pte_access &= ~ACC_EXEC_MASK;
	}

	r = RET_PF_RETRY;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	r = make_mmu_pages_available(vcpu);
	if (r)
		goto out_unlock;
	r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn,
			 map_writable, prefault);
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return r;
}

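/*
 * With 32-bit (non-PAE) guest ptes, a 4K guest page table holds 1024 4-byte
 * entries while a shadow page holds only 512 8-byte sptes, so role.quadrant
 * selects which half of the guest page this shadow page shadows; the shift
 * by PT64_LEVEL_BITS below converts that into an entry offset.
 */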
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PG_LEVEL_4K);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	u64 old_spte;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check the return value here; rmap_can_add() lets us
	 * skip the pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu, true);

	if (!VALID_PAGE(root_hpa)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = sptep_to_sp(sptep);
		old_spte = *sptep;
		if (is_last_spte(old_spte, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL);
			if (is_shadow_present_pte(old_spte))
				kvm_flush_remote_tlbs_with_address(vcpu->kvm,
					sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
						       sizeof(pt_element_t)))
				break;

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
}

/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, addr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= addr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

#if PTTYPE != PTTYPE_EPT
/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

#ifndef CONFIG_X86_64
	/* A 64-bit GVA should be impossible on 32-bit KVM. */
	WARN_ON_ONCE(vaddr >> 32);
#endif

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
#endif

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 * We should flush all TLBs if a spte is dropped even though the guest is
 * responsible for it.  Otherwise, kvm_mmu_notifier_invalidate_page and
 * kvm_mmu_notifier_invalidate_range_start see that the mapping page is no
 * longer used by the guest, skip the TLB flush, and the guest can keep
 * accessing the freed pages.
 * We increase kvm->tlbs_dirty to delay the TLB flush in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, nr_present = 0;
	bool host_writable;
	gpa_t first_pte_gpa;
	int set_spte_ret = 0;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
					       sizeof(pt_element_t)))
			return 0;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			/*
			 * Update spte before increasing tlbs_dirty to make
			 * sure no tlb flush is lost after spte is zapped; see
			 * the comments in kvm_flush_remote_tlbs().
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(gpte);
		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
				   &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			/*
			 * The same as above where we are doing
			 * prefetch_invalid_gpte().
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
					 pte_access, PG_LEVEL_4K,
					 gfn, spte_to_pfn(sp->spt[i]),
					 true, false, host_writable);
	}

	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
		kvm_flush_remote_tlbs(vcpu->kvm);

	return nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
#undef PT_HAVE_ACCESSED_DIRTY