// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

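/*
 * Inject a nested page fault into L1 by synthesizing an SVM_EXIT_NPF
 * vmexit; the low 32 bits of exit_info_1 carry the fault's error code.
 */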
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

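/*
 * If L1 intercepts #PF and no nested VMRUN is pending, reflect the page
 * fault into L1 as a vmexit; otherwise deliver it directly to L2.
 */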
static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !svm->nested.nested_run_pending) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = fault->error_code;
		svm->vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

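/*
 * Fetch the index-th PDPTE from the nested page table referenced by
 * vmcb12's nested_cr3.  Returns 0 if the guest page read fails.
 */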
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

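/*
 * Switch vcpu->arch.mmu to a shadow-NPT MMU that translates L2 GPAs
 * through L1's nested page tables, routing guest page table accesses
 * through the nested callbacks above.
 */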
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

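/*
 * Recompute the active intercept bitmap as the union of L1's (host) and
 * L2's (guest) intercepts, with the adjustments described below.
 */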
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	vmcb_set_intercept(c, INTERCEPT_VMLOAD);
	vmcb_set_intercept(c, INTERCEPT_VMSAVE);
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb.  */
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->virt_ext             = from->virt_ext;
	dst->pause_filter_count   = from->pause_filter_count;
	dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits
	 */
	int i;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

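/*
 * Called on KVM_REQ_GET_NESTED_STATE_PAGES to finish merging the MSR
 * permission bitmap once guest memory is accessible again, e.g. after
 * nested state was restored from userspace.
 */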
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!nested_svm_vmrun_msrpm(svm)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}

	return true;
}

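/*
 * Consistency checks on vmcb12's control area: VMRUN must be intercepted,
 * the ASID must be non-zero, and nested paging may only be requested if
 * NPT is enabled on the host.
 */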
static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
	if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
		return false;

	if (control->asid == 0)
		return false;

	if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

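/*
 * Consistency checks on the guest state in vmcb12, mirroring the checks
 * the CPU performs on VMRUN.
 */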
static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	bool vmcb12_lma;

	/*
	 * FIXME: these should be done after copying the fields,
	 * to avoid TOC/TOU races.  For these save area checks
	 * the possible damage is limited since kvm_set_cr0 and
	 * kvm_set_cr4 handle failure; EFER_SVME is an exception
	 * so it is force-set later in nested_prepare_vmcb_save.
	 */
	if ((vmcb12->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
		return false;

	if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
		return false;

	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);

	if (vmcb12_lma) {
		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
		    (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
			return false;
	}
	if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
		return false;

	return true;
}

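/*
 * Cache vmcb12's control area in svm->nested.ctl and page-align the
 * MSR and I/O permission map addresses.
 */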
static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_svm_check_controls will check it.  */
	svm->nested.ctl.asid           = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the nested_vmcb.
 */
void sync_nested_vmcb_control(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl        &= ~mask;
	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
					   struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	if (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))
		return -EINVAL;

	if (!nested_npt && is_pae_paging(vcpu) &&
	    (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
		if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
			return -EINVAL;
	}

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync here and in
	 * kvm_init_shadow_npt_mmu().
	 */
	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3, false, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}

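/*
 * Load the L2 guest state from vmcb12 into the current VMCB and the
 * vcpu's register state.
 */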
static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	/* Load the nested guest state */
	svm->vmcb->save.es = vmcb12->save.es;
	svm->vmcb->save.cs = vmcb12->save.cs;
	svm->vmcb->save.ss = vmcb12->save.ss;
	svm->vmcb->save.ds = vmcb12->save.ds;
	svm->vmcb->save.gdtr = vmcb12->save.gdtr;
	svm->vmcb->save.idtr = vmcb12->save.idtr;
	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags);

	/*
	 * Force-set EFER_SVME even though it is checked earlier on the
	 * VMCB12, because the guest can flip the bit between the check
	 * and now.  Clearing EFER_SVME would call svm_free_nested.
	 */
	svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);

	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;
	svm->vmcb->save.dr7 = vmcb12->save.dr7;
	svm->vcpu.arch.dr6  = vmcb12->save.dr6;
	svm->vmcb->save.cpl = vmcb12->save.cpl;
}

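/*
 * Merge the control fields: int_ctl bits that belong to L1 (VMCB01) are
 * kept from the host save area, the rest comes from vmcb12; then enter
 * guest mode and recompute the intercepts.
 */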
static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
{
	const u32 int_ctl_vmcb01_bits =
		V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;

	const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(&svm->vcpu);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl =
		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
		(svm->nested.hsave->control.int_ctl & int_ctl_vmcb01_bits);

	svm->vmcb->control.int_vector        = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state         = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj         = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err     = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here.
	 */
	recalc_intercepts(svm);

	vmcb_mark_all_dirty(svm->vmcb);
}

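/*
 * Load vmcb12's state and switch the vcpu into guest (L2) mode: save
 * area first, then control area, then the new CR3.
 */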
int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	int ret;

	svm->nested.vmcb12_gpa = vmcb12_gpa;
	nested_prepare_vmcb_save(svm, vmcb12);
	nested_prepare_vmcb_control(svm);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm));
	if (ret)
		return ret;

	if (!npt_enabled)
		svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

	svm_set_gif(svm, true);

	return 0;
}

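/*
 * Emulate the VMRUN instruction: map vmcb12 from the GPA in RAX, validate
 * it, save the current (L1) state into hsave and enter guest mode.
 */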
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *vmcb12;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	load_nested_vmcb_control(svm, &vmcb12->control);

	if (!nested_vmcb_check_save(svm, vmcb12) ||
	    !nested_vmcb_check_controls(&svm->nested.ctl)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1  = 0;
		vmcb12->control.exit_info_2  = 0;
		goto out;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es     = vmcb->save.es;
	hsave->save.cs     = vmcb->save.cs;
	hsave->save.ss     = vmcb->save.ss;
	hsave->save.ds     = vmcb->save.ds;
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.efer;
	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp    = vmcb->save.rsp;
	hsave->save.rax    = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3    = vmcb->save.cr3;
	else
		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(&hsave->control, &vmcb->control);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

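/*
 * Copy the state touched by VMLOAD/VMSAVE (segment bases and MSR state)
 * between two VMCBs.
 */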
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

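/*
 * Emulate a #VMEXIT from L2 to L1: copy the L2 state and exit information
 * into vmcb12, then restore the L1 state that was saved in hsave.
 */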
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *vmcb12;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es     = vmcb->save.es;
	vmcb12->save.cs     = vmcb->save.cs;
	vmcb12->save.ss     = vmcb->save.ss;
	vmcb12->save.ds     = vmcb->save.ds;
	vmcb12->save.gdtr   = vmcb->save.gdtr;
	vmcb12->save.idtr   = vmcb->save.idtr;
	vmcb12->save.efer   = svm->vcpu.arch.efer;
	vmcb12->save.cr0    = kvm_read_cr0(&svm->vcpu);
	vmcb12->save.cr3    = kvm_read_cr3(&svm->vcpu);
	vmcb12->save.cr2    = vmcb->save.cr2;
	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
	vmcb12->save.rip    = kvm_rip_read(&svm->vcpu);
	vmcb12->save.rsp    = kvm_rsp_read(&svm->vcpu);
	vmcb12->save.rax    = kvm_rax_read(&svm->vcpu);
	vmcb12->save.dr7    = vmcb->save.dr7;
	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
	vmcb12->save.cpl    = vmcb->save.cpl;

	vmcb12->control.int_state         = vmcb->control.int_state;
	vmcb12->control.exit_code         = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi      = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1       = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2       = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_vmcb_save_pending_event(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip  = vmcb->control.next_rip;

	vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;

	vmcb12->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	vmcb12->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* Restore the original control entries */
	copy_vmcb_control_area(&vmcb->control, &hsave->control);

	/* On vmexit the  GIF is set to false */
	svm_set_gif(svm, false);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	svm->nested.ctl.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	vmcb_mark_all_dirty(svm->vmcb);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);

	rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
	if (rc)
		return 1;

	if (npt_enabled)
		svm->vmcb->save.cr3 = hsave->save.cr3;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

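/*
 * Allocate the per-vcpu state needed for nesting: the host save area and
 * the merged MSR permission bitmap.
 */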
int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *hsave_page;

	if (svm->nested.initialized)
		return 0;

	hsave_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!hsave_page)
		return -ENOMEM;
	svm->nested.hsave = page_address(hsave_page);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_hsave;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_hsave:
	__free_page(hsave_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.hsave));
	svm->nested.hsave = NULL;

	svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(&svm->vcpu)) {
		struct vmcb *hsave = svm->nested.hsave;
		struct vmcb *vmcb = svm->vmcb;

		svm->nested.nested_run_pending = 0;
		leave_guest_mode(&svm->vcpu);
		copy_vmcb_control_area(&vmcb->control, &hsave->control);
		nested_svm_uninit_mmu_context(&svm->vcpu);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
}

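/*
 * Consult L1's MSR permission bitmap to decide whether an MSR access taken
 * in L2 must be forwarded to L1 (NESTED_EXIT_DONE) or can be handled by
 * L0 (NESTED_EXIT_HOST).
 */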
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but need in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

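/*
 * Consult L1's I/O permission bitmap to decide whether an I/O instruction
 * executed in L2 should cause a vmexit to L1.
 */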
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

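/*
 * Decide whether the current exit while running L2 is intercepted by L1
 * (NESTED_EXIT_DONE) or should be handled by L0 (NESTED_EXIT_HOST).
 */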
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

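/*
 * SVM instructions require EFER.SVME, paging and CPL 0; raise #UD or #GP
 * accordingly when the checks fail.
 */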
int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

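/*
 * Check for pending events (INIT, exceptions, SMI, NMI, interrupts) and
 * synthesize the corresponding vmexit to L1 if it intercepts them; return
 * -EBUSY if delivery must wait for a pending nested VMRUN or event
 * reinjection to complete.
 */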
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

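/*
 * Exits that L0 must handle regardless of L1's intercepts: physical
 * interrupts, NMIs, nested page faults, and exceptions that L0 itself
 * intercepts (including async page faults).
 */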
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] &
		    excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

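/*
 * Fill in the KVM_GET_NESTED_STATE header and, if the vcpu is in guest
 * mode, append the cached vmcb12 control area and the saved L1 state.
 */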
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out.  */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
			 sizeof(user_vmcb->control)))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;

out:
	return kvm_state.size;
}

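/*
 * Restore nested state from userspace (KVM_SET_NESTED_STATE): validate
 * the flags and the vmcb12 image, then re-enter guest mode.  The MSRPM
 * merge is deferred via KVM_REQ_GET_NESTED_STATE_PAGES.
 */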
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	int ret;
	u32 cr0;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0.  */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(vcpu);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ret  = -ENOMEM;
	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL_ACCOUNT);
	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;

	ret = -EINVAL;
	if (!nested_vmcb_check_controls(ctl))
		goto out_free;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_checks).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 * TODO: validate reserved bits for all saved state.
	 */
	if (!(save->cr0 & X86_CR0_PG))
		goto out_free;
	if (!(save->efer & EFER_SVME))
		goto out_free;

	/*
	 * All checks done, we can enter guest mode.  L1 control fields
	 * come from the nested save state.  Guest state is already
	 * in the registers, the save area of the nested state instead
	 * contains saved L1 state.
	 */
	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
	hsave->save = *save;

	if (is_guest_mode(vcpu))
		svm_leave_nested(vcpu);

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
	load_nested_vmcb_control(svm, ctl);
	nested_prepare_vmcb_control(svm);

	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.leave_nested = svm_leave_nested,
	.check_events = svm_check_nested_events,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};