Lines matching refs: svm
35 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_npf_exit() local
37 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) { in nested_svm_inject_npf_exit()
42 svm->vmcb->control.exit_code = SVM_EXIT_NPF; in nested_svm_inject_npf_exit()
43 svm->vmcb->control.exit_code_hi = 0; in nested_svm_inject_npf_exit()
44 svm->vmcb->control.exit_info_1 = (1ULL << 32); in nested_svm_inject_npf_exit()
45 svm->vmcb->control.exit_info_2 = fault->address; in nested_svm_inject_npf_exit()
48 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL; in nested_svm_inject_npf_exit()
49 svm->vmcb->control.exit_info_1 |= fault->error_code; in nested_svm_inject_npf_exit()
51 nested_svm_vmexit(svm); in nested_svm_inject_npf_exit()
56 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_page_fault_nested() local
59 if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) && in svm_inject_page_fault_nested()
60 !svm->nested.nested_run_pending) { in svm_inject_page_fault_nested()
61 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR; in svm_inject_page_fault_nested()
62 svm->vmcb->control.exit_code_hi = 0; in svm_inject_page_fault_nested()
63 svm->vmcb->control.exit_info_1 = fault->error_code; in svm_inject_page_fault_nested()
64 svm->vmcb->control.exit_info_2 = fault->address; in svm_inject_page_fault_nested()
65 nested_svm_vmexit(svm); in svm_inject_page_fault_nested()
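
The two groups above reflect a page fault taken while L2 runs back to L1 instead of injecting it into L2. A minimal sketch of the #PF variant, reassembled from the listed lines and assuming the usual KVM SVM types from arch/x86/kvm/svm/svm.h; the kvm_inject_page_fault() fallback on the else branch is an assumption, since the listing only shows the intercepted path:

static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Reflect the #PF to L1 only if L1 intercepts it and no VMRUN is pending. */
	if (vmcb_is_intercept(&svm->nested.ctl,
			      INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !svm->nested.nested_run_pending) {
		svm->vmcb->control.exit_code    = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1  = fault->error_code;
		svm->vmcb->control.exit_info_2  = fault->address;
		nested_svm_vmexit(svm);
	} else {
		/* Assumed fallback: deliver the fault to L2 directly. */
		kvm_inject_page_fault(vcpu, fault);
	}
}

The nested_svm_inject_npf_exit() lines above it do the same for a #NPF exit, overwriting only the low 32 bits of exit_info_1 with the new error code.
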
73 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_pdptr() local
74 u64 cr3 = svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_pdptr()
87 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_cr3() local
89 return svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_cr3()
94 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_init_mmu_context() local
95 struct vmcb *hsave = svm->nested.hsave; in nested_svm_init_mmu_context()
101 svm->nested.ctl.nested_cr3); in nested_svm_init_mmu_context()
115 void recalc_intercepts(struct vcpu_svm *svm) in recalc_intercepts() argument
120 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in recalc_intercepts()
122 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts()
125 c = &svm->vmcb->control; in recalc_intercepts()
126 h = &svm->nested.hsave->control; in recalc_intercepts()
127 g = &svm->nested.ctl; in recalc_intercepts()
186 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) in nested_svm_vmrun_msrpm() argument
195 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_vmrun_msrpm()
206 offset = svm->nested.ctl.msrpm_base_pa + (p * 4); in nested_svm_vmrun_msrpm()
208 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4)) in nested_svm_vmrun_msrpm()
211 svm->nested.msrpm[p] = svm->msrpm[p] | value; in nested_svm_vmrun_msrpm()
214 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm)); in nested_svm_vmrun_msrpm()
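
These fragments merge L1's MSR permission bitmap into the one actually used while L2 runs: a bit set by either L0 or L1 stays set, so either hypervisor can intercept the MSR. A sketch of the loop, assuming the msrpm_offsets[]/MSRPM_OFFSETS table svm.c uses to enumerate the populated 32-bit chunks (the loop bounds and 0xffffffff terminator are not in the listing):

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	int i;

	/* If L1 does not use MSR intercepts, L0's bitmap alone is enough. */
	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		int p;
		u32 value;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		/* OR the chunks: intercept if either L0 or L1 wants to. */
		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}
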
221 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nested_state_pages() local
226 if (!nested_svm_vmrun_msrpm(svm)) { in svm_get_nested_state_pages()
252 static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12) in nested_vmcb_check_save() argument
254 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_vmcb_check_save()
281 if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4)) in nested_vmcb_check_save()
287 static void load_nested_vmcb_control(struct vcpu_svm *svm, in load_nested_vmcb_control() argument
290 copy_vmcb_control_area(&svm->nested.ctl, control); in load_nested_vmcb_control()
293 svm->nested.ctl.asid = control->asid; in load_nested_vmcb_control()
294 svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL; in load_nested_vmcb_control()
295 svm->nested.ctl.iopm_base_pa &= ~0x0fffULL; in load_nested_vmcb_control()
302 void sync_nested_vmcb_control(struct vcpu_svm *svm) in sync_nested_vmcb_control() argument
305 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj; in sync_nested_vmcb_control()
306 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err; in sync_nested_vmcb_control()
310 if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) && in sync_nested_vmcb_control()
311 svm_is_intercept(svm, INTERCEPT_VINTR)) { in sync_nested_vmcb_control()
322 svm->nested.ctl.int_ctl &= ~mask; in sync_nested_vmcb_control()
323 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask; in sync_nested_vmcb_control()
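
On every exit from L2 these lines copy back into the cached vmcb12 controls the few int_ctl bits the CPU may modify (V_IRQ and V_TPR) plus the event_inj fields. A sketch of the masking, assuming the V_IRQ_MASK/V_TPR_MASK definitions from svm.h; the special case leaves V_IRQ alone when it is L0, not L1, that has requested an interrupt window via VINTR:

void sync_nested_vmcb_control(struct vcpu_svm *svm)
{
	u32 mask;

	svm->nested.ctl.event_inj     = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few int_ctl bits are written back by the processor. */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * L0 is using V_IRQ to open an interrupt window, so the
		 * current value does not belong to L1; do not copy it back.
		 */
		mask &= ~V_IRQ_MASK;
	}

	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}
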
330 static void nested_vmcb_save_pending_event(struct vcpu_svm *svm, in nested_vmcb_save_pending_event() argument
333 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_vmcb_save_pending_event()
363 static inline bool nested_npt_enabled(struct vcpu_svm *svm) in nested_npt_enabled() argument
365 return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE; in nested_npt_enabled()
399 static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12) in nested_prepare_vmcb_save() argument
402 svm->vmcb->save.es = vmcb12->save.es; in nested_prepare_vmcb_save()
403 svm->vmcb->save.cs = vmcb12->save.cs; in nested_prepare_vmcb_save()
404 svm->vmcb->save.ss = vmcb12->save.ss; in nested_prepare_vmcb_save()
405 svm->vmcb->save.ds = vmcb12->save.ds; in nested_prepare_vmcb_save()
406 svm->vmcb->save.gdtr = vmcb12->save.gdtr; in nested_prepare_vmcb_save()
407 svm->vmcb->save.idtr = vmcb12->save.idtr; in nested_prepare_vmcb_save()
408 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags); in nested_prepare_vmcb_save()
415 svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME); in nested_prepare_vmcb_save()
417 svm_set_cr0(&svm->vcpu, vmcb12->save.cr0); in nested_prepare_vmcb_save()
418 svm_set_cr4(&svm->vcpu, vmcb12->save.cr4); in nested_prepare_vmcb_save()
419 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2; in nested_prepare_vmcb_save()
420 kvm_rax_write(&svm->vcpu, vmcb12->save.rax); in nested_prepare_vmcb_save()
421 kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp); in nested_prepare_vmcb_save()
422 kvm_rip_write(&svm->vcpu, vmcb12->save.rip); in nested_prepare_vmcb_save()
425 svm->vmcb->save.rax = vmcb12->save.rax; in nested_prepare_vmcb_save()
426 svm->vmcb->save.rsp = vmcb12->save.rsp; in nested_prepare_vmcb_save()
427 svm->vmcb->save.rip = vmcb12->save.rip; in nested_prepare_vmcb_save()
428 svm->vmcb->save.dr7 = vmcb12->save.dr7; in nested_prepare_vmcb_save()
429 svm->vcpu.arch.dr6 = vmcb12->save.dr6; in nested_prepare_vmcb_save()
430 svm->vmcb->save.cpl = vmcb12->save.cpl; in nested_prepare_vmcb_save()
433 static void nested_prepare_vmcb_control(struct vcpu_svm *svm) in nested_prepare_vmcb_control() argument
440 if (nested_npt_enabled(svm)) in nested_prepare_vmcb_control()
441 nested_svm_init_mmu_context(&svm->vcpu); in nested_prepare_vmcb_control()
443 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset = in nested_prepare_vmcb_control()
444 svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset; in nested_prepare_vmcb_control()
446 svm->vmcb->control.int_ctl = in nested_prepare_vmcb_control()
447 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) | in nested_prepare_vmcb_control()
448 (svm->nested.hsave->control.int_ctl & int_ctl_vmcb01_bits); in nested_prepare_vmcb_control()
450 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; in nested_prepare_vmcb_control()
451 svm->vmcb->control.int_state = svm->nested.ctl.int_state; in nested_prepare_vmcb_control()
452 svm->vmcb->control.event_inj = svm->nested.ctl.event_inj; in nested_prepare_vmcb_control()
453 svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_prepare_vmcb_control()
455 svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count; in nested_prepare_vmcb_control()
456 svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh; in nested_prepare_vmcb_control()
459 enter_guest_mode(&svm->vcpu); in nested_prepare_vmcb_control()
465 recalc_intercepts(svm); in nested_prepare_vmcb_control()
467 vmcb_mark_all_dirty(svm->vmcb); in nested_prepare_vmcb_control()
470 int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa, in enter_svm_guest_mode() argument
475 svm->nested.vmcb12_gpa = vmcb12_gpa; in enter_svm_guest_mode()
476 nested_prepare_vmcb_save(svm, vmcb12); in enter_svm_guest_mode()
477 nested_prepare_vmcb_control(svm); in enter_svm_guest_mode()
479 ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3, in enter_svm_guest_mode()
480 nested_npt_enabled(svm)); in enter_svm_guest_mode()
485 svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested; in enter_svm_guest_mode()
487 svm_set_gif(svm, true); in enter_svm_guest_mode()
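
enter_svm_guest_mode() is the common path for VMRUN and for nested-state restore: stash the vmcb12 GPA, load the L2 save area and controls, switch the MMU to the nested CR3, then set GIF. A sketch assembled from the lines above; the !npt_enabled guard around the inject_page_fault override is an assumption, as the listing shows only the assignment:

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	int ret;

	svm->nested.vmcb12_gpa = vmcb12_gpa;
	nested_prepare_vmcb_save(svm, vmcb12);
	nested_prepare_vmcb_control(svm);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm));
	if (ret)
		return ret;

	if (!npt_enabled)	/* assumed guard; shadow paging needs the hook */
		svm->vcpu.arch.mmu->inject_page_fault =
			svm_inject_page_fault_nested;

	svm_set_gif(svm, true);

	return 0;
}
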
492 int nested_svm_vmrun(struct vcpu_svm *svm) in nested_svm_vmrun() argument
496 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmrun()
497 struct vmcb *vmcb = svm->vmcb; in nested_svm_vmrun()
501 if (is_smm(&svm->vcpu)) { in nested_svm_vmrun()
502 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in nested_svm_vmrun()
506 vmcb12_gpa = svm->vmcb->save.rax; in nested_svm_vmrun()
507 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map); in nested_svm_vmrun()
509 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_vmrun()
512 return kvm_skip_emulated_instruction(&svm->vcpu); in nested_svm_vmrun()
515 ret = kvm_skip_emulated_instruction(&svm->vcpu); in nested_svm_vmrun()
519 if (WARN_ON_ONCE(!svm->nested.initialized)) in nested_svm_vmrun()
522 load_nested_vmcb_control(svm, &vmcb12->control); in nested_svm_vmrun()
524 if (!nested_vmcb_check_save(svm, vmcb12) || in nested_svm_vmrun()
525 !nested_vmcb_check_controls(&svm->nested.ctl)) { in nested_svm_vmrun()
533 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa, in nested_svm_vmrun()
547 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmrun()
548 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmrun()
560 hsave->save.efer = svm->vcpu.arch.efer; in nested_svm_vmrun()
561 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmrun()
562 hsave->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmrun()
563 hsave->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmrun()
564 hsave->save.rip = kvm_rip_read(&svm->vcpu); in nested_svm_vmrun()
570 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmrun()
574 svm->nested.nested_run_pending = 1; in nested_svm_vmrun()
576 if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12)) in nested_svm_vmrun()
579 if (nested_svm_vmrun_msrpm(svm)) in nested_svm_vmrun()
583 svm->nested.nested_run_pending = 0; in nested_svm_vmrun()
585 svm->vmcb->control.exit_code = SVM_EXIT_ERR; in nested_svm_vmrun()
586 svm->vmcb->control.exit_code_hi = 0; in nested_svm_vmrun()
587 svm->vmcb->control.exit_info_1 = 0; in nested_svm_vmrun()
588 svm->vmcb->control.exit_info_2 = 0; in nested_svm_vmrun()
590 nested_svm_vmexit(svm); in nested_svm_vmrun()
593 kvm_vcpu_unmap(&svm->vcpu, &map, true); in nested_svm_vmrun()
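
A condensed view of the VMRUN handler the lines above come from; the consistency-check error path, the trace call, and the hsave snapshot of L1 state are elided and marked as such, and the remaining structure is reconstructed around the listed lines:

int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	if (is_smm(&svm->vcpu)) {		/* VMRUN is invalid in SMM */
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;	/* vmcb12 GPA comes from RAX */
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);
	vmcb12 = map.hva;

	load_nested_vmcb_control(svm, &vmcb12->control);
	if (!nested_vmcb_check_save(svm, vmcb12) ||
	    !nested_vmcb_check_controls(&svm->nested.ctl)) {
		/* elided: report SVM_EXIT_ERR in vmcb12 and bail out */
		goto out;
	}

	/* elided: snapshot L1 state (segments, CRs, rflags, RIP, ...) into hsave */

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}
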
614 int nested_svm_vmexit(struct vcpu_svm *svm) in nested_svm_vmexit() argument
618 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmexit()
619 struct vmcb *vmcb = svm->vmcb; in nested_svm_vmexit()
622 rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map); in nested_svm_vmexit()
625 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_vmexit()
632 leave_guest_mode(&svm->vcpu); in nested_svm_vmexit()
633 svm->nested.vmcb12_gpa = 0; in nested_svm_vmexit()
634 WARN_ON_ONCE(svm->nested.nested_run_pending); in nested_svm_vmexit()
636 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu); in nested_svm_vmexit()
639 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE; in nested_svm_vmexit()
649 vmcb12->save.efer = svm->vcpu.arch.efer; in nested_svm_vmexit()
650 vmcb12->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmexit()
651 vmcb12->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmexit()
653 vmcb12->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmexit()
654 vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmexit()
655 vmcb12->save.rip = kvm_rip_read(&svm->vcpu); in nested_svm_vmexit()
656 vmcb12->save.rsp = kvm_rsp_read(&svm->vcpu); in nested_svm_vmexit()
657 vmcb12->save.rax = kvm_rax_read(&svm->vcpu); in nested_svm_vmexit()
659 vmcb12->save.dr6 = svm->vcpu.arch.dr6; in nested_svm_vmexit()
669 nested_vmcb_save_pending_event(svm, vmcb12); in nested_svm_vmexit()
671 if (svm->nrips_enabled) in nested_svm_vmexit()
674 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl; in nested_svm_vmexit()
675 vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl; in nested_svm_vmexit()
676 vmcb12->control.event_inj = svm->nested.ctl.event_inj; in nested_svm_vmexit()
677 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_svm_vmexit()
680 svm->vmcb->control.pause_filter_count; in nested_svm_vmexit()
682 svm->vmcb->control.pause_filter_thresh; in nested_svm_vmexit()
688 svm_set_gif(svm, false); in nested_svm_vmexit()
690 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset = in nested_svm_vmexit()
691 svm->vcpu.arch.l1_tsc_offset; in nested_svm_vmexit()
693 svm->nested.ctl.nested_cr3 = 0; in nested_svm_vmexit()
696 svm->vmcb->save.es = hsave->save.es; in nested_svm_vmexit()
697 svm->vmcb->save.cs = hsave->save.cs; in nested_svm_vmexit()
698 svm->vmcb->save.ss = hsave->save.ss; in nested_svm_vmexit()
699 svm->vmcb->save.ds = hsave->save.ds; in nested_svm_vmexit()
700 svm->vmcb->save.gdtr = hsave->save.gdtr; in nested_svm_vmexit()
701 svm->vmcb->save.idtr = hsave->save.idtr; in nested_svm_vmexit()
702 kvm_set_rflags(&svm->vcpu, hsave->save.rflags); in nested_svm_vmexit()
703 svm_set_efer(&svm->vcpu, hsave->save.efer); in nested_svm_vmexit()
704 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); in nested_svm_vmexit()
705 svm_set_cr4(&svm->vcpu, hsave->save.cr4); in nested_svm_vmexit()
706 kvm_rax_write(&svm->vcpu, hsave->save.rax); in nested_svm_vmexit()
707 kvm_rsp_write(&svm->vcpu, hsave->save.rsp); in nested_svm_vmexit()
708 kvm_rip_write(&svm->vcpu, hsave->save.rip); in nested_svm_vmexit()
709 svm->vmcb->save.dr7 = 0; in nested_svm_vmexit()
710 svm->vmcb->save.cpl = 0; in nested_svm_vmexit()
711 svm->vmcb->control.exit_int_info = 0; in nested_svm_vmexit()
713 vmcb_mark_all_dirty(svm->vmcb); in nested_svm_vmexit()
722 kvm_vcpu_unmap(&svm->vcpu, &map, true); in nested_svm_vmexit()
724 nested_svm_uninit_mmu_context(&svm->vcpu); in nested_svm_vmexit()
726 rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false); in nested_svm_vmexit()
731 svm->vmcb->save.cr3 = hsave->save.cr3; in nested_svm_vmexit()
737 svm->vcpu.arch.nmi_injected = false; in nested_svm_vmexit()
738 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmexit()
739 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmexit()
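
The vmexit path above is the mirror image of VMRUN. A condensed sketch of its phases; the full copy-back of L2 state into vmcb12 and the per-field restore of the hsave save area are elided and marked as such:

int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct kvm_host_map map;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb12;
	int rc;

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}
	vmcb12 = map.hva;

	/* Leave L2: clear guest mode and any pending state-page request. */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);
	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);

	/* elided: copy L2 guest state and exit information into vmcb12 */

	/* Restore L1: GIF off, L1 TSC offset, then the hsave save area. */
	svm_set_gif(svm, false);
	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	/* elided: reload segments, EFER, CR0/CR4, RAX/RSP/RIP from hsave */
	vmcb_mark_all_dirty(svm->vmcb);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
	if (rc)
		return 1;

	/* Drop anything that was queued for L2; L1 starts clean. */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}
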
744 int svm_allocate_nested(struct vcpu_svm *svm) in svm_allocate_nested() argument
748 if (svm->nested.initialized) in svm_allocate_nested()
754 svm->nested.hsave = page_address(hsave_page); in svm_allocate_nested()
756 svm->nested.msrpm = svm_vcpu_alloc_msrpm(); in svm_allocate_nested()
757 if (!svm->nested.msrpm) in svm_allocate_nested()
759 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm); in svm_allocate_nested()
761 svm->nested.initialized = true; in svm_allocate_nested()
769 void svm_free_nested(struct vcpu_svm *svm) in svm_free_nested() argument
771 if (!svm->nested.initialized) in svm_free_nested()
774 svm_vcpu_free_msrpm(svm->nested.msrpm); in svm_free_nested()
775 svm->nested.msrpm = NULL; in svm_free_nested()
777 __free_page(virt_to_page(svm->nested.hsave)); in svm_free_nested()
778 svm->nested.hsave = NULL; in svm_free_nested()
780 svm->nested.initialized = false; in svm_free_nested()
788 struct vcpu_svm *svm = to_svm(vcpu); in svm_leave_nested() local
790 if (is_guest_mode(&svm->vcpu)) { in svm_leave_nested()
791 struct vmcb *hsave = svm->nested.hsave; in svm_leave_nested()
792 struct vmcb *vmcb = svm->vmcb; in svm_leave_nested()
794 svm->nested.nested_run_pending = 0; in svm_leave_nested()
795 leave_guest_mode(&svm->vcpu); in svm_leave_nested()
797 nested_svm_uninit_mmu_context(&svm->vcpu); in svm_leave_nested()
800 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu); in svm_leave_nested()
803 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) in nested_svm_exit_handled_msr() argument
808 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_exit_handled_msr()
811 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in nested_svm_exit_handled_msr()
813 write = svm->vmcb->control.exit_info_1 & 1; in nested_svm_exit_handled_msr()
822 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4)) in nested_svm_exit_handled_msr()
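
When an RDMSR/WRMSR intercept fires in L2, L0 consults L1's MSR permission bitmap to decide whether the exit belongs to L1 (NESTED_EXIT_DONE) or stays with L0 (NESTED_EXIT_HOST). A sketch of the lookup; svm_msrpm_offset() and MSR_INVALID are assumed from svm.c/svm.h, and the bit math (two bits per MSR, read then write) is reconstructed from context:

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	/* Each MSR has two bits in the bitmap: read, then write. */
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* The offset table counts 32-bit words; convert to bytes. */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu,
				svm->nested.ctl.msrpm_base_pa + offset,
				&value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

nested_svm_intercept_ioio() below does the analogous lookup into L1's I/O permission bitmap, indexed by the port number taken from exit_info_1.
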
828 static int nested_svm_intercept_ioio(struct vcpu_svm *svm) in nested_svm_intercept_ioio() argument
835 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT))) in nested_svm_intercept_ioio()
838 port = svm->vmcb->control.exit_info_1 >> 16; in nested_svm_intercept_ioio()
839 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> in nested_svm_intercept_ioio()
841 gpa = svm->nested.ctl.iopm_base_pa + (port / 8); in nested_svm_intercept_ioio()
847 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) in nested_svm_intercept_ioio()
853 static int nested_svm_intercept(struct vcpu_svm *svm) in nested_svm_intercept() argument
855 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_intercept()
860 vmexit = nested_svm_exit_handled_msr(svm); in nested_svm_intercept()
863 vmexit = nested_svm_intercept_ioio(svm); in nested_svm_intercept()
866 if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
871 if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
889 if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
897 int nested_svm_exit_handled(struct vcpu_svm *svm) in nested_svm_exit_handled() argument
901 vmexit = nested_svm_intercept(svm); in nested_svm_exit_handled()
904 nested_svm_vmexit(svm); in nested_svm_exit_handled()
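
The dispatcher that ties the checks above together is small; a sketch, assuming the NESTED_EXIT_* return codes used throughout this file:

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	/* If L1 intercepts this exit, synthesize the vmexit into vmcb12. */
	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}
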
909 int nested_svm_check_permissions(struct vcpu_svm *svm) in nested_svm_check_permissions() argument
911 if (!(svm->vcpu.arch.efer & EFER_SVME) || in nested_svm_check_permissions()
912 !is_paging(&svm->vcpu)) { in nested_svm_check_permissions()
913 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in nested_svm_check_permissions()
917 if (svm->vmcb->save.cpl) { in nested_svm_check_permissions()
918 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_check_permissions()
925 static bool nested_exit_on_exception(struct vcpu_svm *svm) in nested_exit_on_exception() argument
927 unsigned int nr = svm->vcpu.arch.exception.nr; in nested_exit_on_exception()
929 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr)); in nested_exit_on_exception()
932 static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm) in nested_svm_inject_exception_vmexit() argument
934 unsigned int nr = svm->vcpu.arch.exception.nr; in nested_svm_inject_exception_vmexit()
936 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; in nested_svm_inject_exception_vmexit()
937 svm->vmcb->control.exit_code_hi = 0; in nested_svm_inject_exception_vmexit()
939 if (svm->vcpu.arch.exception.has_error_code) in nested_svm_inject_exception_vmexit()
940 svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code; in nested_svm_inject_exception_vmexit()
947 if (svm->vcpu.arch.exception.nested_apf) in nested_svm_inject_exception_vmexit()
948 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token; in nested_svm_inject_exception_vmexit()
949 else if (svm->vcpu.arch.exception.has_payload) in nested_svm_inject_exception_vmexit()
950 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload; in nested_svm_inject_exception_vmexit()
952 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; in nested_svm_inject_exception_vmexit()
955 kvm_deliver_exception_payload(&svm->vcpu); in nested_svm_inject_exception_vmexit()
956 if (svm->vcpu.arch.dr7 & DR7_GD) { in nested_svm_inject_exception_vmexit()
957 svm->vcpu.arch.dr7 &= ~DR7_GD; in nested_svm_inject_exception_vmexit()
958 kvm_update_dr7(&svm->vcpu); in nested_svm_inject_exception_vmexit()
961 WARN_ON(svm->vcpu.arch.exception.has_payload); in nested_svm_inject_exception_vmexit()
963 nested_svm_vmexit(svm); in nested_svm_inject_exception_vmexit()
966 static void nested_svm_smi(struct vcpu_svm *svm) in nested_svm_smi() argument
968 svm->vmcb->control.exit_code = SVM_EXIT_SMI; in nested_svm_smi()
969 svm->vmcb->control.exit_info_1 = 0; in nested_svm_smi()
970 svm->vmcb->control.exit_info_2 = 0; in nested_svm_smi()
972 nested_svm_vmexit(svm); in nested_svm_smi()
975 static void nested_svm_nmi(struct vcpu_svm *svm) in nested_svm_nmi() argument
977 svm->vmcb->control.exit_code = SVM_EXIT_NMI; in nested_svm_nmi()
978 svm->vmcb->control.exit_info_1 = 0; in nested_svm_nmi()
979 svm->vmcb->control.exit_info_2 = 0; in nested_svm_nmi()
981 nested_svm_vmexit(svm); in nested_svm_nmi()
984 static void nested_svm_intr(struct vcpu_svm *svm) in nested_svm_intr() argument
986 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); in nested_svm_intr()
988 svm->vmcb->control.exit_code = SVM_EXIT_INTR; in nested_svm_intr()
989 svm->vmcb->control.exit_info_1 = 0; in nested_svm_intr()
990 svm->vmcb->control.exit_info_2 = 0; in nested_svm_intr()
992 nested_svm_vmexit(svm); in nested_svm_intr()
995 static inline bool nested_exit_on_init(struct vcpu_svm *svm) in nested_exit_on_init() argument
997 return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT); in nested_exit_on_init()
1000 static void nested_svm_init(struct vcpu_svm *svm) in nested_svm_init() argument
1002 svm->vmcb->control.exit_code = SVM_EXIT_INIT; in nested_svm_init()
1003 svm->vmcb->control.exit_info_1 = 0; in nested_svm_init()
1004 svm->vmcb->control.exit_info_2 = 0; in nested_svm_init()
1006 nested_svm_vmexit(svm); in nested_svm_init()
1012 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_nested_events() local
1014 kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending; in svm_check_nested_events()
1021 if (!nested_exit_on_init(svm)) in svm_check_nested_events()
1023 nested_svm_init(svm); in svm_check_nested_events()
1030 if (!nested_exit_on_exception(svm)) in svm_check_nested_events()
1032 nested_svm_inject_exception_vmexit(svm); in svm_check_nested_events()
1039 if (!nested_exit_on_smi(svm)) in svm_check_nested_events()
1041 nested_svm_smi(svm); in svm_check_nested_events()
1048 if (!nested_exit_on_nmi(svm)) in svm_check_nested_events()
1050 nested_svm_nmi(svm); in svm_check_nested_events()
1057 if (!nested_exit_on_intr(svm)) in svm_check_nested_events()
1059 nested_svm_intr(svm); in svm_check_nested_events()
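
svm_check_nested_events() walks pending events in priority order (INIT, exceptions, SMI, NMI, external interrupts) and reflects each to L1 if L1 intercepts it; nothing is delivered while an injection must be redone or a nested VMRUN is still pending. A condensed sketch showing two of the cases, with the SMI/NMI/INTR cases (same shape) elided:

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;

	if (lapic_in_kernel(apic) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	/* elided: SMI, NMI and external-interrupt cases, same pattern */

	return 0;
}
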
1066 int nested_svm_exit_special(struct vcpu_svm *svm) in nested_svm_exit_special() argument
1068 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_exit_special()
1078 if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] & in nested_svm_exit_special()
1082 svm->vcpu.arch.apf.host_apf_flags) in nested_svm_exit_special()
1098 struct vcpu_svm *svm; in svm_get_nested_state() local
1105 &user_kvm_nested_state->data.svm[0]; in svm_get_nested_state()
1110 svm = to_svm(vcpu); in svm_get_nested_state()
1117 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa; in svm_get_nested_state()
1121 if (svm->nested.nested_run_pending) in svm_get_nested_state()
1125 if (gif_set(svm)) in svm_get_nested_state()
1140 if (copy_to_user(&user_vmcb->control, &svm->nested.ctl, in svm_get_nested_state()
1143 if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save, in svm_get_nested_state()
1155 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nested_state() local
1156 struct vmcb *hsave = svm->nested.hsave; in svm_set_nested_state()
1158 &user_kvm_nested_state->data.svm[0]; in svm_set_nested_state()
1191 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); in svm_set_nested_state()
1195 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa)) in svm_set_nested_state()
1240 copy_vmcb_control_area(&hsave->control, &svm->vmcb->control); in svm_set_nested_state()
1246 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; in svm_set_nested_state()
1247 load_nested_vmcb_control(svm, ctl); in svm_set_nested_state()
1248 nested_prepare_vmcb_control(svm); in svm_set_nested_state()