Lines matching refs: svm  (KVM's AMD SVM support, arch/x86/kvm/svm/svm.c)

198 static void svm_complete_interrupts(struct vcpu_svm *svm);
268 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_efer() local
283 svm_set_gif(svm, true); in svm_set_efer()
290 if (!is_smm(&svm->vcpu)) in svm_set_efer()
291 svm_free_nested(svm); in svm_set_efer()
294 int ret = svm_allocate_nested(svm); in svm_set_efer()
303 svm->vmcb->save.efer = efer | EFER_SVME; in svm_set_efer()
304 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_efer()
310 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_interrupt_shadow() local
313 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) in svm_get_interrupt_shadow()
320 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_interrupt_shadow() local
323 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
325 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
331 struct vcpu_svm *svm = to_svm(vcpu); in skip_emulated_instruction() local
333 if (nrips && svm->vmcb->control.next_rip != 0) { in skip_emulated_instruction()
335 svm->next_rip = svm->vmcb->control.next_rip; in skip_emulated_instruction()
338 if (!svm->next_rip) { in skip_emulated_instruction()
342 kvm_rip_write(vcpu, svm->next_rip); in skip_emulated_instruction()
351 struct vcpu_svm *svm = to_svm(vcpu); in svm_queue_exception() local
356 kvm_deliver_exception_payload(&svm->vcpu); in svm_queue_exception()
359 unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
368 (void)skip_emulated_instruction(&svm->vcpu); in svm_queue_exception()
369 rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
370 svm->int3_rip = rip + svm->vmcb->save.cs.base; in svm_queue_exception()
371 svm->int3_injected = rip - old_rip; in svm_queue_exception()
374 svm->vmcb->control.event_inj = nr in svm_queue_exception()
378 svm->vmcb->control.event_inj_err = error_code; in svm_queue_exception()
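
The svm_queue_exception() references above encode the injected event into the VMCB EVENTINJ field. Below is a minimal user-space sketch of that encoding as documented in the AMD APM (vector in bits 7:0, type in bits 10:8, error-code-valid in bit 11, valid in bit 31, error code in the high word); note that KVM keeps the error code in the separate event_inj_err field, and the constant and helper names here are illustrative, not the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    #define EVTINJ_TYPE_EXEPT  (3u << 8)    /* event type 3 = exception */
    #define EVTINJ_VALID_ERR   (1u << 11)   /* error code is pushed     */
    #define EVTINJ_VALID       (1u << 31)   /* injection requested      */

    /* Compose a 64-bit EVENTINJ value for injecting an exception. */
    static uint64_t make_eventinj(uint8_t vector, int has_error_code,
                                  uint32_t error_code)
    {
        uint64_t ev = vector | EVTINJ_TYPE_EXEPT | EVTINJ_VALID;

        if (has_error_code)
            ev |= EVTINJ_VALID_ERR | ((uint64_t)error_code << 32);
        return ev;
    }

    int main(void)
    {
        /* Inject #GP (vector 13) with error code 0. */
        printf("EVENTINJ = %#llx\n",
               (unsigned long long)make_eventinj(13, 1, 0));
        return 0;
    }
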
589 struct vcpu_svm *svm = to_svm(vcpu); in set_shadow_msr_intercept() local
597 set_bit(slot, svm->shadow_msr_intercept.read); in set_shadow_msr_intercept()
599 clear_bit(slot, svm->shadow_msr_intercept.read); in set_shadow_msr_intercept()
602 set_bit(slot, svm->shadow_msr_intercept.write); in set_shadow_msr_intercept()
604 clear_bit(slot, svm->shadow_msr_intercept.write); in set_shadow_msr_intercept()
704 struct vcpu_svm *svm = to_svm(vcpu); in svm_msr_filter_changed() local
714 u32 read = test_bit(i, svm->shadow_msr_intercept.read); in svm_msr_filter_changed()
715 u32 write = test_bit(i, svm->shadow_msr_intercept.write); in svm_msr_filter_changed()
717 set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write); in svm_msr_filter_changed()
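
The set_msr_interception()/set_msr_interception_bitmap() references above flip per-MSR read/write bits in the MSR permission map (MSRPM). Below is a user-space sketch of the MSRPM addressing scheme from the AMD APM: three ranges of 8192 MSRs, two bits per MSR, with the even bit intercepting reads and the odd bit intercepting writes. The helper name is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Return the bit index of the read-intercept bit for an MSR, or -1 if
     * the MSR falls outside the ranges covered by the permission map. The
     * write-intercept bit immediately follows the read bit.
     */
    static int msrpm_bit(uint32_t msr)
    {
        if (msr <= 0x1fff)                            /* 0000_0000 - 0000_1FFF */
            return msr * 2;
        if (msr >= 0xc0000000 && msr <= 0xc0001fff)   /* C000_0000 - C000_1FFF */
            return 0x800 * 8 + (msr - 0xc0000000) * 2;
        if (msr >= 0xc0010000 && msr <= 0xc0011fff)   /* C001_0000 - C001_1FFF */
            return 0x1000 * 8 + (msr - 0xc0010000) * 2;
        return -1;
    }

    int main(void)
    {
        uint32_t msr = 0xc0000080;                    /* EFER, as an example */
        int bit = msrpm_bit(msr);

        if (bit < 0)
            return 1;
        printf("MSR %#x: byte %#x, read bit %d, write bit %d\n",
               (unsigned)msr, (unsigned)(bit / 8), bit % 8, bit % 8 + 1);
        return 0;
    }
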
766 struct vcpu_svm *svm = to_svm(vcpu); in svm_enable_lbrv() local
768 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; in svm_enable_lbrv()
769 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); in svm_enable_lbrv()
770 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); in svm_enable_lbrv()
771 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); in svm_enable_lbrv()
772 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); in svm_enable_lbrv()
777 struct vcpu_svm *svm = to_svm(vcpu); in svm_disable_lbrv() local
779 svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; in svm_disable_lbrv()
780 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); in svm_disable_lbrv()
781 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); in svm_disable_lbrv()
782 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); in svm_disable_lbrv()
783 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0); in svm_disable_lbrv()
786 void disable_nmi_singlestep(struct vcpu_svm *svm) in disable_nmi_singlestep() argument
788 svm->nmi_singlestep = false; in disable_nmi_singlestep()
790 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { in disable_nmi_singlestep()
792 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in disable_nmi_singlestep()
793 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; in disable_nmi_singlestep()
794 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in disable_nmi_singlestep()
795 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; in disable_nmi_singlestep()
801 struct vcpu_svm *svm = to_svm(vcpu); in grow_ple_window() local
802 struct vmcb_control_area *control = &svm->vmcb->control; in grow_ple_window()
811 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in grow_ple_window()
819 struct vcpu_svm *svm = to_svm(vcpu); in shrink_ple_window() local
820 struct vmcb_control_area *control = &svm->vmcb->control; in shrink_ple_window()
829 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in shrink_ple_window()
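
The grow_ple_window()/shrink_ple_window() references above adjust the PAUSE filter count in the VMCB control area so that a guest spinning on PAUSE eventually takes an exit, and relax it again when the vCPU makes progress. Below is a simplified model of that scaling, not the kernel's exact helpers (the shared helpers also choose between additive and multiplicative steps); the default count and clamp values are illustrative.

    #include <stdio.h>

    static unsigned int grow_window(unsigned int val, unsigned int grow,
                                    unsigned int max)
    {
        unsigned long long next = (unsigned long long)val * grow;
        return next > max ? max : (unsigned int)next;   /* clamp to the 16-bit field */
    }

    static unsigned int shrink_window(unsigned int val, unsigned int shrink,
                                      unsigned int min)
    {
        unsigned int next = shrink ? val / shrink : val;
        return next < min ? min : next;
    }

    int main(void)
    {
        unsigned int count = 3000;                  /* hypothetical starting count */
        count = grow_window(count, 2, 65535);
        printf("after grow:   %u\n", count);
        count = shrink_window(count, 2, 0);
        printf("after shrink: %u\n", count);
        return 0;
    }
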
1066 struct vcpu_svm *svm = to_svm(vcpu); in svm_write_l1_tsc_offset() local
1071 g_tsc_offset = svm->vmcb->control.tsc_offset - in svm_write_l1_tsc_offset()
1072 svm->nested.hsave->control.tsc_offset; in svm_write_l1_tsc_offset()
1073 svm->nested.hsave->control.tsc_offset = offset; in svm_write_l1_tsc_offset()
1077 svm->vmcb->control.tsc_offset - g_tsc_offset, in svm_write_l1_tsc_offset()
1080 svm->vmcb->control.tsc_offset = offset + g_tsc_offset; in svm_write_l1_tsc_offset()
1082 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in svm_write_l1_tsc_offset()
1083 return svm->vmcb->control.tsc_offset; in svm_write_l1_tsc_offset()
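
The svm_write_l1_tsc_offset() references above keep L1's TSC offset in the host-save area while the active VMCB carries the combined L1+L2 offset when a nested guest runs; g_tsc_offset is the L2 contribution recovered by subtraction. A minimal arithmetic sketch of that bookkeeping (values and names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t l1_offset = -1000000;   /* offset L1's hypervisor asked for    */
        int64_t l2_offset =   250000;   /* extra offset from the nested VMCB12 */

        /* While L2 runs, the hardware-visible offset is the sum of both. */
        int64_t active = l1_offset + l2_offset;

        /* Recovering the L2 contribution mirrors the g_tsc_offset math. */
        int64_t g_tsc_offset = active - l1_offset;

        printf("active=%lld g_tsc_offset=%lld\n",
               (long long)active, (long long)g_tsc_offset);
        return 0;
    }
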
1086 static void svm_check_invpcid(struct vcpu_svm *svm) in svm_check_invpcid() argument
1094 !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID)) in svm_check_invpcid()
1095 svm_set_intercept(svm, INTERCEPT_INVPCID); in svm_check_invpcid()
1097 svm_clr_intercept(svm, INTERCEPT_INVPCID); in svm_check_invpcid()
1101 static void init_vmcb(struct vcpu_svm *svm) in init_vmcb() argument
1103 struct vmcb_control_area *control = &svm->vmcb->control; in init_vmcb()
1104 struct vmcb_save_area *save = &svm->vmcb->save; in init_vmcb()
1106 svm->vcpu.arch.hflags = 0; in init_vmcb()
1108 svm_set_intercept(svm, INTERCEPT_CR0_READ); in init_vmcb()
1109 svm_set_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1110 svm_set_intercept(svm, INTERCEPT_CR4_READ); in init_vmcb()
1111 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); in init_vmcb()
1112 svm_set_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1113 svm_set_intercept(svm, INTERCEPT_CR4_WRITE); in init_vmcb()
1114 if (!kvm_vcpu_apicv_active(&svm->vcpu)) in init_vmcb()
1115 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); in init_vmcb()
1117 set_dr_intercepts(svm); in init_vmcb()
1119 set_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1120 set_exception_intercept(svm, UD_VECTOR); in init_vmcb()
1121 set_exception_intercept(svm, MC_VECTOR); in init_vmcb()
1122 set_exception_intercept(svm, AC_VECTOR); in init_vmcb()
1123 set_exception_intercept(svm, DB_VECTOR); in init_vmcb()
1131 set_exception_intercept(svm, GP_VECTOR); in init_vmcb()
1133 svm_set_intercept(svm, INTERCEPT_INTR); in init_vmcb()
1134 svm_set_intercept(svm, INTERCEPT_NMI); in init_vmcb()
1135 svm_set_intercept(svm, INTERCEPT_SMI); in init_vmcb()
1136 svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0); in init_vmcb()
1137 svm_set_intercept(svm, INTERCEPT_RDPMC); in init_vmcb()
1138 svm_set_intercept(svm, INTERCEPT_CPUID); in init_vmcb()
1139 svm_set_intercept(svm, INTERCEPT_INVD); in init_vmcb()
1140 svm_set_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1141 svm_set_intercept(svm, INTERCEPT_INVLPGA); in init_vmcb()
1142 svm_set_intercept(svm, INTERCEPT_IOIO_PROT); in init_vmcb()
1143 svm_set_intercept(svm, INTERCEPT_MSR_PROT); in init_vmcb()
1144 svm_set_intercept(svm, INTERCEPT_TASK_SWITCH); in init_vmcb()
1145 svm_set_intercept(svm, INTERCEPT_SHUTDOWN); in init_vmcb()
1146 svm_set_intercept(svm, INTERCEPT_VMRUN); in init_vmcb()
1147 svm_set_intercept(svm, INTERCEPT_VMMCALL); in init_vmcb()
1148 svm_set_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb()
1149 svm_set_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb()
1150 svm_set_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1151 svm_set_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1152 svm_set_intercept(svm, INTERCEPT_SKINIT); in init_vmcb()
1153 svm_set_intercept(svm, INTERCEPT_WBINVD); in init_vmcb()
1154 svm_set_intercept(svm, INTERCEPT_XSETBV); in init_vmcb()
1155 svm_set_intercept(svm, INTERCEPT_RDPRU); in init_vmcb()
1156 svm_set_intercept(svm, INTERCEPT_RSM); in init_vmcb()
1158 if (!kvm_mwait_in_guest(svm->vcpu.kvm)) { in init_vmcb()
1159 svm_set_intercept(svm, INTERCEPT_MONITOR); in init_vmcb()
1160 svm_set_intercept(svm, INTERCEPT_MWAIT); in init_vmcb()
1163 if (!kvm_hlt_in_guest(svm->vcpu.kvm)) in init_vmcb()
1164 svm_set_intercept(svm, INTERCEPT_HLT); in init_vmcb()
1167 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); in init_vmcb()
1189 svm_set_cr4(&svm->vcpu, 0); in init_vmcb()
1190 svm_set_efer(&svm->vcpu, 0); in init_vmcb()
1192 kvm_set_rflags(&svm->vcpu, 2); in init_vmcb()
1194 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; in init_vmcb()
1200 svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); in init_vmcb()
1201 kvm_mmu_reset_context(&svm->vcpu); in init_vmcb()
1209 svm_clr_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1210 clr_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1211 svm_clr_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1212 svm_clr_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1213 save->g_pat = svm->vcpu.arch.pat; in init_vmcb()
1217 svm->asid_generation = 0; in init_vmcb()
1219 svm->nested.vmcb12_gpa = 0; in init_vmcb()
1220 svm->vcpu.arch.hflags = 0; in init_vmcb()
1222 if (!kvm_pause_in_guest(svm->vcpu.kvm)) { in init_vmcb()
1226 svm_set_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1228 svm_clr_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1231 svm_check_invpcid(svm); in init_vmcb()
1233 if (kvm_vcpu_apicv_active(&svm->vcpu)) in init_vmcb()
1234 avic_init_vmcb(svm); in init_vmcb()
1241 svm_clr_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb()
1242 svm_clr_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb()
1243 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; in init_vmcb()
1247 svm_clr_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1248 svm_clr_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1249 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; in init_vmcb()
1252 if (sev_guest(svm->vcpu.kvm)) { in init_vmcb()
1253 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; in init_vmcb()
1254 clr_exception_intercept(svm, UD_VECTOR); in init_vmcb()
1257 vmcb_mark_all_dirty(svm->vmcb); in init_vmcb()
1259 enable_gif(svm); in init_vmcb()
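
The long run of svm_set_intercept()/svm_clr_intercept() calls in init_vmcb() above sets and clears bits in the VMCB control area's packed intercept vector, where the bit number selects both the 32-bit word and the bit within it. Below is a minimal sketch of such a packed bitmap; the array size and the INTERCEPT_HLT bit number are illustrative, not the real VMCB encoding.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_INTERCEPT 160                       /* illustrative size */

    struct intercepts {
        uint32_t words[(MAX_INTERCEPT + 31) / 32];
    };

    static void set_intercept(struct intercepts *ic, unsigned int bit)
    {
        ic->words[bit / 32] |= 1u << (bit % 32);
    }

    static void clr_intercept(struct intercepts *ic, unsigned int bit)
    {
        ic->words[bit / 32] &= ~(1u << (bit % 32));
    }

    static int is_intercept(const struct intercepts *ic, unsigned int bit)
    {
        return (ic->words[bit / 32] >> (bit % 32)) & 1;
    }

    int main(void)
    {
        struct intercepts ic = {0};
        enum { INTERCEPT_HLT = 120 };               /* hypothetical bit number */

        set_intercept(&ic, INTERCEPT_HLT);
        printf("HLT intercepted: %d\n", is_intercept(&ic, INTERCEPT_HLT));
        clr_intercept(&ic, INTERCEPT_HLT);
        printf("HLT intercepted: %d\n", is_intercept(&ic, INTERCEPT_HLT));
        return 0;
    }
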
1265 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_reset() local
1269 svm->spec_ctrl = 0; in svm_vcpu_reset()
1270 svm->virt_spec_ctrl = 0; in svm_vcpu_reset()
1273 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | in svm_vcpu_reset()
1275 if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) in svm_vcpu_reset()
1276 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; in svm_vcpu_reset()
1278 init_vmcb(svm); in svm_vcpu_reset()
1284 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE); in svm_vcpu_reset()
1289 struct vcpu_svm *svm; in svm_create_vcpu() local
1294 svm = to_svm(vcpu); in svm_create_vcpu()
1301 err = avic_init_vcpu(svm); in svm_create_vcpu()
1309 svm->avic_is_running = true; in svm_create_vcpu()
1311 svm->msrpm = svm_vcpu_alloc_msrpm(); in svm_create_vcpu()
1312 if (!svm->msrpm) { in svm_create_vcpu()
1317 svm_vcpu_init_msrpm(vcpu, svm->msrpm); in svm_create_vcpu()
1319 svm->vmcb = page_address(vmcb_page); in svm_create_vcpu()
1320 svm->vmcb_pa = __sme_set(page_to_pfn(vmcb_page) << PAGE_SHIFT); in svm_create_vcpu()
1321 svm->asid_generation = 0; in svm_create_vcpu()
1322 init_vmcb(svm); in svm_create_vcpu()
1345 struct vcpu_svm *svm = to_svm(vcpu); in svm_free_vcpu() local
1352 svm_clear_current_vmcb(svm->vmcb); in svm_free_vcpu()
1355 svm_free_nested(svm); in svm_free_vcpu()
1357 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); in svm_free_vcpu()
1358 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); in svm_free_vcpu()
1363 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_load() local
1368 svm->asid_generation = 0; in svm_vcpu_load()
1369 vmcb_mark_all_dirty(svm->vmcb); in svm_vcpu_load()
1375 savesegment(fs, svm->host.fs); in svm_vcpu_load()
1376 savesegment(gs, svm->host.gs); in svm_vcpu_load()
1377 svm->host.ldt = kvm_read_ldt(); in svm_vcpu_load()
1380 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); in svm_vcpu_load()
1391 wrmsrl(MSR_TSC_AUX, svm->tsc_aux); in svm_vcpu_load()
1393 if (sd->current_vmcb != svm->vmcb) { in svm_vcpu_load()
1394 sd->current_vmcb = svm->vmcb; in svm_vcpu_load()
1402 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_put() local
1408 kvm_load_ldt(svm->host.ldt); in svm_vcpu_put()
1410 loadsegment(fs, svm->host.fs); in svm_vcpu_put()
1412 load_gs_index(svm->host.gs); in svm_vcpu_put()
1415 loadsegment(gs, svm->host.gs); in svm_vcpu_put()
1419 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); in svm_vcpu_put()
1424 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_rflags() local
1425 unsigned long rflags = svm->vmcb->save.rflags; in svm_get_rflags()
1427 if (svm->nmi_singlestep) { in svm_get_rflags()
1429 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in svm_get_rflags()
1431 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in svm_get_rflags()
1462 static void svm_set_vintr(struct vcpu_svm *svm) in svm_set_vintr() argument
1467 WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu)); in svm_set_vintr()
1468 svm_set_intercept(svm, INTERCEPT_VINTR); in svm_set_vintr()
1474 control = &svm->vmcb->control; in svm_set_vintr()
1479 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_set_vintr()
1482 static void svm_clear_vintr(struct vcpu_svm *svm) in svm_clear_vintr() argument
1484 svm_clr_intercept(svm, INTERCEPT_VINTR); in svm_clear_vintr()
1487 svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; in svm_clear_vintr()
1488 if (is_guest_mode(&svm->vcpu)) { in svm_clear_vintr()
1489 svm->nested.hsave->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; in svm_clear_vintr()
1491 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != in svm_clear_vintr()
1492 (svm->nested.ctl.int_ctl & V_TPR_MASK)); in svm_clear_vintr()
1493 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & in svm_clear_vintr()
1496 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; in svm_clear_vintr()
1499 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_clear_vintr()
1605 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_idt() local
1607 dt->size = svm->vmcb->save.idtr.limit; in svm_get_idt()
1608 dt->address = svm->vmcb->save.idtr.base; in svm_get_idt()
1613 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_idt() local
1615 svm->vmcb->save.idtr.limit = dt->size; in svm_set_idt()
1616 svm->vmcb->save.idtr.base = dt->address ; in svm_set_idt()
1617 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_idt()
1622 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_gdt() local
1624 dt->size = svm->vmcb->save.gdtr.limit; in svm_get_gdt()
1625 dt->address = svm->vmcb->save.gdtr.base; in svm_get_gdt()
1630 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_gdt() local
1632 svm->vmcb->save.gdtr.limit = dt->size; in svm_set_gdt()
1633 svm->vmcb->save.gdtr.base = dt->address ; in svm_set_gdt()
1634 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_gdt()
1637 static void update_cr0_intercept(struct vcpu_svm *svm) in update_cr0_intercept() argument
1639 ulong gcr0 = svm->vcpu.arch.cr0; in update_cr0_intercept()
1640 u64 *hcr0 = &svm->vmcb->save.cr0; in update_cr0_intercept()
1645 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in update_cr0_intercept()
1648 svm_clr_intercept(svm, INTERCEPT_CR0_READ); in update_cr0_intercept()
1649 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); in update_cr0_intercept()
1651 svm_set_intercept(svm, INTERCEPT_CR0_READ); in update_cr0_intercept()
1652 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); in update_cr0_intercept()
1658 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr0() local
1664 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; in svm_set_cr0()
1669 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); in svm_set_cr0()
1685 svm->vmcb->save.cr0 = cr0; in svm_set_cr0()
1686 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr0()
1687 update_cr0_intercept(svm); in svm_set_cr0()
1714 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_segment() local
1737 svm->vmcb->save.cpl = (var->dpl & 3); in svm_set_segment()
1739 vmcb_mark_dirty(svm->vmcb, VMCB_SEG); in svm_set_segment()
1744 struct vcpu_svm *svm = to_svm(vcpu); in update_exception_bitmap() local
1746 clr_exception_intercept(svm, BP_VECTOR); in update_exception_bitmap()
1750 set_exception_intercept(svm, BP_VECTOR); in update_exception_bitmap()
1754 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) in new_asid() argument
1759 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; in new_asid()
1762 svm->asid_generation = sd->asid_generation; in new_asid()
1763 svm->vmcb->control.asid = sd->next_asid++; in new_asid()
1765 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in new_asid()
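
The new_asid() references above hand each vCPU a fresh ASID from a per-CPU pool and request a full TLB flush when the pool wraps, bumping a generation counter so vCPUs with stale ASIDs reallocate on their next run (compare the asid_generation checks in svm_vcpu_load() and pre_svm_run()). A minimal user-space sketch of that allocation scheme; field names are illustrative.

    #include <stdio.h>

    struct cpu_asid_state {
        unsigned int max_asid;
        unsigned int next_asid;
        unsigned int generation;
    };

    /* Returns the ASID to run with; sets *flush when the pool was recycled. */
    static unsigned int new_asid(struct cpu_asid_state *sd, int *flush)
    {
        *flush = 0;
        if (sd->next_asid > sd->max_asid) {
            ++sd->generation;       /* invalidates every ASID handed out so far */
            sd->next_asid = 1;      /* ASID 0 stays reserved for the host       */
            *flush = 1;             /* models TLB_CONTROL_FLUSH_ALL_ASID        */
        }
        return sd->next_asid++;
    }

    int main(void)
    {
        struct cpu_asid_state sd = { .max_asid = 3, .next_asid = 1, .generation = 1 };
        int flush;

        for (int i = 0; i < 5; i++) {
            unsigned int asid = new_asid(&sd, &flush);
            printf("asid=%u gen=%u flush=%d\n", asid, sd.generation, flush);
        }
        return 0;
    }
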
1768 static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value) in svm_set_dr6() argument
1770 struct vmcb *vmcb = svm->vmcb; in svm_set_dr6()
1780 struct vcpu_svm *svm = to_svm(vcpu); in svm_sync_dirty_debug_regs() local
1790 vcpu->arch.dr6 = svm->vmcb->save.dr6; in svm_sync_dirty_debug_regs()
1791 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
1793 set_dr_intercepts(svm); in svm_sync_dirty_debug_regs()
1798 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr7() local
1800 svm->vmcb->save.dr7 = value; in svm_set_dr7()
1801 vmcb_mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr7()
1804 static int pf_interception(struct vcpu_svm *svm) in pf_interception() argument
1806 u64 fault_address = svm->vmcb->control.exit_info_2; in pf_interception()
1807 u64 error_code = svm->vmcb->control.exit_info_1; in pf_interception()
1809 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address, in pf_interception()
1811 svm->vmcb->control.insn_bytes : NULL, in pf_interception()
1812 svm->vmcb->control.insn_len); in pf_interception()
1815 static int npf_interception(struct vcpu_svm *svm) in npf_interception() argument
1817 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); in npf_interception()
1818 u64 error_code = svm->vmcb->control.exit_info_1; in npf_interception()
1821 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, in npf_interception()
1823 svm->vmcb->control.insn_bytes : NULL, in npf_interception()
1824 svm->vmcb->control.insn_len); in npf_interception()
1827 static int db_interception(struct vcpu_svm *svm) in db_interception() argument
1829 struct kvm_run *kvm_run = svm->vcpu.run; in db_interception()
1830 struct kvm_vcpu *vcpu = &svm->vcpu; in db_interception()
1832 if (!(svm->vcpu.guest_debug & in db_interception()
1834 !svm->nmi_singlestep) { in db_interception()
1835 u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1; in db_interception()
1836 kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload); in db_interception()
1840 if (svm->nmi_singlestep) { in db_interception()
1841 disable_nmi_singlestep(svm); in db_interception()
1846 if (svm->vcpu.guest_debug & in db_interception()
1849 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; in db_interception()
1850 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; in db_interception()
1852 svm->vmcb->save.cs.base + svm->vmcb->save.rip; in db_interception()
1860 static int bp_interception(struct vcpu_svm *svm) in bp_interception() argument
1862 struct kvm_run *kvm_run = svm->vcpu.run; in bp_interception()
1865 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; in bp_interception()
1870 static int ud_interception(struct vcpu_svm *svm) in ud_interception() argument
1872 return handle_ud(&svm->vcpu); in ud_interception()
1875 static int ac_interception(struct vcpu_svm *svm) in ac_interception() argument
1877 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0); in ac_interception()
1881 static int gp_interception(struct vcpu_svm *svm) in gp_interception() argument
1883 struct kvm_vcpu *vcpu = &svm->vcpu; in gp_interception()
1884 u32 error_code = svm->vmcb->control.exit_info_1; in gp_interception()
1957 static void svm_handle_mce(struct vcpu_svm *svm) in svm_handle_mce() argument
1966 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); in svm_handle_mce()
1978 static int mc_interception(struct vcpu_svm *svm) in mc_interception() argument
1983 static int shutdown_interception(struct vcpu_svm *svm) in shutdown_interception() argument
1985 struct kvm_run *kvm_run = svm->vcpu.run; in shutdown_interception()
1991 clear_page(svm->vmcb); in shutdown_interception()
1992 init_vmcb(svm); in shutdown_interception()
1998 static int io_interception(struct vcpu_svm *svm) in io_interception() argument
2000 struct kvm_vcpu *vcpu = &svm->vcpu; in io_interception()
2001 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ in io_interception()
2005 ++svm->vcpu.stat.io_exits; in io_interception()
2013 svm->next_rip = svm->vmcb->control.exit_info_2; in io_interception()
2015 return kvm_fast_pio(&svm->vcpu, size, port, in); in io_interception()
2018 static int nmi_interception(struct vcpu_svm *svm) in nmi_interception() argument
2023 static int intr_interception(struct vcpu_svm *svm) in intr_interception() argument
2025 ++svm->vcpu.stat.irq_exits; in intr_interception()
2029 static int nop_on_interception(struct vcpu_svm *svm) in nop_on_interception() argument
2034 static int halt_interception(struct vcpu_svm *svm) in halt_interception() argument
2036 return kvm_emulate_halt(&svm->vcpu); in halt_interception()
2039 static int vmmcall_interception(struct vcpu_svm *svm) in vmmcall_interception() argument
2041 return kvm_emulate_hypercall(&svm->vcpu); in vmmcall_interception()
2044 static int vmload_interception(struct vcpu_svm *svm) in vmload_interception() argument
2050 if (nested_svm_check_permissions(svm)) in vmload_interception()
2053 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); in vmload_interception()
2056 kvm_inject_gp(&svm->vcpu, 0); in vmload_interception()
2062 ret = kvm_skip_emulated_instruction(&svm->vcpu); in vmload_interception()
2064 nested_svm_vmloadsave(nested_vmcb, svm->vmcb); in vmload_interception()
2065 kvm_vcpu_unmap(&svm->vcpu, &map, true); in vmload_interception()
2070 static int vmsave_interception(struct vcpu_svm *svm) in vmsave_interception() argument
2076 if (nested_svm_check_permissions(svm)) in vmsave_interception()
2079 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); in vmsave_interception()
2082 kvm_inject_gp(&svm->vcpu, 0); in vmsave_interception()
2088 ret = kvm_skip_emulated_instruction(&svm->vcpu); in vmsave_interception()
2090 nested_svm_vmloadsave(svm->vmcb, nested_vmcb); in vmsave_interception()
2091 kvm_vcpu_unmap(&svm->vcpu, &map, true); in vmsave_interception()
2096 static int vmrun_interception(struct vcpu_svm *svm) in vmrun_interception() argument
2098 if (nested_svm_check_permissions(svm)) in vmrun_interception()
2101 return nested_svm_vmrun(svm); in vmrun_interception()
2104 void svm_set_gif(struct vcpu_svm *svm, bool value) in svm_set_gif() argument
2113 if (vgif_enabled(svm)) in svm_set_gif()
2114 svm_clr_intercept(svm, INTERCEPT_STGI); in svm_set_gif()
2115 if (svm_is_intercept(svm, INTERCEPT_VINTR)) in svm_set_gif()
2116 svm_clear_vintr(svm); in svm_set_gif()
2118 enable_gif(svm); in svm_set_gif()
2119 if (svm->vcpu.arch.smi_pending || in svm_set_gif()
2120 svm->vcpu.arch.nmi_pending || in svm_set_gif()
2121 kvm_cpu_has_injectable_intr(&svm->vcpu)) in svm_set_gif()
2122 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_set_gif()
2124 disable_gif(svm); in svm_set_gif()
2131 if (!vgif_enabled(svm)) in svm_set_gif()
2132 svm_clear_vintr(svm); in svm_set_gif()
2136 static int stgi_interception(struct vcpu_svm *svm) in stgi_interception() argument
2140 if (nested_svm_check_permissions(svm)) in stgi_interception()
2143 ret = kvm_skip_emulated_instruction(&svm->vcpu); in stgi_interception()
2144 svm_set_gif(svm, true); in stgi_interception()
2148 static int clgi_interception(struct vcpu_svm *svm) in clgi_interception() argument
2152 if (nested_svm_check_permissions(svm)) in clgi_interception()
2155 ret = kvm_skip_emulated_instruction(&svm->vcpu); in clgi_interception()
2156 svm_set_gif(svm, false); in clgi_interception()
2160 static int invlpga_interception(struct vcpu_svm *svm) in invlpga_interception() argument
2162 struct kvm_vcpu *vcpu = &svm->vcpu; in invlpga_interception()
2164 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu), in invlpga_interception()
2165 kvm_rax_read(&svm->vcpu)); in invlpga_interception()
2168 kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu)); in invlpga_interception()
2170 return kvm_skip_emulated_instruction(&svm->vcpu); in invlpga_interception()
2173 static int skinit_interception(struct vcpu_svm *svm) in skinit_interception() argument
2175 trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu)); in skinit_interception()
2177 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in skinit_interception()
2181 static int wbinvd_interception(struct vcpu_svm *svm) in wbinvd_interception() argument
2183 return kvm_emulate_wbinvd(&svm->vcpu); in wbinvd_interception()
2186 static int xsetbv_interception(struct vcpu_svm *svm) in xsetbv_interception() argument
2188 u64 new_bv = kvm_read_edx_eax(&svm->vcpu); in xsetbv_interception()
2189 u32 index = kvm_rcx_read(&svm->vcpu); in xsetbv_interception()
2191 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { in xsetbv_interception()
2192 return kvm_skip_emulated_instruction(&svm->vcpu); in xsetbv_interception()
2198 static int rdpru_interception(struct vcpu_svm *svm) in rdpru_interception() argument
2200 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in rdpru_interception()
2204 static int task_switch_interception(struct vcpu_svm *svm) in task_switch_interception() argument
2208 int int_type = svm->vmcb->control.exit_int_info & in task_switch_interception()
2210 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; in task_switch_interception()
2212 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; in task_switch_interception()
2214 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; in task_switch_interception()
2218 tss_selector = (u16)svm->vmcb->control.exit_info_1; in task_switch_interception()
2220 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2223 else if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2234 svm->vcpu.arch.nmi_injected = false; in task_switch_interception()
2237 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2241 (u32)svm->vmcb->control.exit_info_2; in task_switch_interception()
2243 kvm_clear_exception_queue(&svm->vcpu); in task_switch_interception()
2246 kvm_clear_interrupt_queue(&svm->vcpu); in task_switch_interception()
2257 if (!skip_emulated_instruction(&svm->vcpu)) in task_switch_interception()
2264 return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, in task_switch_interception()
2268 static int cpuid_interception(struct vcpu_svm *svm) in cpuid_interception() argument
2270 return kvm_emulate_cpuid(&svm->vcpu); in cpuid_interception()
2273 static int iret_interception(struct vcpu_svm *svm) in iret_interception() argument
2275 ++svm->vcpu.stat.nmi_window_exits; in iret_interception()
2276 svm_clr_intercept(svm, INTERCEPT_IRET); in iret_interception()
2277 svm->vcpu.arch.hflags |= HF_IRET_MASK; in iret_interception()
2278 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); in iret_interception()
2279 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in iret_interception()
2283 static int invd_interception(struct vcpu_svm *svm) in invd_interception() argument
2286 return kvm_skip_emulated_instruction(&svm->vcpu); in invd_interception()
2289 static int invlpg_interception(struct vcpu_svm *svm) in invlpg_interception() argument
2292 return kvm_emulate_instruction(&svm->vcpu, 0); in invlpg_interception()
2294 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); in invlpg_interception()
2295 return kvm_skip_emulated_instruction(&svm->vcpu); in invlpg_interception()
2298 static int emulate_on_interception(struct vcpu_svm *svm) in emulate_on_interception() argument
2300 return kvm_emulate_instruction(&svm->vcpu, 0); in emulate_on_interception()
2303 static int rsm_interception(struct vcpu_svm *svm) in rsm_interception() argument
2305 return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2); in rsm_interception()
2308 static int rdpmc_interception(struct vcpu_svm *svm) in rdpmc_interception() argument
2313 return emulate_on_interception(svm); in rdpmc_interception()
2315 err = kvm_rdpmc(&svm->vcpu); in rdpmc_interception()
2316 return kvm_complete_insn_gp(&svm->vcpu, err); in rdpmc_interception()
2319 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, in check_selective_cr0_intercepted() argument
2322 unsigned long cr0 = svm->vcpu.arch.cr0; in check_selective_cr0_intercepted()
2325 if (!is_guest_mode(&svm->vcpu) || in check_selective_cr0_intercepted()
2326 (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) in check_selective_cr0_intercepted()
2333 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; in check_selective_cr0_intercepted()
2334 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); in check_selective_cr0_intercepted()
2342 static int cr_interception(struct vcpu_svm *svm) in cr_interception() argument
2349 return emulate_on_interception(svm); in cr_interception()
2351 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
2352 return emulate_on_interception(svm); in cr_interception()
2354 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in cr_interception()
2355 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) in cr_interception()
2358 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; in cr_interception()
2363 val = kvm_register_readl(&svm->vcpu, reg); in cr_interception()
2367 if (!check_selective_cr0_intercepted(svm, val)) in cr_interception()
2368 err = kvm_set_cr0(&svm->vcpu, val); in cr_interception()
2374 err = kvm_set_cr3(&svm->vcpu, val); in cr_interception()
2377 err = kvm_set_cr4(&svm->vcpu, val); in cr_interception()
2380 err = kvm_set_cr8(&svm->vcpu, val); in cr_interception()
2384 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
2390 val = kvm_read_cr0(&svm->vcpu); in cr_interception()
2393 val = svm->vcpu.arch.cr2; in cr_interception()
2396 val = kvm_read_cr3(&svm->vcpu); in cr_interception()
2399 val = kvm_read_cr4(&svm->vcpu); in cr_interception()
2402 val = kvm_get_cr8(&svm->vcpu); in cr_interception()
2406 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
2409 kvm_register_writel(&svm->vcpu, reg, val); in cr_interception()
2412 return kvm_complete_insn_gp(&svm->vcpu, err); in cr_interception()
2415 static int dr_interception(struct vcpu_svm *svm) in dr_interception() argument
2420 if (svm->vcpu.guest_debug == 0) { in dr_interception()
2426 clr_dr_intercepts(svm); in dr_interception()
2427 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in dr_interception()
2432 return emulate_on_interception(svm); in dr_interception()
2434 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in dr_interception()
2435 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; in dr_interception()
2438 if (!kvm_require_dr(&svm->vcpu, dr - 16)) in dr_interception()
2440 val = kvm_register_readl(&svm->vcpu, reg); in dr_interception()
2441 kvm_set_dr(&svm->vcpu, dr - 16, val); in dr_interception()
2443 if (!kvm_require_dr(&svm->vcpu, dr)) in dr_interception()
2445 kvm_get_dr(&svm->vcpu, dr, &val); in dr_interception()
2446 kvm_register_writel(&svm->vcpu, reg, val); in dr_interception()
2449 return kvm_skip_emulated_instruction(&svm->vcpu); in dr_interception()
2452 static int cr8_write_interception(struct vcpu_svm *svm) in cr8_write_interception() argument
2454 struct kvm_run *kvm_run = svm->vcpu.run; in cr8_write_interception()
2457 u8 cr8_prev = kvm_get_cr8(&svm->vcpu); in cr8_write_interception()
2459 r = cr_interception(svm); in cr8_write_interception()
2460 if (lapic_in_kernel(&svm->vcpu)) in cr8_write_interception()
2462 if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) in cr8_write_interception()
2488 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_msr() local
2492 msr_info->data = svm->vmcb->save.star; in svm_get_msr()
2496 msr_info->data = svm->vmcb->save.lstar; in svm_get_msr()
2499 msr_info->data = svm->vmcb->save.cstar; in svm_get_msr()
2502 msr_info->data = svm->vmcb->save.kernel_gs_base; in svm_get_msr()
2505 msr_info->data = svm->vmcb->save.sfmask; in svm_get_msr()
2509 msr_info->data = svm->vmcb->save.sysenter_cs; in svm_get_msr()
2512 msr_info->data = svm->sysenter_eip; in svm_get_msr()
2515 msr_info->data = svm->sysenter_esp; in svm_get_msr()
2523 msr_info->data = svm->tsc_aux; in svm_get_msr()
2531 msr_info->data = svm->vmcb->save.dbgctl; in svm_get_msr()
2534 msr_info->data = svm->vmcb->save.br_from; in svm_get_msr()
2537 msr_info->data = svm->vmcb->save.br_to; in svm_get_msr()
2540 msr_info->data = svm->vmcb->save.last_excp_from; in svm_get_msr()
2543 msr_info->data = svm->vmcb->save.last_excp_to; in svm_get_msr()
2546 msr_info->data = svm->nested.hsave_msr; in svm_get_msr()
2549 msr_info->data = svm->nested.vm_cr_msr; in svm_get_msr()
2556 msr_info->data = svm->spec_ctrl; in svm_get_msr()
2563 msr_info->data = svm->virt_spec_ctrl; in svm_get_msr()
2583 msr_info->data = svm->msr_decfg; in svm_get_msr()
2591 static int rdmsr_interception(struct vcpu_svm *svm) in rdmsr_interception() argument
2593 return kvm_emulate_rdmsr(&svm->vcpu); in rdmsr_interception()
2598 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_vm_cr() local
2606 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) in svm_set_vm_cr()
2609 svm->nested.vm_cr_msr &= ~chg_mask; in svm_set_vm_cr()
2610 svm->nested.vm_cr_msr |= (data & chg_mask); in svm_set_vm_cr()
2612 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; in svm_set_vm_cr()
2623 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_msr() local
2632 svm->vmcb->save.g_pat = data; in svm_set_msr()
2633 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_set_msr()
2643 svm->spec_ctrl = data; in svm_set_msr()
2658 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); in svm_set_msr()
2673 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); in svm_set_msr()
2683 svm->virt_spec_ctrl = data; in svm_set_msr()
2686 svm->vmcb->save.star = data; in svm_set_msr()
2690 svm->vmcb->save.lstar = data; in svm_set_msr()
2693 svm->vmcb->save.cstar = data; in svm_set_msr()
2696 svm->vmcb->save.kernel_gs_base = data; in svm_set_msr()
2699 svm->vmcb->save.sfmask = data; in svm_set_msr()
2703 svm->vmcb->save.sysenter_cs = data; in svm_set_msr()
2706 svm->sysenter_eip = data; in svm_set_msr()
2707 svm->vmcb->save.sysenter_eip = data; in svm_set_msr()
2710 svm->sysenter_esp = data; in svm_set_msr()
2711 svm->vmcb->save.sysenter_esp = data; in svm_set_msr()
2726 svm->tsc_aux = data; in svm_set_msr()
2727 wrmsrl(MSR_TSC_AUX, svm->tsc_aux); in svm_set_msr()
2738 svm->vmcb->save.dbgctl = data; in svm_set_msr()
2739 vmcb_mark_dirty(svm->vmcb, VMCB_LBR); in svm_set_msr()
2755 svm->nested.hsave_msr = data & PAGE_MASK; in svm_set_msr()
2777 svm->msr_decfg = data; in svm_set_msr()
2790 static int wrmsr_interception(struct vcpu_svm *svm) in wrmsr_interception() argument
2792 return kvm_emulate_wrmsr(&svm->vcpu); in wrmsr_interception()
2795 static int msr_interception(struct vcpu_svm *svm) in msr_interception() argument
2797 if (svm->vmcb->control.exit_info_1) in msr_interception()
2798 return wrmsr_interception(svm); in msr_interception()
2800 return rdmsr_interception(svm); in msr_interception()
2803 static int interrupt_window_interception(struct vcpu_svm *svm) in interrupt_window_interception() argument
2805 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in interrupt_window_interception()
2806 svm_clear_vintr(svm); in interrupt_window_interception()
2813 svm_toggle_avic_for_irq_window(&svm->vcpu, true); in interrupt_window_interception()
2815 ++svm->vcpu.stat.irq_window_exits; in interrupt_window_interception()
2819 static int pause_interception(struct vcpu_svm *svm) in pause_interception() argument
2821 struct kvm_vcpu *vcpu = &svm->vcpu; in pause_interception()
2831 static int nop_interception(struct vcpu_svm *svm) in nop_interception() argument
2833 return kvm_skip_emulated_instruction(&(svm->vcpu)); in nop_interception()
2836 static int monitor_interception(struct vcpu_svm *svm) in monitor_interception() argument
2839 return nop_interception(svm); in monitor_interception()
2842 static int mwait_interception(struct vcpu_svm *svm) in mwait_interception() argument
2845 return nop_interception(svm); in mwait_interception()
2848 static int invpcid_interception(struct vcpu_svm *svm) in invpcid_interception() argument
2850 struct kvm_vcpu *vcpu = &svm->vcpu; in invpcid_interception()
2864 type = svm->vmcb->control.exit_info_2; in invpcid_interception()
2865 gva = svm->vmcb->control.exit_info_1; in invpcid_interception()
2875 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
2946 struct vcpu_svm *svm = to_svm(vcpu); in dump_vmcb() local
2947 struct vmcb_control_area *control = &svm->vmcb->control; in dump_vmcb()
2948 struct vmcb_save_area *save = &svm->vmcb->save; in dump_vmcb()
3079 struct vcpu_svm *svm = to_svm(vcpu); in handle_exit() local
3081 u32 exit_code = svm->vmcb->control.exit_code; in handle_exit()
3085 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE)) in handle_exit()
3086 vcpu->arch.cr0 = svm->vmcb->save.cr0; in handle_exit()
3088 vcpu->arch.cr3 = svm->vmcb->save.cr3; in handle_exit()
3095 vmexit = nested_svm_exit_special(svm); in handle_exit()
3098 vmexit = nested_svm_exit_handled(svm); in handle_exit()
3104 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { in handle_exit()
3107 = svm->vmcb->control.exit_code; in handle_exit()
3131 return msr_interception(svm); in handle_exit()
3133 return interrupt_window_interception(svm); in handle_exit()
3135 return intr_interception(svm); in handle_exit()
3137 return halt_interception(svm); in handle_exit()
3139 return npf_interception(svm); in handle_exit()
3141 return svm_exit_handlers[exit_code](svm); in handle_exit()
3152 static void pre_svm_run(struct vcpu_svm *svm) in pre_svm_run() argument
3154 struct svm_cpu_data *sd = per_cpu(svm_data, svm->vcpu.cpu); in pre_svm_run()
3156 if (sev_guest(svm->vcpu.kvm)) in pre_svm_run()
3157 return pre_sev_run(svm, svm->vcpu.cpu); in pre_svm_run()
3160 if (svm->asid_generation != sd->asid_generation) in pre_svm_run()
3161 new_asid(svm, sd); in pre_svm_run()
3166 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_nmi() local
3168 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; in svm_inject_nmi()
3170 svm_set_intercept(svm, INTERCEPT_IRET); in svm_inject_nmi()
3176 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_irq() local
3181 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | in svm_set_irq()
3187 struct vcpu_svm *svm = to_svm(vcpu); in update_cr8_intercept() local
3192 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); in update_cr8_intercept()
3198 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); in update_cr8_intercept()
3203 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_blocked() local
3204 struct vmcb *vmcb = svm->vmcb; in svm_nmi_blocked()
3207 if (!gif_set(svm)) in svm_nmi_blocked()
3210 if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) in svm_nmi_blocked()
3214 (svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_nmi_blocked()
3221 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_allowed() local
3222 if (svm->nested.nested_run_pending) in svm_nmi_allowed()
3226 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) in svm_nmi_allowed()
3234 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nmi_mask() local
3236 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_get_nmi_mask()
3241 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nmi_mask() local
3244 svm->vcpu.arch.hflags |= HF_NMI_MASK; in svm_set_nmi_mask()
3245 svm_set_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
3247 svm->vcpu.arch.hflags &= ~HF_NMI_MASK; in svm_set_nmi_mask()
3248 svm_clr_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
3254 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_blocked() local
3255 struct vmcb *vmcb = svm->vmcb; in svm_interrupt_blocked()
3257 if (!gif_set(svm)) in svm_interrupt_blocked()
3262 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) in svm_interrupt_blocked()
3263 ? !(svm->nested.hsave->save.rflags & X86_EFLAGS_IF) in svm_interrupt_blocked()
3268 if (nested_exit_on_intr(svm)) in svm_interrupt_blocked()
3280 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_allowed() local
3281 if (svm->nested.nested_run_pending) in svm_interrupt_allowed()
3288 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm)) in svm_interrupt_allowed()
3296 struct vcpu_svm *svm = to_svm(vcpu); in enable_irq_window() local
3306 if (vgif_enabled(svm) || gif_set(svm)) { in enable_irq_window()
3314 svm_set_vintr(svm); in enable_irq_window()
3320 struct vcpu_svm *svm = to_svm(vcpu); in enable_nmi_window() local
3322 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) in enable_nmi_window()
3326 if (!gif_set(svm)) { in enable_nmi_window()
3327 if (vgif_enabled(svm)) in enable_nmi_window()
3328 svm_set_intercept(svm, INTERCEPT_STGI); in enable_nmi_window()
3336 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); in enable_nmi_window()
3337 svm->nmi_singlestep = true; in enable_nmi_window()
3338 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); in enable_nmi_window()
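
The enable_nmi_window() references above single-step the guest over whatever is currently blocking NMI delivery (an IRET in flight, an interrupt shadow): the guest RFLAGS is saved, TF and RF are set, and the resulting #DB exit (see the disable_nmi_singlestep() references earlier) restores only the flags KVM itself set. Below is a minimal sketch of that save/arm/restore pattern; the flag values match the x86 RFLAGS bits, but the struct and helper names are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFLAGS_TF (1u << 8)
    #define X86_EFLAGS_RF (1u << 16)

    struct vcpu_state {
        uint64_t rflags;
        uint64_t saved_rflags;
        int nmi_singlestep;
    };

    static void arm_nmi_singlestep(struct vcpu_state *v)
    {
        v->saved_rflags = v->rflags;              /* remember the guest's TF/RF */
        v->rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
        v->nmi_singlestep = 1;
    }

    static void disarm_nmi_singlestep(struct vcpu_state *v)
    {
        v->nmi_singlestep = 0;
        if (!(v->saved_rflags & X86_EFLAGS_TF))   /* only clear what we set */
            v->rflags &= ~X86_EFLAGS_TF;
        if (!(v->saved_rflags & X86_EFLAGS_RF))
            v->rflags &= ~X86_EFLAGS_RF;
    }

    int main(void)
    {
        struct vcpu_state v = { .rflags = 0x2 };

        arm_nmi_singlestep(&v);
        printf("armed:    rflags=%#llx\n", (unsigned long long)v.rflags);
        disarm_nmi_singlestep(&v);
        printf("disarmed: rflags=%#llx\n", (unsigned long long)v.rflags);
        return 0;
    }
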
3353 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb() local
3363 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in svm_flush_tlb()
3365 svm->asid_generation--; in svm_flush_tlb()
3370 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb_gva() local
3372 invlpga(gva, svm->vmcb->control.asid); in svm_flush_tlb_gva()
3381 struct vcpu_svm *svm = to_svm(vcpu); in sync_cr8_to_lapic() local
3386 if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) { in sync_cr8_to_lapic()
3387 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; in sync_cr8_to_lapic()
3394 struct vcpu_svm *svm = to_svm(vcpu); in sync_lapic_to_cr8() local
3402 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; in sync_lapic_to_cr8()
3403 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; in sync_lapic_to_cr8()
3406 static void svm_complete_interrupts(struct vcpu_svm *svm) in svm_complete_interrupts() argument
3410 u32 exitintinfo = svm->vmcb->control.exit_int_info; in svm_complete_interrupts()
3411 unsigned int3_injected = svm->int3_injected; in svm_complete_interrupts()
3413 svm->int3_injected = 0; in svm_complete_interrupts()
3419 if ((svm->vcpu.arch.hflags & HF_IRET_MASK) in svm_complete_interrupts()
3420 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { in svm_complete_interrupts()
3421 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); in svm_complete_interrupts()
3422 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3425 svm->vcpu.arch.nmi_injected = false; in svm_complete_interrupts()
3426 kvm_clear_exception_queue(&svm->vcpu); in svm_complete_interrupts()
3427 kvm_clear_interrupt_queue(&svm->vcpu); in svm_complete_interrupts()
3432 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3439 svm->vcpu.arch.nmi_injected = true; in svm_complete_interrupts()
3449 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) in svm_complete_interrupts()
3450 kvm_rip_write(&svm->vcpu, in svm_complete_interrupts()
3451 kvm_rip_read(&svm->vcpu) - in svm_complete_interrupts()
3456 u32 err = svm->vmcb->control.exit_int_info_err; in svm_complete_interrupts()
3457 kvm_requeue_exception_e(&svm->vcpu, vector, err); in svm_complete_interrupts()
3460 kvm_requeue_exception(&svm->vcpu, vector); in svm_complete_interrupts()
3463 kvm_queue_interrupt(&svm->vcpu, vector, false); in svm_complete_interrupts()
3472 struct vcpu_svm *svm = to_svm(vcpu); in svm_cancel_injection() local
3473 struct vmcb_control_area *control = &svm->vmcb->control; in svm_cancel_injection()
3478 svm_complete_interrupts(svm); in svm_cancel_injection()
3493 struct vcpu_svm *svm) in svm_vcpu_enter_exit() argument
3515 __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs); in svm_vcpu_enter_exit()
3518 native_wrmsrl(MSR_GS_BASE, svm->host.gs_base); in svm_vcpu_enter_exit()
3520 loadsegment(fs, svm->host.fs); in svm_vcpu_enter_exit()
3522 loadsegment(gs, svm->host.gs); in svm_vcpu_enter_exit()
3548 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_run() local
3550 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
3551 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
3552 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
3560 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { in svm_vcpu_run()
3566 disable_nmi_singlestep(svm); in svm_vcpu_run()
3570 pre_svm_run(svm); in svm_vcpu_run()
3574 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
3580 if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) in svm_vcpu_run()
3581 svm_set_dr6(svm, vcpu->arch.dr6); in svm_vcpu_run()
3583 svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM); in svm_vcpu_run()
3596 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); in svm_vcpu_run()
3598 svm_vcpu_enter_exit(vcpu, svm); in svm_vcpu_run()
3616 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); in svm_vcpu_run()
3620 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); in svm_vcpu_run()
3622 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
3623 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
3624 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
3625 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
3627 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
3628 kvm_before_interrupt(&svm->vcpu); in svm_vcpu_run()
3635 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
3636 kvm_after_interrupt(&svm->vcpu); in svm_vcpu_run()
3640 svm->next_rip = 0; in svm_vcpu_run()
3641 if (is_guest_mode(&svm->vcpu)) { in svm_vcpu_run()
3642 sync_nested_vmcb_control(svm); in svm_vcpu_run()
3643 svm->nested.nested_run_pending = 0; in svm_vcpu_run()
3646 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; in svm_vcpu_run()
3647 vmcb_mark_all_clean(svm->vmcb); in svm_vcpu_run()
3650 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) in svm_vcpu_run()
3651 svm->vcpu.arch.apf.host_apf_flags = in svm_vcpu_run()
3663 if (unlikely(svm->vmcb->control.exit_code == in svm_vcpu_run()
3665 svm_handle_mce(svm); in svm_vcpu_run()
3667 svm_complete_interrupts(svm); in svm_vcpu_run()
3678 struct vcpu_svm *svm = to_svm(vcpu); in svm_load_mmu_pgd() local
3683 svm->vmcb->control.nested_cr3 = cr3; in svm_load_mmu_pgd()
3684 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_load_mmu_pgd()
3692 svm->vmcb->save.cr3 = cr3; in svm_load_mmu_pgd()
3693 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_load_mmu_pgd()
3748 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_after_set_cpuid() local
3756 svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && in svm_vcpu_after_set_cpuid()
3757 guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS); in svm_vcpu_after_set_cpuid()
3760 svm_check_invpcid(svm); in svm_vcpu_after_set_cpuid()
3863 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_intercept() local
3866 struct vmcb *vmcb = svm->vmcb; in svm_check_intercept()
3891 if (!(vmcb_is_intercept(&svm->nested.ctl, in svm_check_intercept()
3969 vmexit = nested_svm_exit_handled(svm); in svm_check_intercept()
3996 struct vcpu_svm *svm = to_svm(vcpu); in svm_smi_blocked() local
3999 if (!gif_set(svm)) in svm_smi_blocked()
4007 struct vcpu_svm *svm = to_svm(vcpu); in svm_smi_allowed() local
4008 if (svm->nested.nested_run_pending) in svm_smi_allowed()
4012 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm)) in svm_smi_allowed()
4020 struct vcpu_svm *svm = to_svm(vcpu); in svm_pre_enter_smm() local
4027 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); in svm_pre_enter_smm()
4029 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_pre_enter_smm()
4030 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_pre_enter_smm()
4031 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_pre_enter_smm()
4033 ret = nested_svm_vmexit(svm); in svm_pre_enter_smm()
4042 struct vcpu_svm *svm = to_svm(vcpu); in svm_pre_leave_smm() local
4058 if (kvm_vcpu_map(&svm->vcpu, in svm_pre_leave_smm()
4062 if (svm_allocate_nested(svm)) in svm_pre_leave_smm()
4065 ret = enter_svm_guest_mode(svm, vmcb12_gpa, map.hva); in svm_pre_leave_smm()
4066 kvm_vcpu_unmap(&svm->vcpu, &map, true); in svm_pre_leave_smm()
4075 struct vcpu_svm *svm = to_svm(vcpu); in enable_smi_window() local
4077 if (!gif_set(svm)) { in enable_smi_window()
4078 if (vgif_enabled(svm)) in enable_smi_window()
4079 svm_set_intercept(svm, INTERCEPT_STGI); in enable_smi_window()
4166 struct vcpu_svm *svm = to_svm(vcpu); in svm_apic_init_signal_blocked() local
4175 return !gif_set(svm) || in svm_apic_init_signal_blocked()
4176 (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT)); in svm_apic_init_signal_blocked()