Lines matching "0x8000000a"

72 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
74 #define TSC_RATIO_RSVD 0xffffff0000000000ULL
75 #define TSC_RATIO_MIN 0x0000000000000001ULL
76 #define TSC_RATIO_MAX 0x000000ffffffffffULL
89 #define TSC_RATIO_DEFAULT 0x0100000000ULL
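
The TSC_RATIO constants above describe AMD's TSC ratio MSR, which holds an 8.32 fixed-point multiplier, so a ratio of 1.0 is encoded as 1ULL << 32 == 0x0100000000 (TSC_RATIO_DEFAULT). A minimal stand-alone sketch of that encoding, assuming a plain guest/host kHz ratio rather than KVM's actual scaling helpers:

#include <stdint.h>

/*
 * Sketch only: encode a guest/host TSC frequency ratio in the 8.32
 * fixed-point format of the TSC ratio MSR.  1.0 == 1ULL << 32, matching
 * TSC_RATIO_DEFAULT above; values beyond TSC_RATIO_MAX would spill into
 * the bits covered by TSC_RATIO_RSVD.
 */
static uint64_t tsc_ratio(uint64_t guest_khz, uint64_t host_khz)
{
	return (guest_khz << 32) / host_khz;	/* scale into the 32-bit fraction */
}
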
193 static bool __read_mostly dump_invalid_vmcb = 0;
213 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
224 for (i = 0; i < NUM_MSR_MAPS; i++) { in svm_msrpm_offset()
254 asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr)); in invlpga()
305 return 0; in svm_set_efer()
311 u32 ret = 0; in svm_get_interrupt_shadow()
322 if (mask == 0) in svm_set_interrupt_shadow()
333 if (nrips && svm->vmcb->control.next_rip != 0) { in skip_emulated_instruction()
340 return 0; in skip_emulated_instruction()
344 svm_set_interrupt_shadow(vcpu, 0); in skip_emulated_instruction()
376 | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0) in svm_queue_exception()
416 * all osvw.status bits inside that length, including bit 0 (which is in svm_init_osvw()
418 * osvw_len is 0 then osvw_status[0] carries no information. We need to in svm_init_osvw()
422 if (osvw_len == 0 && boot_cpu_data.x86 == 0x10) in svm_init_osvw()
432 return 0; in has_svm()
437 return 0; in has_svm()
504 uint64_t len, status = 0; in svm_hardware_enable()
513 osvw_status = osvw_len = 0; in svm_hardware_enable()
521 osvw_status = osvw_len = 0; in svm_hardware_enable()
527 return 0; in svm_hardware_enable()
565 return 0; in svm_cpu_init()
579 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) in direct_access_msr_slot()
623 bit_write = 2 * (msr & 0x0f) + 1; in msr_write_intercepted()
646 read = 0; in set_msr_interception_bitmap()
649 write = 0; in set_msr_interception_bitmap()
652 bit_read = 2 * (msr & 0x0f); in set_msr_interception_bitmap()
653 bit_write = 2 * (msr & 0x0f) + 1; in set_msr_interception_bitmap()
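
The msrpm arithmetic above packs two intercept bits per MSR (read, then write), four MSRs per byte, across the three ranges listed in msrpm_ranges. A small hypothetical helper, not the kernel's svm_msrpm_offset(), assuming each range covers 0x2000 MSRs as in AMD's documented layout:

#include <stdbool.h>
#include <stdint.h>

#define MSRS_IN_RANGE	0x2000u			/* MSRs covered per range (assumed) */
#define BYTES_PER_RANGE	(MSRS_IN_RANGE / 4)	/* 2 bits per MSR, 4 MSRs per byte */

static const uint32_t ranges[] = { 0, 0xc0000000, 0xc0010000 };

/* Find the byte and read-intercept bit for an MSR; the write bit is *bit + 1. */
static bool msrpm_locate(uint32_t msr, uint32_t *byte, uint32_t *bit)
{
	for (unsigned int i = 0; i < 3; i++) {
		uint32_t off;

		if (msr < ranges[i] || msr >= ranges[i] + MSRS_IN_RANGE)
			continue;

		off   = msr - ranges[i];
		*byte = i * BYTES_PER_RANGE + off / 4;
		*bit  = 2 * (off & 0x3);
		return true;
	}
	return false;				/* MSR outside the covered ranges */
}
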
680 memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER)); in svm_vcpu_alloc_msrpm()
689 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { in svm_vcpu_init_msrpm()
712 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { in svm_msr_filter_changed()
725 for (i = 0; i < MSRPM_OFFSETS; ++i) { in add_msr_offset()
752 memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets)); in init_msrpm_offsets()
754 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { in init_msrpm_offsets()
780 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); in svm_disable_lbrv()
781 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); in svm_disable_lbrv()
782 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); in svm_disable_lbrv()
783 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0); in svm_disable_lbrv()
847 if (cpuid_eax(0x80000000) < 0x8000001f) in svm_adjust_mmio_mask()
855 enc_bit = cpuid_ebx(0x8000001f) & 0x3f; in svm_adjust_mmio_mask()
871 mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; in svm_adjust_mmio_mask()
887 iopm_base = 0; in svm_hardware_teardown()
894 supported_xss = 0; in svm_set_cpu_caps()
896 /* CPUID 0x80000001 and 0x8000000A (SVM features) */ in svm_set_cpu_caps()
907 /* CPUID 0x80000008 */ in svm_set_cpu_caps()
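
The svm_set_cpu_caps() comments above refer to CPUID leaf 0x8000000A, the SVM feature identification leaf. A hedged user-space sketch of probing it (not KVM code; assumes GCC/Clang <cpuid.h> and the AMD-documented layout: EAX[7:0] = SVM revision, EBX = number of ASIDs, EDX = feature flags such as nested paging and NRIP save):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* The leaf only exists if the extended CPUID range reaches 0x8000000a. */
	if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
	    eax < 0x8000000a)
		return 1;

	/* CPUID Fn8000_000A: SVM revision and feature identification. */
	__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx);
	printf("SVM revision %u, %u ASIDs, feature flags 0x%08x\n",
	       eax & 0xff, ebx, edx);
	return 0;
}
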
929 memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER)); in svm_hardware_setup()
950 pause_filter_count = 0; in svm_hardware_setup()
951 pause_filter_thresh = 0; in svm_hardware_setup()
953 pause_filter_thresh = 0; in svm_hardware_setup()
1040 return 0; in svm_hardware_setup()
1049 seg->selector = 0; in init_seg()
1052 seg->limit = 0xffff; in init_seg()
1053 seg->base = 0; in init_seg()
1058 seg->selector = 0; in init_sys_seg()
1060 seg->limit = 0xffff; in init_sys_seg()
1061 seg->base = 0; in init_sys_seg()
1067 u64 g_tsc_offset = 0; in svm_write_l1_tsc_offset()
1106 svm->vcpu.arch.hflags = 0; in init_vmcb()
1176 save->cs.selector = 0xf000; in init_vmcb()
1177 save->cs.base = 0xffff0000; in init_vmcb()
1181 save->cs.limit = 0xffff; in init_vmcb()
1183 save->gdtr.limit = 0xffff; in init_vmcb()
1184 save->idtr.limit = 0xffff; in init_vmcb()
1189 svm_set_cr4(&svm->vcpu, 0); in init_vmcb()
1190 svm_set_efer(&svm->vcpu, 0); in init_vmcb()
1191 save->dr6 = 0xffff0ff0; in init_vmcb()
1193 save->rip = 0x0000fff0; in init_vmcb()
1214 save->cr3 = 0; in init_vmcb()
1215 save->cr4 = 0; in init_vmcb()
1217 svm->asid_generation = 0; in init_vmcb()
1219 svm->nested.vmcb12_gpa = 0; in init_vmcb()
1220 svm->vcpu.arch.hflags = 0; in init_vmcb()
1269 svm->spec_ctrl = 0; in svm_vcpu_reset()
1270 svm->virt_spec_ctrl = 0; in svm_vcpu_reset()
1293 BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); in svm_create_vcpu()
1321 svm->asid_generation = 0; in svm_create_vcpu()
1325 vcpu->arch.microcode_version = 0x01000065; in svm_create_vcpu()
1327 return 0; in svm_create_vcpu()
1368 svm->asid_generation = 0; in svm_vcpu_load()
1379 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) in svm_vcpu_load()
1418 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) in svm_vcpu_put()
1475 control->int_vector = 0x0; in svm_set_vintr()
1478 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); in svm_set_vintr()
1551 var->g = s->limit > 0xfffff; in svm_get_segment()
1565 var->type |= 0x2; in svm_get_segment()
1579 var->type |= 0x1; in svm_get_segment()
1589 var->db = 0; in svm_get_segment()
1782 get_debugreg(vcpu->arch.db[0], 0); in svm_sync_dirty_debug_regs()
1854 return 0; in db_interception()
1867 return 0; in bp_interception()
1877 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0); in ac_interception()
1914 if (value != 0xb600000000010015ULL) in is_erratum_383()
1918 for (i = 0; i < 6; ++i) in is_erratum_383()
1919 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0); in is_erratum_383()
1995 return 0; in shutdown_interception()
2006 string = (io_info & SVM_IOIO_STR_MASK) != 0; in io_interception()
2007 in = (io_info & SVM_IOIO_TYPE_MASK) != 0; in io_interception()
2009 return kvm_emulate_instruction(vcpu, 0); in io_interception()
2056 kvm_inject_gp(&svm->vcpu, 0); in vmload_interception()
2082 kvm_inject_gp(&svm->vcpu, 0); in vmsave_interception()
2191 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { in xsetbv_interception()
2216 u32 error_code = 0; in task_switch_interception()
2258 return 0; in task_switch_interception()
2292 return kvm_emulate_instruction(&svm->vcpu, 0); in invlpg_interception()
2300 return kvm_emulate_instruction(&svm->vcpu, 0); in emulate_on_interception()
2351 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
2360 err = 0; in cr_interception()
2366 case 0: in cr_interception()
2389 case 0: in cr_interception()
2420 if (svm->vcpu.guest_debug == 0) { in dr_interception()
2465 return 0; in cr8_write_interception()
2470 msr->data = 0; in svm_get_msr_feature()
2478 return 0; in svm_get_msr_feature()
2483 return 0; in svm_get_msr_feature()
2527 * safely return them on rdmsr. They will always be 0 until LBRV is in svm_get_msr()
2572 if (family < 0 || model < 0) in svm_get_msr()
2575 msr_info->data = 0; in svm_get_msr()
2577 if (family == 0x15 && in svm_get_msr()
2578 (model >= 0x2 && model < 0x20)) in svm_get_msr()
2579 msr_info->data = 0x1E; in svm_get_msr()
2588 return 0; in svm_get_msr()
2618 return 0; in svm_set_vm_cr()
2673 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); in svm_set_msr()
2731 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", in svm_set_msr()
2740 if (data & (1ULL<<0)) in svm_set_msr()
2760 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); in svm_set_msr()
2787 return 0; in svm_set_msr()
2822 bool in_kernel = (svm_get_cpl(vcpu) == 0); in pause_interception()
2868 kvm_inject_gp(vcpu, 0); in invpcid_interception()
2956 pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff); in dump_vmcb()
2958 pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff); in dump_vmcb()
3074 *error_code = 0; in svm_get_exit_info()
3110 return 0; in handle_exit()
3118 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code); in handle_exit()
3124 vcpu->run->internal.data[0] = exit_code; in handle_exit()
3126 return 0; in handle_exit()
3299 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes in enable_irq_window()
3343 return 0; in svm_set_tss_addr()
3348 return 0; in svm_set_identity_map_addr()
3413 svm->int3_injected = 0; in svm_complete_interrupts()
3477 control->event_inj = 0; in svm_cancel_injection()
3640 svm->next_rip = 0; in svm_vcpu_run()
3643 svm->nested.nested_run_pending = 0; in svm_vcpu_run()
3704 return 0; in is_disabled()
3713 hypercall[0] = 0x0f; in svm_patch_hypercall()
3714 hypercall[1] = 0x01; in svm_patch_hypercall()
3715 hypercall[2] = 0xd9; in svm_patch_hypercall()
3720 return 0; in svm_check_processor_compat()
3743 return 0; in svm_get_mt_mask()
3764 best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0); in svm_vcpu_after_set_cpuid()
3766 vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f)); in svm_vcpu_after_set_cpuid()
3899 cr0 &= 0xfUL; in svm_check_intercept()
3900 val &= 0xfUL; in svm_check_intercept()
3919 vmcb->control.exit_info_1 = 0; in svm_check_intercept()
3935 exit_info = ((info->src_val & 0xffff) << 16) | in svm_check_intercept()
3939 exit_info = (info->dst_val & 0xffff) << 16; in svm_check_intercept()
3991 vcpu->arch.mcg_cap &= 0x1ff; in svm_setup_mce()
4025 put_smstate(u64, smstate, 0x7ed8, 1); in svm_pre_enter_smm()
4027 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); in svm_pre_enter_smm()
4037 return 0; in svm_pre_enter_smm()
4044 int ret = 0; in svm_pre_leave_smm()
4047 u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); in svm_pre_leave_smm()
4048 u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8); in svm_pre_leave_smm()
4049 u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); in svm_pre_leave_smm()
4103 * return 0 instead of the correct guest instruction bytes. in svm_can_emulate_instruction()
4106 * uses a special opcode which attempts to read data using CPL=0 in svm_can_emulate_instruction()
4112 * returned 0 in GuestIntrBytes field of the VMCB. in svm_can_emulate_instruction()
4117 * Otherwise, vCPU CR4.SMEP=0, errata could be triggered by any vCPU CPL. in svm_can_emulate_instruction()
4156 kvm_inject_gp(vcpu, 0); in svm_can_emulate_instruction()
4197 return 0; in svm_vm_init()