Searched refs:kern_hyp_va (Results 1 – 12 of 12) sorted by relevance
28 cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu)); in handle___kvm_vcpu_run()
42 __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level); in handle___kvm_tlb_flush_vmid_ipa()
49 __kvm_tlb_flush_vmid(kern_hyp_va(mmu)); in handle___kvm_tlb_flush_vmid()
56 __kvm_flush_cpu_context(kern_hyp_va(mmu)); in handle___kvm_flush_cpu_context()
102 __vgic_v3_save_aprs(kern_hyp_va(cpu_if)); in handle___vgic_v3_save_aprs()
109 __vgic_v3_restore_aprs(kern_hyp_va(cpu_if)); in handle___vgic_v3_restore_aprs()
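The hits above are the nVHE host-call handlers: every pointer the host passes down through an HVC is a kernel VA and is run through kern_hyp_va() before EL2 dereferences it. Below is a minimal sketch of that pattern, modelled on the handle___kvm_tlb_flush_vmid() hit; the DECLARE_REG usage and the body beyond the quoted call follow the usual hyp-main.c conventions and are assumptions, not shown by the search result itself.

    /*
     * Sketch only: the general shape of an nVHE host-call handler.
     * DECLARE_REG and struct kvm_cpu_context are assumed from the usual
     * hyp-main.c conventions; only the kern_hyp_va() call is confirmed
     * by the hit above.
     */
    static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
    {
            /* Argument 1 of the HVC is a host kernel pointer... */
            DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

            /* ...so translate it to a hyp VA before EL2 touches it. */
            __kvm_tlb_flush_vmid(kern_hyp_va(mmu));
    }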
37 this_cpu_base = kern_hyp_va(cpu_base_array[cpu]); in __hyp_per_cpu_offset()
217 __load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu)); in __kvm_vcpu_run()
113 start = (void *)kern_hyp_va(per_cpu_base[i]); in recreate_hyp_mappings()
65 .macro kern_hyp_va reg
136 #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v)))) macro
288 __load_stage2(mmu, kern_hyp_va(mmu->arch)->vtcr); in __load_guest_stage2()
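The hit at source line 136 is the C definition: kern_hyp_va() is a type-preserving wrapper around __kern_hyp_va(), whose instruction sequence is patched at boot by kvm_update_va_mask(). As a rough approximation of what the patched sequence ends up computing (va_mask, tag_val and tag_lsb below are illustrative stand-ins for the values the patching bakes in, not real symbols exported to this form):

    /*
     * Conceptual sketch only, not the in-tree implementation: the real
     * __kern_hyp_va() is an ALTERNATIVE_CB instruction sequence rewritten
     * at boot. Net effect: keep the low bits of the kernel VA and replace
     * the high bits with a fixed (optionally randomized) tag.
     */
    static inline unsigned long example_kern_hyp_va(unsigned long kern_va,
                                                    unsigned long va_mask,
                                                    unsigned long tag_val,
                                                    unsigned int tag_lsb)
    {
            return (kern_va & va_mask) | (tag_val << tag_lsb);
    }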
378 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
57 vcpu->arch.host_thread_info = kern_hyp_va(ti); in kvm_arch_vcpu_run_map_fp()
58 vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd); in kvm_arch_vcpu_run_map_fp()
289 unsigned long start = kern_hyp_va((unsigned long)from); in create_hyp_mappings()
290 unsigned long end = kern_hyp_va((unsigned long)to); in create_hyp_mappings()
1327 kern_hyp_va(PAGE_OFFSET), in kvm_mmu_init()
1328 kern_hyp_va((unsigned long)high_memory - 1)); in kvm_mmu_init()
1330 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && in kvm_mmu_init()
1331 hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) && in kvm_mmu_init()
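The create_hyp_mappings() hits show the other half of the contract: before the host hands an object to EL2 it maps the kernel VA range into the hyp page tables (create_hyp_mappings() translates 'from'/'to' itself), and afterwards it refers to the object only through its kern_hyp_va() alias. A hedged sketch of that usage pattern, with do_something_at_el2() standing in for whatever hyp call eventually consumes the pointer (it is a placeholder, not a real KVM interface):

    /*
     * Sketch of the share-with-hyp pattern visible in the hits above.
     * Error handling and the actual call sites are simplified; assumes
     * the usual KVM/arm64 headers (asm/kvm_mmu.h) are in scope.
     */
    static int share_with_hyp(void *obj, size_t size)
    {
            int err;

            /* Map [obj, obj + size) into the EL2 (hyp) address space. */
            err = create_hyp_mappings(obj, obj + size, PAGE_HYP);
            if (err)
                    return err;

            /* EL2 must use the hyp alias of the pointer from now on. */
            do_something_at_el2(kern_hyp_va(obj));
            return 0;
    }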
1384 base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)); in kvm_init_vector_slots()
1387 base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs)); in kvm_init_vector_slots()
1437 params->stack_hyp_va = kern_hyp_va(per_cpu(kvm_arm_hyp_stack_page, cpu) + PAGE_SIZE); in cpu_prepare_hyp_mode()
1739 num_possible_cpus(), kern_hyp_va(per_cpu_base), in do_pkvm_init()
141 guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); in __debug_switch_to_guest_common()
160 guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); in __debug_switch_to_host_common()
39 struct kvm *kvm = kern_hyp_va(vcpu->kvm); in __vgic_v2_perform_cpuif_access()
100 random) offset from the linear mapping. See the kern_hyp_va macro and
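The fragment above is from a comment describing the nVHE hyp VA layout: objects in the kernel's linear map appear to EL2 at a fixed, potentially randomized offset. One consequence, shown as a hedged sketch (assuming both arguments are linear-map addresses and the usual KVM/arm64 headers are available), is that the translation preserves distances within the linear map:

    /*
     * Sketch: because the hyp VA space is the kernel linear map shifted by a
     * constant (possibly randomized) offset, translating two linear-map
     * addresses preserves the distance between them. Purely illustrative.
     */
    static inline bool hyp_offset_is_preserved(void *a, void *b)
    {
            unsigned long ha = (unsigned long)kern_hyp_va(a);
            unsigned long hb = (unsigned long)kern_hyp_va(b);

            return (ha - hb) == ((unsigned long)a - (unsigned long)b);
    }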