/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
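
/*
 * Illustrative sketch (not spelled out by this header): hyp code typically
 * dereferences these per-CPU symbols with the usual per-CPU accessors, e.g.
 *
 *	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 */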

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),	\
					 __mrs_s("%0", r##vh),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
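
/*
 * Usage sketch: pass the SYS_* name without its ELx suffix and the accessor
 * picks the _EL1 or _EL12 encoding depending on VHE, e.g.
 *
 *	u64 sctlr = read_sysreg_el1(SYS_SCTLR);
 *	write_sysreg_el1(sctlr, SYS_SCTLR);
 */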

/*
 * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
 * static inline can allow the compiler to out-of-line this. KVM always wants
 * the macro version as it's always inlined.
 */
#define __kvm_swab32(x)	___constant_swab32(x)
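
/*
 * For example, __kvm_swab32(0x12345678) expands to a constant expression
 * evaluating to 0x78563412, with no out-of-line call.
 */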

int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
#else
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
#endif
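
/*
 * Typical pairing on the VHE path (sketch; context pointer names are
 * illustrative, the actual callers live in the hyp switch code):
 *
 *	sysreg_save_host_state_vhe(host_ctxt);
 *	sysreg_restore_guest_state_vhe(guest_ctxt);
 *	...guest runs...
 *	sysreg_save_guest_state_vhe(guest_ctxt);
 *	sysreg_restore_host_state_vhe(host_ctxt);
 */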

void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
#endif

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_save_state(void *sve_pffr, u32 *fpsr);
void __sve_restore_state(void *sve_pffr, u32 *fpsr);

#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);
#endif

u64 __guest_enter(struct kvm_vcpu *vcpu);
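
/*
 * Sketch of the world-switch loop built on top of this (the exit fixup
 * helper is part of the hyp switch code, not declared in this header):
 *
 *	do {
 *		exit_code = __guest_enter(vcpu);
 *	} while (fixup_guest_exit(vcpu, &exit_code));
 */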

bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);

void __noreturn hyp_panic(void);
#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
			       u64 elr, u64 par);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
			    phys_addr_t pgd, void *sp, void *cont_fn);
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif

extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);

#endif /* __ARM64_KVM_HYP_H__ */