/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
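
/*
 * Usage sketch (illustrative, not part of the original header): decoding a
 * world-switch return value, where a pending SError is folded into the top
 * bit of the exception code.
 *
 *	static void decode_exit(int exit_code)
 *	{
 *		if (ARM_SERROR_PENDING(exit_code))
 *			exit_code = ARM_EXCEPTION_CODE(exit_code);
 *
 *		if (ARM_EXCEPTION_IS_TRAP(exit_code)) {
 *			// handle a trapped guest access
 *		}
 *	}
 */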

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
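
/*
 * This table is in __print_symbolic() format; a tracepoint can pretty-print
 * an exit code with it (sketch, assuming the usual TP_printk() plumbing):
 *
 *	TP_printk("ret=%s",
 *		  __print_symbolic(__entry->ret, kvm_arm_exception_type))
 */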

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
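
/*
 * For example (expansion sketch):
 *
 *	KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run)
 *	== ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
 *			      ARM_SMCCC_OWNER_VENDOR_HYP,
 *			      __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run)
 *
 * i.e. a fast, 64-bit, vendor-hyp-owned SMCCC function ID carrying the
 * per-function index defined below.
 */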

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run			1
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context		5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config		8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr		9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr		10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs		11
#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2		12
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs		13
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs		14
#define __KVM_HOST_SMCCC_FUNC___pkvm_init			15
#define __KVM_HOST_SMCCC_FUNC___pkvm_create_mappings		16
#define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping	17
#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector		18
#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize		19
#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp			20
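
/*
 * Dispatch sketch (assumed plumbing, see the kvm_call_hyp_nvhe() machinery
 * in kvm_host.h): the host reaches one of the functions above by issuing an
 * HVC with the matching SMCCC ID, e.g.:
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context), &res);
 */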

#ifndef __ASSEMBLY__

#include <linux/mm.h>

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Declare a pair of symbols sharing the same name, one defined in the VHE
 * hyp implementation and the other in the nVHE one.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)
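
/*
 * Example (illustrative): declaring a per-CPU variable visible to both hyp
 * flavours; the nVHE copy is transparently renamed via kvm_nvhe_sym():
 *
 *	DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
 */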

/*
 * Compute a pointer to a symbol defined in the nVHE percpu region.
 * Returns NULL if the percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_arm_hyp_percpu_base[cpu];				\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
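
/*
 * Usage sketch (illustrative): the NULL check matters because the nVHE
 * percpu pages are only allocated during KVM initialisation.
 *
 *	struct kvm_host_data *hd = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu);
 *
 *	if (!hd)
 *		return;		// percpu memory not allocated yet
 */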

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif
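
/*
 * With neither __KVM_NVHE_HYPERVISOR__ nor __KVM_VHE_HYPERVISOR__ defined
 * (i.e. in kernel proper), the choice is made at run time (sketch):
 *
 *	unsigned long v = (unsigned long)CHOOSE_HYP_SYM(__kvm_hyp_vector);
 *	// VHE:  address of __kvm_hyp_vector
 *	// nVHE: address of kvm_nvhe_sym(__kvm_hyp_vector)
 */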

struct kvm_nvhe_init_params {
	unsigned long mair_el2;
	unsigned long tcr_el2;
	unsigned long tpidr_el2;
	unsigned long stack_hyp_va;
	phys_addr_t pgd_pa;
	unsigned long hcr_el2;
	unsigned long vttbr;
	unsigned long vtcr;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(__va_function(kvm_nvhe_sym(sym)))
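
/*
 * Usage sketch (illustrative): hyp symbols are passed through kvm_ksym_ref()
 * so that nVHE gets the linear-map alias while VHE can use the kernel
 * address directly:
 *
 *	void *vec = kvm_ksym_ref(__kvm_hyp_vector);
 */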

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"


#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
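
/*
 * Usage sketch (illustrative): a guarded stage-1 address translation at EL2;
 * if the AT instruction takes an unexpected exception, the extable fixup
 * runs and the macro evaluates to -EFAULT instead of panicking hyp:
 *
 *	if (__kvm_at("s1e1r", far))
 *		return false;	// translation failed
 *	par = read_sysreg(par_el1);
 */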


#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * Same format as _asm_extable, but output to a different section so that it
 * can be mapped to EL2. The KVM version is not sorted. The caller must
 * ensure that x18 holds the hypervisor value (so that any
 * Shadow-Call-Stack-instrumented code can write to it), and that SPSR_EL2
 * and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm
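
/*
 * Usage sketch (illustrative, GAS syntax): pair a potentially-faulting
 * instruction with its fixup label:
 *
 *	1:	ldr	x0, [x2]		// may fault
 *		b	3f
 *	2:	mov	x0, xzr			// fixup on unexpected exception
 *	3:
 *	_kvm_extable	1b, 2b
 */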

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8 * (x))
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
.macro restore_callee_saved_regs ctxt
	// We require that \ctxt is not one of x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp,	sp_el0
	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp,	  [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */