/* xref: /OK3568_Linux_fs/kernel/arch/arm64/include/asm/kvm_ptrauth.h (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
/* SPDX-License-Identifier: GPL-2.0 */
/* arch/arm64/include/asm/kvm_ptrauth.h: Guest/host ptrauth save/restore
 * Copyright 2019 Arm Limited
 * Authors: Mark Rutland <mark.rutland@arm.com>
 *	    Amit Daniel Kachhap <amit.kachhap@arm.com>
 */

#ifndef __ASM_KVM_PTRAUTH_H
#define __ASM_KVM_PTRAUTH_H

#ifdef __ASSEMBLY__

#include <asm/sysreg.h>

#ifdef	CONFIG_ARM64_PTR_AUTH

/*
 * Byte offset of a ptrauth key slot (CPU_AP*KEY*_EL1) from the
 * CPU_APIAKEYLO_EL1 slot within a kvm_cpu_context (see kvm_host.h).
 */
#define PTRAUTH_REG_OFFSET(x)	(x - CPU_APIAKEYLO_EL1)

/*
 * CPU_AP*_EL1 values exceed the immediate offset range (512) of the stp
 * instruction, so the macros below take CPU_APIAKEYLO_EL1 as a base and
 * calculate each key's offset from that base, avoiding an extra add
 * instruction. These macros assume the key offsets follow the order of
 * the sysreg enum in kvm_host.h.
 */
/*
 * Read all five pointer authentication key pairs (APIA, APIB, APDA,
 * APDB, APGA) from the CPU's system registers and store them into a
 * kvm_cpu_context.
 *
 * base:	address of the CPU_APIAKEYLO_EL1 slot of the context
 *		(i.e. ctxt + CPU_APIAKEYLO_EL1)
 * reg1, reg2:	scratch GPRs, clobbered
 */
.macro	ptrauth_save_state base, reg1, reg2
	mrs_s	\reg1, SYS_APIAKEYLO_EL1
	mrs_s	\reg2, SYS_APIAKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)]
	mrs_s	\reg1, SYS_APIBKEYLO_EL1
	mrs_s	\reg2, SYS_APIBKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)]
	mrs_s	\reg1, SYS_APDAKEYLO_EL1
	mrs_s	\reg2, SYS_APDAKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)]
	mrs_s	\reg1, SYS_APDBKEYLO_EL1
	mrs_s	\reg2, SYS_APDBKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)]
	mrs_s	\reg1, SYS_APGAKEYLO_EL1
	mrs_s	\reg2, SYS_APGAKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)]
.endm

/*
 * Load all five pointer authentication key pairs (APIA, APIB, APDA,
 * APDB, APGA) from a kvm_cpu_context and write them to the CPU's
 * system registers. No context synchronization is performed here;
 * callers needing the new keys to take immediate effect must issue
 * an isb themselves (as ptrauth_switch_to_hyp does).
 *
 * base:	address of the CPU_APIAKEYLO_EL1 slot of the context
 *		(i.e. ctxt + CPU_APIAKEYLO_EL1)
 * reg1, reg2:	scratch GPRs, clobbered
 */
.macro	ptrauth_restore_state base, reg1, reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)]
	msr_s	SYS_APIAKEYLO_EL1, \reg1
	msr_s	SYS_APIAKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)]
	msr_s	SYS_APIBKEYLO_EL1, \reg1
	msr_s	SYS_APIBKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)]
	msr_s	SYS_APDAKEYLO_EL1, \reg1
	msr_s	SYS_APDAKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)]
	msr_s	SYS_APDBKEYLO_EL1, \reg1
	msr_s	SYS_APDBKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)]
	msr_s	SYS_APGAKEYLO_EL1, \reg1
	msr_s	SYS_APGAKEYHI_EL1, \reg2
.endm

/*
 * Both the ptrauth_switch_to_guest and ptrauth_switch_to_hyp macros
 * check for the presence of ARM64_HAS_ADDRESS_AUTH, which is defined as
 * (ARM64_HAS_ADDRESS_AUTH_ARCH || ARM64_HAS_ADDRESS_AUTH_IMP_DEF), and
 * only then proceed with the save/restore of the pointer authentication
 * key registers if enabled for the guest.
 */
/*
 * Install the guest's ptrauth keys on guest entry.
 *
 * Skipped entirely (branch patched in via alternatives) on CPUs
 * without address authentication, and skipped at runtime unless the
 * guest has ptrauth enabled (HCR_EL2.API or HCR_EL2.APK set).
 *
 * g_ctxt:		guest kvm_cpu_context
 * reg1, reg2, reg3:	scratch GPRs, clobbered
 */
.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	.L__skip_switch\@
alternative_else_nop_endif
	mrs	\reg1, hcr_el2
	and	\reg1, \reg1, #(HCR_API | HCR_APK)
	cbz	\reg1, .L__skip_switch\@	/* ptrauth not in use by this guest */
	add	\reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state	\reg1, \reg2, \reg3
.L__skip_switch\@:
.endm

/*
 * On guest exit, save the guest's ptrauth keys and reinstall the
 * host's.
 *
 * Skipped entirely (branch patched in via alternatives) on CPUs
 * without address authentication, and skipped at runtime unless the
 * guest has ptrauth enabled (HCR_EL2.API or HCR_EL2.APK set).
 *
 * g_ctxt:		guest kvm_cpu_context (keys saved here)
 * h_ctxt:		host kvm_cpu_context (keys restored from here)
 * reg1, reg2, reg3:	scratch GPRs, clobbered
 */
.macro ptrauth_switch_to_hyp g_ctxt, h_ctxt, reg1, reg2, reg3
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	.L__skip_switch\@
alternative_else_nop_endif
	mrs	\reg1, hcr_el2
	and	\reg1, \reg1, #(HCR_API | HCR_APK)
	cbz	\reg1, .L__skip_switch\@	/* ptrauth not in use by this guest */
	add	\reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
	ptrauth_save_state	\reg1, \reg2, \reg3
	add	\reg1, \h_ctxt, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state	\reg1, \reg2, \reg3
	isb	/* synchronize so the host keys take effect before continuing */
.L__skip_switch\@:
.endm

#else /* !CONFIG_ARM64_PTR_AUTH */
/* Without CONFIG_ARM64_PTR_AUTH the switch macros expand to nothing. */
.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
.endm
.macro ptrauth_switch_to_hyp g_ctxt, h_ctxt, reg1, reg2, reg3
.endm
#endif /* CONFIG_ARM64_PTR_AUTH */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_KVM_PTRAUTH_H */