/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

/*
 * If ASLR is configured the identity mapped code may be mapped at two
 * locations: the identity location, where virtual and physical addresses
 * are the same, and the runtime selected location to which OP-TEE has
 * been relocated.  Code executing at a location different from the
 * runtime selected location works OK as long as it doesn't do relative
 * addressing outside the identity mapped range. To allow relative
 * addressing this macro jumps to the runtime selected location.
 *
 * Note that the identity mapped range and the runtime selected range can
 * only differ if ASLR is configured.
 */
	.macro readjust_pc
#ifdef CFG_CORE_ASLR
	adr	x16, 1111f
	ldr	x17, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	add	x16, x16, x17
	br	x16
1111:
BTI(	bti	j)
#endif
	.endm

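/*
 * Each vector_*_entry function below is entered from the secure monitor
 * at EL3 and hands control back by issuing an SMC with a matching
 * TEESMC_OPTEED_RETURN_* code in x0 and, where applicable, the handler's
 * return value in x1.  The monitor is not expected to return from that
 * SMC; if it ever does, panic_at_smc_return panics.
 */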
LOCAL_FUNC vector_std_smc_entry , : , .identity_map
	readjust_pc
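	/*
	 * The SMC arguments a0-a7 are still in x0-x7 (readjust_pc only
	 * clobbers x16 and x17) and are passed on as the arguments of
	 * thread_handle_std_smc().
	 */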
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	mov	w1, w0
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , : , .identity_map
	readjust_pc
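	/*
	 * Store the SMC arguments x0-x7 in a struct thread_smc_args on
	 * the stack and pass its address to thread_handle_fast_smc(),
	 * which updates the struct in place with the values to return.
	 */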
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
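	/*
	 * Load the results into x1-x8 rather than x0-x7, since x0 must
	 * carry the TEESMC_OPTEED_RETURN_CALL_DONE code of the SMC below.
	 */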
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , : , .identity_map
	readjust_pc
	/* Secure Monitor received a FIQ and passed control to us. */
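	/*
	 * Check the stack canaries and then let the native interrupt
	 * handler deal with the interrupt.
	 */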
	bl	thread_check_canaries
	bl	interrupt_main_handler
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_fiq_entry

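/*
 * Note that unlike the other entries there's no readjust_pc here: this
 * entry is reached on a newly powered-on CPU before cpu_on_handler()
 * has set up the MMU and the runtime mapping, so it has to keep
 * executing via the identity map.
 */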
LOCAL_FUNC vector_cpu_on_entry , : , .identity_map
	bl	cpu_on_handler
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , : , .identity_map
	readjust_pc
	bl	thread_cpu_off_handler
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , : , .identity_map
	readjust_pc
	bl	thread_cpu_suspend_handler
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , : , .identity_map
	readjust_pc
	bl	thread_cpu_resume_handler
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , : , .identity_map
	readjust_pc
	bl	thread_system_off_handler
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , : , .identity_map
	readjust_pc
	bl	thread_system_reset_handler
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF.
 */
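/*
 * For reference, the opteed SPD in ARM-TF consumes this table through a
 * struct of 32-bit instruction slots along these lines (a sketch; exact
 * names and types may vary between ARM-TF versions):
 *
 *	struct optee_vectors {
 *		optee_vector_isn_t std_smc_entry;
 *		optee_vector_isn_t fast_smc_entry;
 *		optee_vector_isn_t cpu_on_entry;
 *		optee_vector_isn_t cpu_off_entry;
 *		optee_vector_isn_t cpu_resume_entry;
 *		optee_vector_isn_t cpu_suspend_entry;
 *		optee_vector_isn_t fiq_entry;
 *		optee_vector_isn_t system_off_entry;
 *		optee_vector_isn_t system_reset_entry;
 *	};
 *
 * so each entry below must stay a single branch instruction and the
 * order must not change.
 */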
FUNC thread_vector_table , : , .identity_map, , nobti
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
DECLARE_KEEP_PAGER thread_vector_table

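/*
 * Entry point of a thread allocated to handle a standard SMC.
 * __thread_std_smc_entry() carries out the request; once it returns the
 * thread is freed and its return value is handed back to normal world
 * in x1 of the final SMC.
 */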
FUNC thread_std_smc_entry , :
	bl	__thread_std_smc_entry
	mov	w20, w0	/* Save return value for later */

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

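	/*
	 * Free the thread while executing on the temporary stack; the
	 * thread's own stack belongs to the context being freed.
	 */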
	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	w1, w20
	mov	x2, #0
	mov	x3, #0
	mov	x4, #0
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC thread_std_smc_entry

/* void thread_rpc_spsr(uint32_t rv[THREAD_RPC_NUM_ARGS], uint64_t spsr) */
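/*
 * Suspends the current thread and returns to normal world with an RPC
 * request: rv[0..2] are passed to normal world in x1-x3 of the SMC
 * below, and when normal world resumes the thread the RPC results in
 * w0-w3 are copied back into rv[] at .thread_rpc_return.  @spsr is the
 * CPSR to restore when the thread is resumed; it's handed to
 * thread_state_suspend() in x1.
 */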
FUNC thread_rpc_spsr , :
	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr		/* Save pointer to rv[] */
	push	x1, x30		/* Save spsr and return address */
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]	/* Reload the saved return address */
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0		/* Save pointer to ctx regs */

#if defined(CFG_CORE_PAUTH)
	/* Save APIAKEY */
	read_apiakeyhi  x1
	read_apiakeylo  x2
	store_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
#endif

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above, x1 = spsr */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */
	/*
	 * We need to read rv[] early, because thread_state_suspend()
	 * can invoke virt_unset_guest() which will unmap the pages
	 * where rv[] resides.
	 */
	load_wregs x20, 0, 21, 23	/* Load rv[] into w21-w23 */

	adr	x2, .thread_rpc_return	/* PC to resume the thread at */
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x21		/* rv[0] */
	mov	x2, x22		/* rv[1] */
	mov	x3, x23		/* rv[2] */
	smc	#0
	/* SMC should not return */
	panic_at_smc_return

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * saved at THREAD_CTX_REGS_SP above.
	 *
	 * Execution jumps here from thread_resume() when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 3	/* Store w0-w3 into rv[] */
	ret
END_FUNC thread_rpc_spsr
DECLARE_KEEP_PAGER thread_rpc_spsr

/*
 * void thread_foreign_intr_exit(uint32_t thread_index)
 *
 * This function is jumped to at the end of macro foreign_intr_handler().
 * The current thread as indicated by @thread_index has just been
 * suspended.  The job here is just to inform normal world of the thread
 * ID to resume when returning.
 */
FUNC thread_foreign_intr_exit , :
	mov	w4, w0		/* Thread index to resume */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC thread_foreign_intr_exit

BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)