xref: /optee_os/core/arch/arm/kernel/thread_spmc_a64.S (revision 539836f97e405cff8984ac722bb12bcc30e4c9e3)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2020, Linaro Limited
4 * Copyright (c) 2019-2021, Arm Limited
5 */
6
7#include <platform_config.h>
8
9#include <arm64_macros.S>
10#include <arm.h>
11#include <asm.S>
12#include <ffa.h>
13#include <generated/asm-defines.h>
14#include <kernel/thread.h>
15#include <optee_ffa.h>
16
17#ifdef CFG_SECURE_PARTITION
/*
 * Report a foreign interrupt to the normal world with FFA_INTERRUPT,
 * then enter the shared SMC message loop in ffa_msg_send_direct_resp.
 *
 * In: x1 = endpoint/vCPU IDs, must be set up by the caller before
 * branching here (see thread_foreign_intr_exit).  All other parameter
 * registers are cleared as mandated (MBZ) by the FF-A ABI.
 */
LOCAL_FUNC thread_ffa_interrupt , :
	mov_imm	x0, FFA_INTERRUPT		/* FID */
	/* X1: Endpoint/vCPU IDs is set by caller */
	mov	x2, #FFA_PARAM_MBZ		/* Param MBZ */
	mov	x3, #FFA_PARAM_MBZ		/* Param MBZ */
	mov	x4, #FFA_PARAM_MBZ		/* Param MBZ */
	mov	x5, #FFA_PARAM_MBZ		/* Param MBZ */
	mov	x6, #FFA_PARAM_MBZ		/* Param MBZ */
	mov	x7, #FFA_PARAM_MBZ		/* Param MBZ */
	b	.ffa_msg_loop	/* Issue the SMC and service replies */
END_FUNC thread_ffa_interrupt
29#endif /* CFG_SECURE_PARTITION */
30
/*
 * Tell the SPMC that this vCPU is idle and ready for the next message:
 * issue FFA_MSG_WAIT and enter the shared SMC message loop in
 * ffa_msg_send_direct_resp.  All parameter registers are cleared as
 * mandated (MBZ) by the FF-A ABI.
 */
FUNC thread_ffa_msg_wait , :
	mov_imm	x0, FFA_MSG_WAIT		/* FID */
	mov	x1, #FFA_TARGET_INFO_MBZ	/* Target info MBZ */
	mov	x2, #FFA_PARAM_MBZ		/* Param MBZ */
	mov	x3, #FFA_PARAM_MBZ		/* Param MBZ */
	mov	x4, #FFA_PARAM_MBZ		/* Param MBZ */
	mov	x5, #FFA_PARAM_MBZ		/* Param MBZ */
	mov	x6, #FFA_PARAM_MBZ		/* Param MBZ */
	mov	x7, #FFA_PARAM_MBZ		/* Param MBZ */
	b	.ffa_msg_loop	/* Issue the SMC and service replies */
END_FUNC thread_ffa_msg_wait
42
/*
	 * Send an FF-A direct message response with the arguments
	 * provided by the caller, then loop forever receiving and
	 * dispatching incoming messages.
	 *
	 * In: x1 = target info, x3-x7 = response payload (caller set)
	 * Never returns; .ffa_msg_loop below is also the entry point
	 * used by thread_ffa_msg_wait and thread_ffa_interrupt.
	 */
LOCAL_FUNC ffa_msg_send_direct_resp , :
	/*
	 * Fetch the direct response FID recorded in this core's
	 * struct thread_core_local.  While SPSel = 1 the active stack
	 * pointer addresses that structure (NOTE(review): relies on the
	 * OP-TEE convention that SP_EL1 points at thread_core_local --
	 * confirm against thread handling code).
	 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_DIRECT_RESP_FID]
	msr	spsel, #0
	mov	x2, #FFA_PARAM_MBZ			/* RES MBZ */
	/* x8-x17 are SBZ */
	mov	x8, #FFA_PARAM_MBZ
	mov	x9, #FFA_PARAM_MBZ
	mov	x10, #FFA_PARAM_MBZ
	mov	x11, #FFA_PARAM_MBZ
	mov	x12, #FFA_PARAM_MBZ
	mov	x13, #FFA_PARAM_MBZ
	mov	x14, #FFA_PARAM_MBZ
	mov	x15, #FFA_PARAM_MBZ
	mov	x16, #FFA_PARAM_MBZ
	mov	x17, #FFA_PARAM_MBZ

.ffa_msg_loop:
	/*
	 * Native interrupts unmasked while invoking SMC with caller
	 * provided parameters.
	 */
	msr	daifclr, #DAIFBIT_NATIVE_INTR
	smc	#0
	msr	daifset, #DAIFBIT_NATIVE_INTR

	/* Store the parameters as struct thread_smc_1_2_regs on stack */
	sub	sp, sp, #THREAD_SMC_1_2_REGS_SIZE
	store_xregs sp, 0, 0, 17
	mov	x0, sp

	/* parse and handle message */
	bl	thread_spmc_msg_recv

	/*
	 * Load struct thread_smc_1_2_regs (x0-x17), possibly updated by
	 * thread_spmc_msg_recv(), back into registers for the next SMC.
	 */
	load_xregs sp, 0, 0, 17
	add	sp, sp, #THREAD_SMC_1_2_REGS_SIZE
	b	.ffa_msg_loop
END_FUNC ffa_msg_send_direct_resp
83
/*
 * Entry point for a standard (yielding) call thread.
 *
 * In: w0 = FF-A target info of the incoming direct request; remaining
 * arguments are forwarded to __thread_std_smc_entry(), which does the
 * actual work and returns the value to hand back to the normal world.
 * On completion the thread state is freed and a direct response is
 * sent; this function never returns.
 */
FUNC thread_std_smc_entry , :
	/*
	 * Rotating w0 by 16 bits exchanges its two 16-bit halves, i.e.
	 * swaps the source and destination endpoint IDs so the response
	 * goes back to the original sender.
	 */
	ror	w19, w0, #16 /* Save target info with src and dst swapped */
	bl	__thread_std_smc_entry
	mov	w20, w0	/* Save return value */

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	/* Build the direct response from the values saved in w19/w20 */
	mov	w1, w19				/* Target info */
	mov	w3, w20				/* Return value */
	mov	x4, #FFA_PARAM_MBZ		/* Unused parameter */
	mov	x5, #FFA_PARAM_MBZ		/* Unused parameter */
	mov	x6, #FFA_PARAM_MBZ		/* Unused parameter */
	mov	x7, #FFA_PARAM_MBZ		/* Unused parameter */
	b	ffa_msg_send_direct_resp
END_FUNC thread_std_smc_entry
104
105#ifdef CFG_SECURE_PARTITION
/*
 * void spmc_sp_thread_entry(args)
 *
 * Entry point for a thread handling a message to a secure partition.
 * The x0-x17 parameter registers are passed to spmc_sp_msg_handler()
 * as a struct on the thread stack; its result registers are carried
 * over to the temporary stack, the thread state is freed and the
 * result is delivered through the shared SMC message loop.  Never
 * returns.
 */
FUNC spmc_sp_thread_entry , :
	/* Store the parameter registers x0-x17 on the thread stack */
	sub     sp, sp, #THREAD_SMC_1_2_REGS_SIZE
	store_xregs sp, 0, 0, 17
	mov     x0, sp
	mov     x1, #0 /* Pass NULL pointer for caller_sp, coming from NW */
	bl      spmc_sp_msg_handler

	/* Mask all maskable exceptions before switching to temporary stack */
	msr     daifset, #DAIFBIT_ALL
	bl      thread_get_tmp_sp	/* x0 = temporary (per-core) stack */

	/*
	 * Copy the result registers x0-x17 from the thread stack into the
	 * tmp stack.  The values are staged in x2-x19 so that x0 (the tmp
	 * stack pointer) survives until sp has been switched.
	 */
	load_xregs sp, 0, 2, 19
	mov     sp, x0
	sub     sp, sp, #THREAD_SMC_1_2_REGS_SIZE
	store_xregs sp, 0, 2, 19

	bl      thread_state_free

	/* Load the FF-A result before the SMC instruction. */
	load_xregs sp, 0, 0, 17
	add	sp, sp, #THREAD_SMC_1_2_REGS_SIZE
	/* Enter the shared SMC message loop */
	b .ffa_msg_loop
END_FUNC spmc_sp_thread_entry
135#endif
136
/*
 * void thread_rpc_spsr(uint32_t rv[THREAD_RPC_NUM_ARGS], uint64_t spsr)
 *
 * Suspends the current thread and delivers an RPC request (rv[]) to the
 * normal world via a direct response.  When the normal world resumes the
 * thread, execution continues at .thread_rpc_return below and the four
 * returned values are written back into rv[] before returning to the
 * caller.
 *
 * In: x0 = rv[] (RPC argument/result array), x1 = spsr to be restored
 * on resume.
 */
FUNC thread_rpc_spsr , :
	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	/* Keep rv pointer and (spsr, lr) on the stack across the calls below */
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs	/* x0 = this thread's thread_ctx_regs */
	ldr	x30, [sp, #8]	/* Reload lr saved by "push x1, x30" above */
	/* Save the callee-saved registers and lr for the resume path */
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0		/* x19 = thread_ctx_regs, survives calls */

#if defined(CFG_CORE_PAUTH)
	/* Save APIAKEY */
	read_apiakeyhi  x1
	read_apiakeylo  x2
	store_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
#endif

	bl	thread_get_tmp_sp	/* x0 = temporary (per-core) stack */
	pop	x1, xzr		/* Match "push x1, x30" above; x1 = spsr */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]	/* sp to restore on resume */
	ldr	x20, [sp]	/* Get pointer to rpc_arg[] */
	mov	sp, x0		/* Switch to tmp stack */
	/*
	 * We need to read rpc_arg[] early, because thread_state_suspend
	 * can invoke virt_unset_guest() which will unmap pages,
	 * where rpc_arg[] resides
	 */
	load_wregs x20, 0, 21, 24	/* Load rpc_arg[] into w21-w24 */

	/* Suspend; the thread will be resumed at .thread_rpc_return */
	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	/* Build the direct response carrying the RPC arguments */
	mov	w1, w21
	mov	w3, #0		/* Error code = 0 */
	mov	w4, w22
	mov	w5, w23
	mov	w6, w24
	mov	w7, w0		/* Supply thread index */
	b	ffa_msg_send_direct_resp

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX_REGS_SP above.
	 *
	 * Execution resumes here when the RPC has returned. The IRQ and
	 * FIQ bits are restored to what they were when this function was
	 * originally entered. w0-w3 hold the values supplied to
	 * thread_resume_from_rpc() in a0-a3.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 3	/* Store w0-w3 into rv[] */
	ret
END_FUNC thread_rpc_spsr
193
194/*
195 * void thread_foreign_intr_exit(uint32_t thread_index,
196 *				 uint32_t rpc_target_info, uint32_t flags);
197 *
198 * This function is jumped to at the end of macro foreign_intr_handler().
199 * The current thread as indicated by @thread_index has just been
200 * suspended.  The job here is just to inform normal world the thread id to
201 * resume when returning.
 * If the active FF-A endpoint is OP-TEE (or a TA) then this function sends an
 * OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT message to the normal world via the
 * FFA_MSG_SEND_DIRECT_RESP interface. This is handled by the OP-TEE
 * driver in Linux so it can schedule a task to the thread.
206 * If the active endpoint is an SP the function sends an FFA_INTERRUPT. This is
207 * handled by the FF-A driver and after taking care of the NWd interrupts it
208 * returns via an FFA_RUN call.
209 * The active endpoint is determined by checking the THREAD_FLAGS_FFA_ONLY flag
210 * in threads[w0].flags. This is only set for the thread which handles SPs.
211 */
FUNC thread_foreign_intr_exit , :
#ifdef CFG_SECURE_PARTITION
	/*
	 * THREAD_FLAGS_FFA_ONLY set in w2 (flags) means the active
	 * endpoint is an SP: report the interrupt with FFA_INTERRUPT.
	 * x1 already holds rpc_target_info, which thread_ffa_interrupt
	 * expects as its endpoint/vCPU IDs argument.
	 */
	and     w2, w2, #THREAD_FLAGS_FFA_ONLY
	cbnz	w2, thread_ffa_interrupt
#endif /* CFG_SECURE_PARTITION */
	/* Otherwise send a yielding-call interrupt RPC to the OP-TEE driver */
	mov	w3, #FFA_PARAM_MBZ
	mov	w4, #OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT
	mov	x5, #FFA_PARAM_MBZ
	mov	w6, #FFA_PARAM_MBZ
	mov	w7, w0	/* Thread index, so normal world can resume it */
	b	ffa_msg_send_direct_resp
END_FUNC thread_foreign_intr_exit
224
225BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
226