xref: /optee_os/core/arch/riscv/kernel/thread_rv.S (revision cb5f271c1eaed4c18fd26873f152afc0590b0413)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright 2022-2023 NXP
4 */
5
6#include <asm.S>
7#include <generated/asm-defines.h>
8#include <keep.h>
9#include <kernel/thread.h>
10#include <kernel/thread_private.h>
11#include <mm/core_mmu.h>
12#include <riscv.h>
13#include <riscv_macros.S>
14
/*
 * get_thread_ctx res, tmp0
 *
 * Computes the address of the current thread's struct thread_ctx, i.e.
 * &threads[curr_thread], without using a multiply instruction.
 *
 * \res  - output register, receives the thread context pointer
 * \tmp0 - scratch register, clobbered
 */
.macro get_thread_ctx res, tmp0
	/* tmp0 = index of the thread running on this hart */
	lw	\tmp0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
	/* res = &threads[0], then advance THREAD_CTX_SIZE bytes per index */
	la	\res, threads
1:
	beqz	\tmp0, 2f
	addi	\res, \res, THREAD_CTX_SIZE
	addi	\tmp0, \tmp0, -1
	bnez	\tmp0, 1b
2:
.endm
25
/*
 * save_regs mode
 *
 * Builds a struct thread_trap_regs frame on the current stack and saves
 * the trap-time state needed by thread_trap_handler(). On exit a0/a1/a2
 * are loaded with the cause/epc/frame-pointer arguments; the caller only
 * has to set a3 (user flag) before calling thread_trap_handler().
 *
 * \mode - TRAP_MODE_USER or TRAP_MODE_KERNEL
 */
.macro save_regs, mode
	addi	sp, sp, -THREAD_TRAP_REGS_SIZE
.if \mode == TRAP_MODE_USER

	/* Save user thread pointer and load kernel thread pointer */
	store_xregs sp, THREAD_TRAP_REG_TP, REG_TP
	/*
	 * The kernel tp was stashed just below the kernel stack pointer
	 * (see restore_regs/__thread_enter_user_mode storing at REGOFF(-1)),
	 * which is offset THREAD_TRAP_REGS_SIZE - RISCV_XLEN_BYTES from the
	 * sp decremented above.
	 */
	load_xregs sp, (THREAD_TRAP_REGS_SIZE - RISCV_XLEN_BYTES), REG_TP

	store_xregs sp, THREAD_TRAP_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that, in case of a recursive
	 * exception, thread_trap_vect() knows that it was taken from kernel
	 * mode. gp receives the old scratch value, i.e. the user sp that
	 * thread_trap_vect() swapped into xSCRATCH on entry.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	store_xregs sp, THREAD_TRAP_REG_SP, REG_GP
	/* Reload the kernel global pointer; norelax so gp isn't set via gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
.endif
	/* Save all caller-saved (volatile) registers into the frame */
	store_xregs sp, THREAD_TRAP_REG_T3, REG_T3, REG_T6
	store_xregs sp, THREAD_TRAP_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_TRAP_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_TRAP_REG_RA, REG_RA

	/* Save trap-time xSTATUS so restore_regs can put it back */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_TRAP_REG_STATUS, REG_T0

	csrr	a0, CSR_XCAUSE
	csrr	a1, CSR_XEPC

	store_xregs sp, THREAD_TRAP_REG_EPC, REG_A1

	mv	a2, sp

	/* a0 = cause
	 * a1 = epc
	 * a2 = sp
	 * a3 = user
	 * thread_trap_handler(cause, epc, sp, user)
	 */
.endm
69
/*
 * restore_regs mode
 *
 * Inverse of save_regs: restores the trap-time state from the struct
 * thread_trap_regs frame on the stack and pops the frame, ready for XRET.
 *
 * \mode - TRAP_MODE_USER or TRAP_MODE_KERNEL
 */
.macro restore_regs, mode
	/* Restore the exception return address and status CSRs first */
	load_xregs sp, THREAD_TRAP_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0

	load_xregs sp, THREAD_TRAP_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0

	load_xregs sp, THREAD_TRAP_REG_RA, REG_RA
	load_xregs sp, THREAD_TRAP_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_TRAP_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_TRAP_REG_T3, REG_T3, REG_T6

.if \mode == TRAP_MODE_USER
	/* gp = kernel sp value once the trap frame is popped */
	addi	gp, sp, THREAD_TRAP_REGS_SIZE

	/* Stash kernel tp just below the kernel sp for the next user trap */
	store_xregs gp, REGOFF(-1), REG_TP
	/*
	 * Nonzero xSCRATCH (the kernel sp) tells thread_trap_vect() that
	 * the next trap comes from user mode.
	 */
	csrw	CSR_XSCRATCH, gp

	/* Restore the user tp/gp/sp saved by save_regs */
	load_xregs sp, THREAD_TRAP_REG_TP, REG_TP
	load_xregs sp, THREAD_TRAP_REG_GP, REG_GP
	load_xregs sp, THREAD_TRAP_REG_SP, REG_SP

.else
	/* Kernel trap: simply pop the trap frame */
	addi	sp, sp, THREAD_TRAP_REGS_SIZE
.endif
.endm
96
/* size_t __get_core_pos(void); */
FUNC __get_core_pos , : , .identity_map
	/* Return the hart ID cached in this hart's struct thread_core_local */
	lw	a0, THREAD_CORE_LOCAL_HART_ID(tp)
	ret
END_FUNC __get_core_pos
102
/*
 * Common trap entry point (installed as the trap vector).
 *
 * Convention: xSCRATCH holds the kernel sp while executing in user mode
 * and 0 while executing in kernel mode, so swapping sp with xSCRATCH
 * tells the two cases apart.
 */
FUNC thread_trap_vect , :
	/* sp <-> xSCRATCH: sp is now 0 (kernel trap) or the kernel sp (user) */
	csrrw	sp, CSR_XSCRATCH, sp
	bnez	sp, 0f
	/* Trap from kernel: undo the swap, keep using the current stack */
	csrrw	sp, CSR_XSCRATCH, sp
	j	trap_from_kernel
0:
	/* Trap from user: sp = kernel sp, xSCRATCH now holds the user sp */
	j	trap_from_user
thread_trap_vect_end:
END_FUNC thread_trap_vect
112
/* Trap taken while executing in kernel mode */
LOCAL_FUNC trap_from_kernel, :
	save_regs TRAP_MODE_KERNEL
	/* a0/a1/a2 (cause/epc/frame) set by save_regs; a3 = user = false */
	li	a3, 0
	jal	thread_trap_handler
	restore_regs TRAP_MODE_KERNEL
	XRET
END_FUNC trap_from_kernel
120
/* Trap taken while executing in user mode */
LOCAL_FUNC trap_from_user, :
	save_regs TRAP_MODE_USER
	/* a0/a1/a2 (cause/epc/frame) set by save_regs; a3 = user = true */
	li	a3, 1
	jal	thread_trap_handler
	restore_regs TRAP_MODE_USER
	XRET
END_FUNC trap_from_user
128
/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 *
 * Entered with sp pointing at the struct thread_user_mode_rec created by
 * __thread_enter_user_mode(); unwinds it and returns to that function's
 * caller with a0 (= ret) as return value.
 */
FUNC thread_unwind_user_mode , :

	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A3, REG_A5
	/* a3 = ctx regs ptr, a4 = &exit_status0, a5 = &exit_status1 */
	sw	a1, (a4)
	sw	a2, (a5)

	/* Save user callee regs */
	store_xregs a3, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs a3, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs a3, THREAD_CTX_REG_SP, REG_SP, REG_TP

	/* Restore kernel callee regs */
	mv	a1, sp

	load_xregs a1, THREAD_USER_MODE_REC_X1, REG_RA, REG_TP
	load_xregs a1, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	load_xregs a1, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11

	/* Pop the struct thread_user_mode_rec */
	add	sp, sp, THREAD_USER_MODE_REC_SIZE

	/*
	 * Zeroize xSCRATCH to indicate to thread_trap_vect()
	 * that we are executing in kernel.
	 */
	csrw	CSR_XSCRATCH, zero

	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
164
/*
 * void thread_exit_user_mode(unsigned long a0, unsigned long a1,
 *			       unsigned long a2, unsigned long a3,
 *			       unsigned long sp, unsigned long pc,
 *			       unsigned long status);
 *
 * Switches to the supplied kernel stack and status, then returns through
 * pc (normally thread_unwind_user_mode()) with a0..a3 untouched.
 */
FUNC thread_exit_user_mode , :
	/* Return address = pc argument */
	mv	ra, a5

	/*
	 * Install the kernel stack pointer before writing xSTATUS: the
	 * status value may re-enable native interrupts, and the trap path
	 * expects a valid kernel sp — NOTE(review): confirm against the
	 * trap-enable semantics of the status value passed in.
	 */
	mv	sp, a4
	csrw	CSR_XSTATUS, a6

	ret
END_FUNC thread_exit_user_mode
182
/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * Saves the kernel callee context in a struct thread_user_mode_rec on the
 * kernel stack, then loads the user context from *regs and XRETs into
 * user mode. Control returns via thread_unwind_user_mode().
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	addi	sp, sp, -THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A0, REG_A2
	store_xregs sp, THREAD_USER_MODE_REC_X1, REG_RA, REG_TP
	store_xregs sp, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	store_xregs sp, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11

	/*
	 * Save the kernel stack pointer in the thread context
	 */

	/* Get pointer to current thread context */
	get_thread_ctx s0, s1

	/*
	 * Save kernel stack pointer to ensure that
	 * thread_exit_user_mode() uses correct stack pointer.
	 */

	store_xregs s0, THREAD_CTX_KERN_SP, REG_SP
	/*
	 * Save kernel stack pointer in xSCRATCH to ensure that
	 * thread_trap_vect() uses correct stack pointer.
	 */
	csrw	CSR_XSCRATCH, sp

	/*
	 * Save the kernel thread pointer just below the kernel stack
	 * pointer to ensure that thread_trap_vect() uses the correct tp
	 * when traps come from user (see save_regs).
	 */
	store_xregs sp, REGOFF(-1), REG_TP

	/* Set user status (s0 is scratch here, reloaded below) */
	load_xregs a0, THREAD_CTX_REG_STATUS, REG_S0
	csrw	CSR_XSTATUS, s0

	/*
	 * Save the values for a1 and a2 in struct thread_core_local to be
	 * restored later just before the xRET.
	 */
	store_xregs tp, THREAD_CORE_LOCAL_X10, REG_A1, REG_A2

	/* Load the rest of the general purpose registers */
	load_xregs a0, THREAD_CTX_REG_RA, REG_RA, REG_TP
	load_xregs a0, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs a0, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs a0, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* a0 itself is overwritten here, so this group must come last */
	load_xregs a0, THREAD_CTX_REG_A0, REG_A0, REG_A7

	/* Set exception program counter to the user entry (saved ra) */
	csrw		CSR_XEPC, ra

	/* Jump into user mode */
	XRET
END_FUNC __thread_enter_user_mode
248
/*
 * Implement based on the transport method used to communicate between
 * untrusted domain and trusted domain. It could be an SBI/ECALL-based to
 * a security monitor running in M-Mode and panic or messaging-based across
 * domains where we return to a messaging callback which parses and handles
 * messages.
 */
LOCAL_FUNC thread_return_from_nsec_call , :
	/* TODO: not implemented yet — spin forever as a placeholder */
	j	.
END_FUNC thread_return_from_nsec_call
260
261
/*
 * Entry for a standard call: runs the C handler, frees the thread state
 * and hands the result back to the untrusted domain. Does not return.
 */
FUNC thread_std_smc_entry , :
	jal	__thread_std_smc_entry

	/* Save return value (s0 survives the calls below) */
	mv	s0, a0

	/* Disable all interrupts */
	csrc	CSR_XSTATUS, CSR_XSTATUS_IE

	/* Switch to temporary stack */
	jal	thread_get_tmp_sp
	mv	sp, a0

	/*
	 * We are returning from thread_alloc_and_run()
	 * set thread state as free
	 */
	jal	thread_state_free

	/* Restore __thread_std_smc_entry() return value */
	mv	a1, s0
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a0, TEESMC_OPTEED_RETURN_CALL_DONE

	/* Return to untrusted domain */
	jal	thread_return_from_nsec_call
END_FUNC thread_std_smc_entry
291
/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	/*
	 * Restore all registers assuming that GP
	 * and TP were not changed.
	 */
	load_xregs a0, THREAD_CTX_REG_RA, REG_RA, REG_SP
	load_xregs a0, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs a0, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs a0, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Loads a0 itself, so this must be the last read of the regs ptr */
	load_xregs a0, THREAD_CTX_REG_A0, REG_A0, REG_A7
	/*
	 * Park the new a0/a1 in the core-local x10/x11 slots —
	 * NOTE(review): presumably restored by the return path that runs
	 * after this ret (not visible in this file); verify against
	 * thread_trap_handler/return code.
	 */
	store_xregs tp, THREAD_CORE_LOCAL_X10, REG_A0, REG_A1
	/* Jump to the saved ra (e.g. .thread_rpc_return) */
	ret
END_FUNC thread_resume
307
/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
/*
 * Suspends the current thread and returns to the untrusted domain to
 * service an RPC described by rv[0..2]; resumes at .thread_rpc_return
 * (via thread_resume()) and copies the RPC result a0..a3 back into rv[].
 */
FUNC thread_rpc , :
	/*
	 * Use stack for temporary storage. Three XLEN-wide slots are
	 * stored below (REGOFF(0..2)), so reserve REGOFF(4) bytes: the
	 * previous fixed 16-byte frame overflowed on RV64 where
	 * REGOFF(2) == 16, and REGOFF(4) keeps sp 16-byte aligned on
	 * both RV32 (16) and RV64 (32).
	 */
	addi	sp, sp, -REGOFF(4)

	/* Read xSTATUS */
	csrr	a1, CSR_XSTATUS

	/* Save pointer to rv[], xSTATUS and return address */
	STR	a0, REGOFF(0)(sp)
	STR	a1, REGOFF(1)(sp)
	STR	ra, REGOFF(2)(sp)

	/* Save thread state (ra, sp and callee-saved registers) */
	jal	thread_get_ctx_regs
	store_xregs a0, THREAD_CTX_REG_RA, REG_RA, REG_SP
	store_xregs a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs a0, THREAD_CTX_REG_S2, REG_S2, REG_S11

	/* Get to tmp stack */
	jal	thread_get_tmp_sp

	/* Get pointer to rv */
	LDR	s0, REGOFF(0)(sp)

	/* xSTATUS to restore */
	LDR	a1, REGOFF(1)(sp)
	/* Switch to tmp stack */
	mv	sp, a0

	/* Early load rv[] into s1-s3 before the thread is suspended */
	lw	s1, 0(s0)
	lw	s2, 4(s0)
	lw	s3, 8(s0)

	li	a0, THREAD_FLAGS_COPY_ARGS_ON_RETURN
	la	a2, .thread_rpc_return
	jal	thread_state_suspend

	mv	a4, a0	/* thread index */
	mv	a1, s1	/* rv[0] */
	mv	a2, s2	/* rv[1] */
	mv	a3, s3	/* rv[2] */
	li	a0, TEESMC_OPTEED_RETURN_CALL_DONE

	/* Return to untrusted domain */
	jal	thread_return_from_nsec_call
.thread_rpc_return:
	/*
	 * Jumps here from thread_resume() above when RPC has returned.
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 */

	/* Get pointer to rv[] */
	LDR	a4, REGOFF(0)(sp)

	/* Store a0-a3 into rv[] */
	sw	a0, 0(a4)
	sw	a1, 4(a4)
	sw	a2, 8(a4)
	sw	a3, 12(a4)

	/* Pop return address from stack */
	LDR	ra, REGOFF(2)(sp)

	/* Pop the temporary frame (must match the allocation above) */
	addi	sp, sp, REGOFF(4)
	ret
END_FUNC thread_rpc
377