/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright 2022-2023 NXP
 */

#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <riscv.h>
#include <riscv_macros.S>

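/*
 * Resolve the current thread's context: roughly "\res = &threads[curr_thread]"
 * in C, computed by stepping \res forward THREAD_CTX_SIZE bytes curr_thread
 * times instead of using a multiply. \tmp0 is clobbered.
 */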
.macro get_thread_ctx res, tmp0
	lw	\tmp0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
	la	\res, threads
1:
	beqz	\tmp0, 2f
	addi	\res, \res, THREAD_CTX_SIZE
	addi	\tmp0, \tmp0, -1
	bnez	\tmp0, 1b
2:
.endm

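/*
 * Allocate a trap register frame (THREAD_TRAP_REGS_SIZE bytes) on the current
 * stack and save the caller-saved registers plus xSTATUS and xEPC into it.
 * For traps taken from user mode the macro additionally saves the user
 * tp/gp/sp, switches to the kernel tp and gp and clears xSCRATCH so that a
 * nested trap is recognized as coming from the kernel. It ends with a0, a1
 * and a2 loaded as the first arguments for thread_trap_handler().
 */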
.macro save_regs, mode
	addi	sp, sp, -THREAD_TRAP_REGS_SIZE
.if \mode == TRAP_MODE_USER

	/* Save user thread pointer and load kernel thread pointer */
	store_xregs sp, THREAD_TRAP_REG_TP, REG_TP
	addi	tp, sp, THREAD_TRAP_REGS_SIZE
	/* Now tp is at struct thread_user_mode_rec, which holds the kernel tp */
	load_xregs tp, THREAD_USER_MODE_REC_X4, REG_TP

	store_xregs sp, THREAD_TRAP_REG_GP, REG_GP

	/*
	 * Read the user stack pointer (stashed in the scratch register by
	 * thread_trap_vect()) and set the scratch register to 0 so that, in
	 * case of a recursive exception, thread_trap_vect() knows that it
	 * was raised from the kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	store_xregs sp, THREAD_TRAP_REG_SP, REG_GP
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
.endif
	store_xregs sp, THREAD_TRAP_REG_T3, REG_T3, REG_T6
	store_xregs sp, THREAD_TRAP_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_TRAP_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_TRAP_REG_RA, REG_RA
#if defined(CFG_UNWIND)
	/* To unwind the stack we need s0, which is the frame pointer. */
	store_xregs sp, THREAD_TRAP_REG_S0, REG_S0
#endif

	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_TRAP_REG_STATUS, REG_T0

	csrr	a0, CSR_XCAUSE
	csrr	a1, CSR_XEPC

	store_xregs sp, THREAD_TRAP_REG_EPC, REG_A1

	mv	a2, sp

	/*
	 * a0 = cause
	 * a1 = epc
	 * a2 = sp
	 * a3 = user
	 * thread_trap_handler(cause, epc, sp, user)
	 */
.endm

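/*
 * Undo save_regs: write back xEPC and xSTATUS and reload the saved registers
 * from the trap frame. For a return to user mode the kernel stack pointer
 * (the address just above the trap frame) is put back into xSCRATCH before
 * the user tp/gp/sp are restored; for a return to kernel mode the trap frame
 * is simply popped off the stack.
 */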
.macro restore_regs, mode
	load_xregs sp, THREAD_TRAP_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0

	load_xregs sp, THREAD_TRAP_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0

	load_xregs sp, THREAD_TRAP_REG_RA, REG_RA
	load_xregs sp, THREAD_TRAP_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_TRAP_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_TRAP_REG_T3, REG_T3, REG_T6
#if defined(CFG_UNWIND)
	/* To unwind the stack we need s0, which is the frame pointer. */
	load_xregs sp, THREAD_TRAP_REG_S0, REG_S0
#endif

.if \mode == TRAP_MODE_USER
	addi	gp, sp, THREAD_TRAP_REGS_SIZE
	csrw	CSR_XSCRATCH, gp

	load_xregs sp, THREAD_TRAP_REG_TP, REG_TP
	load_xregs sp, THREAD_TRAP_REG_GP, REG_GP
	load_xregs sp, THREAD_TRAP_REG_SP, REG_SP

.else
	addi	sp, sp, THREAD_TRAP_REGS_SIZE
.endif
.endm

/* size_t __get_core_pos(void); */
FUNC __get_core_pos , : , .identity_map
	lw	a0, THREAD_CORE_LOCAL_HART_ID(tp)
	ret
END_FUNC __get_core_pos

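/*
 * Common trap vector. The convention is that xSCRATCH holds the kernel stack
 * pointer while executing in user mode and 0 while executing in the kernel.
 * The csrrw swap therefore leaves a non-zero sp for traps taken from user
 * mode and zero for traps taken from kernel mode, which selects either the
 * trap_from_user or the trap_from_kernel path below.
 */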
FUNC thread_trap_vect , :
	csrrw	sp, CSR_XSCRATCH, sp
	bnez	sp, 0f
	csrrw	sp, CSR_XSCRATCH, sp
	j	trap_from_kernel
0:
	j	trap_from_user
thread_trap_vect_end:
END_FUNC thread_trap_vect

LOCAL_FUNC trap_from_kernel, :
	save_regs TRAP_MODE_KERNEL
	li	a3, 0
	jal	thread_trap_handler
	restore_regs TRAP_MODE_KERNEL
	XRET
END_FUNC trap_from_kernel

LOCAL_FUNC trap_from_user, :
	save_regs TRAP_MODE_USER
	li	a3, 1
	jal	thread_trap_handler
	restore_regs TRAP_MODE_USER
	XRET
END_FUNC trap_from_user

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
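/*
 * Stores the two exit status words through the pointers recorded in the
 * struct thread_user_mode_rec on the kernel stack, saves the user mode
 * callee-saved registers into the thread context, restores the kernel
 * callee-saved registers, pops the record off the kernel stack and clears
 * xSCRATCH before returning to the caller of thread_enter_user_mode().
 */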
FUNC thread_unwind_user_mode , :

	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A3, REG_A5
	sw	a1, (a4)
	sw	a2, (a5)

	/* Save the user callee-saved registers */
	store_xregs a3, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs a3, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs a3, THREAD_CTX_REG_SP, REG_SP, REG_TP

	/* Restore the kernel callee-saved registers */
	mv	a1, sp

	load_xregs a1, THREAD_USER_MODE_REC_X1, REG_RA, REG_TP
	load_xregs a1, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	load_xregs a1, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11

	addi	sp, sp, THREAD_USER_MODE_REC_SIZE

	/*
	 * Zeroize xSCRATCH to indicate to thread_trap_vect()
	 * that we are executing in the kernel.
	 */
	csrw	CSR_XSCRATCH, zero

	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

/*
 * void thread_exit_user_mode(unsigned long a0, unsigned long a1,
 *			      unsigned long a2, unsigned long a3,
 *			      unsigned long sp, unsigned long pc,
 *			      unsigned long status);
 */
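/*
 * The a0..a3 arguments are passed through untouched, sp is the kernel stack
 * pointer to switch to (presumably the one recorded by
 * __thread_enter_user_mode()), pc is the address to return through
 * (normally thread_unwind_user_mode()) and status is the xSTATUS value to
 * restore before doing so.
 */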
FUNC thread_exit_user_mode , :
	/* Set kernel stack pointer */
	mv	sp, a4

	/* Set xSTATUS */
	csrw	CSR_XSTATUS, a6

	/* Set the return address to thread_unwind_user_mode() */
	mv	ra, a5
	ret
END_FUNC thread_exit_user_mode

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 */
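/*
 * Pushes a struct thread_user_mode_rec with the kernel callee-saved
 * registers and the exit status pointers onto the kernel stack, records the
 * kernel stack pointer both in the thread context and in xSCRATCH for
 * thread_trap_vect(), then loads the user register state from *regs and
 * enters user mode with xRET.
 */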
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	addi	sp, sp, -THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A0, REG_A2
	store_xregs sp, THREAD_USER_MODE_REC_X1, REG_RA, REG_TP
	store_xregs sp, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	store_xregs sp, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11

	/* Get pointer to current thread context */
	get_thread_ctx s0, s1

	/*
	 * Save the kernel stack pointer in the thread context to ensure
	 * that thread_exit_user_mode() uses the correct stack pointer.
	 */
	store_xregs s0, THREAD_CTX_KERN_SP, REG_SP

	/*
	 * Save the kernel stack pointer in xSCRATCH to ensure that
	 * thread_trap_vect() uses the correct stack pointer.
	 */
	csrw	CSR_XSCRATCH, sp

	/* Set user status */
	load_xregs a0, THREAD_CTX_REG_STATUS, REG_S0
	csrw	CSR_XSTATUS, s0

	/*
	 * Save the values for a1 and a2 in struct thread_core_local to be
	 * restored later just before the xRET.
	 */
	store_xregs tp, THREAD_CORE_LOCAL_X10, REG_A1, REG_A2

	/* Load the rest of the general purpose registers */
	load_xregs a0, THREAD_CTX_REG_RA, REG_RA, REG_TP
	load_xregs a0, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs a0, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs a0, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs a0, THREAD_CTX_REG_A0, REG_A0, REG_A7

	/* Set exception program counter */
	csrw	CSR_XEPC, ra

	/* Jump into user mode */
	XRET
END_FUNC __thread_enter_user_mode

/*
 * Implement based on the transport method used to communicate between the
 * untrusted domain and the trusted domain. It could be an SBI/ECALL into a
 * security monitor running in M-mode followed by a panic, or message-based
 * across domains, where we return to a messaging callback which parses and
 * handles messages.
 */
LOCAL_FUNC thread_return_from_nsec_call , :
	/* Implement */
	j	.
END_FUNC thread_return_from_nsec_call

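/*
 * Entry point for a standard SMC thread. __thread_std_smc_entry() does the
 * actual work; once it returns the thread is marked as free and control goes
 * back to the untrusted domain with TEESMC_OPTEED_RETURN_CALL_DONE and the
 * return value of __thread_std_smc_entry() in a1.
 */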
FUNC thread_std_smc_entry , :
	jal	__thread_std_smc_entry

	/* Save return value */
	mv	s0, a0

	/* Disable all interrupts */
	csrc	CSR_XSTATUS, CSR_XSTATUS_IE

	/* Switch to temporary stack */
	jal	thread_get_tmp_sp
	mv	sp, a0

	/*
	 * We are returning from thread_alloc_and_run(),
	 * so set the thread state to free.
	 */
	jal	thread_state_free

	/* Restore __thread_std_smc_entry() return value */
	mv	a1, s0
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a0, TEESMC_OPTEED_RETURN_CALL_DONE

	/* Return to untrusted domain */
	jal	thread_return_from_nsec_call
END_FUNC thread_std_smc_entry

/* void thread_resume(struct thread_ctx_regs *regs) */
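/*
 * Restores the general purpose register state recorded in *regs (gp and tp
 * are assumed unchanged, as noted below) and continues at the restored ra;
 * the restored a0/a1 are also stored in struct thread_core_local
 * (THREAD_CORE_LOCAL_X10).
 */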
FUNC thread_resume , :
	/*
	 * Restore all registers assuming that GP
	 * and TP were not changed.
	 */
	load_xregs a0, THREAD_CTX_REG_RA, REG_RA, REG_SP
	load_xregs a0, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs a0, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs a0, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs a0, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs tp, THREAD_CORE_LOCAL_X10, REG_A0, REG_A1
	ret
END_FUNC thread_resume

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
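/*
 * Suspends the calling thread with thread_state_suspend(), returns to the
 * untrusted domain with TEESMC_OPTEED_RETURN_CALL_DONE and rv[0..2] as
 * arguments, and resumes at .thread_rpc_return (via thread_resume()) once
 * the RPC has been serviced, at which point a0-a3 are copied back into rv[].
 */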
FUNC thread_rpc , :
	/* Use the stack for temporary storage */
	addi	sp, sp, -REGOFF(4)

	/* Read xSTATUS */
	csrr	a1, CSR_XSTATUS

	/* Mask all maskable exceptions before switching to temporary stack */
	csrc	CSR_XSTATUS, CSR_XSTATUS_IE

	/* Save the pointer to rv[], xSTATUS, s0 and the return address */
	STR	a0, REGOFF(0)(sp)
	STR	a1, REGOFF(1)(sp)
	STR	s0, REGOFF(2)(sp)
	STR	ra, REGOFF(3)(sp)
	addi	s0, sp, REGOFF(4)

	/* Save thread state */
	jal	thread_get_ctx_regs
	store_xregs a0, THREAD_CTX_REG_SP, REG_SP
	store_xregs a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs a0, THREAD_CTX_REG_S2, REG_S2, REG_S11

	/* Get the temporary stack pointer */
	jal	thread_get_tmp_sp

	/* Get pointer to rv[] */
	LDR	s1, REGOFF(0)(sp)

	/* xSTATUS to restore */
	LDR	a1, REGOFF(1)(sp)
	/* Switch to tmp stack */
	mv	sp, a0

	/* Early load rv[] into s2-s4 */
	lw	s2, 0(s1)
	lw	s3, 4(s1)
	lw	s4, 8(s1)

	li	a0, THREAD_FLAGS_COPY_ARGS_ON_RETURN
	la	a2, .thread_rpc_return
	jal	thread_state_suspend

	mv	a4, a0	/* thread index */
	mv	a1, s2	/* rv[0] */
	mv	a2, s3	/* rv[1] */
	mv	a3, s4	/* rv[2] */
	li	a0, TEESMC_OPTEED_RETURN_CALL_DONE

	/* Return to untrusted domain */
	jal	thread_return_from_nsec_call
.thread_rpc_return:
	/*
	 * Jumps here from thread_resume() above when the RPC has returned.
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 */

	/* Get pointer to rv[] */
	LDR	a4, REGOFF(0)(sp)

	/* Store a0-a3 into rv[] */
	sw	a0, 0(a4)
	sw	a1, 4(a4)
	sw	a2, 8(a4)
	sw	a3, 12(a4)

	/* Pop the saved xSTATUS from the stack */
	LDR	s0, REGOFF(1)(sp)
	csrw	CSR_XSTATUS, s0

	/* Pop the return address and s0 from the stack */
	LDR	ra, REGOFF(3)(sp)
	LDR	s0, REGOFF(2)(sp)

	addi	sp, sp, REGOFF(4)
	ret
END_FUNC thread_rpc