/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright 2022-2023 NXP
 * Copyright 2024 Andes Technology Corporation
 */

#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <riscv.h>
#include <riscv_macros.S>
#include <tee/optee_abi.h>
#include <tee/teeabi_opteed.h>
#include <tee/teeabi_opteed_macros.h>

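/*
 * Compute the address of the current hart's thread context, i.e.
 * &threads[curr_thread], where curr_thread is read from the core-local
 * area pointed to by tp. The loop adds THREAD_CTX_SIZE curr_thread times
 * instead of multiplying, roughly:
 *
 *	res = threads;
 *	for (tmp0 = curr_thread; tmp0; tmp0--)
 *		res += THREAD_CTX_SIZE;
 */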
.macro get_thread_ctx res, tmp0
	lw	\tmp0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
	la	\res, threads
1:
	beqz	\tmp0, 2f
	addi	\res, \res, THREAD_CTX_SIZE
	addi	\tmp0, \tmp0, -1
	bnez	\tmp0, 1b
2:
.endm

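/*
 * Branch to \label if the trap was taken from U-mode, that is, if the
 * SPP bit of the xstatus value in \reg is clear. \reg is clobbered.
 */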
.macro b_if_prev_priv_is_u reg, label
	andi	\reg, \reg, CSR_XSTATUS_SPP
	beqz	\reg, \label
.endm

/* size_t __get_core_pos(void); */
FUNC __get_core_pos , : , .identity_map
	lw	a0, THREAD_CORE_LOCAL_HART_INDEX(tp)
	ret
END_FUNC __get_core_pos

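/*
 * Trap vector entry. The convention is that xSCRATCH holds the pointer
 * to struct thread_core_local while executing in user mode and zero
 * while in kernel mode. After swapping tp with xSCRATCH, a zero tp
 * therefore means the trap was taken from the kernel, while a nonzero
 * tp means it was taken from user mode and tp now points to
 * thread_core_local.
 */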
FUNC thread_trap_vect , :
	csrrw	tp, CSR_XSCRATCH, tp
	bnez	tp, 0f
	/* Read tp back */
	csrrw	tp, CSR_XSCRATCH, tp
	j	trap_from_kernel
0:
	/* Now tp is thread_core_local */
	j	trap_from_user
thread_trap_vect_end:
END_FUNC thread_trap_vect

LOCAL_FUNC trap_from_kernel, :
	/* Save sp, a0, a1 into temporary slots in thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_kernel

interrupt_from_kernel:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Load and save kernel sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore kernel a0, a1 so they can be saved with the GPRs below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get the interrupt code from XCAUSE and build the corresponding
	 * XIP bit. For example, if XCAUSE is 0x8000000000000005 (supervisor
	 * timer interrupt), we build 0x20, which is (1 << 5) and corresponds
	 * to the sip.STIP bit.
	 */
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Test the built XIP bit against THREAD_EXCP_FOREIGN_INTR. If it is
	 * one of the foreign-interrupt bits, call
	 * thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_kernel

foreign_interrupt_from_kernel:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will not
	 * return to here.
	 */
	tail	thread_foreign_interrupt_handler

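	/*
	 * The core-local flags word is used as a small stack: shifting
	 * left by THREAD_CLF_SAVED_SHIFT pushes the current state before
	 * OR-ing in the bits describing this trap level, and shifting
	 * right afterwards pops the previous state back.
	 */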
native_interrupt_from_kernel:
	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a2, a2, THREAD_CLF_SAVED_SHIFT
	ori	a2, a2, (THREAD_CLF_TMP | THREAD_CLF_IRQ)
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a2, a2, THREAD_CLF_SAVED_SHIFT
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* We are going to XRET to kernel mode. Set XSCRATCH to 0 */
	csrw	CSR_XSCRATCH, 0
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

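	/*
	 * Kernel-mode exceptions are handled on the per-core abort stack.
	 * If THREAD_CLF_ABORT was already set before this trap (an abort
	 * taken while already handling an abort), the abort stack is in
	 * use, so fall back to the temporary stack instead.
	 */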
exception_from_kernel:
	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
	li	a1, (THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	and	a1, a0, a1
	bnez	a1, sel_tmp_sp

	/* Select abort stack */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_A1
	j	set_sp

sel_tmp_sp:
	/* We have an abort while using the abort stack, select tmp stack */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_A1
	ori	a0, a0, THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

set_sp:
	mv	sp, a1
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Save state on stack
	 */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save kernel sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore kernel a0, a1 so they can be saved with the GPRs below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP
	store_xregs sp, THREAD_ABT_REG_TP, REG_TP
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* We are going to XRET to kernel mode. Set XSCRATCH to 0 */
	csrw	CSR_XSCRATCH, 0

	/* Update core local flags */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Restore all GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_kernel

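/*
 * Traps taken from user mode additionally save the user gp/tp and
 * install the kernel global pointer and the thread_core_local pointer
 * before any C code runs.
 */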
LOCAL_FUNC trap_from_user, :
	/* Save user sp, a0, a1 into temporary slots in thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_user

interrupt_from_user:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore user a0, a1 so they can be saved with the GPRs below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that, in case of a recursive
	 * exception, thread_trap_vect() knows that the trap was taken from
	 * the kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_CTX_REG_TP, REG_GP
	/* Set kernel gp */
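	/*
	 * Linker relaxation is disabled below: a relaxed "la gp" would be
	 * turned into a gp-relative access, computed from the user gp that
	 * is still live at this point.
	 */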
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get the interrupt code from XCAUSE and build the corresponding
	 * XIP bit. For example, if XCAUSE is 0x8000000000000005 (supervisor
	 * timer interrupt), we build 0x20, which is (1 << 5) and corresponds
	 * to the sip.STIP bit.
	 */
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Test the built XIP bit against THREAD_EXCP_FOREIGN_INTR. If it is
	 * one of the foreign-interrupt bits, call
	 * thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_user

foreign_interrupt_from_user:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will not
	 * return to here.
	 */
	tail	thread_foreign_interrupt_handler

native_interrupt_from_user:
	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a2, a2, THREAD_CLF_SAVED_SHIFT
	ori	a2, a2, (THREAD_CLF_TMP | THREAD_CLF_IRQ)
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a2, a2, THREAD_CLF_SAVED_SHIFT
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Set scratch as thread_core_local */
	csrw	CSR_XSCRATCH, tp
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

exception_from_user:
	/* a0 is CSR_XCAUSE */
	li	a1, CAUSE_USER_ECALL
	bne	a0, a1, abort_from_user
ecall_from_user:
	/* Load and set kernel sp from thread context */
	get_thread_ctx a0, a1
	load_xregs a0, THREAD_CTX_KERN_SP, REG_SP

	/* Now sp is kernel sp, create stack for struct thread_scall_regs */
	addi	sp, sp, -THREAD_SCALL_REGS_SIZE
	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_SCALL_REG_SP, REG_A0

	/* Restore user a0, a1 so they can be saved with the GPRs below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	/*
	 * Set the scratch register to 0 so that, in case of a recursive
	 * exception, thread_trap_vect() knows that the trap was taken from
	 * the kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_SCALL_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop

	/* Save other caller-saved registers */
	store_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	store_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	a0, CSR_XIE
	store_xregs sp, THREAD_SCALL_REG_IE, REG_A0
	/* Mask all interrupts */
	csrw	CSR_XIE, zero
	/* Save XSTATUS */
	csrr	a0, CSR_XSTATUS
	store_xregs sp, THREAD_SCALL_REG_STATUS, REG_A0
	/* Save XEPC */
	csrr	a0, CSR_XEPC
	store_xregs sp, THREAD_SCALL_REG_EPC, REG_A0

	/*
	 * a0 = struct thread_scall_regs *regs
	 * Call thread_scall_handler(regs)
	 */
	mv	a0, sp
	call	thread_scall_handler

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	get_thread_ctx a0, a1
	addi	t0, sp, THREAD_SCALL_REGS_SIZE
	store_xregs a0, THREAD_CTX_KERN_SP, REG_T0

	/* Restore XEPC */
	load_xregs sp, THREAD_SCALL_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_SCALL_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_SCALL_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Check the previous privilege mode via status.SPP */
	csrr	t0, CSR_XSTATUS
	b_if_prev_priv_is_u t0, 1f
	/*
	 * We are going to XRET to kernel mode.
	 * XSCRATCH is already zero to indicate that we are in kernel mode.
	 * We must keep kernel gp & tp, so skip restoring user gp & tp.
	 */
	j	2f
1:
	/*
	 * We are going to XRET to user mode.
	 * XSCRATCH must hold tp (thread_core_local) to be used in the next
	 * trap. We also need to restore user gp & tp.
	 */
	csrw	CSR_XSCRATCH, tp
	load_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	load_xregs sp, THREAD_SCALL_REG_TP, REG_TP
2:
	/* Restore remaining caller-saved registers */
	load_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	load_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_SCALL_REG_SP, REG_SP
	XRET

abort_from_user:
	/*
	 * Update core local flags
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Save state on stack
	 */

	/* Load abt_stack_va_end and set it as sp */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_SP

	/* Now sp is abort sp, create stack for struct thread_abort_regs */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore user a0, a1 so they can be saved with the GPRs below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that, in case of a recursive
	 * exception, thread_trap_vect() knows that the trap was taken from
	 * the kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_ABT_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0

	/* Update core local flags */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Check the previous privilege mode via status.SPP */
	csrr	t0, CSR_XSTATUS
	b_if_prev_priv_is_u t0, 1f
	/*
	 * We are going to XRET to kernel mode.
	 * XSCRATCH is already zero to indicate that we are in kernel mode.
	 * We must keep kernel gp & tp, so skip restoring user gp & tp.
	 */
	j	2f
1:
	/*
	 * We are going to XRET to user mode.
	 * XSCRATCH must hold tp (thread_core_local) to be used in the next
	 * trap. We also need to restore user gp & tp.
	 */
	csrw	CSR_XSCRATCH, tp
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
2:
	/* Restore remaining GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_user

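/*
 * Note that this undoes the struct thread_user_mode_rec frame pushed by
 * __thread_enter_user_mode() below. The pair behaves much like
 * setjmp()/longjmp() across the user-mode excursion: the final ret
 * returns to the caller of __thread_enter_user_mode() with a0 still
 * holding the "ret" argument.
 */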
/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 				uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A3, REG_A5
	sw	a1, (a4)
	sw	a2, (a5)
	/* Save user callee-saved regs */
	store_xregs a3, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs a3, THREAD_CTX_REG_S2, REG_S2, REG_S11
	/* Restore kernel ra (thread_enter_user_mode()) & callee-saved regs */
	load_xregs sp, THREAD_USER_MODE_REC_RA, REG_RA
	load_xregs sp, THREAD_USER_MODE_REC_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_USER_MODE_REC_S2, REG_S2, REG_S11
	add	sp, sp, THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	addi	sp, sp, -THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A0, REG_A2
	store_xregs sp, THREAD_USER_MODE_REC_RA, REG_RA
	store_xregs sp, THREAD_USER_MODE_REC_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_USER_MODE_REC_S2, REG_S2, REG_S11

	/*
	 * Save the kernel stack pointer in the thread context
	 */

	/* Get pointer to current thread context */
	get_thread_ctx s0, s1

	/*
	 * Save the kernel stack pointer to ensure that
	 * exception_from_user() uses the correct stack pointer.
	 */
	store_xregs s0, THREAD_CTX_KERN_SP, REG_SP
	/*
	 * Save thread_core_local in xSCRATCH to ensure that thread_trap_vect()
	 * uses the correct core-local structure.
	 */
	csrw	CSR_XSCRATCH, tp

	/* Move struct thread_ctx_regs *regs to sp to reduce code size */
	mv	sp, a0

	/* Set exception return PC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_S0
	csrw	CSR_XEPC, s0
	/* Set user status */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_S0
	csrw	CSR_XSTATUS, s0
	/* Set user ie */
	load_xregs sp, THREAD_CTX_REG_IE, REG_S0
	csrw	CSR_XIE, s0
	/* Load the rest of the general purpose registers */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP /* sp must be last one */

	/* Jump into user mode */
	XRET
END_FUNC __thread_enter_user_mode

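/*
 * Re-enters a previously saved context. xSCRATCH is set according to
 * the privilege mode we XRET to, matching the convention relied on by
 * thread_trap_vect().
 */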
/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	/* Move struct thread_ctx_regs *regs to sp to reduce code size */
	mv	sp, a0

	/* Restore epc */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore status */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore ie */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0

	/* Check the previous privilege mode via status.SPP */
	csrr	t0, CSR_XSTATUS
	b_if_prev_priv_is_u t0, 1f
	/* Set scratch as zero to indicate that we are in kernel mode */
	csrw	CSR_XSCRATCH, zero
	j	2f
1:
	/* Resume to U-mode, set scratch as tp to be used in the trap handler */
	csrw	CSR_XSCRATCH, tp
2:
	/* Restore all general-purpose registers */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP /* sp must be last one */

	XRET
END_FUNC thread_resume

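/*
 * Invoked with the thread context already saved by the trap handler and
 * with all interrupts masked. Suspends the current thread and exits to
 * the untrusted domain with an OPTEE_ABI_RETURN_RPC_FOREIGN_INTR
 * request; the thread is resumed once the untrusted domain returns.
 */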
/* void thread_foreign_interrupt_handler(struct thread_ctx_regs *regs) */
FUNC thread_foreign_interrupt_handler , :
	/* Update 32-bit core local flags */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, (THREAD_CLF_TMP | THREAD_CLF_FIQ)
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Mark the current thread as suspended.
	 * a0 = THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	 * a1 = status
	 * a2 = epc
	 * thread_state_suspend(flags, status, pc)
	 */
	LDR	a1, THREAD_CTX_REG_STATUS(a0)
	LDR	a2, THREAD_CTX_REG_EPC(a0)
	li	a0, THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	call	thread_state_suspend
	/* Now return value a0 contains the suspended thread ID */

	/* Update core local flags */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, THREAD_CLF_TMP
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Pass the suspended thread ID in a4 and return to untrusted domain */
	mv	a4, a0
	li	a0, TEEABI_OPTEED_RETURN_CALL_DONE
	li	a1, OPTEE_ABI_RETURN_RPC_FOREIGN_INTR
	li	a2, 0
	li	a3, 0
	li	a5, 0
	j	thread_return_to_udomain
END_FUNC thread_foreign_interrupt_handler