/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright 2022-2023 NXP
 * Copyright 2024 Andes Technology Corporation
 */

#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <riscv.h>
#include <riscv_macros.S>
#include <tee/optee_abi.h>
#include <tee/teeabi_opteed.h>
#include <tee/teeabi_opteed_macros.h>

.macro get_thread_ctx res, tmp0
	lw	\tmp0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
	la	\res, threads
1:
	beqz	\tmp0, 2f
	addi	\res, \res, THREAD_CTX_SIZE
	addi	\tmp0, \tmp0, -1
	bnez	\tmp0, 1b
2:
.endm
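
/*
 * Illustrative sketch (not part of the original code): in C terms the
 * macro above computes roughly
 *
 *	res = &threads[thread_core_local->curr_thread];
 *
 * where "threads" is the global thread context array. The offset is
 * built by repeated addition of THREAD_CTX_SIZE, so no mul instruction
 * is needed.
 */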

.macro b_if_prev_priv_is_u reg, label
	andi	\reg, \reg, CSR_XSTATUS_SPP
	beqz	\reg, \label
.endm
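
/*
 * Illustrative sketch (not part of the original code): the macro above
 * branches when the trap was taken from U-mode, i.e. roughly
 *
 *	if (!(status & CSR_XSTATUS_SPP))
 *		goto label;	// SPP == 0 means the previous mode was U
 *
 * Note that \reg is clobbered by the test.
 */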

/* size_t __get_core_pos(void); */
FUNC __get_core_pos , : , .identity_map
	lw	a0, THREAD_CORE_LOCAL_HART_ID(tp)
	ret
END_FUNC __get_core_pos
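
/*
 * Illustrative note (not part of the original code): the core position
 * is read from the field cached in thread_core_local, roughly
 *
 *	return thread_core_local->hart_id;
 */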

FUNC thread_trap_vect , :
	csrrw	tp, CSR_XSCRATCH, tp
	bnez	tp, 0f
	/* XSCRATCH was 0: trap from kernel. Swap tp back. */
	csrrw	tp, CSR_XSCRATCH, tp
	j	trap_from_kernel
0:
	/* Now tp is thread_core_local */
	j	trap_from_user
thread_trap_vect_end:
END_FUNC thread_trap_vect
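
/*
 * Illustrative sketch (not part of the original code) of the XSCRATCH
 * convention used by the vector above:
 *
 *	- running in kernel mode: XSCRATCH == 0, tp == &thread_core_local
 *	- running in user mode:   XSCRATCH == &thread_core_local
 *
 * In C-like pseudocode:
 *
 *	swap(tp, XSCRATCH);
 *	if (!tp) {
 *		swap(tp, XSCRATCH);	// restore kernel tp, XSCRATCH stays 0
 *		trap_from_kernel();
 *	} else {
 *		trap_from_user();	// tp now points to thread_core_local
 *	}
 */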

LOCAL_FUNC trap_from_kernel, :
	/* Save sp, a0, a1 into temporary spaces of thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_kernel

interrupt_from_kernel:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Load and save kernel sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore kernel a0, a1 so they are saved below with the other GPRs */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get the interrupt code from XCAUSE and build the XIP bit. For
	 * example, if XCAUSE is 0x8000000000000005 (supervisor timer
	 * interrupt), we build 0x20, which is (1 << 5) and indicates the
	 * sip.STIP signal.
	 */
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Test the built XIP bit against THREAD_EXCP_FOREIGN_INTR. If it
	 * matches one of the foreign-interrupt bits, call
	 * thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_kernel
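
/*
 * Illustrative sketch (not part of the original code) of the dispatch
 * above, assuming cause still has CSR_XCAUSE_INTR_FLAG (the MSB) set:
 *
 *	unsigned long xip = 1UL << (cause - CSR_XCAUSE_INTR_FLAG);
 *
 *	if (xip & THREAD_EXCP_FOREIGN_INTR)
 *		thread_foreign_interrupt_handler(regs);	// does not return
 *	// otherwise fall through to the native interrupt path
 */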

foreign_interrupt_from_kernel:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will not
	 * return here.
	 */
	tail	thread_foreign_interrupt_handler

native_interrupt_from_kernel:
	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a2, a2, THREAD_CLF_SAVED_SHIFT
	ori	a2, a2, (THREAD_CLF_TMP | THREAD_CLF_IRQ)
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a2, a2, THREAD_CLF_SAVED_SHIFT
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* We are going to XRET to kernel mode. Set XSCRATCH to 0 */
	csrw	CSR_XSCRATCH, 0
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

exception_from_kernel:
	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
	li	a1, (THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	and	a1, a0, a1
	bnez	a1, sel_tmp_sp

	/* Select abort stack */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_A1
	j	set_sp

sel_tmp_sp:
	/* We got an abort while using the abort stack, select the tmp stack */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_A1
	ori	a0, a0, THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

set_sp:
	mv	sp, a1
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
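
/*
 * Illustrative sketch (not part of the original code) of the stack
 * selection above:
 *
 *	flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
 *	if (flags & (THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)) {
 *		sp = tmp_stack_va_end;	// nested abort: abort stack in use
 *		flags |= THREAD_CLF_TMP;
 *	} else {
 *		sp = abt_stack_va_end;
 *	}
 */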

	/*
	 * Save state on stack
	 */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save kernel sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore kernel a0, a1 so they are saved below with the other GPRs */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP
	store_xregs sp, THREAD_ABT_REG_TP, REG_TP
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* We are going to XRET to kernel mode. Set XSCRATCH to 0 */
	csrw	CSR_XSCRATCH, 0

	/* Update core local flags */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Restore all GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_kernel

LOCAL_FUNC trap_from_user, :
	/* Save user sp, a0, a1 into temporary spaces of thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_user

interrupt_from_user:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore user a0, a1 so they are saved below with the other GPRs */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception thread_trap_vect() knows the trap was taken from kernel
	 * mode.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_CTX_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
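
/*
 * Illustrative sketch (not part of the original code): the csrrw above
 * performs, in effect,
 *
 *	gp = XSCRATCH;	// user tp, swapped in by thread_trap_vect()
 *	XSCRATCH = 0;	// a nested trap now takes the kernel path
 *
 * after which the user tp is saved from gp and gp is rebuilt as the
 * kernel global pointer.
 */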
	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get the interrupt code from XCAUSE and build the XIP bit. For
	 * example, if XCAUSE is 0x8000000000000005 (supervisor timer
	 * interrupt), we build 0x20, which is (1 << 5) and indicates the
	 * sip.STIP signal.
	 */
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Test the built XIP bit against THREAD_EXCP_FOREIGN_INTR as in
	 * trap_from_kernel above. If it matches, call
	 * thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_user

foreign_interrupt_from_user:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will not
	 * return here.
	 */
	tail	thread_foreign_interrupt_handler

native_interrupt_from_user:
	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a2, a2, THREAD_CLF_SAVED_SHIFT
	ori	a2, a2, (THREAD_CLF_TMP | THREAD_CLF_IRQ)
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a2, a2, THREAD_CLF_SAVED_SHIFT
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Set scratch as thread_core_local */
	csrw	CSR_XSCRATCH, tp
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

exception_from_user:
	/* a0 is CSR_XCAUSE */
	li	a1, CAUSE_USER_ECALL
	bne	a0, a1, abort_from_user
ecall_from_user:
	/* Load and set kernel sp from thread context */
	get_thread_ctx a0, a1
	load_xregs a0, THREAD_CTX_KERN_SP, REG_SP

	/* Now sp is kernel sp, create stack for struct thread_scall_regs */
	addi	sp, sp, -THREAD_SCALL_REGS_SIZE
	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_SCALL_REG_SP, REG_A0

	/* Restore user a0, a1 so they are saved below with the other GPRs */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception thread_trap_vect() knows the trap was taken from kernel
	 * mode.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_SCALL_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop

	/* Save other caller-saved registers */
	store_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	store_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	a0, CSR_XIE
	store_xregs sp, THREAD_SCALL_REG_IE, REG_A0
	/* Mask all interrupts */
	csrw	CSR_XIE, zero
	/* Save XSTATUS */
	csrr	a0, CSR_XSTATUS
	store_xregs sp, THREAD_SCALL_REG_STATUS, REG_A0
	/* Save XEPC */
	csrr	a0, CSR_XEPC
	store_xregs sp, THREAD_SCALL_REG_EPC, REG_A0

	/*
	 * a0 = struct thread_scall_regs *regs
	 * Call thread_scall_handler(regs)
	 */
	mv	a0, sp
	call	thread_scall_handler

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	get_thread_ctx a0, a1
	addi	t0, sp, THREAD_SCALL_REGS_SIZE
	store_xregs a0, THREAD_CTX_KERN_SP, REG_T0
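
	/*
	 * Illustrative sketch (not part of the original code): the two
	 * instructions above amount to
	 *
	 *	threads[ct].kern_sp = (vaddr_t)regs + THREAD_SCALL_REGS_SIZE;
	 *
	 * i.e. the kernel sp value we had on entry to ecall_from_user.
	 */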

	/* Restore XEPC */
	load_xregs sp, THREAD_SCALL_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_SCALL_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_SCALL_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Check previous privilege mode via status.SPP */
	b_if_prev_priv_is_u t0, 1f
	/*
	 * We are going to XRET to kernel mode.
	 * XSCRATCH is already zero to indicate that we are in kernel mode.
	 * We must keep kernel gp & tp, so skip restoring user gp & tp.
	 */
	j	2f
1:
	/*
	 * We are going to XRET to user mode.
	 * XSCRATCH must be tp (thread_core_local) to be used in the next trap.
	 * We also need to restore user gp & tp.
	 */
	csrw	CSR_XSCRATCH, tp
	load_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	load_xregs sp, THREAD_SCALL_REG_TP, REG_TP
2:
	/* Restore remaining caller-saved registers */
	load_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	load_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_SCALL_REG_SP, REG_SP
	XRET

abort_from_user:
	/*
	 * Update core local flags
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Save state on stack
	 */

	/* Load abt_stack_va_end and set it as sp */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_SP

	/* Now sp is abort sp, create stack for struct thread_abort_regs */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore user a0, a1 so they are saved below with the other GPRs */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception thread_trap_vect() knows the trap was taken from kernel
	 * mode.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_ABT_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0

	/* Update core local flags */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Check previous privilege mode via status.SPP */
	b_if_prev_priv_is_u t0, 1f
	/*
	 * We are going to XRET to kernel mode.
	 * XSCRATCH is already zero to indicate that we are in kernel mode.
	 * We must keep kernel gp & tp, so skip restoring user gp & tp.
	 */
	j	2f
1:
	/*
	 * We are going to XRET to user mode.
	 * XSCRATCH must be tp (thread_core_local) to be used in the next trap.
	 * We also need to restore user gp & tp.
	 */
	csrw	CSR_XSCRATCH, tp
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
2:
	/* Restore remaining GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_user

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :

	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A3, REG_A5
	sw	a1, (a4)
	sw	a2, (a5)

	/* Save user callee regs */
	store_xregs a3, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs a3, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs a3, THREAD_CTX_REG_SP, REG_SP, REG_TP

	/* Restore kernel callee regs */
	mv	a1, sp

	load_xregs a1, THREAD_USER_MODE_REC_X1, REG_RA, REG_GP
	load_xregs a1, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	load_xregs a1, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11

	add	sp, sp, THREAD_USER_MODE_REC_SIZE

	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
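
/*
 * Illustrative sketch (not part of the original code) of what
 * thread_unwind_user_mode() does, in C-like pseudocode:
 *
 *	rec = (struct thread_user_mode_rec *)sp;
 *	*rec->exit_status0_ptr = exit_status0;
 *	*rec->exit_status1_ptr = exit_status1;
 *	// save user callee-saved regs and sp/gp/tp into rec->ctx_regs
 *	// restore kernel callee-saved regs from rec, pop rec
 *	return ret;	// as if returning from thread_enter_user_mode()
 *
 * The field names above are assumptions for illustration; the actual
 * layout is given by the THREAD_USER_MODE_REC_* offsets.
 */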

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 */
FUNC __thread_enter_user_mode , :
	/* Disable kernel mode exceptions first */
	csrc	CSR_XSTATUS, CSR_XSTATUS_IE

	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	addi	sp, sp, -THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A0, REG_A2
	store_xregs sp, THREAD_USER_MODE_REC_X1, REG_RA, REG_GP
	store_xregs sp, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	store_xregs sp, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11
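
	/*
	 * Illustrative sketch (an assumption, not part of the original
	 * code): the record pushed above roughly corresponds to
	 *
	 *	struct thread_user_mode_rec {
	 *		struct thread_ctx_regs *ctx_regs_ptr;
	 *		uint32_t *exit_status0_ptr;
	 *		uint32_t *exit_status1_ptr;
	 *		unsigned long x[];	// ra/sp/gp and s0-s11
	 *	};
	 *
	 * with the exact layout defined by the THREAD_USER_MODE_REC_*
	 * offsets in asm-defines.
	 */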

	/*
	 * Save the kernel stack pointer in the thread context
	 */

	/* Get pointer to current thread context */
	get_thread_ctx s0, s1

	/*
	 * Save kernel stack pointer to ensure that
	 * exception_from_user() uses the correct stack pointer.
	 */

	store_xregs s0, THREAD_CTX_KERN_SP, REG_SP
	/*
	 * Save thread_core_local in XSCRATCH to ensure that thread_trap_vect()
	 * uses the correct core local structure.
	 */
	csrw	CSR_XSCRATCH, tp

	/* Move struct thread_ctx_regs *regs to sp to reduce code size */
	mv	sp, a0

	/* Set exception return PC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_S0
	csrw	CSR_XEPC, s0
	/* Set user ie */
	load_xregs sp, THREAD_CTX_REG_IE, REG_S0
	csrw	CSR_XIE, s0
	/* Set user status */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_S0
	csrw	CSR_XSTATUS, s0
	/* Load the rest of the general purpose registers */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP /* sp must be the last one */

	/* Jump into user mode */
	XRET
END_FUNC __thread_enter_user_mode

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	/* Disable global interrupts first */
	csrc	CSR_XSTATUS, CSR_XSTATUS_IE

	/* Move struct thread_ctx_regs *regs to sp to reduce code size */
	mv	sp, a0

	/* Restore epc */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore ie */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore status */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0

	/* Check previous privilege mode via status.SPP */
	b_if_prev_priv_is_u t0, 1f
	/* Set scratch to zero to indicate that we are in kernel mode */
	csrw	CSR_XSCRATCH, zero
	j	2f
1:
	/* Resume to U-mode, set scratch to tp to be used in the trap handler */
	csrw	CSR_XSCRATCH, tp
2:
	/* Restore all general-purpose registers */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP /* sp must be the last one */

	XRET
END_FUNC thread_resume

/* void thread_foreign_interrupt_handler(struct thread_ctx_regs *regs) */
FUNC thread_foreign_interrupt_handler , :
	/* Update 32-bit core local flags */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, (THREAD_CLF_TMP | THREAD_CLF_FIQ)
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Mark the current thread as suspended.
	 * a0 = THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	 * a1 = status
	 * a2 = epc
	 * thread_state_suspend(flags, status, pc)
	 */
	LDR	a1, THREAD_CTX_REG_STATUS(a0)
	LDR	a2, THREAD_CTX_REG_EPC(a0)
	li	a0, THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	call	thread_state_suspend
	/* Now the return value in a0 contains the suspended thread ID. */

	/* Update core local flags */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, THREAD_CLF_TMP
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Pass the thread index in a4 and return to the untrusted domain. */
	mv	a4, a0
	li	a0, TEEABI_OPTEED_RETURN_CALL_DONE
	li	a1, OPTEE_ABI_RETURN_RPC_FOREIGN_INTR
	li	a2, 0
	li	a3, 0
	li	a5, 0
	j	thread_return_to_udomain
END_FUNC thread_foreign_interrupt_handler
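
/*
 * Illustrative sketch (not part of the original code) of the register
 * convention set up before jumping to thread_return_to_udomain:
 *
 *	a0 = TEEABI_OPTEED_RETURN_CALL_DONE;
 *	a1 = OPTEE_ABI_RETURN_RPC_FOREIGN_INTR;
 *	a2 = a3 = a5 = 0;
 *	a4 = suspended thread index, so the normal world can later resume
 *	     the thread
 */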