/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright 2022-2023 NXP
 * Copyright 2024 Andes Technology Corporation
 */

#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <riscv.h>
#include <riscv_macros.S>
#include <tee/optee_abi.h>
#include <tee/teeabi_opteed.h>
#include <tee/teeabi_opteed_macros.h>

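/*
 * get_thread_ctx - compute the address of the current thread's context
 *
 * Returns &threads[curr_thread] in \res, walking the array with repeated
 * additions instead of a multiply. \tmp0 is clobbered.
 */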
.macro get_thread_ctx res, tmp0
	lw	\tmp0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
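	/*
	 * Disable linker relaxation for this "la" so it is not turned into
	 * a gp-relative access: this macro can run before the kernel gp is
	 * set up, when gp may still hold the user's value.
	 */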
.option push
.option norelax
	la	\res, threads
.option pop
	LDR	\res, 0(\res)
1:
	beqz	\tmp0, 2f
	addi	\res, \res, THREAD_CTX_SIZE
	addi	\tmp0, \tmp0, -1
	bnez	\tmp0, 1b
2:
.endm

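/*
 * b_if_prev_priv_is_u - branch to \label if the trap was taken from U-mode
 *
 * \reg holds a copy of XSTATUS and is clobbered; the branch is taken when
 * the SPP bit indicates that the previous privilege level was user.
 */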
.macro b_if_prev_priv_is_u reg, label
	andi	\reg, \reg, CSR_XSTATUS_SPP
	beqz	\reg, \label
.endm

/* size_t __get_core_pos(void); */
FUNC __get_core_pos , : , .identity_map
	/*
	 * Get the logical core position in the range
	 * [0, CFG_TEE_CORE_NB_CORE) for indexing into
	 * per-core data structures.
	 */
	lw	a0, THREAD_CORE_LOCAL_HART_INDEX(tp)
#ifdef CFG_TEE_CORE_DEBUG
	/* Sanity check: ensure hart index is valid */
	li	t0, CFG_TEE_CORE_NB_CORE
	bltu	a0, t0, out
fail:
	wfi
	j	fail
out:
#endif
	ret
END_FUNC __get_core_pos

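/*
 * Trap vector. While executing in user mode XSCRATCH holds the
 * thread_core_local pointer and while in kernel mode it is zero, so a
 * single swap with tp tells us which privilege level the trap came from.
 */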
FUNC thread_trap_vect , :
	csrrw	tp, CSR_XSCRATCH, tp
	bnez	tp, 0f
	/* Read tp back */
	csrrw	tp, CSR_XSCRATCH, tp
	j	trap_from_kernel
0:
	/* Now tp is thread_core_local */
	j	trap_from_user
thread_trap_vect_end:
END_FUNC thread_trap_vect

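/*
 * Trap handler for traps taken while executing in the kernel.
 * On entry tp is thread_core_local, XSCRATCH is zero, and sp, a0 and a1
 * still hold the interrupted context's values.
 */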
LOCAL_FUNC trap_from_kernel, :
	/* Save sp, a0, a1 into temporary spaces of thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_kernel

interrupt_from_kernel:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Load and save kernel sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore kernel a0, a1 so they can be saved below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get interrupt code from XCAUSE and build XIP. For example, if the
	 * value of XCAUSE is 0x8000000000000005 (supervisor timer interrupt),
	 * we build 0x20, which is (1 << 5) and indicates the sip.STIP signal.
	 */
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Compare built XIP with THREAD_EXCP_FOREIGN_INTR. If XIP is one of
	 * THREAD_EXCP_FOREIGN_INTR, we call thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_kernel

foreign_interrupt_from_kernel:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will not
	 * return here.
	 */
	tail	thread_foreign_interrupt_handler

native_interrupt_from_kernel:
	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a2, a2, THREAD_CLF_SAVED_SHIFT
	ori	a2, a2, (THREAD_CLF_TMP | THREAD_CLF_IRQ)
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a2, a2, THREAD_CLF_SAVED_SHIFT
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* We are going to XRET to kernel mode. Set XSCRATCH to 0 */
	csrw	CSR_XSCRATCH, 0
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

exception_from_kernel:
	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
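	/*
	 * If the saved flags already contain THREAD_CLF_ABORT, this abort
	 * was taken while we were already handling an abort, i.e. while on
	 * the abort stack, so the tmp stack is selected instead.
	 */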
	li	a1, (THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	and	a1, a0, a1
	bnez	a1, sel_tmp_sp

	/* Select abort stack */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_A1
	j	set_sp

sel_tmp_sp:
	/* We have an abort while using the abort stack, select tmp stack */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_A1
	ori	a0, a0, THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

set_sp:
	mv	sp, a1
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Save state on stack
	 */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save kernel sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore kernel a0, a1 so they can be saved below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP
	store_xregs sp, THREAD_ABT_REG_TP, REG_TP
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* We are going to XRET to kernel mode. Set XSCRATCH to 0 */
	csrw	CSR_XSCRATCH, 0

	/* Update core local flags */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Restore all GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_kernel

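/*
 * Trap handler for traps taken while executing in user mode.
 * On entry tp is thread_core_local, XSCRATCH holds the user tp, and
 * sp, a0 and a1 still hold the user context's values.
 */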
LOCAL_FUNC trap_from_user, :
	/* Save user sp, a0, a1 into temporary spaces of thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_user

interrupt_from_user:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore user a0, a1 so they can be saved below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception thread_trap_vect() knows that it was taken from the
	 * kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_CTX_REG_TP, REG_GP
	/* Set kernel gp */
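	/*
	 * norelax: otherwise the linker could relax this "la" into a
	 * gp-relative sequence while gp still holds the user's value.
	 */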
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get interrupt code from XCAUSE and build XIP. For example, if the
	 * value of XCAUSE is 0x8000000000000005 (supervisor timer interrupt),
	 * we build 0x20, which is (1 << 5) and indicates the sip.STIP signal.
	 */
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Compare built XIP with THREAD_EXCP_FOREIGN_INTR. If XIP is one of
	 * THREAD_EXCP_FOREIGN_INTR, call thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_user

foreign_interrupt_from_user:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will not
	 * return here.
	 */
	tail	thread_foreign_interrupt_handler

native_interrupt_from_user:
	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a2, a2, THREAD_CLF_SAVED_SHIFT
	ori	a2, a2, (THREAD_CLF_TMP | THREAD_CLF_IRQ)
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a2, a2, THREAD_CLF_SAVED_SHIFT
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Set scratch to thread_core_local for the next trap from user */
	csrw	CSR_XSCRATCH, tp
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

exception_from_user:
	/* a0 is CSR_XCAUSE */
	li	a1, CAUSE_USER_ECALL
	bne	a0, a1, abort_from_user
ecall_from_user:
	/* Load and set kernel sp from thread context */
	get_thread_ctx a0, a1
	load_xregs a0, THREAD_CTX_KERN_SP, REG_SP

	/* Now sp is kernel sp, create stack for struct thread_scall_regs */
	addi	sp, sp, -THREAD_SCALL_REGS_SIZE
	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_SCALL_REG_SP, REG_A0

	/* Restore user a0, a1 so they can be saved below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception thread_trap_vect() knows that it was taken from the
	 * kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_SCALL_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop

	/* Save other caller-saved registers */
	store_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	store_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	a0, CSR_XIE
	store_xregs sp, THREAD_SCALL_REG_IE, REG_A0
	/* Mask all interrupts */
	csrw	CSR_XIE, zero
	/* Save XSTATUS */
	csrr	a0, CSR_XSTATUS
	store_xregs sp, THREAD_SCALL_REG_STATUS, REG_A0
	/* Save XEPC */
	csrr	a0, CSR_XEPC
	store_xregs sp, THREAD_SCALL_REG_EPC, REG_A0

	/*
	 * a0 = struct thread_scall_regs *regs
	 * Call thread_scall_handler(regs)
	 */
	mv	a0, sp
	call	thread_scall_handler

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	get_thread_ctx a0, a1
	addi	t0, sp, THREAD_SCALL_REGS_SIZE
	store_xregs a0, THREAD_CTX_KERN_SP, REG_T0

	/* Restore XEPC */
	load_xregs sp, THREAD_SCALL_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_SCALL_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_SCALL_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Check the previous privilege mode via status.SPP */
	csrr	t0, CSR_XSTATUS
	b_if_prev_priv_is_u t0, 1f
	/*
	 * We are going to XRET to kernel mode.
	 * XSCRATCH is already zero to indicate that we are in kernel mode.
	 * We must keep kernel gp & tp, so skip restoring user gp & tp.
	 */
	j	2f
1:
	/*
	 * We are going to XRET to user mode.
	 * XSCRATCH must be tp (thread_core_local) to be used in the next
	 * trap.
	 * We also need to restore user gp & tp.
	 */
	csrw	CSR_XSCRATCH, tp
	load_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	load_xregs sp, THREAD_SCALL_REG_TP, REG_TP
2:
	/* Restore remaining caller-saved registers */
	load_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	load_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_SCALL_REG_SP, REG_SP
	XRET

abort_from_user:
	/*
	 * Update core local flags
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Save state on stack
	 */

	/* Load abt_stack_va_end and set it as sp */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_SP

	/* Now sp is abort sp, create stack for struct thread_abort_regs */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore user a0, a1 so they can be saved below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception thread_trap_vect() knows that it was taken from the
	 * kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_ABT_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0

	/* Update core local flags */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Check the previous privilege mode via status.SPP */
	csrr	t0, CSR_XSTATUS
	b_if_prev_priv_is_u t0, 1f
	/*
	 * We are going to XRET to kernel mode.
	 * XSCRATCH is already zero to indicate that we are in kernel mode.
	 * We must keep kernel gp & tp, so skip restoring user gp & tp.
	 */
	j	2f
1:
	/*
	 * We are going to XRET to user mode.
	 * XSCRATCH must be tp (thread_core_local) to be used in the next
	 * trap.
	 * We also need to restore user gp & tp.
	 */
	csrw	CSR_XSCRATCH, tp
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
2:
	/* Restore remaining GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_user

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *				uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A3, REG_A5
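	/* a3 = ctx_regs, a4 = &exit_status0, a5 = &exit_status1 */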
	sw	a1, (a4)
	sw	a2, (a5)
	/* Save user callee-saved regs */
	store_xregs a3, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs a3, THREAD_CTX_REG_S2, REG_S2, REG_S11
	/* Restore kernel ra (thread_enter_user_mode()) & callee-saved regs */
	load_xregs sp, THREAD_USER_MODE_REC_RA, REG_RA
	load_xregs sp, THREAD_USER_MODE_REC_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_USER_MODE_REC_S2, REG_S2, REG_S11
	add	sp, sp, THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	addi	sp, sp, -THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A0, REG_A2
	store_xregs sp, THREAD_USER_MODE_REC_RA, REG_RA
	store_xregs sp, THREAD_USER_MODE_REC_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_USER_MODE_REC_S2, REG_S2, REG_S11

	/*
	 * Save the kernel stack pointer in the thread context
	 */

	/* Get pointer to current thread context */
	get_thread_ctx s0, s1

	/*
	 * Save the kernel stack pointer to ensure that
	 * exception_from_user() uses the correct stack pointer.
	 */
	store_xregs s0, THREAD_CTX_KERN_SP, REG_SP
	/*
	 * Save thread_core_local in XSCRATCH to ensure that
	 * thread_trap_vect() uses the correct core local structure.
	 */
	csrw	CSR_XSCRATCH, tp

	/* Move struct thread_ctx_regs *regs to sp to reduce code size */
	mv	sp, a0

	/* Set exception return PC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_S0
	csrw	CSR_XEPC, s0
	/* Set user status */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_S0
	csrw	CSR_XSTATUS, s0
	/* Set user ie */
	load_xregs sp, THREAD_CTX_REG_IE, REG_S0
	csrw	CSR_XIE, s0
	/* Load the rest of the general purpose registers */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP /* sp must be the last one */

	/* Jump into user mode */
	XRET
END_FUNC __thread_enter_user_mode

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	/* Move struct thread_ctx_regs *regs to sp to reduce code size */
	mv	sp, a0

	/* Restore epc */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore status */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore ie */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0

	/* Check the previous privilege mode via status.SPP */
	csrr	t0, CSR_XSTATUS
	b_if_prev_priv_is_u t0, 1f
	/* Set scratch to zero to indicate that we are in kernel mode */
	csrw	CSR_XSCRATCH, zero
	j	2f
1:
	/* Resume to U-mode, set scratch to tp to be used in the trap handler */
	csrw	CSR_XSCRATCH, tp
2:
	/* Restore all general-purpose registers */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP /* sp must be the last one */

	XRET
END_FUNC thread_resume

/* void thread_foreign_interrupt_handler(struct thread_ctx_regs *regs) */
FUNC thread_foreign_interrupt_handler , :
	/* Update 32-bit core local flags */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, (THREAD_CLF_TMP | THREAD_CLF_FIQ)
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Mark current thread as suspended.
	 * a0 = THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	 * a1 = status
	 * a2 = epc
	 * thread_state_suspend(flags, status, pc)
	 */
	LDR	a1, THREAD_CTX_REG_STATUS(a0)
	LDR	a2, THREAD_CTX_REG_EPC(a0)
	li	a0, THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	call	thread_state_suspend
	/* Now the return value a0 contains the suspended thread ID. */

	/* Update core local flags */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, THREAD_CLF_TMP
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Pass the suspended thread ID in a4 and return to the untrusted domain */
	mv	a4, a0
	li	a0, TEEABI_OPTEED_RETURN_CALL_DONE
	li	a1, OPTEE_ABI_RETURN_RPC_FOREIGN_INTR
	li	a2, 0
	li	a3, 0
	li	a5, 0
	j	thread_return_to_udomain
END_FUNC thread_foreign_interrupt_handler