/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright 2022-2023 NXP
 * Copyright 2024 Andes Technology Corporation
 */

#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <riscv.h>
#include <riscv_macros.S>
#include <tee/optee_abi.h>
#include <tee/teeabi_opteed.h>
#include <tee/teeabi_opteed_macros.h>

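/*
 * Compute the address of the current thread's context, i.e.
 * &threads[curr_thread], by striding THREAD_CTX_SIZE bytes per index.
 * Roughly, as a C sketch (assuming "threads" points to an array of
 * struct thread_ctx):
 *
 *	res = threads + curr_thread;
 *
 * Clobbers \tmp0.
 */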
.macro get_thread_ctx res, tmp0
	lw	\tmp0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
.option push
.option norelax
	la	\res, threads
.option pop
	LDR	\res, 0(\res)
1:
	beqz	\tmp0, 2f
	addi	\res, \res, THREAD_CTX_SIZE
	addi	\tmp0, \tmp0, -1
	bnez	\tmp0, 1b
2:
.endm

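/*
 * Branch to \label if the trap was taken from U-mode: xstatus.SPP is
 * clear when the previous privilege level was U-mode. Clobbers \reg.
 */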
.macro b_if_prev_priv_is_u reg, label
	andi	\reg, \reg, CSR_XSTATUS_SPP
	beqz	\reg, \label
.endm

/* size_t __get_core_pos(void); */
FUNC __get_core_pos , : , .identity_map
	lw	a0, THREAD_CORE_LOCAL_HART_INDEX(tp)
	ret
END_FUNC __get_core_pos

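/*
 * Trap vector entry. By convention (see thread_resume() and the XRET
 * paths below), XSCRATCH is 0 while executing in the kernel and holds
 * the struct thread_core_local pointer while in user mode, so the
 * csrrw swap below tells the two trap origins apart.
 */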
FUNC thread_trap_vect , :
	csrrw	tp, CSR_XSCRATCH, tp
	bnez	tp, 0f
	/* Read tp back */
	csrrw	tp, CSR_XSCRATCH, tp
	j	trap_from_kernel
0:
	/* Now tp is thread_core_local */
	j	trap_from_user
thread_trap_vect_end:
END_FUNC thread_trap_vect

LOCAL_FUNC trap_from_kernel, :
	/* Save sp, a0, a1 into temporary spaces of thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_kernel

interrupt_from_kernel:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Load and save kernel sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore kernel a0, a1 so they can be saved below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get interrupt code from XCAUSE and build XIP. For example, if the
	 * value of XCAUSE is 0x8000000000000005 (supervisor timer interrupt),
	 * we build 0x20, which is (1 << 5) and indicates the sip.STIP signal.
	 */
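	/*
	 * Roughly, as a C sketch of the instructions below (with "cause"
	 * being the XLEN-bit value read above):
	 *
	 *	xip = 1UL << (cause - CSR_XCAUSE_INTR_FLAG);
	 *
	 * Subtracting CSR_XCAUSE_INTR_FLAG clears the interrupt flag bit
	 * and leaves the interrupt code.
	 */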
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Compare built XIP with THREAD_EXCP_FOREIGN_INTR. If XIP is one of
	 * THREAD_EXCP_FOREIGN_INTR, we call thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_kernel

foreign_interrupt_from_kernel:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will not
	 * return to here.
	 */
	tail	thread_foreign_interrupt_handler

native_interrupt_from_kernel:
	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a2, a2, THREAD_CLF_SAVED_SHIFT
	ori	a2, a2, (THREAD_CLF_TMP | THREAD_CLF_IRQ)
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
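	/*
	 * The shift left above works as a push: the previous flags are
	 * kept in the upper bits and restored by the matching shift right
	 * once the handler returns.
	 */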

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a2, a2, THREAD_CLF_SAVED_SHIFT
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* We are going to XRET to kernel mode. Set XSCRATCH as 0 */
	csrw	CSR_XSCRATCH, 0
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

exception_from_kernel:
	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
	li	a1, (THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	and	a1, a0, a1
	bnez	a1, sel_tmp_sp
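	/*
	 * If THREAD_CLF_ABORT was already set in the saved (shifted)
	 * flags, this abort was taken while already handling an abort on
	 * the abort stack, so the tmp stack is used instead.
	 */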

	/* Select abort stack */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_A1
	j	set_sp

sel_tmp_sp:
	/* We have an abort while using the abort stack, select tmp stack */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_A1
	ori	a0, a0, THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

set_sp:
	mv	sp, a1
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Save state on stack
	 */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save kernel sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore kernel a0, a1 so they can be saved below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP
	store_xregs sp, THREAD_ABT_REG_TP, REG_TP
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* We are going to XRET to kernel mode. Set XSCRATCH as 0 */
	csrw	CSR_XSCRATCH, 0

	/* Update core local flags */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Restore all GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_kernel

LOCAL_FUNC trap_from_user, :
	/* Save user sp, a0, a1 into temporary spaces of thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_user

interrupt_from_user:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore user a0, a1 so they can be saved below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception, thread_trap_vect() knows the trap was taken from the
	 * kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_CTX_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get interrupt code from XCAUSE and build XIP. For example, if the
	 * value of XCAUSE is 0x8000000000000005 (supervisor timer interrupt),
	 * we build 0x20, which is (1 << 5) and indicates the sip.STIP signal.
	 */
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Compare built XIP with THREAD_EXCP_FOREIGN_INTR. If XIP is one of
	 * THREAD_EXCP_FOREIGN_INTR, call thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_user

foreign_interrupt_from_user:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will not
	 * return to here.
	 */
	tail	thread_foreign_interrupt_handler

native_interrupt_from_user:
	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a2, a2, THREAD_CLF_SAVED_SHIFT
	ori	a2, a2, (THREAD_CLF_TMP | THREAD_CLF_IRQ)
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a2, a2, THREAD_CLF_SAVED_SHIFT
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Set scratch as thread_core_local */
	csrw	CSR_XSCRATCH, tp
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

exception_from_user:
	/* a0 is CSR_XCAUSE */
	li	a1, CAUSE_USER_ECALL
	bne	a0, a1, abort_from_user
ecall_from_user:
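	/*
	 * An ECALL from U-mode is the system call path: build a struct
	 * thread_scall_regs frame on the kernel stack recorded in the
	 * thread context and pass it to thread_scall_handler() below.
	 */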
	/* Load and set kernel sp from thread context */
	get_thread_ctx a0, a1
	load_xregs a0, THREAD_CTX_KERN_SP, REG_SP

	/* Now sp is kernel sp, create stack for struct thread_scall_regs */
	addi	sp, sp, -THREAD_SCALL_REGS_SIZE
	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_SCALL_REG_SP, REG_A0

	/* Restore user a0, a1 so they can be saved below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception, thread_trap_vect() knows the trap was taken from the
	 * kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_SCALL_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop

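	/*
	 * Only caller-saved registers need to be stored here: the
	 * callee-saved s-registers are preserved across the C call to
	 * thread_scall_handler() by the RISC-V calling convention.
	 */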
	/* Save other caller-saved registers */
	store_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	store_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	a0, CSR_XIE
	store_xregs sp, THREAD_SCALL_REG_IE, REG_A0
	/* Mask all interrupts */
	csrw	CSR_XIE, zero
	/* Save XSTATUS */
	csrr	a0, CSR_XSTATUS
	store_xregs sp, THREAD_SCALL_REG_STATUS, REG_A0
	/* Save XEPC */
	csrr	a0, CSR_XEPC
	store_xregs sp, THREAD_SCALL_REG_EPC, REG_A0

	/*
	 * a0 = struct thread_scall_regs *regs
	 * Call thread_scall_handler(regs)
	 */
	mv	a0, sp
	call	thread_scall_handler

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	get_thread_ctx a0, a1
	addi	t0, sp, THREAD_SCALL_REGS_SIZE
	store_xregs a0, THREAD_CTX_KERN_SP, REG_T0

	/* Restore XEPC */
	load_xregs sp, THREAD_SCALL_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_SCALL_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_SCALL_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Check previous privilege mode by status.SPP */
	csrr	t0, CSR_XSTATUS
	b_if_prev_priv_is_u t0, 1f
	/*
	 * We are going to XRET to kernel mode.
	 * XSCRATCH is already zero to indicate that we are in kernel mode.
	 * We must keep kernel gp & tp, so skip restoring user gp & tp.
	 */
	j	2f
1:
	/*
	 * We are going to XRET to user mode.
	 * XSCRATCH must be tp (thread_core_local) to be used in the next
	 * trap. We also need to restore user gp & tp.
	 */
	csrw	CSR_XSCRATCH, tp
	load_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	load_xregs sp, THREAD_SCALL_REG_TP, REG_TP
2:
	/* Restore remaining caller-saved registers */
	load_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	load_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_SCALL_REG_SP, REG_SP
	XRET

abort_from_user:
	/*
	 * Update core local flags
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Save state on stack
	 */

	/* Load abt_stack_va_end and set it as sp */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_SP

	/* Now sp is abort sp, create stack for struct thread_abort_regs */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore user a0, a1 so they can be saved below */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception, thread_trap_vect() knows the trap was taken from the
	 * kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_ABT_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0

	/* Update core local flags */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Check previous privilege mode by status.SPP */
	csrr	t0, CSR_XSTATUS
	b_if_prev_priv_is_u t0, 1f
	/*
	 * We are going to XRET to kernel mode.
	 * XSCRATCH is already zero to indicate that we are in kernel mode.
	 * We must keep kernel gp & tp, so skip restoring user gp & tp.
	 */
	j	2f
1:
	/*
	 * We are going to XRET to user mode.
	 * XSCRATCH must be tp (thread_core_local) to be used in the next
	 * trap. We also need to restore user gp & tp.
	 */
	csrw	CSR_XSCRATCH, tp
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
2:
	/* Restore remaining GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_user

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *				uint32_t exit_status1);
 * See description in thread.h
 */
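/*
 * Note that a0 (ret) is left untouched below so that it becomes the
 * return value when we return on behalf of __thread_enter_user_mode().
 */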
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A3, REG_A5
	sw	a1, (a4)
	sw	a2, (a5)
	/* Save user callee-saved regs */
	store_xregs a3, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs a3, THREAD_CTX_REG_S2, REG_S2, REG_S11
	/* Restore kernel ra (thread_enter_user_mode()) & callee-saved regs */
	load_xregs sp, THREAD_USER_MODE_REC_RA, REG_RA
	load_xregs sp, THREAD_USER_MODE_REC_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_USER_MODE_REC_S2, REG_S2, REG_S11
	add	sp, sp, THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 */
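/*
 * Pairs with thread_unwind_user_mode(): the return address and
 * callee-saved registers stored in the thread_user_mode_rec below are
 * what thread_unwind_user_mode() restores to get back here.
 */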
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	addi	sp, sp, -THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A0, REG_A2
	store_xregs sp, THREAD_USER_MODE_REC_RA, REG_RA
	store_xregs sp, THREAD_USER_MODE_REC_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_USER_MODE_REC_S2, REG_S2, REG_S11

	/*
	 * Save the kernel stack pointer in the thread context
	 */

	/* Get pointer to current thread context */
	get_thread_ctx s0, s1

	/*
	 * Save kernel stack pointer to ensure that
	 * exception_from_user() uses the correct stack pointer.
	 */
	store_xregs s0, THREAD_CTX_KERN_SP, REG_SP
	/*
	 * Save thread_core_local in XSCRATCH to ensure that thread_trap_vect()
	 * uses the correct core local structure.
	 */
	csrw	CSR_XSCRATCH, tp

	/* Move struct thread_ctx_regs *regs to sp to reduce code size */
	mv	sp, a0

	/* Set exception return PC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_S0
	csrw	CSR_XEPC, s0
	/* Set user status */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_S0
	csrw	CSR_XSTATUS, s0
	/* Set user ie */
	load_xregs sp, THREAD_CTX_REG_IE, REG_S0
	csrw	CSR_XIE, s0
	/* Load the rest of the general-purpose registers */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP /* sp must be last one */

	/* Jump into user mode */
	XRET
END_FUNC __thread_enter_user_mode

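/*
 * Restore a full register context saved by the trap handlers above and
 * resume it with XRET; the saved status.SPP decides whether we resume
 * in kernel or user mode.
 */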
/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	/* Move struct thread_ctx_regs *regs to sp to reduce code size */
	mv	sp, a0

	/* Restore epc */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore status */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Restore ie */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0

	/* Check previous privilege mode by status.SPP */
	csrr	t0, CSR_XSTATUS
	b_if_prev_priv_is_u t0, 1f
	/* Set scratch as zero to indicate that we are in kernel mode */
	csrw	CSR_XSCRATCH, zero
	j	2f
1:
	/* Resume to U-mode, set scratch as tp to be used in the trap handler */
	csrw	CSR_XSCRATCH, tp
2:
	/* Restore all general-purpose registers */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP /* sp must be last one */

	XRET
END_FUNC thread_resume

/* void thread_foreign_interrupt_handler(struct thread_ctx_regs *regs) */
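/*
 * Suspend the current thread via thread_state_suspend() and return to
 * the untrusted domain with OPTEE_ABI_RETURN_RPC_FOREIGN_INTR, passing
 * the suspended thread ID so the thread can be resumed later.
 */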
FUNC thread_foreign_interrupt_handler , :
	/* Update 32-bit core local flags */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, (THREAD_CLF_TMP | THREAD_CLF_FIQ)
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Mark the current thread as suspended.
	 * a0 = THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	 * a1 = status
	 * a2 = epc
	 * thread_state_suspend(flags, status, pc)
	 */
	LDR	a1, THREAD_CTX_REG_STATUS(a0)
	LDR	a2, THREAD_CTX_REG_EPC(a0)
	li	a0, THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	call	thread_state_suspend
	/* Now return value a0 contains the suspended thread ID. */

	/* Update core local flags */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, THREAD_CLF_TMP
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Pass the suspended thread ID along (in a4) to the untrusted domain. */
	mv	a4, a0
	li	a0, TEEABI_OPTEED_RETURN_CALL_DONE
	li	a1, OPTEE_ABI_RETURN_RPC_FOREIGN_INTR
	li	a2, 0
	li	a3, 0
	li	a5, 0
	j	thread_return_to_udomain
END_FUNC thread_foreign_interrupt_handler