xref: /optee_os/core/arch/riscv/kernel/thread_rv.S (revision 5232a3488aebb96330bf4085be9daf44d3b9f919)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright 2022-2023 NXP
4 * Copyright 2024 Andes Technology Corporation
5 */
6
7#include <asm.S>
8#include <generated/asm-defines.h>
9#include <keep.h>
10#include <kernel/thread.h>
11#include <kernel/thread_private.h>
12#include <mm/core_mmu.h>
13#include <riscv.h>
14#include <riscv_macros.S>
15#include <tee/optee_abi.h>
16#include <tee/teeabi_opteed.h>
17#include <tee/teeabi_opteed_macros.h>
18
/*
 * Compute the address of the currently running thread's struct thread
 * (entry of the global threads[] array) into \res:
 *   \res = threads + curr_thread * THREAD_CTX_SIZE
 * where curr_thread is read from this core's thread_core_local (tp).
 * \tmp0 is clobbered (used as a countdown loop counter).
 * NOTE(review): the multiply is done with an add loop rather than mul.
 */
.macro get_thread_ctx res, tmp0
	lw	\tmp0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
	la	\res, threads
1:
	/* Skip the loop entirely when curr_thread == 0 */
	beqz	\tmp0, 2f
	addi	\res, \res, THREAD_CTX_SIZE
	addi	\tmp0, \tmp0, -1
	bnez	\tmp0, 1b
2:
.endm
29
/*
 * Branch to \label if the privilege level we trapped from was U-mode,
 * i.e. if the SPP field of the xstatus value held in \reg is 0.
 * \reg must contain a (saved) xstatus value and is clobbered.
 */
.macro b_if_prev_priv_is_u reg, label
	andi	\reg, \reg, CSR_XSTATUS_SPP
	beqz	\reg, \label
.endm
34
/* size_t __get_core_pos(void); */
FUNC __get_core_pos , : , .identity_map
	/* Return the hart ID cached in this core's thread_core_local (tp) */
	lw	a0, THREAD_CORE_LOCAL_HART_ID(tp)
	ret
END_FUNC __get_core_pos
40
/*
 * Common trap vector.
 *
 * Convention used throughout this file: while executing in kernel mode
 * CSR_XSCRATCH holds 0; while executing in user mode it holds the
 * address of this core's struct thread_core_local. Swapping tp with
 * XSCRATCH therefore tells us which world we trapped from:
 * - tp == 0 after the swap: trap from kernel, swap back so tp keeps its
 *   kernel value and XSCRATCH stays 0.
 * - tp != 0 after the swap: trap from user, tp now points at
 *   thread_core_local and XSCRATCH temporarily holds the user tp.
 */
FUNC thread_trap_vect , :
	csrrw	tp, CSR_XSCRATCH, tp
	bnez	tp, 0f
	/* Read tp back */
	csrrw	tp, CSR_XSCRATCH, tp
	j	trap_from_kernel
0:
	/* Now tp is thread_core_local */
	j	trap_from_user
thread_trap_vect_end:
END_FUNC thread_trap_vect
52
/*
 * Trap taken while executing in kernel mode (XSCRATCH was 0).
 * On entry tp already holds this core's struct thread_core_local.
 *
 * Interrupts: the full GPR state is saved into the current thread's
 * struct thread_ctx_regs and dispatched to the native or foreign
 * interrupt handler. Exceptions: state is saved in a struct
 * thread_abort_regs on the abort stack (or the tmp stack for a nested
 * abort) and handed to abort_handler().
 */
LOCAL_FUNC trap_from_kernel, :
	/* Save sp, a0, a1 into temporary spaces of thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_kernel

interrupt_from_kernel:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Load the interrupted kernel sp from the scratch slot and save it */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore kernel a0, a1 (stashed at entry) so they can be saved */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get interrupt code from XCAUSE and build XIP. For example, if the
	 * value of XCAUSE is 0x8000000000000005 (supervisor timer interrupt),
	 * we build 0x20, which is (1 << 5) and indicates the sip.STIP signal.
	 */
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Compare built XIP with THREAD_EXCP_FOREIGN_INTR. If XIP is one of
	 * THREAD_EXCP_FOREIGN_INTR, we call thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_kernel

foreign_interrupt_from_kernel:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will not
	 * return to here.
	 */
	tail	thread_foreign_interrupt_handler

native_interrupt_from_kernel:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Set scratch as thread_core_local */
	csrw	CSR_XSCRATCH, tp
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

exception_from_kernel:
	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
	/*
	 * If the saved (shifted) flags already contain THREAD_CLF_ABORT we
	 * aborted while handling an abort: use the tmp stack instead of
	 * re-using the abort stack.
	 */
	li	a1, (THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	and	a1, a0, a1
	bnez	a1, sel_tmp_sp

	/* Select abort stack */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_A1
	j	set_sp

sel_tmp_sp:
	/* We have an abort while using the abort stack, select tmp stack */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_A1
	ori	a0, a0, THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

set_sp:
	mv	sp, a1
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Save state on stack
	 */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save the interrupted kernel sp stashed at function entry */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore kernel a0, a1 (stashed at entry) so they can be saved */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP
	store_xregs sp, THREAD_ABT_REG_TP, REG_TP
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Set scratch as thread_core_local */
	csrw	CSR_XSCRATCH, tp

	/* Update core local flags: pop the saved flags back */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Restore all GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_kernel
264
/*
 * Trap taken while executing in user mode.
 * On entry tp holds this core's struct thread_core_local (swapped in by
 * thread_trap_vect()) and CSR_XSCRATCH temporarily holds the user tp.
 *
 * Three paths:
 * - interrupt_from_user: save full GPR state in struct thread_ctx_regs
 *   and dispatch to the native/foreign interrupt handler.
 * - ecall_from_user: system call, save caller-saved state in a struct
 *   thread_scall_regs on the kernel stack and call thread_scall_handler().
 * - abort_from_user: any other exception, save state in a struct
 *   thread_abort_regs on the abort stack and call abort_handler().
 */
LOCAL_FUNC trap_from_user, :
	/* Save user sp, a0, a1 into temporary spaces of thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_user

interrupt_from_user:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore user a0, a1 which can be saved later */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception thread_trap_vect() knows that it is emitted from kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_CTX_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get interrupt code from XCAUSE and build XIP. For example, if the
	 * value of XCAUSE is 0x8000000000000005 (supervisor timer interrupt),
	 * we build 0x20, which is (1 << 5) and indicates the sip.STIP signal.
	 */
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Compare built XIP with THREAD_EXCP_FOREIGN_INTR. If XIP is one of
	 * THREAD_EXCP_FOREIGN_INTR, call thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_user

foreign_interrupt_from_user:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will not
	 * return to here.
	 */
	tail	thread_foreign_interrupt_handler

native_interrupt_from_user:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Set scratch as thread_core_local (we return to user mode) */
	csrw	CSR_XSCRATCH, tp
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

exception_from_user:
	/* a0 is CSR_XCAUSE */
	li	a1, CAUSE_USER_ECALL
	bne	a0, a1, abort_from_user
ecall_from_user:
	/* Load and set kernel sp from thread context */
	get_thread_ctx a0, a1
	load_xregs a0, THREAD_CTX_KERN_SP, REG_SP

	/* Now sp is kernel sp, create stack for struct thread_scall_regs */
	addi	sp, sp, -THREAD_SCALL_REGS_SIZE
	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_SCALL_REG_SP, REG_A0

	/* Restore user a0, a1 which can be saved later */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception thread_trap_vect() knows that it is emitted from kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_SCALL_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop

	/* Save other caller-saved registers */
	store_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	store_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	a0, CSR_XIE
	store_xregs sp, THREAD_SCALL_REG_IE, REG_A0
	/* Mask all interrupts */
	csrw	CSR_XIE, zero
	/* Save XSTATUS */
	csrr	a0, CSR_XSTATUS
	store_xregs sp, THREAD_SCALL_REG_STATUS, REG_A0
	/* Save XEPC */
	csrr	a0, CSR_XEPC
	store_xregs sp, THREAD_SCALL_REG_EPC, REG_A0

	/*
	 * a0 = struct thread_scall_regs *regs
	 * Call thread_scall_handler(regs)
	 */
	mv	a0, sp
	call	thread_scall_handler

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	get_thread_ctx a0, a1
	addi	t0, sp, THREAD_SCALL_REGS_SIZE
	store_xregs a0, THREAD_CTX_KERN_SP, REG_T0

	/*
	 * We are returning to U-Mode. On return, the program counter
	 * is set to xepc (pc=xepc), so we add 4 (size of an instruction)
	 * to continue at the instruction after the ecall.
	 */
	load_xregs sp, THREAD_SCALL_REG_EPC, REG_T0
	addi	t0, t0, 4
	csrw	CSR_XEPC, t0

	/* Restore XIE */
	load_xregs sp, THREAD_SCALL_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_SCALL_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Set scratch as thread_core_local (we return to user mode) */
	csrw	CSR_XSCRATCH, tp
	/* Restore caller-saved registers */
	load_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	load_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	load_xregs sp, THREAD_SCALL_REG_TP, REG_TP
	load_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_SCALL_REG_SP, REG_SP
	XRET

abort_from_user:
	/*
	 * Update core local flags:
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Save state on stack
	 */

	/* Load abt_stack_va_end and set it as sp */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_SP

	/* Now sp is abort sp, create stack for struct thread_abort_regs */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore user a0, a1 which can be saved later */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception thread_trap_vect() knows that it is emitted from kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_ABT_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Set scratch as thread_core_local (we return to user mode) */
	csrw	CSR_XSCRATCH, tp

	/* Update core local flags: pop the saved flags back */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Restore all GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_user
587
/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 *
 * Runs on the kernel stack set up by __thread_enter_user_mode(): sp
 * points at the struct thread_user_mode_rec it pushed. a0 (ret) is
 * left untouched and becomes the return value of
 * __thread_enter_user_mode().
 */
FUNC thread_unwind_user_mode , :

	/*
	 * Load a3 = ctx_regs pointer, a4 = &exit_status0, a5 = &exit_status1
	 * from the thread_user_mode_rec, then store the exit statuses.
	 */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A3, REG_A5
	sw	a1, (a4)
	sw	a2, (a5)

	/* Save user callee regs */
	store_xregs a3, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs a3, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs a3, THREAD_CTX_REG_SP, REG_SP, REG_TP

	/* Restore kernel callee regs */
	mv	a1, sp

	load_xregs a1, THREAD_USER_MODE_REC_X1, REG_RA, REG_GP
	load_xregs a1, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	load_xregs a1, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11

	/* Pop the thread_user_mode_rec */
	add	sp, sp, THREAD_USER_MODE_REC_SIZE

	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
617
/*
 * void thread_exit_user_mode(unsigned long a0, unsigned long a1,
 *			       unsigned long a2, unsigned long a3,
 *			       unsigned long sp, unsigned long pc,
 *			       unsigned long status);
 *
 * Returns to kernel mode at @pc with kernel stack @sp and xstatus
 * @status, passing a0-a3 through unchanged.
 */
FUNC thread_exit_user_mode , :
	/* Set kernel stack pointer */
	mv	sp, a4

	/* Set xSTATUS */
	csrw	CSR_XSTATUS, a6

	/*
	 * Zeroize xSCRATCH to indicate to thread_trap_vect()
	 * that we are executing in kernel.
	 */
	csrw	CSR_XSCRATCH, zero

	/*
	 * Mask all interrupts first. Interrupts will be unmasked after
	 * returning from __thread_enter_user_mode().
	 */
	csrw	CSR_XIE, zero

	/* Set epc to the supplied pc (typically thread_unwind_user_mode()) */
	csrw	CSR_XEPC, a5

	XRET
END_FUNC thread_exit_user_mode
648
/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * Saves the kernel callee context in a struct thread_user_mode_rec on
 * the kernel stack, records the kernel sp in the thread context, then
 * loads the user register state from @regs and enters user mode via
 * XRET. Control comes back through thread_unwind_user_mode(), which
 * pops the record and returns the exit code in a0.
 */
FUNC __thread_enter_user_mode , :
	/* Disable kernel mode exceptions first */
	csrc	CSR_XSTATUS, CSR_XSTATUS_IE

	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	addi	sp, sp, -THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A0, REG_A2
	store_xregs sp, THREAD_USER_MODE_REC_X1, REG_RA, REG_GP
	store_xregs sp, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	store_xregs sp, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11

	/*
	 * Save the kernel stack pointer in the thread context
	 */

	/* Get pointer to current thread context */
	get_thread_ctx s0, s1

	/*
	 * Save kernel stack pointer to ensure that
	 * thread_exit_user_mode() uses correct stack pointer.
	 */

	store_xregs s0, THREAD_CTX_KERN_SP, REG_SP
	/*
	 * Save thread_core_local in xSCRATCH to ensure that thread_trap_vect()
	 * uses correct core local structure.
	 */
	csrw	CSR_XSCRATCH, tp

	/* Set user ie */
	load_xregs a0, THREAD_CTX_REG_IE, REG_S0
	csrw	CSR_XIE, s0

	/* Set user status */
	load_xregs a0, THREAD_CTX_REG_STATUS, REG_S0
	csrw	CSR_XSTATUS, s0

	/* Load the rest of the general purpose registers */
	load_xregs a0, THREAD_CTX_REG_RA, REG_RA, REG_TP
	load_xregs a0, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs a0, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs a0, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs a0, THREAD_CTX_REG_A0, REG_A0, REG_A7

	/* Set exception program counter (user entry point from regs->ra) */
	csrw		CSR_XEPC, ra

	/* Jump into user mode */
	XRET
END_FUNC __thread_enter_user_mode
708
/*
 * void thread_resume(struct thread_ctx_regs *regs)
 *
 * Restores the full register state from @regs and resumes execution at
 * regs->epc via XRET. The previous privilege level is taken from the
 * saved status.SPP bit and determines the xscratch convention (0 for
 * kernel, thread_core_local for user). Does not return.
 */
FUNC thread_resume , :
	/* Disable global interrupts first */
	csrc	CSR_XSTATUS, CSR_XSTATUS_IE

	/* Restore epc */
	load_xregs a0, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0

	/* Restore ie */
	load_xregs a0, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0

	/* Restore status */
	load_xregs a0, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0

	/* Check the previous privilege mode via status.SPP (t0 clobbered) */
	b_if_prev_priv_is_u t0, 1f
	/* Set scratch as zero to indicate that we are in kernel mode */
	csrw	CSR_XSCRATCH, zero
	j	2f
1:
	/* Resume to U-mode, set scratch as tp to be used in the trap handler */
	csrw	CSR_XSCRATCH, tp
2:
	/* Restore all general-purpose registers */
	load_xregs a0, THREAD_CTX_REG_RA, REG_RA, REG_TP
	load_xregs a0, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs a0, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs a0, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs a0, THREAD_CTX_REG_A0, REG_A0, REG_A7

	XRET
END_FUNC thread_resume
745
/*
 * void thread_foreign_interrupt_handler(struct thread_ctx_regs *regs)
 *
 * Handles a foreign interrupt by suspending the current thread
 * (thread_state_suspend()) and returning to the untrusted domain with
 * an OPTEE_ABI_RETURN_RPC_FOREIGN_INTR RPC. Does not return; runs on
 * the tmp stack (set by the caller). Entered by tail call from the
 * trap handlers.
 */
FUNC thread_foreign_interrupt_handler , :
	/* Update 32-bit core local flags */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, (THREAD_CLF_TMP | THREAD_CLF_FIQ)
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Mark current thread as suspended.
	 * a0 = THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	 * a1 = status (from regs->status)
	 * a2 = epc (from regs->epc)
	 * thread_state_suspend(flags, status, pc)
	 */
	LDR	a1, THREAD_CTX_REG_STATUS(a0)
	LDR	a2, THREAD_CTX_REG_EPC(a0)
	li	a0, THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	call	thread_state_suspend
	/* Now return value a0 contains suspended thread ID. */

	/* Update core local flags: pop saved flags, keep THREAD_CLF_TMP */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, THREAD_CLF_TMP
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Passing thread index in a4, and return to untrusted domain. */
	mv	a4, a0
	li	a0, TEEABI_OPTEED_RETURN_CALL_DONE
	li	a1, OPTEE_ABI_RETURN_RPC_FOREIGN_INTR
	li	a2, 0
	li	a3, 0
	li	a5, 0
	j	thread_return_to_udomain
END_FUNC thread_foreign_interrupt_handler
782