/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright 2022-2023 NXP
 * Copyright 2024 Andes Technology Corporation
 */

#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <riscv.h>
#include <riscv_macros.S>
#include <tee/optee_abi.h>
#include <tee/teeabi_opteed.h>
#include <tee/teeabi_opteed_macros.h>

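/*
 * Compute the address of the current thread's struct thread_ctx in \res,
 * clobbering \tmp0. Roughly the following C (a sketch, assuming "threads"
 * is the global array of struct thread_ctx):
 *
 *	res = &threads[thread_core_local->curr_thread];
 *
 * The multiplication by THREAD_CTX_SIZE is done with a loop of additions.
 */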
.macro get_thread_ctx res, tmp0
	lw	\tmp0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
	la	\res, threads
1:
	beqz	\tmp0, 2f
	addi	\res, \res, THREAD_CTX_SIZE
	addi	\tmp0, \tmp0, -1
	bnez	\tmp0, 1b
2:
.endm

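/*
 * Branch to \label if the xstatus value in \reg says the previous
 * privilege level was U-mode (xstatus.SPP == 0). \reg is clobbered.
 */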
.macro b_if_prev_priv_is_u reg, label
	andi	\reg, \reg, CSR_XSTATUS_SPP
	beqz	\reg, \label
.endm

/* size_t __get_core_pos(void); */
FUNC __get_core_pos , : , .identity_map
	lw	a0, THREAD_CORE_LOCAL_HART_ID(tp)
	ret
END_FUNC __get_core_pos

FUNC thread_trap_vect , :
	csrrw	tp, CSR_XSCRATCH, tp
	bnez	tp, 0f
	/* Read tp back */
	csrrw	tp, CSR_XSCRATCH, tp
	j	trap_from_kernel
0:
	/* Now tp is thread_core_local */
	j	trap_from_user
thread_trap_vect_end:
END_FUNC thread_trap_vect
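
/*
 * xSCRATCH convention used by this trap vector (as set up by the code in
 * this file): it holds 0 while executing in the kernel and the per-hart
 * struct thread_core_local pointer while executing in user mode. The
 * csrrw above therefore tells a kernel trap from a user trap by whether
 * the swapped-in value is zero.
 */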

LOCAL_FUNC trap_from_kernel, :
	/* Save sp, a0, a1 into temporary spaces of thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_kernel

interrupt_from_kernel:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Load and save kernel sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore kernel a0, a1; they are saved again below with the other GPRs */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get the interrupt code from XCAUSE and build the XIP bit. For
	 * example, if the value of XCAUSE is 0x8000000000000005 (supervisor
	 * timer interrupt), we build 0x20, which is (1 << 5) and corresponds
	 * to the sip.STIP bit.
	 */
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Compare the built XIP bit with THREAD_EXCP_FOREIGN_INTR. If it
	 * matches one of THREAD_EXCP_FOREIGN_INTR, call
	 * thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_kernel
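
/*
 * The classification above as a C sketch (RV64 assumed, names as in this
 * file):
 *
 *	unsigned long code = xcause - CSR_XCAUSE_INTR_FLAG;
 *	unsigned long xip = 1UL << code;
 *
 *	if (xip & THREAD_EXCP_FOREIGN_INTR)
 *		thread_foreign_interrupt_handler(regs);	// no return
 *	else
 *		thread_native_interrupt_handler(regs, xcause);
 */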

foreign_interrupt_from_kernel:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will
	 * not return here.
	 */
	tail	thread_foreign_interrupt_handler

native_interrupt_from_kernel:
	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a2, a2, THREAD_CLF_SAVED_SHIFT
	ori	a2, a2, (THREAD_CLF_TMP | THREAD_CLF_IRQ)
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a2, a2, THREAD_CLF_SAVED_SHIFT
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Set scratch as thread_core_local */
	csrw	CSR_XSCRATCH, tp
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

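/*
 * Aborts from the kernel run on the per-hart abort stack. If
 * THREAD_CLF_ABORT was already set in the saved flags (i.e. this abort
 * hit while an earlier abort was being handled), fall back to the tmp
 * stack so the nested abort can still be handled.
 */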
exception_from_kernel:
	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
	li	a1, (THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	and	a1, a0, a1
	bnez	a1, sel_tmp_sp

	/* Select abort stack */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_A1
	j	set_sp

sel_tmp_sp:
	/* We have an abort while using the abort stack, select tmp stack */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_A1
	ori	a0, a0, THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

set_sp:
	mv	sp, a1
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Save state on stack
	 */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save kernel sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore kernel a0, a1; they are saved again below with the other GPRs */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP
	store_xregs sp, THREAD_ABT_REG_TP, REG_TP
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Set scratch as thread_core_local */
	csrw	CSR_XSCRATCH, tp

	/* Update core local flags */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Restore all GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_kernel

LOCAL_FUNC trap_from_user, :
	/* Save user sp, a0, a1 into temporary spaces of thread_core_local */
	store_xregs tp, THREAD_CORE_LOCAL_X0, REG_SP
	store_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	csrr	a0, CSR_XCAUSE
	/* MSB of cause differentiates between interrupts and exceptions */
	bge	a0, zero, exception_from_user

interrupt_from_user:
	/* Get thread context as sp */
	get_thread_ctx sp, a0

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_CTX_REG_SP, REG_A0

	/* Restore user a0, a1; they are saved again below with the other GPRs */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_CTX_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that, in case of a recursive
	 * exception, thread_trap_vect() knows the trap was taken from
	 * the kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_CTX_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
	/* Save all other GPRs */
	store_xregs sp, THREAD_CTX_REG_RA, REG_RA
	store_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_CTX_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_CTX_REG_EPC, REG_T0

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 */
	mv	a0, sp
	csrr	a1, CSR_XCAUSE
	/* Load tmp_stack_va_end as current sp. */
	load_xregs tp, THREAD_CORE_LOCAL_TMP_STACK_VA_END, REG_SP

	/*
	 * Get the interrupt code from XCAUSE and build the XIP bit. For
	 * example, if the value of XCAUSE is 0x8000000000000005 (supervisor
	 * timer interrupt), we build 0x20, which is (1 << 5) and corresponds
	 * to the sip.STIP bit.
	 */
	li	a2, CSR_XCAUSE_INTR_FLAG
	sub	a2, a1, a2
	li	a3, 1
	sll	a3, a3, a2
	/*
	 * Compare the built XIP bit with THREAD_EXCP_FOREIGN_INTR. If it
	 * matches one of THREAD_EXCP_FOREIGN_INTR, call
	 * thread_foreign_interrupt_handler().
	 */
	li	a2, THREAD_EXCP_FOREIGN_INTR
	and	a2, a3, a2
	beqz	a2, native_interrupt_from_user

foreign_interrupt_from_user:
	/*
	 * a0 = struct thread_ctx_regs *regs
	 * Tail call thread_foreign_interrupt_handler(regs) since we will
	 * not return here.
	 */
	tail	thread_foreign_interrupt_handler

native_interrupt_from_user:
	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a2, a2, THREAD_CLF_SAVED_SHIFT
	ori	a2, a2, (THREAD_CLF_TMP | THREAD_CLF_IRQ)
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * a0 = struct thread_ctx_regs *regs
	 * a1 = cause
	 * Call thread_native_interrupt_handler(regs, cause)
	 */
	call	thread_native_interrupt_handler

	/* Update 32-bit core local flags */
	lw	a2, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a2, a2, THREAD_CLF_SAVED_SHIFT
	sw	a2, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Get thread context as sp */
	get_thread_ctx sp, t0
	/* Restore XEPC */
	load_xregs sp, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Set scratch as thread_core_local */
	csrw	CSR_XSCRATCH, tp
	/* Restore all GPRs */
	load_xregs sp, THREAD_CTX_REG_RA, REG_RA
	load_xregs sp, THREAD_CTX_REG_GP, REG_GP
	load_xregs sp, THREAD_CTX_REG_TP, REG_TP
	load_xregs sp, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_CTX_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_CTX_REG_SP, REG_SP
	XRET

exception_from_user:
	/* a0 holds the XCAUSE value */
	li	a1, CAUSE_USER_ECALL
	bne	a0, a1, abort_from_user
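/*
 * System calls from user mode (ecall) are handled here: the trapped
 * context is saved in a struct thread_scall_regs frame on the thread's
 * kernel stack and passed to thread_scall_handler().
 */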
ecall_from_user:
	/* Load and set kernel sp from thread context */
	get_thread_ctx a0, a1
	load_xregs a0, THREAD_CTX_KERN_SP, REG_SP

	/* Now sp is kernel sp, create stack for struct thread_scall_regs */
	addi	sp, sp, -THREAD_SCALL_REGS_SIZE
	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_SCALL_REG_SP, REG_A0

	/* Restore user a0, a1; they are saved again below with the other GPRs */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	/*
	 * Set the scratch register to 0 so that, in case of a recursive
	 * exception, thread_trap_vect() knows the trap was taken from
	 * the kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_SCALL_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop

	/* Save other caller-saved registers */
	store_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	store_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	a0, CSR_XIE
	store_xregs sp, THREAD_SCALL_REG_IE, REG_A0
	/* Mask all interrupts */
	csrw	CSR_XIE, zero
	/* Save XSTATUS */
	csrr	a0, CSR_XSTATUS
	store_xregs sp, THREAD_SCALL_REG_STATUS, REG_A0
	/* Save XEPC */
	csrr	a0, CSR_XEPC
	store_xregs sp, THREAD_SCALL_REG_EPC, REG_A0

	/*
	 * a0 = struct thread_scall_regs *regs
	 * Call thread_scall_handler(regs)
	 */
	mv	a0, sp
	call	thread_scall_handler

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in
	 * this field.
	 */
	get_thread_ctx a0, a1
	addi	t0, sp, THREAD_SCALL_REGS_SIZE
	store_xregs a0, THREAD_CTX_KERN_SP, REG_T0

	/*
	 * We are returning to U-mode. On return the program counter is
	 * set to XEPC (pc = xepc), so add 4 (the size of the ecall
	 * instruction, which has no compressed form) to continue at the
	 * instruction following the ecall.
	 */
	load_xregs sp, THREAD_SCALL_REG_EPC, REG_T0
	addi	t0, t0, 4
	csrw	CSR_XEPC, t0

	/* Restore XIE */
	load_xregs sp, THREAD_SCALL_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_SCALL_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Set scratch as thread_core_local */
	csrw	CSR_XSCRATCH, tp
	/* Restore caller-saved registers */
	load_xregs sp, THREAD_SCALL_REG_RA, REG_RA
	load_xregs sp, THREAD_SCALL_REG_GP, REG_GP
	load_xregs sp, THREAD_SCALL_REG_TP, REG_TP
	load_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_SCALL_REG_SP, REG_SP
	XRET

abort_from_user:
	/*
	 * Update core local flags
	 */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	a0, a0, THREAD_CLF_SAVED_SHIFT
	ori	a0, a0, THREAD_CLF_ABORT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Save state on stack
	 */

	/* Load abt_stack_va_end and set it as sp */
	load_xregs tp, THREAD_CORE_LOCAL_ABT_STACK_VA_END, REG_SP

	/* Now sp is abort sp, create stack for struct thread_abort_regs */
	addi	sp, sp, -THREAD_ABT_REGS_SIZE

	/* Save user sp */
	load_xregs tp, THREAD_CORE_LOCAL_X0, REG_A0
	store_xregs sp, THREAD_ABT_REG_SP, REG_A0

	/* Restore user a0, a1; they are saved again below with the other GPRs */
	load_xregs tp, THREAD_CORE_LOCAL_X1, REG_A0, REG_A1

	/* Save user gp */
	store_xregs sp, THREAD_ABT_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that, in case of a recursive
	 * exception, thread_trap_vect() knows the trap was taken from
	 * the kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	/* Save user tp we previously swapped into CSR_XSCRATCH */
	store_xregs sp, THREAD_ABT_REG_TP, REG_GP
	/* Set kernel gp */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
	/* Save all other GPRs */
	store_xregs sp, THREAD_ABT_REG_RA, REG_RA
	store_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	store_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	store_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	store_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	store_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	/* Save XIE */
	csrr	t0, CSR_XIE
	store_xregs sp, THREAD_ABT_REG_IE, REG_T0
	/* Mask all interrupts */
	csrw	CSR_XIE, x0
	/* Save XSTATUS */
	csrr	t0, CSR_XSTATUS
	store_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	/* Save XEPC */
	csrr	t0, CSR_XEPC
	store_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	/* Save XTVAL */
	csrr	t0, CSR_XTVAL
	store_xregs sp, THREAD_ABT_REG_TVAL, REG_T0
	/* Save XCAUSE */
	csrr	a0, CSR_XCAUSE
	store_xregs sp, THREAD_ABT_REG_CAUSE, REG_A0

	/*
	 * a0 = cause
	 * a1 = sp (struct thread_abort_regs *regs)
	 * Call abort_handler(cause, regs)
	 */
	mv	a1, sp
	call	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Restore XEPC */
	load_xregs sp, THREAD_ABT_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0
	/* Restore XIE */
	load_xregs sp, THREAD_ABT_REG_IE, REG_T0
	csrw	CSR_XIE, t0
	/* Restore XSTATUS */
	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0
	/* Set scratch as thread_core_local */
	csrw	CSR_XSCRATCH, tp

	/* Update core local flags */
	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Restore all GPRs */
	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
	load_xregs sp, THREAD_ABT_REG_S2, REG_S2, REG_S11
	load_xregs sp, THREAD_ABT_REG_T3, REG_T3, REG_T6
	load_xregs sp, THREAD_ABT_REG_SP, REG_SP
	XRET
END_FUNC trap_from_user

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :

	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A3, REG_A5
	sw	a1, (a4)
	sw	a2, (a5)

	/* Save user callee regs */
	store_xregs a3, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs a3, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs a3, THREAD_CTX_REG_SP, REG_SP, REG_TP

	/* Restore kernel callee regs */
	mv	a1, sp

	load_xregs a1, THREAD_USER_MODE_REC_X1, REG_RA, REG_GP
	load_xregs a1, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	load_xregs a1, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11

	add	sp, sp, THREAD_USER_MODE_REC_SIZE

	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
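
/*
 * Note that thread_unwind_user_mode() returns through the struct
 * thread_user_mode_rec that __thread_enter_user_mode() below pushes on
 * the kernel stack, so the two functions must agree on its layout.
 */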

/*
 * void thread_exit_user_mode(unsigned long a0, unsigned long a1,
 *			       unsigned long a2, unsigned long a3,
 *			       unsigned long sp, unsigned long pc,
 *			       unsigned long status);
 */
FUNC thread_exit_user_mode , :
	/* Set kernel stack pointer */
	mv	sp, a4

	/* Set xSTATUS */
	csrw	CSR_XSTATUS, a6

	/*
	 * Zeroize xSCRATCH to indicate to thread_trap_vect()
	 * that we are executing in the kernel.
	 */
	csrw	CSR_XSCRATCH, zero

	/*
	 * Mask all interrupts first. Interrupts will be unmasked after
	 * returning from __thread_enter_user_mode().
	 */
	csrw	CSR_XIE, zero

	/* Set epc as thread_unwind_user_mode() */
	csrw	CSR_XEPC, a5

	XRET
END_FUNC thread_exit_user_mode

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 */
FUNC __thread_enter_user_mode , :
	/* Disable global interrupts first */
	csrc	CSR_XSTATUS, CSR_XSTATUS_IE

	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	addi	sp, sp, -THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A0, REG_A2
	store_xregs sp, THREAD_USER_MODE_REC_X1, REG_RA, REG_GP
	store_xregs sp, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	store_xregs sp, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11

	/*
	 * Save the kernel stack pointer in the thread context
	 */

	/* Get pointer to current thread context */
	get_thread_ctx s0, s1

	/*
	 * Save the kernel stack pointer so that thread_exit_user_mode()
	 * uses the correct stack pointer.
	 */
	store_xregs s0, THREAD_CTX_KERN_SP, REG_SP
	/*
	 * Save thread_core_local in xSCRATCH so that thread_trap_vect()
	 * uses the correct core local structure.
	 */
	csrw	CSR_XSCRATCH, tp

	/* Set user ie */
	load_xregs a0, THREAD_CTX_REG_IE, REG_S0
	csrw	CSR_XIE, s0

	/* Set user status */
	load_xregs a0, THREAD_CTX_REG_STATUS, REG_S0
	csrw	CSR_XSTATUS, s0

	/* Load the rest of the general purpose registers */
	load_xregs a0, THREAD_CTX_REG_RA, REG_RA, REG_TP
	load_xregs a0, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs a0, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs a0, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs a0, THREAD_CTX_REG_A0, REG_A0, REG_A7

	/* Set exception program counter */
	csrw	CSR_XEPC, ra

	/* Jump into user mode */
	XRET
END_FUNC __thread_enter_user_mode
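
/*
 * The round trip, roughly (see the C parts of the thread machinery for
 * the actual callers): __thread_enter_user_mode() pushes a struct
 * thread_user_mode_rec, saves the kernel sp in the thread context and
 * XRETs into user mode. When the TA exits, the kernel calls
 * thread_exit_user_mode() with that saved sp and a pc pointing at
 * thread_unwind_user_mode(), which pops the record and returns to the
 * original caller of __thread_enter_user_mode().
 */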

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	/* Disable global interrupts first */
	csrc	CSR_XSTATUS, CSR_XSTATUS_IE

	/* Restore epc */
	load_xregs a0, THREAD_CTX_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0

	/* Restore ie */
	load_xregs a0, THREAD_CTX_REG_IE, REG_T0
	csrw	CSR_XIE, t0

	/* Restore status */
	load_xregs a0, THREAD_CTX_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0

	/* Check the previous privilege mode via status.SPP */
	b_if_prev_priv_is_u t0, 1f
	/* Set scratch as zero to indicate that we are in kernel mode */
	csrw	CSR_XSCRATCH, zero
	j	2f
1:
	/* Resume to U-mode, set scratch as tp to be used in the trap handler */
	csrw	CSR_XSCRATCH, tp
2:
	/* Restore all general-purpose registers */
	load_xregs a0, THREAD_CTX_REG_RA, REG_RA, REG_TP
	load_xregs a0, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs a0, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs a0, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs a0, THREAD_CTX_REG_A0, REG_A0, REG_A7

	XRET
END_FUNC thread_resume

/* void thread_foreign_interrupt_handler(struct thread_ctx_regs *regs) */
FUNC thread_foreign_interrupt_handler , :
	/* Update 32-bit core local flags */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	slli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, (THREAD_CLF_TMP | THREAD_CLF_FIQ)
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/*
	 * Mark current thread as suspended.
	 * a0 = THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	 * a1 = status
	 * a2 = epc
	 * thread_state_suspend(flags, status, pc)
	 */
	LDR	a1, THREAD_CTX_REG_STATUS(a0)
	LDR	a2, THREAD_CTX_REG_EPC(a0)
	li	a0, THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	call	thread_state_suspend
	/* The return value in a0 is the suspended thread ID */

	/* Update core local flags */
	lw	s1, THREAD_CORE_LOCAL_FLAGS(tp)
	srli	s1, s1, THREAD_CLF_SAVED_SHIFT
	ori	s1, s1, THREAD_CLF_TMP
	sw	s1, THREAD_CORE_LOCAL_FLAGS(tp)

	/* Pass the suspended thread ID in a4 and return to the untrusted domain */
	mv	a4, a0
	li	a0, TEEABI_OPTEED_RETURN_CALL_DONE
	li	a1, OPTEE_ABI_RETURN_RPC_FOREIGN_INTR
	li	a2, 0
	li	a3, 0
	li	a5, 0
	j	thread_return_to_udomain
END_FUNC thread_foreign_interrupt_handler