xref: /optee_os/core/arch/riscv/kernel/thread_arch.c (revision 25675979615c01f3c6bfbe105f53e07e939dd739)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 NXP
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/interrupt.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/vm.h>
#include <riscv.h>
#include <trace.h>
#include <util.h>

/*
 * This function is called as a guard after each ABI call which is not
 * supposed to return.
 */
void __noreturn __panic_at_abi_return(void)
{
	panic();
}

/* This function returns the currently masked exception bits. */
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t xie = read_csr(CSR_XIE) & THREAD_EXCP_ALL;

	return xie ^ THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	/*
	 * On Arm, the bits in the DAIF register mask exceptions, whereas on
	 * RISC-V the bits in the XIE CSR enable (unmask) the corresponding
	 * interrupt sources. To keep the semantics of thread_set_exceptions()
	 * unchanged, invert the bits in "exceptions" so that every bit set in
	 * "exceptions" ends up cleared in the value written to CSR XIE,
	 * masking that interrupt source.
	 */
	exceptions &= THREAD_EXCP_ALL;
	exceptions ^= THREAD_EXCP_ALL;

	barrier();
	write_csr(CSR_XIE, exceptions);
	barrier();
}

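/*
 * thread_mask_exceptions() masks the exceptions given in "exceptions" on
 * top of what is already masked and returns the previous mask state so it
 * can be restored with thread_unmask_exceptions(). A typical usage pattern
 * is sketched below (illustrative only, not taken from this file):
 *
 *	uint32_t excep = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	... critical section with foreign interrupts masked ...
 *
 *	thread_unmask_exceptions(excep);
 */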
uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

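/*
 * Lazy save/restore of the non-secure VFP state is not implemented for
 * RISC-V; these stubs only assert that CFG_WITH_VFP stays disabled, which
 * keeps the call sites below free of #ifdefs.
 */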
static void thread_lazy_save_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void thread_lazy_restore_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

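/*
 * Prepare the syscall register set so that the return path lands in
 * thread_unwind_user_mode() in S-mode, using the kernel stack pointer
 * saved when the thread entered user mode.
 */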
static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
	regs->ra = (uintptr_t)thread_unwind_user_mode;
	regs->status = xstatus_for_xret(true, PRV_S);
	regs->sp = thread_get_saved_thread_sp();
}

static void thread_unhandled_trap(struct thread_trap_regs *regs __unused,
				  unsigned long cause __unused)
{
	DMSG("Unhandled trap xepc:0x%016lx xcause:0x%016lx xtval:0x%016lx",
	     read_csr(CSR_XEPC), read_csr(CSR_XCAUSE), read_csr(CSR_XTVAL));
	panic();
}

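/*
 * Handle a system call from user mode. The call is dispatched to the
 * current TS session's handle_scall() hook with native interrupts enabled.
 * If the hook returns false, the thread exits user mode via
 * thread_exit_user_mode() instead of returning to the user space caller.
 */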
void thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);

	if (!sess->handle_scall(regs)) {
		setup_unwind_user_mode(regs);
		thread_exit_user_mode(regs->a0, regs->a1, regs->a2,
				      regs->a3, regs->sp, regs->ra,
				      regs->status);
	}
}

static void copy_scall_to_trap(struct thread_scall_regs *scall_regs,
			       struct thread_trap_regs *trap_regs)
{
	trap_regs->a0 = scall_regs->a0;
	trap_regs->a1 = scall_regs->a1;
	trap_regs->a2 = scall_regs->a2;
	trap_regs->a3 = scall_regs->a3;
	trap_regs->a4 = scall_regs->a4;
	trap_regs->a5 = scall_regs->a5;
	trap_regs->a6 = scall_regs->a6;
	trap_regs->a7 = scall_regs->a7;
	trap_regs->t0 = scall_regs->t0;
	trap_regs->t1 = scall_regs->t1;
}

static void copy_trap_to_scall(struct thread_trap_regs *trap_regs,
			       struct thread_scall_regs *scall_regs)
{
	*scall_regs = (struct thread_scall_regs) {
		.status = trap_regs->status,
		.ra = trap_regs->ra,
		.a0 = trap_regs->a0,
		.a1 = trap_regs->a1,
		.a2 = trap_regs->a2,
		.a3 = trap_regs->a3,
		.a4 = trap_regs->a4,
		.a5 = trap_regs->a5,
		.a6 = trap_regs->a6,
		.a7 = trap_regs->a7,
		.t0 = trap_regs->t0,
		.t1 = trap_regs->t1,
	};
}

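/*
 * Handle an ecall from U-mode: convert the trap frame to a syscall frame,
 * run thread_scall_handler() and copy the results back into the trap frame
 * before returning to user mode.
 */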
static void thread_user_ecall_handler(struct thread_trap_regs *trap_regs)
{
	struct thread_scall_regs scall_regs;
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	copy_trap_to_scall(trap_regs, &scall_regs);
	thread_scall_handler(&scall_regs);
	copy_scall_to_trap(&scall_regs, trap_regs);
	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	threads[ct].kern_sp = (unsigned long)(trap_regs + 1);
	/*
	 * We are returning to U-mode. On return the program counter is
	 * loaded from xepc, so add 4 (the size of the ecall instruction)
	 * to continue at the instruction following the ecall.
	 */
	trap_regs->epc += 4;
}

static void copy_trap_to_abort(struct thread_trap_regs *trap_regs,
			       struct thread_abort_regs *abort_regs)
{
	*abort_regs = (struct thread_abort_regs) {
		.status = trap_regs->status,
		.ra = trap_regs->ra,
		.sp = trap_regs->sp,
		.gp = trap_regs->gp,
		.tp = trap_regs->tp,
		.t0 = trap_regs->t0,
		.t1 = trap_regs->t1,
		.t2 = trap_regs->t2,
		.s0 = trap_regs->s0,
		.s1 = trap_regs->s1,
		.a0 = trap_regs->a0,
		.a1 = trap_regs->a1,
		.a2 = trap_regs->a2,
		.a3 = trap_regs->a3,
		.a4 = trap_regs->a4,
		.a5 = trap_regs->a5,
		.a6 = trap_regs->a6,
		.a7 = trap_regs->a7,
		.s2 = trap_regs->s2,
		.s3 = trap_regs->s3,
		.s4 = trap_regs->s4,
		.s5 = trap_regs->s5,
		.s6 = trap_regs->s6,
		.s7 = trap_regs->s7,
		.s8 = trap_regs->s8,
		.s9 = trap_regs->s9,
		.s10 = trap_regs->s10,
		.s11 = trap_regs->s11,
		.t3 = trap_regs->t3,
		.t4 = trap_regs->t4,
		.t5 = trap_regs->t5,
		.t6 = trap_regs->t6,
	};
}

static void thread_abort_handler(struct thread_trap_regs *trap_regs,
				 unsigned long cause)
{
	struct thread_abort_regs abort_regs = { };

	assert(cause == read_csr(CSR_XCAUSE));
	copy_trap_to_abort(trap_regs, &abort_regs);
	abort_regs.cause = read_csr(CSR_XCAUSE);
	abort_regs.epc = read_csr(CSR_XEPC);
	abort_regs.tval = read_csr(CSR_XTVAL);
	abort_regs.satp = read_csr(CSR_SATP);
	abort_handler(cause, &abort_regs);
}

static void thread_exception_handler(unsigned long cause,
				     struct thread_trap_regs *regs)
{
	switch (cause) {
	case CAUSE_USER_ECALL:
		thread_user_ecall_handler(regs);
		break;
	default:
		thread_abort_handler(regs, cause);
		break;
	}
}

static void thread_irq_handler(void)
{
	interrupt_main_handler();
}

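/*
 * Dispatch an interrupt trap. Masking the cause value with LONG_MAX clears
 * the interrupt bit (XLEN-1) so that only the interrupt code is compared.
 * Timer interrupts are simply masked here, external interrupts are passed
 * to the interrupt framework and anything else is treated as unhandled.
 */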
static void thread_interrupt_handler(unsigned long cause,
				     struct thread_trap_regs *regs)
{
	switch (cause & LONG_MAX) {
	case IRQ_XTIMER:
		clear_csr(CSR_XIE, CSR_XIE_TIE);
		break;
	case IRQ_XSOFT:
		thread_unhandled_trap(regs, cause);
		break;
	case IRQ_XEXT:
		thread_irq_handler();
		break;
	default:
		thread_unhandled_trap(regs, cause);
	}
}

void thread_trap_handler(long cause, unsigned long epc __unused,
			 struct thread_trap_regs *regs,
			 bool user __maybe_unused)
{
	/*
	 * The interrupt bit (XLEN-1) in the cause register is set if the
	 * trap was caused by an interrupt, which makes the value negative
	 * when interpreted as a signed long.
	 */
	if (cause < 0)
		thread_interrupt_handler(cause, regs);
	/* Otherwise the trap was caused by a synchronous exception. */
	else
		thread_exception_handler(cause, regs);
}

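/*
 * Build the xstatus value to be installed before an xret: IE is cleared so
 * interrupts stay disabled until the xret, PIE holds the interrupt enable
 * state to restore and SPP selects the privilege level to return to.
 */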
unsigned long xstatus_for_xret(uint8_t pie, uint8_t pp)
{
	unsigned long xstatus = read_csr(CSR_XSTATUS);

	assert(pp == PRV_M || pp == PRV_S || pp == PRV_U);

#ifdef RV32
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_IE, 0);
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_PIE, pie);
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_SPP, pp);
#else	/* RV64 */
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_IE, 0);
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_PIE, pie);
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_SPP, pp);
#endif

	return xstatus;
}

static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	memset(&thread->regs, 0, sizeof(thread->regs));

	thread->regs.epc = (uintptr_t)pc;

	/* Set up xstatus */
	thread->regs.status = xstatus_for_xret(true, PRV_S);

	/* Enable native interrupts */
	thread->regs.ie = THREAD_EXCP_NATIVE_INTR;

	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/* Set up GP and TP */
	thread->regs.gp = read_gp();
	thread->regs.tp = read_tp();

	/*
	 * Copy arguments into context. This will make the arguments
	 * appear in a0-a7 when the thread is started.
	 */
	thread->regs.a0 = a0;
	thread->regs.a1 = a1;
	thread->regs.a2 = a2;
	thread->regs.a3 = a3;
	thread->regs.a4 = a4;
	thread->regs.a5 = a5;
	thread->regs.a6 = a6;
	thread->regs.a7 = a7;
}

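/*
 * Claim a free thread slot, initialize its register context with the given
 * arguments and entry point and switch to it with thread_resume(). If no
 * free thread is available the function returns to the caller instead.
 */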
static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;

	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_abi_entry);
}

static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	regs->a0 = a0;
	regs->a1 = a1;
	regs->a2 = a2;
	regs->a3 = a3;
}

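/*
 * The SPP field in xstatus records the privilege level the trap was taken
 * from; it is zero when the trap came from U-mode.
 */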
static bool is_from_user(unsigned long status)
{
	return (status & CSR_XSTATUS_SPP) == 0;
}

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __maybe_unused __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->status);
}

vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}

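/*
 * Resume a thread that was suspended while waiting for an RPC to complete.
 * The thread is looked up by ID, reactivated, its user mapping and ftrace
 * state are restored if needed and execution continues via thread_resume().
 * If the ID is invalid or the thread is not suspended the function simply
 * returns.
 */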
void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * We may resume the thread on another hart, so reassign tp to the
	 * current hart's thread_core_local.
	 */
	if (!is_user_mode(&threads[n].regs))
		threads[n].regs.tp = read_tp();

	/*
	 * A return from an RPC that requested service of a foreign interrupt
	 * must not take parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

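/*
 * Suspend the current thread: record the resume status and pc in its
 * context, save the user mapping if one is active and release the core.
 * Returns the thread ID so the thread can be resumed later with
 * thread_resume_from_rpc().
 */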
int thread_state_suspend(uint32_t flags, unsigned long status, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	if (is_from_user(status)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.status = status;
	threads[ct].regs.epc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

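/*
 * Nothing to do here on RISC-V; the function is likely kept as an empty
 * stub to mirror the structure of the other architecture ports.
 */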
static void init_user_kcode(void)
{
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static vaddr_t get_trap_vect(void)
{
	return (vaddr_t)thread_trap_vect;
}

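/*
 * Install thread_trap_vect() as the trap vector. The static_assert checks
 * that the trap frame size is a multiple of 16 bytes, presumably to keep
 * the stack pointer aligned as required by the RISC-V psABI.
 */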
void thread_init_tvec(void)
{
	unsigned long tvec = (unsigned long)get_trap_vect();

	static_assert(sizeof(struct thread_trap_regs) % 16 == 0);
	write_csr(CSR_XTVEC, tvec);
	assert(read_csr(CSR_XTVEC) == tvec);
}

void thread_init_per_cpu(void)
{
	thread_init_tvec();
	/*
	 * We may receive traps from now on, so zeroize xSCRATCH so that
	 * thread_trap_vect() can distinguish user traps from kernel traps.
	 */
	write_csr(CSR_XSCRATCH, 0);
#ifndef CFG_PAN
	/*
	 * Allow access to user pages. When CFG_PAN is enabled, the SUM bit
	 * is instead set and cleared at runtime as needed.
	 */
	set_csr(CSR_XSTATUS, CSR_XSTATUS_SUM);
#endif
}

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 unsigned long status, unsigned long ie,
			 struct thread_pauth_keys *keys __unused)
{
	*regs = (struct thread_ctx_regs){
		.a0 = a0,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.s0 = 0,
		.sp = user_sp,
		.ra = entry_func,
		.status = status,
		.ie = ie,
	};
}

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp,
				unsigned long entry_func,
				bool is_32bit __unused,
				uint32_t *exit_status0,
				uint32_t *exit_status1)
{
	unsigned long status = 0;
	unsigned long ie = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;

	tee_ta_update_session_utime_resume();

	/* Save the current interrupt enable bits */
	ie = read_csr(CSR_XIE);

	/*
	 * Mask all exceptions; CSR_XSTATUS.IE is set again via the status
	 * prepared by setup_unwind_user_mode() when the thread exits user
	 * mode.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	regs = thread_get_ctx_regs();
	status = xstatus_for_xret(true, PRV_U);
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, status, ie,
		     NULL);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);

	return rc;
}

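/*
 * Issue an RPC to the normal world. The xstatus value is prepared for an
 * S-mode return with interrupts kept disabled (pie == false).
 */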
void __thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
{
	thread_rpc_xstatus(rv, xstatus_for_xret(false, PRV_S));
}