xref: /optee_os/core/arch/riscv/kernel/thread_arch.c (revision 731185b11620a6de1f824278ab3c166c7853ef66)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 NXP
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/vm.h>
#include <riscv.h>
#include <trace.h>
#include <util.h>

/*
 * This function is called as a guard after each ABI call that is not
 * supposed to return.
 */
void __noreturn __panic_at_abi_return(void)
{
	panic();
}

/* Return the currently masked exception bits. */
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t xie = read_csr(CSR_XIE) & THREAD_EXCP_ALL;

	return xie ^ THREAD_EXCP_ALL;
}

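/*
 * Mask or unmask exception sources. A bit set in "exceptions" means the
 * corresponding source is masked. For example,
 * thread_set_exceptions(THREAD_EXCP_FOREIGN_INTR) clears the foreign
 * interrupt enable bit in XIE while leaving the native interrupt
 * sources enabled.
 */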
void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	/*
	 * On Arm, the bits in the DAIF register mask exceptions, while on
	 * RISC-V the bits in the XIE CSR enable (unmask) the corresponding
	 * interrupt sources. To keep the semantics of thread_set_exceptions()
	 * unchanged, invert the bits of "exceptions": a bit set in
	 * "exceptions" (masked) ends up cleared in XIE, which disables that
	 * interrupt source.
	 */
	exceptions &= THREAD_EXCP_ALL;
	exceptions ^= THREAD_EXCP_ALL;

	barrier();
	write_csr(CSR_XIE, exceptions);
	barrier();
}

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

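/*
 * Lazy save/restore of the non-secure FPU state is not implemented for
 * RISC-V: the static_assert() below enforces that CFG_WITH_VFP stays
 * disabled, so these helpers are empty placeholders kept to mirror the
 * generic thread code flow.
 */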
static void thread_lazy_save_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void thread_lazy_restore_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

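/*
 * Rewrite the system call return state so that thread_exit_user_mode()
 * unwinds back to the kernel context saved by __thread_enter_user_mode():
 * the return address is set to thread_unwind_user_mode() and the stack
 * pointer to the saved kernel stack. Used by thread_scall_handler() when
 * the session's scall handler signals that user mode should be exited.
 */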
static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
	regs->ra = (uintptr_t)thread_unwind_user_mode;
	regs->status = read_csr(CSR_XSTATUS);
	regs->sp = thread_get_saved_thread_sp();
}

static void thread_unhandled_trap(struct thread_trap_regs *regs __unused,
				  unsigned long cause __unused)
{
	DMSG("Unhandled trap xepc:0x%016lx xcause:0x%016lx xtval:0x%016lx",
	     read_csr(CSR_XEPC), read_csr(CSR_XCAUSE), read_csr(CSR_XTVAL));
	panic();
}

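/*
 * Handle a system call trapped from user mode: native interrupts are
 * unmasked, the current TS session dispatches the call, and if the
 * session's handler signals the end of user-mode execution the context
 * is unwound via thread_exit_user_mode().
 */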
void thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);

	if (!sess->handle_scall(regs)) {
		setup_unwind_user_mode(regs);
		thread_exit_user_mode(regs->a0, regs->a1, regs->a2,
				      regs->a3, regs->sp, regs->ra,
				      regs->status);
	}
}

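/*
 * copy_trap_to_scall() and copy_scall_to_trap() shuttle only the subset
 * of registers used by system call handling (a0-a7 and t0/t1, plus ra
 * and status on the way in); the remaining registers of the trap frame
 * are left untouched.
 */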
static void copy_scall_to_trap(struct thread_scall_regs *scall_regs,
			       struct thread_trap_regs *trap_regs)
{
	trap_regs->a0 = scall_regs->a0;
	trap_regs->a1 = scall_regs->a1;
	trap_regs->a2 = scall_regs->a2;
	trap_regs->a3 = scall_regs->a3;
	trap_regs->a4 = scall_regs->a4;
	trap_regs->a5 = scall_regs->a5;
	trap_regs->a6 = scall_regs->a6;
	trap_regs->a7 = scall_regs->a7;
	trap_regs->t0 = scall_regs->t0;
	trap_regs->t1 = scall_regs->t1;
}

static void copy_trap_to_scall(struct thread_trap_regs *trap_regs,
			       struct thread_scall_regs *scall_regs)
{
	*scall_regs = (struct thread_scall_regs) {
		.status = trap_regs->status,
		.ra = trap_regs->ra,
		.a0 = trap_regs->a0,
		.a1 = trap_regs->a1,
		.a2 = trap_regs->a2,
		.a3 = trap_regs->a3,
		.a4 = trap_regs->a4,
		.a5 = trap_regs->a5,
		.a6 = trap_regs->a6,
		.a7 = trap_regs->a7,
		.t0 = trap_regs->t0,
		.t1 = trap_regs->t1,
	};
}

static void thread_user_ecall_handler(struct thread_trap_regs *trap_regs)
{
	struct thread_scall_regs scall_regs;
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	copy_trap_to_scall(trap_regs, &scall_regs);
	thread_scall_handler(&scall_regs);
	copy_scall_to_trap(&scall_regs, trap_regs);
	/*
	 * Save the kernel stack pointer we had at the beginning of this
	 * function. This matters when this TA has called another TA,
	 * because __thread_enter_user_mode() also saves the stack pointer
	 * in this field.
	 */
	threads[ct].kern_sp = (unsigned long)(trap_regs + 1);
	/*
	 * We are returning to U-mode. On return the program counter is
	 * restored from xepc, so add 4 (the size of the ecall instruction)
	 * to resume at the instruction following the ecall.
	 */
	trap_regs->epc += 4;
}

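/*
 * Unlike the scall path, an abort report needs the complete general
 * purpose register file, so every register of the trap frame is copied
 * into the abort frame handed to abort_handler().
 */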
static void copy_trap_to_abort(struct thread_trap_regs *trap_regs,
			       struct thread_abort_regs *abort_regs)
{
	*abort_regs = (struct thread_abort_regs) {
		.status = trap_regs->status,
		.ra = trap_regs->ra,
		.sp = trap_regs->sp,
		.gp = trap_regs->gp,
		.tp = trap_regs->tp,
		.t0 = trap_regs->t0,
		.t1 = trap_regs->t1,
		.t2 = trap_regs->t2,
		.s0 = trap_regs->s0,
		.s1 = trap_regs->s1,
		.a0 = trap_regs->a0,
		.a1 = trap_regs->a1,
		.a2 = trap_regs->a2,
		.a3 = trap_regs->a3,
		.a4 = trap_regs->a4,
		.a5 = trap_regs->a5,
		.a6 = trap_regs->a6,
		.a7 = trap_regs->a7,
		.s2 = trap_regs->s2,
		.s3 = trap_regs->s3,
		.s4 = trap_regs->s4,
		.s5 = trap_regs->s5,
		.s6 = trap_regs->s6,
		.s7 = trap_regs->s7,
		.s8 = trap_regs->s8,
		.s9 = trap_regs->s9,
		.s10 = trap_regs->s10,
		.s11 = trap_regs->s11,
		.t3 = trap_regs->t3,
		.t4 = trap_regs->t4,
		.t5 = trap_regs->t5,
		.t6 = trap_regs->t6,
	};
}

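/*
 * cause, epc, tval and satp are not part of the trap frame, so they are
 * read back from the CSRs here before the abort frame is passed to the
 * generic abort_handler().
 */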
static void thread_abort_handler(struct thread_trap_regs *trap_regs,
				 unsigned long cause)
{
	struct thread_abort_regs abort_regs = { };

	assert(cause == read_csr(CSR_XCAUSE));
	copy_trap_to_abort(trap_regs, &abort_regs);
	abort_regs.cause = read_csr(CSR_XCAUSE);
	abort_regs.epc = read_csr(CSR_XEPC);
	abort_regs.tval = read_csr(CSR_XTVAL);
	abort_regs.satp = read_csr(CSR_SATP);
	abort_handler(cause, &abort_regs);
}

static void thread_exception_handler(unsigned long cause,
				     struct thread_trap_regs *regs)
{
	switch (cause) {
	case CAUSE_USER_ECALL:
		thread_user_ecall_handler(regs);
		break;
	default:
		thread_abort_handler(regs, cause);
		break;
	}
}

static void thread_irq_handler(void)
{
	interrupt_main_handler();
}

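/*
 * Dispatch an interrupt trap: timer interrupts are handled by simply
 * masking the timer interrupt enable bit, external interrupts are
 * routed to the interrupt framework, and software interrupts are
 * treated as unhandled. "cause & LONG_MAX" strips the interrupt bit
 * (bit XLEN-1) so only the interrupt code is compared.
 */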
static void thread_interrupt_handler(unsigned long cause,
				     struct thread_trap_regs *regs)
{
	switch (cause & LONG_MAX) {
	case IRQ_XTIMER:
		clear_csr(CSR_XIE, CSR_XIE_TIE);
		break;
	case IRQ_XSOFT:
		thread_unhandled_trap(regs, cause);
		break;
	case IRQ_XEXT:
		thread_irq_handler();
		break;
	default:
		thread_unhandled_trap(regs, cause);
	}
}

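/*
 * Common trap entry, reached via the trap vector. "cause" is declared
 * as a signed long on purpose: the Interrupt bit occupies bit XLEN-1 of
 * xcause, so a negative value means the trap is an interrupt and a
 * non-negative value means it is a synchronous exception.
 */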
void thread_trap_handler(long cause, unsigned long epc __unused,
			 struct thread_trap_regs *regs,
			 bool user __maybe_unused)
{
	/*
	 * The Interrupt bit (XLEN-1) in the cause register is set if the
	 * trap was caused by an interrupt, which makes the signed value
	 * negative.
	 */
	if (cause < 0)
		thread_interrupt_handler(cause, regs);
	/* Otherwise the trap is a synchronous exception */
	else
		thread_exception_handler(cause, regs);
}

static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.ra = (uintptr_t)pc;

	/* Set up xstatus */
	thread->regs.status = read_csr(CSR_XSTATUS);

	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in a0-a7 when thread is started.
	 */
	thread->regs.a0 = a0;
	thread->regs.a1 = a1;
	thread->regs.a2 = a2;
	thread->regs.a3 = a3;
	thread->regs.a4 = a4;
	thread->regs.a5 = a5;
	thread->regs.a6 = a6;
	thread->regs.a7 = a7;
}

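/*
 * Grab the first free thread context under the global thread lock, mark
 * it active and switch to it with the given arguments in a0-a7 and "pc"
 * as the entry point. Returns only if no free thread context is
 * available.
 */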
static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;

	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_abi_entry);
}

static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	regs->a0 = a0;
	regs->a1 = a1;
	regs->a2 = a2;
	regs->a3 = a3;
}

static bool is_from_user(unsigned long status)
{
	return (status & CSR_XSTATUS_SPP) == 0;
}

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __maybe_unused __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->status);
}

vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}

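/*
 * Resume a previously suspended thread after a return from the normal
 * world: the thread context is reclaimed under the global lock, its
 * user mapping and ftrace/utime accounting are restored, and a0-a3 are
 * only taken from the normal world when the thread was suspended with
 * THREAD_FLAGS_COPY_ARGS_ON_RETURN set.
 */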
void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * A return from RPC requesting service of a foreign interrupt must
	 * not take parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

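/*
 * Park the current thread in THREAD_STATE_SUSPENDED, recording the
 * resume pc/status and detaching the user mapping, so the core can
 * return to the normal world (for instance for an RPC or on a foreign
 * interrupt). The thread id is returned so the thread can be resumed
 * later with thread_resume_from_rpc().
 */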
int thread_state_suspend(uint32_t flags, unsigned long status, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	if (is_from_user(status)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.status = status;
	threads[ct].regs.ra = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

static void init_user_kcode(void)
{
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static vaddr_t get_trap_vect(void)
{
	return (vaddr_t)thread_trap_vect;
}

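/*
 * Install the trap vector. The static_assert() below keeps struct
 * thread_trap_regs a multiple of 16 bytes so that trap frames pushed on
 * the stack preserve the 16-byte stack alignment required by the RISC-V
 * calling convention.
 */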
void thread_init_tvec(void)
{
	unsigned long tvec = (unsigned long)get_trap_vect();

	static_assert(sizeof(struct thread_trap_regs) % 16 == 0);
	write_csr(CSR_XTVEC, tvec);
	assert(read_csr(CSR_XTVEC) == tvec);
}

void thread_init_per_cpu(void)
{
	thread_init_tvec();
	/*
	 * We may receive traps from now on; zeroize xSCRATCH so that
	 * thread_trap_vect() can distinguish user traps from kernel traps.
	 */
	write_csr(CSR_XSCRATCH, 0);
#ifndef CFG_PAN
	/*
	 * Allow access to user pages. When CFG_PAN is enabled, the SUM bit
	 * is set and cleared at runtime as necessary.
	 */
	set_csr(CSR_XSTATUS, CSR_XSTATUS_SUM);
#endif
}

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 unsigned long status,
			 struct thread_pauth_keys *keys __unused)
{
	*regs = (struct thread_ctx_regs){
		.a0 = a0,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.sp = user_sp,
		.ra = entry_func,
		.status = status
	};
}

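/*
 * Enter user mode with all exceptions masked: xstatus is prepared with
 * SPP set to U-mode and xPIE set so that interrupts are enabled again
 * once the return to user mode happens, then __thread_enter_user_mode()
 * performs the actual context switch.
 */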
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp,
				unsigned long entry_func,
				bool is_32bit __unused,
				uint32_t *exit_status0,
				uint32_t *exit_status1)
{
	unsigned long status = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;

	tee_ta_update_session_utime_resume();

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	regs = thread_get_ctx_regs();
	status = read_csr(CSR_XSTATUS);
	/* xIE is restored from xPIE on return, enabling interrupts */
	status |= CSR_XSTATUS_PIE;
	status = set_field_u64(status, CSR_XSTATUS_SPP, PRV_U);
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, status, NULL);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);

	return rc;
}