// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 NXP
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/vm.h>
#include <riscv.h>
#include <trace.h>
#include <util.h>

/*
 * This function is called as a guard after each ABI call which is not
 * supposed to return.
 */
void __noreturn __panic_at_abi_return(void)
{
	panic();
}

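/*
 * Exception mask helpers. The THREAD_EXCP_* bits are read from and written
 * to the xie CSR directly; thread_mask_exceptions() returns the previous
 * state so it can be restored later with thread_unmask_exceptions().
 */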
uint32_t __nostackcheck thread_get_exceptions(void)
{
	return read_csr(CSR_XIE) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	barrier();
	write_csr(CSR_XIE, exceptions);
	barrier();
}

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

static void thread_lazy_save_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void thread_lazy_restore_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

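/*
 * Prepare the syscall register set so that thread_exit_user_mode()
 * continues in thread_unwind_user_mode() on the kernel stack saved for
 * this thread.
 */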
static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
	regs->ra = (uintptr_t)thread_unwind_user_mode;
	regs->status = read_csr(CSR_XSTATUS);
	regs->sp = thread_get_saved_thread_sp();
}

static void thread_unhandled_trap(struct thread_trap_regs *regs __unused,
				  unsigned long cause __unused)
{
	DMSG("Unhandled trap xepc:0x%016lx xcause:0x%016lx xtval:0x%016lx",
	     read_csr(CSR_XEPC), read_csr(CSR_XCAUSE), read_csr(CSR_XTVAL));
	panic();
}

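/*
 * Handle a syscall trapped from user mode. The register set has been
 * extracted from the trap frame by thread_user_ecall_handler(). If the
 * session's handler returns false, the saved state is rewritten so that
 * execution continues in thread_unwind_user_mode() instead of returning
 * to user mode.
 */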
void thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);

	if (!sess->handle_scall(regs)) {
		setup_unwind_user_mode(regs);
		thread_exit_user_mode(regs->a0, regs->a1, regs->a2,
				      regs->a3, regs->sp, regs->ra,
				      regs->status);
	}
}

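/*
 * Copy the registers a syscall may have modified (a0-a7, t0 and t1) back
 * into the trap frame so user mode sees them when the trap returns.
 */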
static void copy_scall_to_trap(struct thread_scall_regs *scall_regs,
			       struct thread_trap_regs *trap_regs)
{
	trap_regs->a0 = scall_regs->a0;
	trap_regs->a1 = scall_regs->a1;
	trap_regs->a2 = scall_regs->a2;
	trap_regs->a3 = scall_regs->a3;
	trap_regs->a4 = scall_regs->a4;
	trap_regs->a5 = scall_regs->a5;
	trap_regs->a6 = scall_regs->a6;
	trap_regs->a7 = scall_regs->a7;
	trap_regs->t0 = scall_regs->t0;
	trap_regs->t1 = scall_regs->t1;
}

static void copy_trap_to_scall(struct thread_trap_regs *trap_regs,
			       struct thread_scall_regs *scall_regs)
{
	*scall_regs = (struct thread_scall_regs) {
		.status = trap_regs->status,
		.ra = trap_regs->ra,
		.a0 = trap_regs->a0,
		.a1 = trap_regs->a1,
		.a2 = trap_regs->a2,
		.a3 = trap_regs->a3,
		.a4 = trap_regs->a4,
		.a5 = trap_regs->a5,
		.a6 = trap_regs->a6,
		.a7 = trap_regs->a7,
		.t0 = trap_regs->t0,
		.t1 = trap_regs->t1,
	};
}

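/*
 * Handle an ecall trap from user mode: convert the trap frame into a
 * struct thread_scall_regs, run the syscall handler and copy the results
 * back into the trap frame before returning to user mode.
 */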
static void thread_user_ecall_handler(struct thread_trap_regs *trap_regs)
{
	struct thread_scall_regs scall_regs;
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	copy_trap_to_scall(trap_regs, &scall_regs);
	thread_scall_handler(&scall_regs);
	copy_scall_to_trap(&scall_regs, trap_regs);
	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	threads[ct].kern_sp = (unsigned long)(trap_regs + 1);
	/*
	 * We are returning to U-mode. On return the program counter is
	 * set from xepc (pc = xepc), so add 4 (the size of the ecall
	 * instruction) to continue at the instruction following the ecall.
	 */
	trap_regs->epc += 4;
}

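/*
 * Convert a trap frame into the abort frame layout expected by
 * abort_handler().
 */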
static void copy_trap_to_abort(struct thread_trap_regs *trap_regs,
			       struct thread_abort_regs *abort_regs)
{
	*abort_regs = (struct thread_abort_regs) {
		.status = trap_regs->status,
		.ra = trap_regs->ra,
		.sp = trap_regs->sp,
		.gp = trap_regs->gp,
		.tp = trap_regs->tp,
		.t0 = trap_regs->t0,
		.t1 = trap_regs->t1,
		.t2 = trap_regs->t2,
		.s0 = trap_regs->s0,
		.s1 = trap_regs->s1,
		.a0 = trap_regs->a0,
		.a1 = trap_regs->a1,
		.a2 = trap_regs->a2,
		.a3 = trap_regs->a3,
		.a4 = trap_regs->a4,
		.a5 = trap_regs->a5,
		.a6 = trap_regs->a6,
		.a7 = trap_regs->a7,
		.s2 = trap_regs->s2,
		.s3 = trap_regs->s3,
		.s4 = trap_regs->s4,
		.s5 = trap_regs->s5,
		.s6 = trap_regs->s6,
		.s7 = trap_regs->s7,
		.s8 = trap_regs->s8,
		.s9 = trap_regs->s9,
		.s10 = trap_regs->s10,
		.s11 = trap_regs->s11,
		.t3 = trap_regs->t3,
		.t4 = trap_regs->t4,
		.t5 = trap_regs->t5,
		.t6 = trap_regs->t6,
	};
}

static void thread_abort_handler(struct thread_trap_regs *trap_regs,
				 unsigned long cause)
{
	struct thread_abort_regs abort_regs = { };

	assert(cause == read_csr(CSR_XCAUSE));
	copy_trap_to_abort(trap_regs, &abort_regs);
	abort_regs.cause = read_csr(CSR_XCAUSE);
	abort_regs.epc = read_csr(CSR_XEPC);
	abort_regs.tval = read_csr(CSR_XTVAL);
	abort_regs.satp = read_csr(CSR_SATP);
	abort_handler(cause, &abort_regs);
}

static void thread_exception_handler(unsigned long cause,
				     struct thread_trap_regs *regs)
{
	switch (cause) {
	case CAUSE_USER_ECALL:
		thread_user_ecall_handler(regs);
		break;
	default:
		thread_abort_handler(regs, cause);
		break;
	}
}

static void thread_irq_handler(void)
{
	interrupt_main_handler();
}

static void thread_interrupt_handler(unsigned long cause,
				     struct thread_trap_regs *regs)
{
	switch (cause & LONG_MAX) {
	case IRQ_XTIMER:
		clear_csr(CSR_XIE, CSR_XIE_TIE);
		break;
	case IRQ_XSOFT:
		thread_unhandled_trap(regs, cause);
		break;
	case IRQ_XEXT:
		thread_irq_handler();
		break;
	default:
		thread_unhandled_trap(regs, cause);
	}
}

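/*
 * Common C-level trap handler, entered from the trap vector with the
 * registers saved on the stack. Dispatches to the interrupt or exception
 * handler depending on the sign of cause.
 */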
void thread_trap_handler(long cause, unsigned long epc __unused,
			 struct thread_trap_regs *regs,
			 bool user __maybe_unused)
{
	/*
	 * The Interrupt bit (XLEN-1) in the cause register is set
	 * if the trap was caused by an interrupt.
	 */
	if (cause < 0)
		thread_interrupt_handler(cause, regs);
	/*
	 * Otherwise the Interrupt bit is clear and the trap was caused
	 * by a synchronous exception.
	 */
	else
		thread_exception_handler(cause, regs);
}

static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.ra = (uintptr_t)pc;

	/* Set up xstatus */
	thread->regs.status = read_csr(CSR_XSTATUS);

	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in a0-a7 when thread is started.
	 */
	thread->regs.a0 = a0;
	thread->regs.a1 = a1;
	thread->regs.a2 = a2;
	thread->regs.a3 = a3;
	thread->regs.a4 = a4;
	thread->regs.a5 = a5;
	thread->regs.a6 = a6;
	thread->regs.a7 = a7;
}

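/*
 * Pick a free thread slot, mark it active, initialize its register state
 * with the given arguments and entry point and resume it. Returns quietly
 * if no free thread is available.
 */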
static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;

	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_abi_entry);
}

static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	regs->a0 = a0;
	regs->a1 = a1;
	regs->a2 = a2;
	regs->a3 = a3;
}

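/* A cleared SPP bit in the saved status means the trap came from U-mode */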
static bool is_from_user(unsigned long status)
{
	return (status & CSR_XSTATUS_SPP) == 0;
}

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __maybe_unused __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->status);
}

vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}

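/*
 * Resume a thread previously suspended with thread_state_suspend(), for
 * instance when the normal world returns from an RPC. The a0-a3 arguments
 * are copied into the thread only when THREAD_FLAGS_COPY_ARGS_ON_RETURN
 * is set.
 */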
void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * Return from RPC to request service of a foreign interrupt must not
	 * get parameters from non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

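/*
 * Release the current thread: mark its slot free and detach it from
 * this core.
 */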
void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

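/*
 * Save the state of the current thread and mark it suspended so it can
 * later be resumed with thread_resume_from_rpc(). Returns the id of the
 * suspended thread.
 */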
int thread_state_suspend(uint32_t flags, unsigned long status, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	if (is_from_user(status)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.status = status;
	threads[ct].regs.ra = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

static void init_user_kcode(void)
{
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static vaddr_t get_trap_vect(void)
{
	return (vaddr_t)thread_trap_vect;
}

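/* Install the trap vector by writing its address to the xtvec CSR */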
void thread_init_tvec(void)
{
	unsigned long tvec = (unsigned long)get_trap_vect();

	static_assert(sizeof(struct thread_trap_regs) % 16 == 0);
	write_csr(CSR_XTVEC, tvec);
	assert(read_csr(CSR_XTVEC) == tvec);
}

void thread_init_per_cpu(void)
{
	thread_init_tvec();
	/*
	 * We may receive traps from now on, so zeroize xSCRATCH such
	 * that thread_trap_vect() can distinguish between user traps
	 * and kernel traps.
	 */
	write_csr(CSR_XSCRATCH, 0);
#ifndef CFG_PAN
	/*
	 * Allow access to user pages. When CFG_PAN is enabled, the SUM bit
	 * is set and cleared at runtime as necessary.
	 */
	set_csr(CSR_XSTATUS, CSR_XSTATUS_SUM);
#endif
}

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 unsigned long status,
			 struct thread_pauth_keys *keys __unused)
{
	*regs = (struct thread_ctx_regs){
		.a0 = a0,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.sp = user_sp,
		.ra = entry_func,
		.status = status
	};
}

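/*
 * Enter user mode with arguments a0-a3: xstatus is programmed so that the
 * previous privilege is U-mode and the previous interrupt-enable bit is
 * set, the initial register state is filled in and control is handed to
 * __thread_enter_user_mode().
 */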
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp,
				unsigned long entry_func,
				bool is_32bit __unused,
				uint32_t *exit_status0,
				uint32_t *exit_status1)
{
	unsigned long status = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;

	tee_ta_update_session_utime_resume();

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	regs = thread_get_ctx_regs();
	status = read_csr(CSR_XSTATUS);
	status |= CSR_XSTATUS_PIE;	/* xPIE: interrupts enabled in U-mode */
	status = set_field_u64(status, CSR_XSTATUS_SPP, PRV_U);
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, status, NULL);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);

	return rc;
}
629