xref: /optee_os/core/arch/riscv/kernel/thread_arch.c (revision fdc4a8bef4978835f05b1687c99e090c85b84b7c)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 NXP
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/vm.h>
#include <riscv.h>
#include <trace.h>
#include <util.h>

uint32_t __nostackcheck thread_get_exceptions(void)
{
	return read_csr(CSR_XIE) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	barrier();
	write_csr(CSR_XIE, exceptions);
	barrier();
}

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

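/*
 * Lazy save/restore of the non-secure VFP state is not implemented for
 * RISC-V: the static_asserts below make sure this only builds with
 * CFG_WITH_VFP disabled.
 */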
static void thread_lazy_save_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void thread_lazy_restore_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

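/*
 * Prepare the registers used to leave user mode: return via
 * thread_unwind_user_mode() on the kernel stack saved for this thread,
 * with the current xstatus.
 */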
static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
	regs->ra = (uintptr_t)thread_unwind_user_mode;
	regs->status = read_csr(CSR_XSTATUS);
	regs->sp = thread_get_saved_thread_sp();
}

static void thread_unhandled_trap(struct thread_trap_regs *regs __unused,
				  unsigned long cause __unused)
{
	DMSG("Unhandled trap xepc:0x%016lx xcause:0x%016lx xtval:0x%016lx",
	     read_csr(CSR_XEPC), read_csr(CSR_XCAUSE), read_csr(CSR_XTVAL));
	panic();
}

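/*
 * Handle a system call from user mode. The call is dispatched to the
 * current TS session's scall handler; when the handler reports that the
 * thread is done, unwind back to the caller of thread_enter_user_mode().
 */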
void thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);

	if (!sess->handle_scall(regs)) {
		setup_unwind_user_mode(regs);
		thread_exit_user_mode(regs->a0, regs->a1, regs->a2,
				      regs->a3, regs->sp, regs->ra,
				      regs->status);
	}
}

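/* Copy the (possibly updated) system call registers back into the trap frame */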
static void copy_scall_to_trap(struct thread_scall_regs *scall_regs,
			       struct thread_trap_regs *trap_regs)
{
	trap_regs->a0 = scall_regs->a0;
	trap_regs->a1 = scall_regs->a1;
	trap_regs->a2 = scall_regs->a2;
	trap_regs->a3 = scall_regs->a3;
	trap_regs->a4 = scall_regs->a4;
	trap_regs->a5 = scall_regs->a5;
	trap_regs->a6 = scall_regs->a6;
	trap_regs->a7 = scall_regs->a7;
	trap_regs->t0 = scall_regs->t0;
	trap_regs->t1 = scall_regs->t1;
}

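/* Extract the system call registers from the trap frame into a scall frame */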
static void copy_trap_to_scall(struct thread_trap_regs *trap_regs,
			       struct thread_scall_regs *scall_regs)
{
	*scall_regs = (struct thread_scall_regs) {
		.status = trap_regs->status,
		.ra = trap_regs->ra,
		.a0 = trap_regs->a0,
		.a1 = trap_regs->a1,
		.a2 = trap_regs->a2,
		.a3 = trap_regs->a3,
		.a4 = trap_regs->a4,
		.a5 = trap_regs->a5,
		.a6 = trap_regs->a6,
		.a7 = trap_regs->a7,
		.t0 = trap_regs->t0,
		.t1 = trap_regs->t1,
	};
}

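/*
 * Handle an ecall from user mode: convert the trap frame into a system
 * call frame, run the system call handler, then copy the results back
 * and step past the ecall instruction.
 */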
static void thread_user_ecall_handler(struct thread_trap_regs *trap_regs)
{
	struct thread_scall_regs scall_regs;
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	copy_trap_to_scall(trap_regs, &scall_regs);
	thread_scall_handler(&scall_regs);
	copy_scall_to_trap(&scall_regs, trap_regs);
	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	threads[ct].kern_sp = (unsigned long)(trap_regs + 1);
	/*
	 * We are returning to U-mode. On return the program counter is
	 * loaded from xepc (pc = xepc), so add 4 (the size of the ecall
	 * instruction) to continue at the next instruction.
	 */
	trap_regs->epc += 4;
}

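/* Copy the general-purpose registers of the trap frame into an abort frame */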
static void copy_trap_to_abort(struct thread_trap_regs *trap_regs,
			       struct thread_abort_regs *abort_regs)
{
	*abort_regs = (struct thread_abort_regs) {
		.status = trap_regs->status,
		.ra = trap_regs->ra,
		.sp = trap_regs->sp,
		.gp = trap_regs->gp,
		.tp = trap_regs->tp,
		.t0 = trap_regs->t0,
		.t1 = trap_regs->t1,
		.t2 = trap_regs->t2,
		.s0 = trap_regs->s0,
		.s1 = trap_regs->s1,
		.a0 = trap_regs->a0,
		.a1 = trap_regs->a1,
		.a2 = trap_regs->a2,
		.a3 = trap_regs->a3,
		.a4 = trap_regs->a4,
		.a5 = trap_regs->a5,
		.a6 = trap_regs->a6,
		.a7 = trap_regs->a7,
		.s2 = trap_regs->s2,
		.s3 = trap_regs->s3,
		.s4 = trap_regs->s4,
		.s5 = trap_regs->s5,
		.s6 = trap_regs->s6,
		.s7 = trap_regs->s7,
		.s8 = trap_regs->s8,
		.s9 = trap_regs->s9,
		.s10 = trap_regs->s10,
		.s11 = trap_regs->s11,
		.t3 = trap_regs->t3,
		.t4 = trap_regs->t4,
		.t5 = trap_regs->t5,
		.t6 = trap_regs->t6,
	};
}

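/*
 * Handle a trap that is neither an interrupt nor a user ecall: build an
 * abort frame from the trap frame and the trap CSRs and pass it to the
 * generic abort handler.
 */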
static void thread_abort_handler(struct thread_trap_regs *trap_regs,
				 unsigned long cause)
{
	struct thread_abort_regs abort_regs = { };

	assert(cause == read_csr(CSR_XCAUSE));
	copy_trap_to_abort(trap_regs, &abort_regs);
	abort_regs.cause = read_csr(CSR_XCAUSE);
	abort_regs.epc = read_csr(CSR_XEPC);
	abort_regs.tval = read_csr(CSR_XTVAL);
	abort_regs.satp = read_csr(CSR_SATP);
	abort_handler(cause, &abort_regs);
}

static void thread_exception_handler(unsigned long cause,
				     struct thread_trap_regs *regs)
{
	switch (cause) {
	case CAUSE_USER_ECALL:
		thread_user_ecall_handler(regs);
		break;
	default:
		thread_abort_handler(regs, cause);
		break;
	}
}

static void thread_irq_handler(void)
{
	itr_core_handler();
}

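/*
 * Dispatch an interrupt on its cause: timer interrupts are masked here,
 * external interrupts are forwarded to the interrupt controller driver,
 * anything else is treated as an unhandled trap.
 */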
static void thread_interrupt_handler(unsigned long cause,
				     struct thread_trap_regs *regs)
{
	switch (cause & LONG_MAX) {
	case IRQ_XTIMER:
		clear_csr(CSR_XIE, CSR_XIE_TIE);
		break;
	case IRQ_XSOFT:
		thread_unhandled_trap(regs, cause);
		break;
	case IRQ_XEXT:
		thread_irq_handler();
		break;
	default:
		thread_unhandled_trap(regs, cause);
	}
}

void thread_trap_handler(long cause, unsigned long epc __unused,
			 struct thread_trap_regs *regs,
			 bool user __maybe_unused)
{
	/*
	 * The Interrupt bit (XLEN-1) of the cause register is set if the
	 * trap was caused by an interrupt, which makes the signed cause
	 * value negative.
	 */
	if (cause < 0)
		thread_interrupt_handler(cause, regs);
	/* Otherwise the trap was caused by a synchronous exception */
	else
		thread_exception_handler(cause, regs);
}

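/*
 * Initialize the context of a newly allocated thread: entry point in ra,
 * current xstatus, stack pointer at the end of the thread stack and the
 * arguments in a0-a7.
 */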
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.ra = (uintptr_t)pc;

	/* Set up xstatus */
	thread->regs.status = read_csr(CSR_XSTATUS);

	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in a0-a7 when the thread is started.
	 */
	thread->regs.a0 = a0;
	thread->regs.a1 = a1;
	thread->regs.a2 = a2;
	thread->regs.a3 = a3;
	thread->regs.a4 = a4;
	thread->regs.a5 = a5;
	thread->regs.a6 = a6;
	thread->regs.a7 = a7;
}

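/*
 * Allocate a free thread, initialize its context with the given arguments
 * and entry point and switch to it. Returns to the caller only if no free
 * thread is available.
 */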
static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;

	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_smc_entry);
}

static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	regs->a0 = a0;
	regs->a1 = a1;
	regs->a2 = a2;
	regs->a3 = a3;
}

static bool is_from_user(unsigned long status)
{
	return status & CSR_XSTATUS_SPP;
}

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __maybe_unused __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->status);
}

vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}

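/*
 * Resume a thread that was suspended while doing an RPC: restore its user
 * mapping if it had one, optionally copy the return arguments from the
 * normal world and switch to the saved context.
 */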
void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * A return from an RPC that was issued to request service of a
	 * foreign interrupt must not take parameters from the non-secure
	 * world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

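/* Release the current thread and detach it from this core */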
void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

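/*
 * Suspend the current thread: record the resume status and pc in its
 * context, stash and clear the user mapping if one is active, detach the
 * thread from this core and return its id.
 */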
int thread_state_suspend(uint32_t flags, uint32_t status, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	if (is_from_user(status)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.status = status;
	threads[ct].regs.ra = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

static void init_user_kcode(void)
{
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static vaddr_t get_trap_vect(void)
{
	return (vaddr_t)thread_trap_vect;
}

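/*
 * Install the trap vector: point xtvec at thread_trap_vect() and verify
 * that the CSR accepted the address.
 */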
void thread_init_tvec(void)
{
	unsigned long tvec = (unsigned long)get_trap_vect();

	static_assert(sizeof(struct thread_trap_regs) % 16 == 0);
	write_csr(CSR_XTVEC, tvec);
	assert(read_csr(CSR_XTVEC) == tvec);
}

void thread_init_per_cpu(void)
{
	thread_init_tvec();
	/*
	 * We may receive traps from now on, so zeroize xSCRATCH so that
	 * thread_trap_vect() can distinguish user traps from kernel traps.
	 */
	write_csr(CSR_XSCRATCH, 0);
	/* Allow access to user pages */
	set_csr(CSR_XSTATUS, CSR_XSTATUS_SUM);
}

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 uint32_t status,
			 struct thread_pauth_keys *keys __unused)
{
	*regs = (struct thread_ctx_regs){
		.a0 = a0,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.sp = user_sp,
		.ra = entry_func,
		.status = status
	};
}

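/*
 * Enter user mode with exceptions masked: build a user context with SUM
 * and PIE set and the previous privilege level set to U-mode, then hand
 * over to the assembly routine __thread_enter_user_mode().
 */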
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp,
				unsigned long entry_func,
				bool is_32bit __unused,
				uint32_t *exit_status0,
				uint32_t *exit_status1)
{
	uint32_t status = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;

	tee_ta_update_session_utime_resume();

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	regs = thread_get_ctx_regs();
	status = CSR_XSTATUS_SUM | CSR_XSTATUS_PIE;
	status = set_field_u64(status, CSR_XSTATUS_SPP, PRV_U);
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, status, NULL);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);

	return rc;
}
614