// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 NXP
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/interrupt.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/vm.h>
#include <riscv.h>
#include <trace.h>
#include <util.h>

/*
 * This function is called as a guard after each ABI call which is not
 * supposed to return.
 */
void __noreturn __panic_at_abi_return(void)
{
	panic();
}

/* This function returns the currently masked exception bits. */
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t xie = read_csr(CSR_XIE) & THREAD_EXCP_ALL;

	return xie ^ THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	/*
	 * On Arm, set bits in the DAIF register mask the corresponding
	 * exceptions. On RISC-V, set bits in CSR XIE instead enable (unmask)
	 * the corresponding interrupt sources. To keep the semantics of
	 * thread_set_exceptions() unchanged, invert the bits in "exceptions"
	 * so that a bit set in "exceptions" ends up cleared in the value
	 * written to CSR XIE, which masks that interrupt source.
	 */
	exceptions &= THREAD_EXCP_ALL;
	exceptions ^= THREAD_EXCP_ALL;

	barrier();
	write_csr(CSR_XIE, exceptions);
	barrier();
}

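/*
 * Masks the exceptions given in @exceptions in addition to the ones already
 * masked and returns the previous mask state so it can be restored with
 * thread_unmask_exceptions().
 */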
uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

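/*
 * Restores an exception mask state previously returned by
 * thread_mask_exceptions().
 */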
void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

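/*
 * Lazy save/restore of the non-secure VFP state is not implemented here;
 * the static asserts below make sure these stubs are only built when
 * CFG_WITH_VFP is disabled.
 */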
static void thread_lazy_save_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void thread_lazy_restore_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

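/*
 * Prepares the scall register context for leaving user mode: the return
 * address is pointed at thread_unwind_user_mode(), the status value is set
 * up for an xRET to S-mode and the stack pointer is restored to the value
 * it had before the scall context was pushed.
 */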
static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
	regs->ra = (uintptr_t)thread_unwind_user_mode;
	regs->status = xstatus_for_xret(true, PRV_S);
	/*
	 * We are about to exit user mode. The stack pointer must be restored
	 * to the value it had before space for the scall "regs" was allocated
	 * and thread_scall_handler() was called, so it can simply be set to
	 * (regs + 1).
	 */
	regs->sp = (uintptr_t)(regs + 1);
}

static void thread_unhandled_trap(unsigned long cause __unused,
				  struct thread_ctx_regs *regs __unused)
{
	DMSG("Unhandled trap xepc:0x%016lx xcause:0x%016lx xtval:0x%016lx",
	     read_csr(CSR_XEPC), read_csr(CSR_XCAUSE), read_csr(CSR_XTVAL));
	panic();
}

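/*
 * Handles a system call trapped from user mode: native interrupts are
 * unmasked, the current session's scall handler is invoked and, when the
 * handler indicates that user mode should not be resumed, execution is
 * unwound via thread_exit_user_mode().
 */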
void thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);

	if (!sess->handle_scall(regs)) {
		setup_unwind_user_mode(regs);
		thread_exit_user_mode(regs->a0, regs->a1, regs->a2,
				      regs->a3, regs->sp, regs->ra,
				      regs->status);
	}
}

static void thread_irq_handler(void)
{
	interrupt_main_handler();
}

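/*
 * Dispatches an interrupt based on the cause value: timer interrupts are
 * simply masked, external interrupts are forwarded to the main interrupt
 * handler, and software interrupts or any other cause are treated as
 * unhandled traps.
 */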
void thread_interrupt_handler(unsigned long cause, struct thread_ctx_regs *regs)
{
	switch (cause & LONG_MAX) {
	case IRQ_XTIMER:
		clear_csr(CSR_XIE, CSR_XIE_TIE);
		break;
	case IRQ_XSOFT:
		thread_unhandled_trap(cause, regs);
		break;
	case IRQ_XEXT:
		thread_irq_handler();
		break;
	default:
		thread_unhandled_trap(cause, regs);
	}
}

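/*
 * Builds an xstatus value to be used with xRET: IE is cleared so interrupts
 * stay disabled until the xRET takes effect, the previous interrupt enable
 * bit is set to @pie and the previous privilege level to @pp (PRV_U, PRV_S
 * or PRV_M).
 */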
unsigned long xstatus_for_xret(uint8_t pie, uint8_t pp)
{
	unsigned long xstatus = read_csr(CSR_XSTATUS);

	assert(pp == PRV_M || pp == PRV_S || pp == PRV_U);

#ifdef RV32
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_IE, 0);
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_PIE, pie);
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_SPP, pp);
#else	/* RV64 */
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_IE, 0);
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_PIE, pie);
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_SPP, pp);
#endif

	return xstatus;
}

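/*
 * Initializes the register context of a freshly allocated thread: entry
 * point, xstatus for returning to S-mode, interrupt enable bits, stack
 * pointer, gp/tp and the a0-a7 argument registers.
 */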
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	memset(&thread->regs, 0, sizeof(thread->regs));

	thread->regs.epc = (uintptr_t)pc;

	/* Set up xstatus */
	thread->regs.status = xstatus_for_xret(true, PRV_S);

	/* Enable native interrupts */
	thread->regs.ie = THREAD_EXCP_NATIVE_INTR;

	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/* Set up GP and TP */
	thread->regs.gp = read_gp();
	thread->regs.tp = read_tp();

	/*
	 * Copy arguments into the context. This makes the arguments appear
	 * in a0-a7 when the thread is started.
	 */
	thread->regs.a0 = a0;
	thread->regs.a1 = a1;
	thread->regs.a2 = a2;
	thread->regs.a3 = a3;
	thread->regs.a4 = a4;
	thread->regs.a5 = a5;
	thread->regs.a6 = a6;
	thread->regs.a7 = a7;
}

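/*
 * Allocates a free thread slot, marks it active, initializes its register
 * context with the given arguments and entry point, and resumes execution
 * in it. Returns to the caller only if no free thread is available.
 */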
static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;

	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_abi_entry);
}

static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	regs->a0 = a0;
	regs->a1 = a1;
	regs->a2 = a2;
	regs->a3 = a3;
}

static bool is_from_user(unsigned long status)
{
	return (status & CSR_XSTATUS_SPP) == 0;
}

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __maybe_unused __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->status);
}

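/* Returns the saved kernel stack pointer of the thread running on this core. */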
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}

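/*
 * Resumes a thread previously suspended for an RPC: the thread is marked
 * active, its user mapping and accounting state are restored and execution
 * continues from the saved register context. Returns to the caller only if
 * the thread ID is out of range or the thread is not suspended.
 */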
void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * The thread may be resumed on another hart, so tp must be updated
	 * to point to the current hart's thread_core_local.
	 */
	if (!is_user_mode(&threads[n].regs))
		threads[n].regs.tp = read_tp();

	/*
	 * A return from an RPC that requested servicing of a foreign
	 * interrupt must not take parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

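/*
 * Releases the thread currently running on this core: its slot is returned
 * to the free state and the core no longer has a current thread.
 */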
void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

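/*
 * Suspends the thread currently running on this core: the resume point
 * (status and pc) is recorded, any active user mapping is saved and
 * detached, and the thread ID is returned so the thread can later be
 * resumed with thread_resume_from_rpc().
 */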
int thread_state_suspend(uint32_t flags, unsigned long status, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	if (is_from_user(status)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.status = status;
	threads[ct].regs.epc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

static void init_user_kcode(void)
{
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static vaddr_t get_trap_vect(void)
{
	return (vaddr_t)thread_trap_vect;
}

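/* Installs thread_trap_vect() as this hart's trap vector. */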
void thread_init_tvec(void)
{
	unsigned long tvec = (unsigned long)get_trap_vect();

	write_csr(CSR_XTVEC, tvec);
	assert(read_csr(CSR_XTVEC) == tvec);
}

void thread_init_per_cpu(void)
{
	thread_init_tvec();
	/*
	 * Traps may be taken from this point on, therefore zeroize xSCRATCH
	 * so that thread_trap_vect() can distinguish user traps from kernel
	 * traps.
	 */
	write_csr(CSR_XSCRATCH, 0);
#ifndef CFG_PAN
	/*
	 * Allow access to user pages. When CFG_PAN is enabled, the SUM bit
	 * is instead set and cleared at runtime as needed.
	 */
	set_csr(CSR_XSTATUS, CSR_XSTATUS_SUM);
#endif
}

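/*
 * Fills in the register context used for entering user mode: argument
 * registers, user stack pointer, entry point, and the status and interrupt
 * enable values to apply on the return to user mode.
 */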
static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 unsigned long status, unsigned long ie,
			 struct thread_pauth_keys *keys __unused)
{
	*regs = (struct thread_ctx_regs){
		.a0 = a0,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.s0 = 0,
		.sp = user_sp,
		.ra = entry_func,
		.status = status,
		.ie = ie,
	};
}

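/*
 * Enters user mode at @entry_func with arguments a0-a3 and the user stack
 * @user_sp. All exceptions are masked while the context is set up and the
 * previous mask is restored once user mode has been exited. Returns the
 * exit code from __thread_enter_user_mode().
 */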
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp,
				unsigned long entry_func,
				bool is_32bit __unused,
				uint32_t *exit_status0,
				uint32_t *exit_status1)
{
	unsigned long status = 0;
	unsigned long ie = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;

	tee_ta_update_session_utime_resume();

	/* Read current interrupt masks */
	ie = read_csr(CSR_XIE);

	/*
	 * Mask all exceptions; CSR_XSTATUS.IE will be set again from
	 * setup_unwind_user_mode() when user mode is exited.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	regs = thread_get_ctx_regs();
	status = xstatus_for_xret(true, PRV_U);
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, status, ie,
		     NULL);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);

	return rc;
}

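/*
 * Performs an RPC to the normal world, using an xstatus value prepared for
 * returning to S-mode with interrupts disabled.
 */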
void __thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
{
	thread_rpc_xstatus(rv, xstatus_for_xret(false, PRV_S));
}