// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 NXP
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/interrupt.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/vm.h>
#include <riscv.h>
#include <trace.h>
#include <util.h>
/*
 * This function is called as a guard after each ABI call which is not
 * supposed to return.
 */
void __noreturn __panic_at_abi_return(void)
{
	panic();
}

/* This function returns the currently masked exception bits. */
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t xie = read_csr(CSR_XIE) & THREAD_EXCP_ALL;

	return xie ^ THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	/*
	 * On Arm, bits set in the DAIF register mask the corresponding
	 * exceptions. On RISC-V, bits set in the XIE CSR instead enable
	 * (unmask) the corresponding interrupt sources. To keep the
	 * semantics of thread_set_exceptions() unchanged, invert the bits
	 * in "exceptions" so that a bit set by the caller ends up cleared
	 * in XIE, masking that interrupt source.
	 */
	exceptions &= THREAD_EXCP_ALL;
	exceptions ^= THREAD_EXCP_ALL;

	barrier();
	write_csr(CSR_XIE, exceptions);
	barrier();
}

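/*
 * Mask the exceptions given in @exceptions in addition to those already
 * masked and return the previous mask state so it can be restored with
 * thread_unmask_exceptions().
 */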
uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

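/*
 * Lazy save/restore of the non-secure FPU state is not implemented here;
 * these stubs only assert at build time that CFG_WITH_VFP is disabled.
 */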
static void thread_lazy_save_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void thread_lazy_restore_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
	regs->epc = (uintptr_t)thread_unwind_user_mode;
	regs->status = xstatus_for_xret(true, PRV_S);
	regs->ie = 0;
	/*
	 * We are about to exit user mode. The stack pointer must be restored
	 * to the value it had before space for the scall "regs" was
	 * allocated and thread_scall_handler() was called, which is simply
	 * (regs + 1).
	 */
	regs->sp = (uintptr_t)(regs + 1);
}

static void thread_unhandled_trap(struct thread_ctx_regs *regs __unused,
				  unsigned long cause __unused)
{
	DMSG("Unhandled trap xepc:0x%016lx xcause:0x%016lx xtval:0x%016lx",
	     read_csr(CSR_XEPC), read_csr(CSR_XCAUSE), read_csr(CSR_XTVAL));
	panic();
}

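/*
 * Handle an ecall (system call) trapped from user mode. On a normal return
 * the saved epc is advanced past the ecall instruction; otherwise the
 * registers are set up to unwind back to __thread_enter_user_mode().
 */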
void thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);

	if (sess->handle_scall(regs)) {
		/*
		 * We're about to return to the instruction following the
		 * ecall in user mode.
		 */
		regs->epc += 4;
	} else {
		/* We're returning from __thread_enter_user_mode() */
		setup_unwind_user_mode(regs);
	}
}

static void thread_irq_handler(void)
{
	interrupt_main_handler();
}

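/*
 * Dispatch a native (secure) interrupt: the timer interrupt is masked
 * here, external interrupts go to the interrupt framework, and anything
 * else is treated as an unhandled trap.
 */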
void thread_native_interrupt_handler(struct thread_ctx_regs *regs,
				     unsigned long cause)
{
	switch (cause & LONG_MAX) {
	case IRQ_XTIMER:
		clear_csr(CSR_XIE, CSR_XIE_TIE);
		break;
	case IRQ_XSOFT:
		thread_unhandled_trap(regs, cause);
		break;
	case IRQ_XEXT:
		thread_irq_handler();
		break;
	default:
		thread_unhandled_trap(regs, cause);
	}
}

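/*
 * Compose the xstatus value to be installed before executing an xRET:
 * IE is cleared, the previous interrupt-enable bit (PIE) is set to @pie
 * and the previous privilege level (SPP) is set to @pp.
 */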
unsigned long xstatus_for_xret(uint8_t pie, uint8_t pp)
{
	unsigned long xstatus = read_csr(CSR_XSTATUS);

	assert(pp == PRV_M || pp == PRV_S || pp == PRV_U);

#ifdef RV32
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_IE, 0);
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_PIE, pie);
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_SPP, pp);
#else /* RV64 */
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_IE, 0);
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_PIE, pie);
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_SPP, pp);
#endif

	return xstatus;
}

static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	memset(&thread->regs, 0, sizeof(thread->regs));

	thread->regs.epc = (uintptr_t)pc;

	/* Set up xstatus */
	thread->regs.status = xstatus_for_xret(true, PRV_S);

	/* Enable native interrupts */
	thread->regs.ie = THREAD_EXCP_NATIVE_INTR;

	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/* Set up GP and TP */
	thread->regs.gp = read_gp();
	thread->regs.tp = read_tp();

	/*
	 * Copy arguments into the context. This makes the arguments appear
	 * in a0-a7 when the thread is started.
	 */
	thread->regs.a0 = a0;
	thread->regs.a1 = a1;
	thread->regs.a2 = a2;
	thread->regs.a3 = a3;
	thread->regs.a4 = a4;
	thread->regs.a5 = a5;
	thread->regs.a6 = a6;
	thread->regs.a7 = a7;
}

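/*
 * Find a free thread context, mark it active, initialize its registers
 * with the given arguments and entry point, and resume it. Returns to the
 * caller only if no free thread context is available.
 */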
static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;

	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_abi_entry);
}

static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	regs->a0 = a0;
	regs->a1 = a1;
	regs->a2 = a2;
	regs->a3 = a3;
}

static bool is_from_user(unsigned long status)
{
	return (status & CSR_XSTATUS_SPP) == 0;
}

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __maybe_unused __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->status);
}

vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}

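/* Translate a hart index into the physical hart ID recorded at boot. */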
uint32_t thread_get_hartid_by_hartindex(uint32_t hartidx)
{
	assert(hartidx < CFG_TEE_CORE_NB_CORE);

	return thread_core_local[hartidx].hart_id;
}

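/*
 * Resume a thread that was suspended while waiting for an RPC to complete.
 * The a0-a3 arguments from the normal world are only copied into the
 * thread context when THREAD_FLAGS_COPY_ARGS_ON_RETURN is set.
 */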
void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * The thread may be resumed on a different hart, so tp must be
	 * updated to point to the current hart's thread_core_local.
	 */
	if (!is_user_mode(&threads[n].regs))
		threads[n].regs.tp = read_tp();

	/*
	 * A return from an RPC that requested servicing of a foreign
	 * interrupt must not take parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

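/*
 * Save the current thread's state and mark it suspended so that it can be
 * resumed later with thread_resume_from_rpc(). Returns the thread ID.
 */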
int thread_state_suspend(uint32_t flags, unsigned long status, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	if (is_from_user(status)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.status = status;
	threads[ct].regs.epc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

static void init_user_kcode(void)
{
}

void thread_init_primary(void)
{
	init_user_kcode();
}

static vaddr_t get_trap_vect(void)
{
	return (vaddr_t)thread_trap_vect;
}

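/* Install the trap vector on the current hart and verify that it stuck. */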
void thread_init_tvec(void)
{
	unsigned long tvec = (unsigned long)get_trap_vect();

	write_csr(CSR_XTVEC, tvec);
	assert(read_csr(CSR_XTVEC) == tvec);
}

void thread_init_per_cpu(void)
{
	thread_init_tvec();
	/*
	 * Traps may be taken from this point on, so zeroize XSCRATCH so
	 * that thread_trap_vect() can distinguish user traps from kernel
	 * traps.
	 */
	write_csr(CSR_XSCRATCH, 0);
#ifndef CFG_PAN
	/*
	 * Allow access to user pages. When CFG_PAN is enabled, the SUM bit
	 * is instead set and cleared at runtime as needed.
	 */
	set_csr(CSR_XSTATUS, CSR_XSTATUS_SUM);
#endif
}

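/*
 * Initialize the register context used to enter user mode: function
 * arguments in a0-a3, the user stack pointer, entry point, xstatus and the
 * interrupt-enable state.
 */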
static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 unsigned long status, unsigned long ie,
			 struct thread_pauth_keys *keys __unused)
{
	*regs = (struct thread_ctx_regs){
		.a0 = a0,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.s0 = 0,
		.sp = user_sp,
		.epc = entry_func,
		.status = status,
		.ie = ie,
	};
}

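/*
 * Set up the thread context and enter user mode at @entry_func. Returns
 * once user-mode execution has been unwound, with additional status
 * reported through exit_status0/exit_status1.
 */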
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp,
				unsigned long entry_func,
				bool is_32bit __unused,
				uint32_t *exit_status0,
				uint32_t *exit_status1)
{
	unsigned long status = 0;
	unsigned long ie = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;

	tee_ta_update_session_utime_resume();

	/* Save the currently enabled interrupt sources */
	ie = read_csr(CSR_XIE);

	/*
	 * Mask all exceptions; CSR_XSTATUS.IE will be set again via
	 * setup_unwind_user_mode() when exiting user mode.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	regs = thread_get_ctx_regs();
	status = xstatus_for_xret(true, PRV_U);
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, status, ie,
		     NULL);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);

	return rc;
}

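/*
 * Perform an RPC to the normal world, using an xstatus value set up for an
 * S-mode return with the previous interrupt-enable bit cleared.
 */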
void __thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
{
	thread_rpc_xstatus(rv, xstatus_for_xret(false, PRV_S));
}