// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 NXP
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/interrupt.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/vm.h>
#include <riscv.h>
#include <trace.h>
#include <util.h>

/*
 * This function is called as a guard after each ABI call which is not
 * supposed to return.
 */
void __noreturn __panic_at_abi_return(void)
{
	panic();
}

/* This function returns the currently masked exception bits. */
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t xie = read_csr(CSR_XIE) & THREAD_EXCP_ALL;

	return xie ^ THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	/*
	 * On Arm, the bits in the DAIF register mask exceptions, while on
	 * RISC-V the bits in CSR XIE enable (unmask) the corresponding
	 * interrupt sources. To keep the semantics of
	 * thread_set_exceptions() unchanged, invert the bits in
	 * "exceptions" so that a bit which is set there ends up cleared in
	 * CSR XIE, masking that exception.
	 */
	exceptions &= THREAD_EXCP_ALL;
	exceptions ^= THREAD_EXCP_ALL;

	barrier();
	write_csr(CSR_XIE, exceptions);
	barrier();
}

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

static void thread_lazy_save_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void thread_lazy_restore_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
	regs->epc = (uintptr_t)thread_unwind_user_mode;
	regs->status = xstatus_for_xret(true, PRV_S);
	regs->ie = 0;
	/*
	 * We are about to exit user mode. The stack pointer must be restored
	 * to the value it had before space for the scall "regs" was
	 * allocated and thread_scall_handler() was called, so we can simply
	 * set it to (regs + 1).
	 */
	regs->sp = (uintptr_t)(regs + 1);
}

static void thread_unhandled_trap(struct thread_ctx_regs *regs __unused,
				  unsigned long cause __unused)
{
	DMSG("Unhandled trap xepc:0x%016lx xcause:0x%016lx xtval:0x%016lx",
	     read_csr(CSR_XEPC), read_csr(CSR_XCAUSE), read_csr(CSR_XTVAL));
	panic();
}

void thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);

	if (sess->handle_scall(regs)) {
		/*
		 * We're about to switch back to the instruction following
		 * the ecall in user mode
		 */
		regs->epc += 4;
	} else {
		/* We're returning from __thread_enter_user_mode() */
		setup_unwind_user_mode(regs);
	}
}

static void thread_irq_handler(void)
{
	interrupt_main_handler();
}

void thread_native_interrupt_handler(struct thread_ctx_regs *regs,
				     unsigned long cause)
{
	switch (cause & LONG_MAX) {
	case IRQ_XTIMER:
		clear_csr(CSR_XIE, CSR_XIE_TIE);
		break;
	case IRQ_XSOFT:
		thread_unhandled_trap(regs, cause);
		break;
	case IRQ_XEXT:
		thread_irq_handler();
		break;
	default:
		thread_unhandled_trap(regs, cause);
	}
}

/*
 * Build an xstatus value to be used when returning with xRET: interrupts
 * remain masked until the return (IE = 0), the previous interrupt-enable
 * bit is set to @pie and the previous privilege mode to @pp.
 */
unsigned long xstatus_for_xret(uint8_t pie, uint8_t pp)
{
	unsigned long xstatus = read_csr(CSR_XSTATUS);

	assert(pp == PRV_M || pp == PRV_S || pp == PRV_U);

#ifdef RV32
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_IE, 0);
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_PIE, pie);
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_SPP, pp);
#else /* RV64 */
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_IE, 0);
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_PIE, pie);
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_SPP, pp);
#endif

	return xstatus;
}

static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	memset(&thread->regs, 0, sizeof(thread->regs));

	thread->regs.epc = (uintptr_t)pc;

	/* Set up xstatus */
	thread->regs.status = xstatus_for_xret(true, PRV_S);

	/* Enable native interrupts */
	thread->regs.ie = THREAD_EXCP_NATIVE_INTR;

	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/* Set up GP and TP */
	thread->regs.gp = read_gp();
	thread->regs.tp = read_tp();

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in a0-a7 when the thread is started.
	 */
	thread->regs.a0 = a0;
	thread->regs.a1 = a1;
	thread->regs.a2 = a2;
	thread->regs.a3 = a3;
	thread->regs.a4 = a4;
	thread->regs.a5 = a5;
	thread->regs.a6 = a6;
	thread->regs.a7 = a7;
}

static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;

	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_abi_entry);
}

static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	regs->a0 = a0;
	regs->a1 = a1;
	regs->a2 = a2;
	regs->a3 = a3;
}

static bool is_from_user(unsigned long status)
{
	return (status & CSR_XSTATUS_SPP) == 0;
}

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __maybe_unused __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->status);
}

vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}

uint32_t thread_get_hartid_by_hartindex(uint32_t hartidx)
{
	assert(hartidx < CFG_TEE_CORE_NB_CORE);

	return thread_core_local[hartidx].hart_id;
}

void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * We may resume the thread on another hart, so tp must be
	 * reassigned to point at the current hart's thread_core_local.
	 */
	if (!is_user_mode(&threads[n].regs))
		threads[n].regs.tp = read_tp();

	/*
	 * A return from an RPC that requested service of a foreign interrupt
	 * must not take parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

int thread_state_suspend(uint32_t flags, unsigned long status, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	if (is_from_user(status)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.status = status;
	threads[ct].regs.epc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

static void init_user_kcode(void)
{
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static vaddr_t get_trap_vect(void)
{
	return (vaddr_t)thread_trap_vect;
}

void thread_init_tvec(void)
{
	unsigned long tvec = (unsigned long)get_trap_vect();

	write_csr(CSR_XTVEC, tvec);
	assert(read_csr(CSR_XTVEC) == tvec);
}

void thread_init_per_cpu(void)
{
	thread_init_tvec();
	/*
	 * We may receive traps from now on, therefore zeroize xSCRATCH so
	 * that thread_trap_vect() can distinguish between user traps and
	 * kernel traps.
	 */
	write_csr(CSR_XSCRATCH, 0);
#ifndef CFG_PAN
	/*
	 * Allow access to user pages. When CFG_PAN is enabled, the SUM bit
	 * will be set and cleared at runtime when necessary.
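	 * (The SUM bit permits S-mode accesses to pages that are also
	 * accessible in U-mode.)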
	 */
	set_csr(CSR_XSTATUS, CSR_XSTATUS_SUM);
#endif
}

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 unsigned long status, unsigned long ie,
			 struct thread_pauth_keys *keys __unused)
{
	*regs = (struct thread_ctx_regs){
		.a0 = a0,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.s0 = 0,
		.sp = user_sp,
		.epc = entry_func,
		.status = status,
		.ie = ie,
	};
}

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp,
				unsigned long entry_func,
				bool is_32bit __unused,
				uint32_t *exit_status0,
				uint32_t *exit_status1)
{
	unsigned long status = 0;
	unsigned long ie = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;

	tee_ta_update_session_utime_resume();

	/* Read current interrupt masks */
	ie = read_csr(CSR_XIE);

	/*
	 * Mask all exceptions; CSR_XSTATUS.IE will be set again from
	 * setup_unwind_user_mode() after exiting.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	regs = thread_get_ctx_regs();
	status = xstatus_for_xret(true, PRV_U);
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, status, ie,
		     NULL);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);

	return rc;
}

void __thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
{
	thread_rpc_xstatus(rv, xstatus_for_xret(false, PRV_S));
}