// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 NXP
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/interrupt.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/vm.h>
#include <riscv.h>
#include <trace.h>
#include <util.h>

/*
 * This function is called as a guard after each ABI call which is not
 * supposed to return.
 */
void __noreturn __panic_at_abi_return(void)
{
	panic();
}

/* This function returns the currently masked exception bits. */
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t xie = read_csr(CSR_XIE) & THREAD_EXCP_ALL;

	return xie ^ THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	/*
	 * On Arm, the bits in the DAIF register mask exceptions, whereas
	 * on RISC-V the bits in the XIE CSR enable (unmask) the
	 * corresponding interrupt sources. To keep the semantics of
	 * thread_set_exceptions() unchanged, the bits in "exceptions" are
	 * inverted: a bit that is set in "exceptions" (masked) ends up
	 * cleared in the value written to XIE, which disables that
	 * interrupt source.
	 */
	exceptions &= THREAD_EXCP_ALL;
	exceptions ^= THREAD_EXCP_ALL;

	barrier();
	write_csr(CSR_XIE, exceptions);
	barrier();
}
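
/*
 * Worked example (illustrative only): a call such as
 * thread_set_exceptions(THREAD_EXCP_FOREIGN_INTR) leaves every enable
 * bit in THREAD_EXCP_ALL set except the foreign interrupt bit, so the
 * value written to XIE keeps all other sources enabled and disables
 * (masks) foreign interrupts only. thread_get_exceptions() applies the
 * same xor in the other direction to translate XIE enable bits back
 * into mask bits.
 */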

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

static void thread_lazy_save_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void thread_lazy_restore_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
	regs->ra = (uintptr_t)thread_unwind_user_mode;
	regs->status = read_csr(CSR_XSTATUS);
	regs->sp = thread_get_saved_thread_sp();
}

static void thread_unhandled_trap(struct thread_trap_regs *regs __unused,
				  unsigned long cause __unused)
{
	DMSG("Unhandled trap xepc:0x%016lx xcause:0x%016lx xtval:0x%016lx",
	     read_csr(CSR_XEPC), read_csr(CSR_XCAUSE), read_csr(CSR_XTVAL));
	panic();
}

void thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);

	if (!sess->handle_scall(regs)) {
		setup_unwind_user_mode(regs);
		thread_exit_user_mode(regs->a0, regs->a1, regs->a2,
				      regs->a3, regs->sp, regs->ra,
				      regs->status);
	}
}

static void copy_scall_to_trap(struct thread_scall_regs *scall_regs,
			       struct thread_trap_regs *trap_regs)
{
	trap_regs->a0 = scall_regs->a0;
	trap_regs->a1 = scall_regs->a1;
	trap_regs->a2 = scall_regs->a2;
	trap_regs->a3 = scall_regs->a3;
	trap_regs->a4 = scall_regs->a4;
	trap_regs->a5 = scall_regs->a5;
	trap_regs->a6 = scall_regs->a6;
	trap_regs->a7 = scall_regs->a7;
	trap_regs->t0 = scall_regs->t0;
	trap_regs->t1 = scall_regs->t1;
}

static void copy_trap_to_scall(struct thread_trap_regs *trap_regs,
			       struct thread_scall_regs *scall_regs)
{
	*scall_regs = (struct thread_scall_regs) {
		.status = trap_regs->status,
		.ra = trap_regs->ra,
		.a0 = trap_regs->a0,
		.a1 = trap_regs->a1,
		.a2 = trap_regs->a2,
		.a3 = trap_regs->a3,
		.a4 = trap_regs->a4,
		.a5 = trap_regs->a5,
		.a6 = trap_regs->a6,
		.a7 = trap_regs->a7,
		.t0 = trap_regs->t0,
		.t1 = trap_regs->t1,
	};
}

static void thread_user_ecall_handler(struct thread_trap_regs *trap_regs)
{
	struct thread_scall_regs scall_regs;
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	copy_trap_to_scall(trap_regs, &scall_regs);
	thread_scall_handler(&scall_regs);
	copy_scall_to_trap(&scall_regs, trap_regs);
	/*
	 * Save the kernel stack pointer we had at the beginning of this
	 * function. This matters when this TA has called another TA,
	 * because __thread_enter_user_mode() also saves the stack
	 * pointer in this field.
	 */
	threads[ct].kern_sp = (unsigned long)(trap_regs + 1);
	/*
	 * We are returning to U-mode. On return the program counter is
	 * set from xepc (pc = xepc), so add 4 (the size of the ecall
	 * instruction) to resume at the instruction following the ecall.
	 */
	trap_regs->epc += 4;
}
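
/*
 * Illustrative sketch (not part of the build): seen from user space, a
 * system call is just argument setup followed by ecall. The exact
 * register assignments are an assumption here; the authoritative ABI is
 * defined by struct thread_scall_regs and the user-space syscall stubs.
 *
 *	# a0-a7, t0, t1 loaded as copy_trap_to_scall() expects
 *	ecall		# traps into OP-TEE, xepc points at this ecall
 *	# execution resumes here: the handler adds 4 to epc, which is
 *	# always correct because ecall has a fixed 4-byte encoding
 *	# (there is no compressed form of ecall)
 */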

static void copy_trap_to_abort(struct thread_trap_regs *trap_regs,
			       struct thread_abort_regs *abort_regs)
{
	*abort_regs = (struct thread_abort_regs) {
		.status = trap_regs->status,
		.ra = trap_regs->ra,
		.sp = trap_regs->sp,
		.gp = trap_regs->gp,
		.tp = trap_regs->tp,
		.t0 = trap_regs->t0,
		.t1 = trap_regs->t1,
		.t2 = trap_regs->t2,
		.s0 = trap_regs->s0,
		.s1 = trap_regs->s1,
		.a0 = trap_regs->a0,
		.a1 = trap_regs->a1,
		.a2 = trap_regs->a2,
		.a3 = trap_regs->a3,
		.a4 = trap_regs->a4,
		.a5 = trap_regs->a5,
		.a6 = trap_regs->a6,
		.a7 = trap_regs->a7,
		.s2 = trap_regs->s2,
		.s3 = trap_regs->s3,
		.s4 = trap_regs->s4,
		.s5 = trap_regs->s5,
		.s6 = trap_regs->s6,
		.s7 = trap_regs->s7,
		.s8 = trap_regs->s8,
		.s9 = trap_regs->s9,
		.s10 = trap_regs->s10,
		.s11 = trap_regs->s11,
		.t3 = trap_regs->t3,
		.t4 = trap_regs->t4,
		.t5 = trap_regs->t5,
		.t6 = trap_regs->t6,
	};
}

static void thread_abort_handler(struct thread_trap_regs *trap_regs,
				 unsigned long cause)
{
	struct thread_abort_regs abort_regs = { };

	assert(cause == read_csr(CSR_XCAUSE));
	copy_trap_to_abort(trap_regs, &abort_regs);
	abort_regs.cause = read_csr(CSR_XCAUSE);
	abort_regs.epc = read_csr(CSR_XEPC);
	abort_regs.tval = read_csr(CSR_XTVAL);
	abort_regs.satp = read_csr(CSR_SATP);
	abort_handler(cause, &abort_regs);
}

static void thread_exception_handler(unsigned long cause,
				     struct thread_trap_regs *regs)
{
	switch (cause) {
	case CAUSE_USER_ECALL:
		thread_user_ecall_handler(regs);
		break;
	default:
		thread_abort_handler(regs, cause);
		break;
	}
}

static void thread_irq_handler(void)
{
	interrupt_main_handler();
}

static void thread_interrupt_handler(unsigned long cause,
				     struct thread_trap_regs *regs)
{
	switch (cause & LONG_MAX) {
	case IRQ_XTIMER:
		clear_csr(CSR_XIE, CSR_XIE_TIE);
		break;
	case IRQ_XSOFT:
		thread_unhandled_trap(regs, cause);
		break;
	case IRQ_XEXT:
		thread_irq_handler();
		break;
	default:
		thread_unhandled_trap(regs, cause);
	}
}

void thread_trap_handler(long cause, unsigned long epc __unused,
			 struct thread_trap_regs *regs,
			 bool user __maybe_unused)
{
	/*
	 * The Interrupt bit (bit XLEN - 1) in the cause register is set
	 * if the trap was caused by an interrupt, which makes cause
	 * negative when it is interpreted as a signed long.
	 */
	if (cause < 0)
		thread_interrupt_handler(cause, regs);
	/*
	 * Otherwise the Interrupt bit is clear and cause holds the
	 * exception code of a synchronous exception.
	 */
	else
		thread_exception_handler(cause, regs);
}
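
/*
 * Worked example (illustrative only): on RV64, a supervisor timer
 * interrupt sets xcause to (1UL << 63) | 5 = 0x8000000000000005. Read
 * as a signed long this is negative, so thread_trap_handler() routes
 * it to thread_interrupt_handler(), where "cause & LONG_MAX" strips
 * the Interrupt bit and leaves code 5 for the switch. An ecall from
 * U-mode instead yields xcause == CAUSE_USER_ECALL (8) with the
 * Interrupt bit clear, taking the thread_exception_handler() path.
 */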

static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.ra = (uintptr_t)pc;

	/* Set up xstatus */
	thread->regs.status = read_csr(CSR_XSTATUS);

	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the arguments
	 * appear in a0-a7 when the thread is started.
	 */
	thread->regs.a0 = a0;
	thread->regs.a1 = a1;
	thread->regs.a2 = a2;
	thread->regs.a3 = a3;
	thread->regs.a4 = a4;
	thread->regs.a5 = a5;
	thread->regs.a6 = a6;
	thread->regs.a7 = a7;
}

static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;

	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_abi_entry);
}

static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	regs->a0 = a0;
	regs->a1 = a1;
	regs->a2 = a2;
	regs->a3 = a3;
}

static bool is_from_user(unsigned long status)
{
	return (status & CSR_XSTATUS_SPP) == 0;
}

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __maybe_unused __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->status);
}

vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}

void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * A return from an RPC that requested service of a foreign
	 * interrupt must not fetch parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}
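
/*
 * Summary of the thread state machine implemented by the functions in
 * this file (derived from the code above and below, for orientation
 * only):
 *
 *	THREAD_STATE_FREE --(thread_alloc_and_run)--> THREAD_STATE_ACTIVE
 *	THREAD_STATE_ACTIVE --(thread_state_suspend)--> THREAD_STATE_SUSPENDED
 *	THREAD_STATE_SUSPENDED --(thread_resume_from_rpc)--> THREAD_STATE_ACTIVE
 *	THREAD_STATE_ACTIVE --(thread_state_free)--> THREAD_STATE_FREE
 *
 * All transitions happen with the global thread lock held.
 */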

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

int thread_state_suspend(uint32_t flags, unsigned long status, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	if (is_from_user(status)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.status = status;
	threads[ct].regs.ra = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

static void init_user_kcode(void)
{
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static vaddr_t get_trap_vect(void)
{
	return (vaddr_t)thread_trap_vect;
}

void thread_init_tvec(void)
{
	unsigned long tvec = (unsigned long)get_trap_vect();

	static_assert(sizeof(struct thread_trap_regs) % 16 == 0);
	write_csr(CSR_XTVEC, tvec);
	assert(read_csr(CSR_XTVEC) == tvec);
}

void thread_init_per_cpu(void)
{
	thread_init_tvec();
	/*
	 * We may receive traps from now on, so zeroize xSCRATCH so that
	 * thread_trap_vect() can distinguish between user traps and
	 * kernel traps.
	 */
	write_csr(CSR_XSCRATCH, 0);
#ifndef CFG_PAN
	/*
	 * Allow access to user pages. When CFG_PAN is enabled, the SUM
	 * bit is set and cleared at runtime as necessary.
	 */
	set_csr(CSR_XSTATUS, CSR_XSTATUS_SUM);
#endif
}
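
/*
 * Sketch of the xSCRATCH convention assumed above (the real entry code
 * lives in the assembly implementation of thread_trap_vect()): a common
 * RISC-V idiom is to keep 0 in xSCRATCH while executing in the kernel
 * and a pointer to a per-hart area while in user mode, roughly:
 *
 *	csrrw	tp, CSR_XSCRATCH, tp	# swap tp with xSCRATCH
 *	beqz	tp, trap_from_kernel	# xSCRATCH was 0 => kernel trap
 *
 * Writing 0 here establishes the "in kernel" state before the first
 * trap can be taken.
 */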

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 unsigned long status,
			 struct thread_pauth_keys *keys __unused)
{
	*regs = (struct thread_ctx_regs){
		.a0 = a0,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.sp = user_sp,
		.ra = entry_func,
		.status = status
	};
}

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp,
				unsigned long entry_func,
				bool is_32bit __unused,
				uint32_t *exit_status0,
				uint32_t *exit_status1)
{
	unsigned long status = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;

	tee_ta_update_session_utime_resume();

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	regs = thread_get_ctx_regs();
	status = read_csr(CSR_XSTATUS);
	/* Previous interrupt enable: interrupts are on after return */
	status |= CSR_XSTATUS_PIE;
	status = set_field_u64(status, CSR_XSTATUS_SPP, PRV_U);
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, status, NULL);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);

	return rc;
}
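
/*
 * Note on the xstatus value built above (per the RISC-V privileged
 * specification): when the assembly behind __thread_enter_user_mode()
 * eventually executes xRET, the hart sets pc from xepc, takes the
 * privilege mode from xstatus.SPP (PRV_U here, so it drops to user
 * mode) and copies xstatus.xPIE into xstatus.xIE. Setting PIE above
 * therefore arranges for interrupts to be enabled once user code
 * starts running, even though they are masked while this function
 * executes.
 */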