// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 NXP
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/interrupt.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/vm.h>
#include <riscv.h>
#include <trace.h>
#include <util.h>

/*
 * This function is called as a guard after each ABI call which is not
 * supposed to return.
 */
void __noreturn __panic_at_abi_return(void)
{
	panic();
}

/* This function returns the currently masked exception bits. */
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t xie = read_csr(CSR_XIE) & THREAD_EXCP_ALL;

	return xie ^ THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	/*
	 * On Arm, the bits in the DAIF register mask exceptions, whereas on
	 * RISC-V the bits in CSR XIE enable (unmask) the corresponding
	 * interrupt sources. To keep the semantics of thread_set_exceptions()
	 * unchanged, the bits in "exceptions" are inverted before the final
	 * value is written to CSR XIE, so that exceptions requested to be
	 * masked end up with their enable bits cleared.
	 */
	exceptions &= THREAD_EXCP_ALL;
	exceptions ^= THREAD_EXCP_ALL;

	barrier();
	write_csr(CSR_XIE, exceptions);
	barrier();
}

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

static void thread_lazy_save_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void thread_lazy_restore_ns_vfp(void)
{
	static_assert(!IS_ENABLED(CFG_WITH_VFP));
}

static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
	regs->ra = (uintptr_t)thread_unwind_user_mode;
	regs->status = xstatus_for_xret(true, PRV_S);
	regs->sp = thread_get_saved_thread_sp();
}

static void thread_unhandled_trap(unsigned long cause __unused,
				  struct thread_ctx_regs *regs __unused)
{
	DMSG("Unhandled trap xepc:0x%016lx xcause:0x%016lx xtval:0x%016lx",
	     read_csr(CSR_XEPC), read_csr(CSR_XCAUSE), read_csr(CSR_XTVAL));
	panic();
}

void thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);

	if (!sess->handle_scall(regs)) {
		setup_unwind_user_mode(regs);
		thread_exit_user_mode(regs->a0, regs->a1, regs->a2,
				      regs->a3, regs->sp, regs->ra,
				      regs->status);
	}
}

static void thread_irq_handler(void)
{
	interrupt_main_handler();
}

void thread_interrupt_handler(unsigned long cause, struct thread_ctx_regs *regs)
{
	switch (cause & LONG_MAX) {
	case IRQ_XTIMER:
		clear_csr(CSR_XIE, CSR_XIE_TIE);
		break;
	case IRQ_XSOFT:
		thread_unhandled_trap(cause, regs);
		break;
	case IRQ_XEXT:
		thread_irq_handler();
		break;
	default:
		thread_unhandled_trap(cause, regs);
	}
}

/*
 * Build an xstatus value for use with xRET: IE is cleared so interrupts
 * stay masked until the return, PIE is set to @pie (copied into IE by
 * xRET) and SPP is set to @pp, the privilege level xRET returns to.
 */
unsigned long xstatus_for_xret(uint8_t pie, uint8_t pp)
{
	unsigned long xstatus = read_csr(CSR_XSTATUS);

	assert(pp == PRV_M || pp == PRV_S || pp == PRV_U);

#ifdef RV32
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_IE, 0);
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_PIE, pie);
	xstatus = set_field_u32(xstatus, CSR_XSTATUS_SPP, pp);
#else /* RV64 */
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_IE, 0);
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_PIE, pie);
	xstatus = set_field_u64(xstatus, CSR_XSTATUS_SPP, pp);
#endif

	return xstatus;
}

static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	memset(&thread->regs, 0, sizeof(thread->regs));

	thread->regs.epc = (uintptr_t)pc;

	/* Set up xstatus */
	thread->regs.status = xstatus_for_xret(true, PRV_S);

	/* Enable native interrupts */
	thread->regs.ie = THREAD_EXCP_NATIVE_INTR;

	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/* Set up GP and TP */
	thread->regs.gp = read_gp();
	thread->regs.tp = read_tp();

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in a0-a7 when thread is started.
	 */
	thread->regs.a0 = a0;
	thread->regs.a1 = a1;
	thread->regs.a2 = a2;
	thread->regs.a3 = a3;
	thread->regs.a4 = a4;
	thread->regs.a5 = a5;
	thread->regs.a6 = a6;
	thread->regs.a7 = a7;
}

static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;

	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_abi_entry);
}

static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	regs->a0 = a0;
	regs->a1 = a1;
	regs->a2 = a2;
	regs->a3 = a3;
}

static bool is_from_user(unsigned long status)
{
	return (status & CSR_XSTATUS_SPP) == 0;
}

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __maybe_unused __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->status);
}

vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}

void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * We may resume the thread on another hart, so tp must be
	 * reassigned to the current hart's thread_core_local.
	 */
	if (!is_user_mode(&threads[n].regs))
		threads[n].regs.tp = read_tp();

	/*
	 * A return from an RPC that requested service of a foreign interrupt
	 * must not take parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

int thread_state_suspend(uint32_t flags, unsigned long status, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	if (is_from_user(status)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.status = status;
	threads[ct].regs.epc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

static void init_user_kcode(void)
{
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static vaddr_t get_trap_vect(void)
{
	return (vaddr_t)thread_trap_vect;
}

void thread_init_tvec(void)
{
	unsigned long tvec = (unsigned long)get_trap_vect();

	write_csr(CSR_XTVEC, tvec);
	assert(read_csr(CSR_XTVEC) == tvec);
}

void thread_init_per_cpu(void)
{
	thread_init_tvec();
	/*
	 * We may receive traps from now on, therefore zeroize xSCRATCH so
	 * that thread_trap_vect() can distinguish between user traps and
	 * kernel traps.
	 */
	write_csr(CSR_XSCRATCH, 0);
#ifndef CFG_PAN
	/*
	 * Allow access to user pages. When CFG_PAN is enabled, the SUM bit
	 * is set and cleared at runtime as needed.
	 */
	set_csr(CSR_XSTATUS, CSR_XSTATUS_SUM);
#endif
}

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 unsigned long status, unsigned long ie,
			 struct thread_pauth_keys *keys __unused)
{
	*regs = (struct thread_ctx_regs){
		.a0 = a0,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.s0 = 0,
		.sp = user_sp,
		.ra = entry_func,
		.status = status,
		.ie = ie,
	};
}

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp,
				unsigned long entry_func,
				bool is_32bit __unused,
				uint32_t *exit_status0,
				uint32_t *exit_status1)
{
	unsigned long status = 0;
	unsigned long ie = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;

	tee_ta_update_session_utime_resume();

	/* Save the current interrupt enable state (CSR_XIE) */
	ie = read_csr(CSR_XIE);

	/*
	 * Mask all exceptions; CSR_XSTATUS.IE will be set again from
	 * setup_unwind_user_mode() after exiting user mode.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	regs = thread_get_ctx_regs();
	status = xstatus_for_xret(true, PRV_U);
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, status, ie,
		     NULL);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);

	return rc;
}

void __thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
{
	thread_rpc_xstatus(rv, xstatus_for_xret(false, PRV_S));
}