1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2015-2022, Linaro Limited 4 */ 5 6 #include <arm.h> 7 #include <kernel/abort.h> 8 #include <kernel/linker.h> 9 #include <kernel/misc.h> 10 #include <kernel/panic.h> 11 #include <kernel/tee_ta_manager.h> 12 #include <kernel/thread_private.h> 13 #include <kernel/user_mode_ctx.h> 14 #include <memtag.h> 15 #include <mm/core_mmu.h> 16 #include <mm/tee_pager.h> 17 #include <trace.h> 18 #include <unw/unwind.h> 19 20 enum fault_type { 21 FAULT_TYPE_USER_MODE_PANIC, 22 FAULT_TYPE_USER_MODE_VFP, 23 FAULT_TYPE_PAGEABLE, 24 FAULT_TYPE_IGNORE, 25 }; 26 27 #ifdef CFG_UNWIND 28 29 #ifdef ARM32 30 /* 31 * Kernel or user mode unwind (32-bit execution state). 32 */ 33 static void __print_stack_unwind(struct abort_info *ai) 34 { 35 struct unwind_state_arm32 state = { }; 36 uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK; 37 uint32_t sp = 0; 38 uint32_t lr = 0; 39 40 assert(!abort_is_user_exception(ai)); 41 42 if (mode == CPSR_MODE_SYS) { 43 sp = ai->regs->usr_sp; 44 lr = ai->regs->usr_lr; 45 } else { 46 sp = read_mode_sp(mode); 47 lr = read_mode_lr(mode); 48 } 49 50 memset(&state, 0, sizeof(state)); 51 state.registers[0] = ai->regs->r0; 52 state.registers[1] = ai->regs->r1; 53 state.registers[2] = ai->regs->r2; 54 state.registers[3] = ai->regs->r3; 55 state.registers[4] = ai->regs->r4; 56 state.registers[5] = ai->regs->r5; 57 state.registers[6] = ai->regs->r6; 58 state.registers[7] = ai->regs->r7; 59 state.registers[8] = ai->regs->r8; 60 state.registers[9] = ai->regs->r9; 61 state.registers[10] = ai->regs->r10; 62 state.registers[11] = ai->regs->r11; 63 state.registers[13] = sp; 64 state.registers[14] = lr; 65 state.registers[15] = ai->pc; 66 67 print_stack_arm32(&state, thread_stack_start(), thread_stack_size()); 68 } 69 #endif /* ARM32 */ 70 71 #ifdef ARM64 72 /* Kernel mode unwind (64-bit execution state) */ 73 static void __print_stack_unwind(struct abort_info *ai) 74 { 75 struct unwind_state_arm64 state = { 76 .pc = ai->regs->elr, 77 .fp = ai->regs->x29, 78 }; 79 80 print_stack_arm64(&state, thread_stack_start(), thread_stack_size()); 81 } 82 #endif /*ARM64*/ 83 84 #else /* CFG_UNWIND */ 85 static void __print_stack_unwind(struct abort_info *ai __unused) 86 { 87 } 88 #endif /* CFG_UNWIND */ 89 90 static __maybe_unused const char *abort_type_to_str(uint32_t abort_type) 91 { 92 if (abort_type == ABORT_TYPE_DATA) 93 return "data"; 94 if (abort_type == ABORT_TYPE_PREFETCH) 95 return "prefetch"; 96 return "undef"; 97 } 98 99 static __maybe_unused const char *fault_to_str(uint32_t abort_type, 100 uint32_t fault_descr) 101 { 102 /* fault_descr is only valid for data or prefetch abort */ 103 if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH) 104 return ""; 105 106 switch (core_mmu_get_fault_type(fault_descr)) { 107 case CORE_MMU_FAULT_ALIGNMENT: 108 return " (alignment fault)"; 109 case CORE_MMU_FAULT_TRANSLATION: 110 return " (translation fault)"; 111 case CORE_MMU_FAULT_READ_PERMISSION: 112 return " (read permission fault)"; 113 case CORE_MMU_FAULT_WRITE_PERMISSION: 114 return " (write permission fault)"; 115 case CORE_MMU_FAULT_TAG_CHECK: 116 return " (tag check fault)"; 117 default: 118 return ""; 119 } 120 } 121 122 static __maybe_unused void 123 __print_abort_info(struct abort_info *ai __maybe_unused, 124 const char *ctx __maybe_unused) 125 { 126 __maybe_unused size_t core_pos = 0; 127 #ifdef ARM32 128 uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK; 129 __maybe_unused uint32_t sp = 0; 130 __maybe_unused uint32_t lr = 0; 131 132 if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) { 133 sp = ai->regs->usr_sp; 134 lr = ai->regs->usr_lr; 135 core_pos = thread_get_tsd()->abort_core; 136 } else { 137 sp = read_mode_sp(mode); 138 lr = read_mode_lr(mode); 139 core_pos = get_core_pos(); 140 } 141 #endif /*ARM32*/ 142 #ifdef ARM64 143 if (abort_is_user_exception(ai)) 144 core_pos = thread_get_tsd()->abort_core; 145 else 146 core_pos = get_core_pos(); 147 #endif /*ARM64*/ 148 149 EMSG_RAW(""); 150 if (IS_ENABLED(CFG_MEMTAG)) 151 EMSG_RAW("%s %s-abort at address 0x%" PRIxVA 152 " [tagged 0x%" PRIxVA "]%s", ctx, 153 abort_type_to_str(ai->abort_type), 154 memtag_strip_tag_vaddr((void *)ai->va), ai->va, 155 fault_to_str(ai->abort_type, ai->fault_descr)); 156 else 157 EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s", ctx, 158 abort_type_to_str(ai->abort_type), ai->va, 159 fault_to_str(ai->abort_type, ai->fault_descr)); 160 #ifdef ARM32 161 EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X", 162 ai->fault_descr, read_ttbr0(), read_ttbr1(), 163 read_contextidr()); 164 EMSG_RAW(" cpu #%zu cpsr 0x%08x", 165 core_pos, ai->regs->spsr); 166 EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x", 167 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip); 168 EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x", 169 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp); 170 EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x", 171 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr); 172 EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x", 173 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc); 174 #endif /*ARM32*/ 175 #ifdef ARM64 176 EMSG_RAW(" esr 0x%08x ttbr0 0x%08" PRIx64 " ttbr1 0x%08" PRIx64 177 " cidr 0x%X", ai->fault_descr, read_ttbr0_el1(), 178 read_ttbr1_el1(), read_contextidr_el1()); 179 EMSG_RAW(" cpu #%zu cpsr 0x%08x", 180 core_pos, (uint32_t)ai->regs->spsr); 181 EMSG_RAW(" x0 %016" PRIx64 " x1 %016" PRIx64, 182 ai->regs->x0, ai->regs->x1); 183 EMSG_RAW(" x2 %016" PRIx64 " x3 %016" PRIx64, 184 ai->regs->x2, ai->regs->x3); 185 EMSG_RAW(" x4 %016" PRIx64 " x5 %016" PRIx64, 186 ai->regs->x4, ai->regs->x5); 187 EMSG_RAW(" x6 %016" PRIx64 " x7 %016" PRIx64, 188 ai->regs->x6, ai->regs->x7); 189 EMSG_RAW(" x8 %016" PRIx64 " x9 %016" PRIx64, 190 ai->regs->x8, ai->regs->x9); 191 EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64, 192 ai->regs->x10, ai->regs->x11); 193 EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64, 194 ai->regs->x12, ai->regs->x13); 195 EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64, 196 ai->regs->x14, ai->regs->x15); 197 EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64, 198 ai->regs->x16, ai->regs->x17); 199 EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64, 200 ai->regs->x18, ai->regs->x19); 201 EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64, 202 ai->regs->x20, ai->regs->x21); 203 EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64, 204 ai->regs->x22, ai->regs->x23); 205 EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64, 206 ai->regs->x24, ai->regs->x25); 207 EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64, 208 ai->regs->x26, ai->regs->x27); 209 EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64, 210 ai->regs->x28, ai->regs->x29); 211 EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64, 212 ai->regs->x30, ai->regs->elr); 213 EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0); 214 #endif /*ARM64*/ 215 } 216 217 /* 218 * Print abort info and (optionally) stack dump to the console 219 * @ai kernel-mode abort info. 220 * @stack_dump true to show a stack trace 221 */ 222 static void __abort_print(struct abort_info *ai, bool stack_dump) 223 { 224 assert(!abort_is_user_exception(ai)); 225 226 __print_abort_info(ai, "Core"); 227 228 if (stack_dump) { 229 trace_printf_helper_raw(TRACE_ERROR, true, 230 "TEE load address @ %#"PRIxVA, 231 VCORE_START_VA); 232 __print_stack_unwind(ai); 233 } 234 } 235 236 void abort_print(struct abort_info *ai) 237 { 238 __abort_print(ai, false); 239 } 240 241 void abort_print_error(struct abort_info *ai) 242 { 243 __abort_print(ai, true); 244 } 245 246 /* This function must be called from a normal thread */ 247 void abort_print_current_ts(void) 248 { 249 struct thread_specific_data *tsd = thread_get_tsd(); 250 struct abort_info ai = { }; 251 struct ts_session *s = ts_get_current_session(); 252 253 ai.abort_type = tsd->abort_type; 254 ai.fault_descr = tsd->abort_descr; 255 ai.va = tsd->abort_va; 256 ai.pc = tsd->abort_regs.elr; 257 ai.regs = &tsd->abort_regs; 258 259 if (ai.abort_type != ABORT_TYPE_USER_MODE_PANIC) 260 __print_abort_info(&ai, "User mode"); 261 262 s->ctx->ops->dump_state(s->ctx); 263 264 #if defined(CFG_FTRACE_SUPPORT) 265 if (s->ctx->ops->dump_ftrace) { 266 s->fbuf = NULL; 267 s->ctx->ops->dump_ftrace(s->ctx); 268 } 269 #endif 270 } 271 272 static void save_abort_info_in_tsd(struct abort_info *ai) 273 { 274 struct thread_specific_data *tsd = thread_get_tsd(); 275 276 tsd->abort_type = ai->abort_type; 277 tsd->abort_descr = ai->fault_descr; 278 tsd->abort_va = ai->va; 279 tsd->abort_regs = *ai->regs; 280 tsd->abort_core = get_core_pos(); 281 } 282 283 #ifdef ARM32 284 static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs, 285 struct abort_info *ai) 286 { 287 switch (abort_type) { 288 case ABORT_TYPE_DATA: 289 ai->fault_descr = read_dfsr(); 290 ai->va = read_dfar(); 291 break; 292 case ABORT_TYPE_PREFETCH: 293 ai->fault_descr = read_ifsr(); 294 ai->va = read_ifar(); 295 break; 296 default: 297 ai->fault_descr = 0; 298 ai->va = regs->elr; 299 break; 300 } 301 ai->abort_type = abort_type; 302 ai->pc = regs->elr; 303 ai->regs = regs; 304 } 305 #endif /*ARM32*/ 306 307 #ifdef ARM64 308 static void set_abort_info(uint32_t abort_type __unused, 309 struct thread_abort_regs *regs, struct abort_info *ai) 310 { 311 ai->fault_descr = read_esr_el1(); 312 switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) { 313 case ESR_EC_IABT_EL0: 314 case ESR_EC_IABT_EL1: 315 ai->abort_type = ABORT_TYPE_PREFETCH; 316 ai->va = read_far_el1(); 317 break; 318 case ESR_EC_DABT_EL0: 319 case ESR_EC_DABT_EL1: 320 case ESR_EC_SP_ALIGN: 321 ai->abort_type = ABORT_TYPE_DATA; 322 ai->va = read_far_el1(); 323 break; 324 default: 325 ai->abort_type = ABORT_TYPE_UNDEF; 326 ai->va = regs->elr; 327 } 328 ai->pc = regs->elr; 329 ai->regs = regs; 330 } 331 #endif /*ARM64*/ 332 333 #ifdef ARM32 334 static void handle_user_mode_panic(struct abort_info *ai) 335 { 336 /* 337 * It was a user exception, stop user execution and return 338 * to TEE Core. 339 */ 340 ai->regs->r0 = TEE_ERROR_TARGET_DEAD; 341 ai->regs->r1 = true; 342 ai->regs->r2 = 0xdeadbeef; 343 ai->regs->elr = (uint32_t)thread_unwind_user_mode; 344 ai->regs->spsr &= CPSR_FIA; 345 ai->regs->spsr &= ~CPSR_MODE_MASK; 346 ai->regs->spsr |= CPSR_MODE_SVC; 347 /* Select Thumb or ARM mode */ 348 if (ai->regs->elr & 1) 349 ai->regs->spsr |= CPSR_T; 350 else 351 ai->regs->spsr &= ~CPSR_T; 352 } 353 #endif /*ARM32*/ 354 355 #ifdef ARM64 356 static void handle_user_mode_panic(struct abort_info *ai) 357 { 358 struct thread_ctx *tc __maybe_unused = NULL; 359 uint32_t daif = 0; 360 uint32_t pan_bit = 0; 361 362 /* 363 * It was a user exception, stop user execution and return 364 * to TEE Core. 365 */ 366 ai->regs->x0 = TEE_ERROR_TARGET_DEAD; 367 ai->regs->x1 = true; 368 ai->regs->x2 = 0xdeadbeef; 369 ai->regs->elr = (vaddr_t)thread_unwind_user_mode; 370 ai->regs->sp_el0 = thread_get_saved_thread_sp(); 371 372 #if defined(CFG_CORE_PAUTH) 373 /* 374 * We're going to return to the privileged core thread, update the 375 * APIA key to match the key used by the thread. 376 */ 377 tc = threads + thread_get_id(); 378 ai->regs->apiakey_hi = tc->keys.apia_hi; 379 ai->regs->apiakey_lo = tc->keys.apia_lo; 380 #endif 381 382 if (IS_ENABLED(CFG_PAN) && feat_pan_implemented() && read_pan()) 383 pan_bit = SPSR_64_PAN; 384 daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK; 385 /* XXX what about DAIF_D? */ 386 ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif) | 387 pan_bit; 388 } 389 #endif /*ARM64*/ 390 391 #ifdef CFG_WITH_VFP 392 static void handle_user_mode_vfp(void) 393 { 394 struct ts_session *s = ts_get_current_session(); 395 396 thread_user_enable_vfp(&to_user_mode_ctx(s->ctx)->vfp); 397 } 398 #endif /*CFG_WITH_VFP*/ 399 400 #ifdef CFG_WITH_USER_TA 401 #ifdef ARM32 402 /* Returns true if the exception originated from user mode */ 403 bool abort_is_user_exception(struct abort_info *ai) 404 { 405 return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR; 406 } 407 #endif /*ARM32*/ 408 409 #ifdef ARM64 410 /* Returns true if the exception originated from user mode */ 411 bool abort_is_user_exception(struct abort_info *ai) 412 { 413 uint32_t spsr = ai->regs->spsr; 414 415 if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT)) 416 return true; 417 if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) == 418 SPSR_64_MODE_EL0) 419 return true; 420 return false; 421 } 422 #endif /*ARM64*/ 423 #else /*CFG_WITH_USER_TA*/ 424 bool abort_is_user_exception(struct abort_info *ai __unused) 425 { 426 return false; 427 } 428 #endif /*CFG_WITH_USER_TA*/ 429 430 #if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA) 431 #ifdef ARM32 432 static bool is_vfp_fault(struct abort_info *ai) 433 { 434 if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled()) 435 return false; 436 437 /* 438 * Not entirely accurate, but if it's a truly undefined instruction 439 * we'll end up in this function again, except this time 440 * vfp_is_enabled() so we'll return false. 441 */ 442 return true; 443 } 444 #endif /*ARM32*/ 445 446 #ifdef ARM64 447 static bool is_vfp_fault(struct abort_info *ai) 448 { 449 switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) { 450 case ESR_EC_FP_ASIMD: 451 case ESR_EC_AARCH32_FP: 452 case ESR_EC_AARCH64_FP: 453 return true; 454 default: 455 return false; 456 } 457 } 458 #endif /*ARM64*/ 459 #else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/ 460 static bool is_vfp_fault(struct abort_info *ai __unused) 461 { 462 return false; 463 } 464 #endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/ 465 466 bool abort_is_write_fault(struct abort_info *ai) 467 { 468 #ifdef ARM32 469 unsigned int write_not_read = 11; 470 #endif 471 #ifdef ARM64 472 unsigned int write_not_read = 6; 473 #endif 474 475 return ai->abort_type == ABORT_TYPE_DATA && 476 (ai->fault_descr & BIT(write_not_read)); 477 } 478 479 static enum fault_type get_fault_type(struct abort_info *ai) 480 { 481 if (abort_is_user_exception(ai)) { 482 if (is_vfp_fault(ai)) 483 return FAULT_TYPE_USER_MODE_VFP; 484 #ifndef CFG_WITH_PAGER 485 return FAULT_TYPE_USER_MODE_PANIC; 486 #endif 487 } 488 489 if (thread_is_from_abort_mode()) { 490 abort_print_error(ai); 491 panic("[abort] abort in abort handler (trap CPU)"); 492 } 493 494 if (ai->abort_type == ABORT_TYPE_UNDEF) { 495 if (abort_is_user_exception(ai)) 496 return FAULT_TYPE_USER_MODE_PANIC; 497 abort_print_error(ai); 498 panic("[abort] undefined abort (trap CPU)"); 499 } 500 501 switch (core_mmu_get_fault_type(ai->fault_descr)) { 502 case CORE_MMU_FAULT_ALIGNMENT: 503 if (abort_is_user_exception(ai)) 504 return FAULT_TYPE_USER_MODE_PANIC; 505 abort_print_error(ai); 506 panic("[abort] alignement fault! (trap CPU)"); 507 break; 508 509 case CORE_MMU_FAULT_ACCESS_BIT: 510 if (abort_is_user_exception(ai)) 511 return FAULT_TYPE_USER_MODE_PANIC; 512 abort_print_error(ai); 513 panic("[abort] access bit fault! (trap CPU)"); 514 break; 515 516 case CORE_MMU_FAULT_DEBUG_EVENT: 517 if (!abort_is_user_exception(ai)) 518 abort_print(ai); 519 DMSG("[abort] Ignoring debug event!"); 520 return FAULT_TYPE_IGNORE; 521 522 case CORE_MMU_FAULT_TRANSLATION: 523 case CORE_MMU_FAULT_WRITE_PERMISSION: 524 case CORE_MMU_FAULT_READ_PERMISSION: 525 return FAULT_TYPE_PAGEABLE; 526 527 case CORE_MMU_FAULT_ASYNC_EXTERNAL: 528 if (!abort_is_user_exception(ai)) 529 abort_print(ai); 530 DMSG("[abort] Ignoring async external abort!"); 531 return FAULT_TYPE_IGNORE; 532 533 case CORE_MMU_FAULT_TAG_CHECK: 534 if (abort_is_user_exception(ai)) 535 return FAULT_TYPE_USER_MODE_PANIC; 536 abort_print_error(ai); 537 panic("[abort] Tag check fault! (trap CPU)"); 538 break; 539 540 case CORE_MMU_FAULT_OTHER: 541 default: 542 if (!abort_is_user_exception(ai)) 543 abort_print(ai); 544 DMSG("[abort] Unhandled fault!"); 545 return FAULT_TYPE_IGNORE; 546 } 547 } 548 549 void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs) 550 { 551 struct abort_info ai; 552 bool handled; 553 554 set_abort_info(abort_type, regs, &ai); 555 556 switch (get_fault_type(&ai)) { 557 case FAULT_TYPE_IGNORE: 558 break; 559 case FAULT_TYPE_USER_MODE_PANIC: 560 DMSG("[abort] abort in User mode (TA will panic)"); 561 save_abort_info_in_tsd(&ai); 562 vfp_disable(); 563 handle_user_mode_panic(&ai); 564 break; 565 #ifdef CFG_WITH_VFP 566 case FAULT_TYPE_USER_MODE_VFP: 567 handle_user_mode_vfp(); 568 break; 569 #endif 570 case FAULT_TYPE_PAGEABLE: 571 default: 572 if (thread_get_id_may_fail() < 0) { 573 abort_print_error(&ai); 574 panic("abort outside thread context"); 575 } 576 thread_kernel_save_vfp(); 577 handled = tee_pager_handle_fault(&ai); 578 thread_kernel_restore_vfp(); 579 if (!handled) { 580 if (!abort_is_user_exception(&ai)) { 581 abort_print_error(&ai); 582 panic("unhandled pageable abort"); 583 } 584 DMSG("[abort] abort in User mode (TA will panic)"); 585 save_abort_info_in_tsd(&ai); 586 vfp_disable(); 587 handle_user_mode_panic(&ai); 588 } 589 break; 590 } 591 } 592