// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015, Linaro Limited
 */

#include <arm.h>
#include <kernel/abort.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/user_mode_ctx.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <tee/tee_svc.h>
#include <trace.h>
#include <unw/unwind.h>

#include "thread_private.h"

enum fault_type {
	FAULT_TYPE_USER_MODE_PANIC,
	FAULT_TYPE_USER_MODE_VFP,
	FAULT_TYPE_PAGEABLE,
	FAULT_TYPE_IGNORE,
};

#ifdef CFG_UNWIND

#ifdef ARM32
/* Kernel mode unwind (32-bit execution state) */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_arm32 state = { };
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp = 0;
	uint32_t lr = 0;

	assert(!abort_is_user_exception(ai));

	if (mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(&state, thread_stack_start(), thread_stack_size());
}
#endif /* ARM32 */

#ifdef ARM64
/* Kernel mode unwind (64-bit execution state) */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_arm64 state = {
		.pc = ai->regs->elr,
		.fp = ai->regs->x29,
	};

	print_stack_arm64(&state, thread_stack_start(), thread_stack_size());
}
#endif /*ARM64*/

#else /* CFG_UNWIND */
static void __print_stack_unwind(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

static __maybe_unused const char *fault_to_str(uint32_t abort_type,
					       uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	default:
		return "";
	}
}

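/*
 * Dump the abort type, faulting address and the saved register state to
 * the secure console. @ctx is a short prefix naming the context the abort
 * was taken from ("Core" or "User mode").
 */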
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
	__maybe_unused size_t core_pos = 0;
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp = 0;
	__maybe_unused uint32_t lr = 0;

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
		core_pos = thread_get_tsd()->abort_core;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
		core_pos = get_core_pos();
	}
#endif /*ARM32*/
#ifdef ARM64
	if (abort_is_user_exception(ai))
		core_pos = thread_get_tsd()->abort_core;
	else
		core_pos = get_core_pos();
#endif /*ARM64*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		 ctx, abort_type_to_str(ai->abort_type), ai->va,
		 fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu cpsr 0x%08x",
		 core_pos, ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x ttbr0 0x%08" PRIx64 " ttbr1 0x%08" PRIx64
		 " cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu cpsr 0x%08x",
		 core_pos, (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0 %016" PRIx64 " x1 %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2 %016" PRIx64 " x3 %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4 %016" PRIx64 " x5 %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6 %016" PRIx64 " x7 %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8 %016" PRIx64 " x9 %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

/*
 * Print abort info and (optionally) stack dump to the console
 * @ai kernel-mode abort info.
 * @stack_dump true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	assert(!abort_is_user_exception(ai));

	__print_abort_info(ai, "Core");

	if (stack_dump) {
		trace_printf_helper_raw(TRACE_ERROR, true,
					"TEE load address @ %#"PRIxVA,
					VCORE_START_VA);
		__print_stack_unwind(ai);
	}
}

void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

/* This function must be called from a normal thread */
void abort_print_current_ta(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct abort_info ai = { };
	struct ts_session *s = ts_get_current_session();

	ai.abort_type = tsd->abort_type;
	ai.fault_descr = tsd->abort_descr;
	ai.va = tsd->abort_va;
	ai.pc = tsd->abort_regs.elr;
	ai.regs = &tsd->abort_regs;

	if (ai.abort_type != ABORT_TYPE_USER_MODE_PANIC)
		__print_abort_info(&ai, "User mode");

	s->ctx->ops->dump_state(s->ctx);

#if defined(CFG_FTRACE_SUPPORT)
	if (s->ctx->ops->dump_ftrace) {
		s->fbuf = NULL;
		s->ctx->ops->dump_ftrace(s->ctx);
	}
#endif
}

static void save_abort_info_in_tsd(struct abort_info *ai)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	tsd->abort_type = ai->abort_type;
	tsd->abort_descr = ai->fault_descr;
	tsd->abort_va = ai->va;
	tsd->abort_regs = *ai->regs;
	tsd->abort_core = get_core_pos();
}

#ifdef ARM32
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
			   struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
static void set_abort_info(uint32_t abort_type __unused,
			   struct thread_abort_regs *regs,
			   struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
static void handle_user_mode_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_mode_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

#ifdef CFG_WITH_VFP
static void handle_user_mode_vfp(void)
{
	struct ts_session *s = ts_get_current_session();

	thread_user_enable_vfp(&to_user_mode_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA
#ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/
#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/

#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
#ifdef ARM32
static bool is_vfp_fault(struct abort_info *ai)
{
	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/*
	 * Not entirely accurate, but if it's a truly undefined instruction
	 * we'll end up in this function again, except this time
	 * vfp_is_enabled() will return true, so we'll return false.
	 */
	return true;
}
#endif /*ARM32*/

#ifdef ARM64
static bool is_vfp_fault(struct abort_info *ai)
{
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_FP_ASIMD:
	case ESR_EC_AARCH32_FP:
	case ESR_EC_AARCH64_FP:
		return true;
	default:
		return false;
	}
}
#endif /*ARM64*/
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

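/*
 * Classify the abort so that abort_handler() knows how to proceed: a fault
 * the pager may be able to resolve (FAULT_TYPE_PAGEABLE), an FP/SIMD access
 * that is handled by enabling VFP for the user mode context
 * (FAULT_TYPE_USER_MODE_VFP), a user mode fault that stops the TA
 * (FAULT_TYPE_USER_MODE_PANIC), or an event that is only logged
 * (FAULT_TYPE_IGNORE). Fatal kernel mode faults do not return: they panic
 * here.
 */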
static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_MODE_VFP;
#ifndef CFG_WITH_PAGER
		return FAULT_TYPE_USER_MODE_PANIC;
#endif
	}

	if (thread_is_from_abort_mode()) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring async external abort!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_OTHER:
	default:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

/*
 * Abort exception entry: @regs holds the register state saved when the
 * abort was taken. The fault is classified by get_fault_type() and either
 * ignored, passed to the pager, resolved by enabling VFP, or turned into a
 * TA panic via handle_user_mode_panic().
 */
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_MODE_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		save_abort_info_in_tsd(&ai);
		vfp_disable();
		handle_user_mode_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_MODE_VFP:
		handle_user_mode_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		if (thread_get_id_may_fail() < 0) {
			abort_print_error(&ai);
			panic("abort outside thread context");
		}
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			if (!abort_is_user_exception(&ai)) {
				abort_print_error(&ai);
				panic("unhandled pageable abort");
			}
			DMSG("[abort] abort in User mode (TA will panic)");
			save_abort_info_in_tsd(&ai);
			vfp_disable();
			handle_user_mode_panic(&ai);
		}
		break;
	}
}