// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015, Linaro Limited
 */

#include <arm.h>
#include <assert.h>
#include <kernel/abort.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/user_mode_ctx.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <string.h>
#include <tee/tee_svc.h>
#include <trace.h>
#include <unw/unwind.h>

#include "thread_private.h"

enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,
	FAULT_TYPE_USER_TA_VFP,
	FAULT_TYPE_PAGEABLE,
	FAULT_TYPE_IGNORE,
};

#ifdef CFG_UNWIND

#ifdef ARM32
/*
 * Kernel mode unwind (32-bit execution state).
 */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_arm32 state = { };
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp = 0;
	uint32_t lr = 0;

	assert(!abort_is_user_exception(ai));

	if (mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(&state, thread_stack_start(), thread_stack_size());
}
#endif /* ARM32 */

#ifdef ARM64
/* Kernel mode unwind (64-bit execution state) */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_arm64 state = {
		.pc = ai->regs->elr,
		.fp = ai->regs->x29,
	};

	print_stack_arm64(&state, thread_stack_start(), thread_stack_size());
}
#endif /*ARM64*/

#else /* CFG_UNWIND */
static void __print_stack_unwind(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

static __maybe_unused const char *fault_to_str(uint32_t abort_type,
					       uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	default:
		return "";
	}
}
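
/*
 * Dump the faulting context (fault status, translation table bases,
 * context ID and general purpose registers) to the console.
 */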
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
	__maybe_unused size_t core_pos = 0;
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp = 0;
	__maybe_unused uint32_t lr = 0;

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
		core_pos = thread_get_tsd()->abort_core;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
		core_pos = get_core_pos();
	}
#endif /*ARM32*/
#ifdef ARM64
	if (abort_is_user_exception(ai))
		core_pos = thread_get_tsd()->abort_core;
	else
		core_pos = get_core_pos();
#endif /*ARM64*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		 ctx, abort_type_to_str(ai->abort_type), ai->va,
		 fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu cpsr 0x%08x",
		 core_pos, ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x ttbr0 0x%08" PRIx64 " ttbr1 0x%08" PRIx64
		 " cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu cpsr 0x%08x",
		 core_pos, (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0 %016" PRIx64 " x1 %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2 %016" PRIx64 " x3 %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4 %016" PRIx64 " x5 %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6 %016" PRIx64 " x7 %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8 %016" PRIx64 " x9 %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

/*
 * Print abort info and (optionally) stack dump to the console
 * @ai		kernel-mode abort info.
 * @stack_dump	true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	assert(!abort_is_user_exception(ai));

	__print_abort_info(ai, "Core");

	if (stack_dump)
		__print_stack_unwind(ai);
}

void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

/* This function must be called from a normal thread */
void abort_print_current_ta(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct abort_info ai = { };
	struct tee_ta_session *s = NULL;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	ai.abort_type = tsd->abort_type;
	ai.fault_descr = tsd->abort_descr;
	ai.va = tsd->abort_va;
	ai.pc = tsd->abort_regs.elr;
	ai.regs = &tsd->abort_regs;

	if (ai.abort_type != ABORT_TYPE_TA_PANIC)
		__print_abort_info(&ai, "User TA");

	s->ctx->ops->dump_state(s->ctx);

#if defined(CFG_FTRACE_SUPPORT)
	if (s->ctx->ops->dump_ftrace) {
		s->fbuf = NULL;
		s->ctx->ops->dump_ftrace(s->ctx);
	}
#endif
}

static void save_abort_info_in_tsd(struct abort_info *ai)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	tsd->abort_type = ai->abort_type;
	tsd->abort_descr = ai->fault_descr;
	tsd->abort_va = ai->va;
	tsd->abort_regs = *ai->regs;
	tsd->abort_core = get_core_pos();
}

#ifdef ARM32
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
			   struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
static void set_abort_info(uint32_t abort_type __unused,
			   struct thread_abort_regs *regs,
			   struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/
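
/*
 * Abort user TA execution: the saved exception context is rewritten so
 * that the abort return path resumes in thread_unwind_user_mode() with
 * TEE_ERROR_TARGET_DEAD, i.e. the TA is panicked instead of resumed.
 */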
#ifdef ARM32
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif = 0;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

#ifdef CFG_WITH_VFP
static void handle_user_ta_vfp(void)
{
	struct tee_ta_session *s = NULL;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	thread_user_enable_vfp(&to_user_mode_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA
#ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/
#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/
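
/*
 * Returns true if the fault was caused by a VFP/SIMD instruction trapping
 * while VFP is disabled; such faults are handled by enabling VFP for the
 * user TA instead of panicking it.
 */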
#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
#ifdef ARM32
static bool is_vfp_fault(struct abort_info *ai)
{
	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/*
	 * Not entirely accurate, but if it's a truly undefined instruction
	 * we'll end up in this function again, except this time
	 * vfp_is_enabled() will be true, so we'll return false.
	 */
	return true;
}
#endif /*ARM32*/

#ifdef ARM64
static bool is_vfp_fault(struct abort_info *ai)
{
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_FP_ASIMD:
	case ESR_EC_AARCH32_FP:
	case ESR_EC_AARCH64_FP:
		return true;
	default:
		return false;
	}
}
#endif /*ARM64*/
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_TA_VFP;
#ifndef CFG_WITH_PAGER
		return FAULT_TYPE_USER_TA_PANIC;
#endif
	}

	if (thread_is_from_abort_mode()) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring async external abort!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_OTHER:
	default:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai = { };
	bool handled = false;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		save_abort_info_in_tsd(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		if (thread_get_id_may_fail() < 0) {
			abort_print_error(&ai);
			panic("abort outside thread context");
		}
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			if (!abort_is_user_exception(&ai)) {
				abort_print_error(&ai);
				panic("unhandled pageable abort");
			}
			DMSG("[abort] abort in User mode (TA will panic)");
			save_abort_info_in_tsd(&ai);
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}