// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015, Linaro Limited
 */

#include <arm.h>
#include <assert.h>
#include <kernel/abort.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/unwind.h>
#include <kernel/user_ta.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <string.h>
#include <tee/tee_svc.h>
#include <trace.h>

#include "thread_private.h"

enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,
	FAULT_TYPE_USER_TA_VFP,
	FAULT_TYPE_PAGEABLE,
	FAULT_TYPE_IGNORE,
};

#ifdef CFG_UNWIND

static void get_current_ta_exidx_stack(vaddr_t *exidx, size_t *exidx_sz,
				       vaddr_t *stack, size_t *stack_size)
{
	struct tee_ta_session *s;
	struct user_ta_ctx *utc;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	utc = to_user_ta_ctx(s->ctx);

	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
	assert(utc->is_32bit);

	*exidx = utc->exidx_start; /* NULL if TA has no unwind tables */
	if (*exidx)
		*exidx += utc->load_addr;
	*exidx_sz = utc->exidx_size;

	*stack = utc->stack_addr;
	*stack_size = utc->mobj_stack->size;
}

#ifdef ARM32

/*
 * Kernel or user mode unwind (32-bit execution state).
 */
static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	vaddr_t exidx;
	size_t exidx_sz;
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp;
	uint32_t lr;
	vaddr_t stack;
	size_t stack_size;
	bool kernel_stack;

	if (abort_is_user_exception(ai)) {
		get_current_ta_exidx_stack(&exidx, &exidx_sz, &stack,
					   &stack_size);
		if (!exidx) {
			EMSG_RAW("Call stack not available");
			return;
		}
		kernel_stack = false;
	} else {
		exidx = (vaddr_t)__exidx_start;
		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
		/* Kernel stack */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
		kernel_stack = true;
	}

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz, kernel_stack,
			  stack, stack_size);
}
#else /* ARM32 */

static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	vaddr_t exidx;
	size_t exidx_sz;
	vaddr_t stack;
	size_t stack_size;

	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
	assert(abort_is_user_exception(ai));

	get_current_ta_exidx_stack(&exidx, &exidx_sz, &stack, &stack_size);
	if (!exidx) {
		EMSG_RAW("Call stack not available");
		return;
	}

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->x0;
	state.registers[1] = ai->regs->x1;
	state.registers[2] = ai->regs->x2;
	state.registers[3] = ai->regs->x3;
	state.registers[4] = ai->regs->x4;
	state.registers[5] = ai->regs->x5;
	state.registers[6] = ai->regs->x6;
	state.registers[7] = ai->regs->x7;
	state.registers[8] = ai->regs->x8;
	state.registers[9] = ai->regs->x9;
	state.registers[10] = ai->regs->x10;
	state.registers[11] = ai->regs->x11;
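	/*
	 * registers[12] (AArch32 r12/ip) is a caller-saved scratch
	 * register that is not needed to seed the unwind; it is left
	 * zeroed by the memset() above.
	 */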
	state.registers[13] = ai->regs->x13;
	state.registers[14] = ai->regs->x14;
	state.registers[15] = ai->pc;

	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz,
			  false /*!kernel_stack*/, stack, stack_size);
}
#endif /* ARM32 */
#ifdef ARM64
/* Kernel or user mode unwind (64-bit execution state) */
static void __print_stack_unwind_arm64(struct abort_info *ai)
{
	struct unwind_state_arm64 state;
	bool kernel_stack;
	uaddr_t stack;
	size_t stack_size;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		/* User stack */
		stack = utc->stack_addr;
		stack_size = utc->mobj_stack->size;
		kernel_stack = false;
	} else {
		/* Kernel stack */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
		kernel_stack = true;
	}

	memset(&state, 0, sizeof(state));
	state.pc = ai->regs->elr;
	state.fp = ai->regs->x29;

	print_stack_arm64(TRACE_ERROR, &state, kernel_stack, stack, stack_size);
}
#else
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
#endif /*ARM64*/
#else /* CFG_UNWIND */
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
}

static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

static __maybe_unused const char *fault_to_str(uint32_t abort_type,
					       uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	default:
		return "";
	}
}
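
/*
 * Example of the first message __print_abort_info() prints, assuming a
 * core data abort caused by a translation fault (the address is made up
 * for illustration):
 *
 *   Core data-abort at address 0x12345678 (translation fault)
 */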
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp;
	__maybe_unused uint32_t lr;

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}
#endif /*ARM32*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		 ctx, abort_type_to_str(ai->abort_type), ai->va,
		 fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu cpsr 0x%08x",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x ttbr0 0x%08" PRIx64 " ttbr1 0x%08" PRIx64
		 " cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu cpsr 0x%08x",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0 %016" PRIx64 " x1 %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2 %016" PRIx64 " x3 %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4 %016" PRIx64 " x5 %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6 %016" PRIx64 " x7 %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8 %016" PRIx64 " x9 %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

#if defined(ARM32)
static const bool kernel_is32bit = true;
#elif defined(ARM64)
static const bool kernel_is32bit;
#endif

/*
 * Print abort info and (optionally) stack dump to the console
 * @ai user-mode or kernel-mode abort info. If user mode, the current session
 * must be the one of the TA that caused the abort.
 * @stack_dump true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	bool is_32bit;
	bool paged_ta_abort = false;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		is_32bit = utc->is_32bit;
#ifdef CFG_PAGED_USER_TA
		/*
		 * It is not safe to unwind paged TAs that received an abort,
		 * because we currently don't handle page faults that could
		 * occur when accessing the TA memory (unwind tables for
		 * instance).
		 */
		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
			paged_ta_abort = true;
#endif
		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
			__print_abort_info(ai, "User TA");
		tee_ta_dump_current();
	} else {
		is_32bit = kernel_is32bit;

		__print_abort_info(ai, "Core");
	}

	if (!stack_dump || paged_ta_abort)
		return;

	if (is_32bit)
		__print_stack_unwind_arm32(ai);
	else
		__print_stack_unwind_arm64(ai);
}

void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

#ifdef ARM32
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
			   struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

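/*
 * On AArch64 the Exception Syndrome Register (ESR_EL1) encodes the
 * exception class (EC) in bits [31:26]: for instance, EC 0x24 is a data
 * abort from a lower exception level (EL0) and 0x25 a data abort taken
 * at the current EL. set_abort_info() below maps these classes onto the
 * generic ABORT_TYPE_* values used by the rest of this file.
 */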
#ifdef ARM64
static void set_abort_info(uint32_t abort_type __unused,
			   struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

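/*
 * VFP starts out disabled when a TA is entered, so the TA's first VFP
 * instruction traps as an Undefined instruction exception. When
 * is_vfp_fault() below recognizes such a trap, the handler enables VFP
 * lazily for the TA instead of panicking it.
 */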
#ifdef CFG_WITH_VFP
static void handle_user_ta_vfp(void)
{
	struct tee_ta_session *s;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA
#ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/
#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/

#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
#ifdef ARM32

#define T32_INSTR(w1, w0) \
	((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))

#define T32_VTRANS32_MASK	T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
#define T32_VTRANS32_VAL	T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)

#define T32_VTRANS64_MASK	T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
#define T32_VTRANS64_VAL	T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)

#define T32_VLDST_MASK		T32_INSTR((0xff << 8) | (1 << 4), 0)
#define T32_VLDST_VAL		T32_INSTR(0xf9 << 8, 0)

#define T32_VXLDST_MASK		T32_INSTR(0xfc << 8, 7 << 9)
#define T32_VXLDST_VAL		T32_INSTR(0xec << 8, 5 << 9)

#define T32_VPROC_MASK		T32_INSTR(0xef << 8, 0)
#define T32_VPROC_VAL		T32_VPROC_MASK

#define A32_INSTR(x)		((uint32_t)(x))

#define A32_VTRANS32_MASK	A32_INSTR(SHIFT_U32(0xf, 24) | \
					  SHIFT_U32(7, 9) | BIT32(4))
#define A32_VTRANS32_VAL	A32_INSTR(SHIFT_U32(0xe, 24) | \
					  SHIFT_U32(5, 9) | BIT32(4))

#define A32_VTRANS64_MASK	A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
#define A32_VTRANS64_VAL	A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))

#define A32_VLDST_MASK		A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
#define A32_VLDST_VAL		A32_INSTR(SHIFT_U32(0xf4, 24))
#define A32_VXLDST_MASK		A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
#define A32_VXLDST_VAL		A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))

#define A32_VPROC_MASK		A32_INSTR(SHIFT_U32(0x7f, 25))
#define A32_VPROC_VAL		A32_INSTR(SHIFT_U32(0x79, 25))

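/*
 * Worked example (illustrative, not exhaustive): the A32 encoding of
 * "vmov s0, r0" is 0xee000a10. A32_VTRANS32_MASK evaluates to
 * 0x0f000e10, and 0xee000a10 & 0x0f000e10 == 0x0e000a10 ==
 * A32_VTRANS32_VAL, so the instruction is classified as a VFP register
 * transfer and triggers lazy VFP enablement instead of a TA panic.
 */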
static bool is_vfp_fault(struct abort_info *ai)
{
	TEE_Result res;
	uint32_t instr;

	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
	if (res != TEE_SUCCESS)
		return false;

	if (ai->regs->spsr & CPSR_T) {
		/* Thumb mode */
		return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
		       ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
		       ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
		       ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
		       ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
	} else {
		/* ARM mode */
		return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
		       ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
		       ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
		       ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
		       ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
	}
}
#endif /*ARM32*/

#ifdef ARM64
static bool is_vfp_fault(struct abort_info *ai)
{
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_FP_ASIMD:
	case ESR_EC_AARCH32_FP:
	case ESR_EC_AARCH64_FP:
		return true;
	default:
		return false;
	}
}
#endif /*ARM64*/
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_TA_VFP;
#ifndef CFG_WITH_PAGER
		return FAULT_TYPE_USER_TA_PANIC;
#endif
	}

	if (thread_is_from_abort_mode()) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		abort_print(ai);
		DMSG("[abort] Ignoring async external abort!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_OTHER:
	default:
		abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		abort_print_error(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			abort_print_error(&ai);
			if (!abort_is_user_exception(&ai))
				panic("unhandled pageable abort");
			DMSG("[abort] abort in User mode (TA will panic)");
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}