// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2022, Linaro Limited
 */

#include <arm.h>
#include <kernel/abort.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx.h>
#include <memtag.h>
#include <mm/core_mmu.h>
#include <mm/tee_pager.h>
#include <trace.h>
#include <unw/unwind.h>

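/*
 * Coarse classification of an abort, used by abort_handler() to select the
 * handling path.
 */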
enum fault_type {
	FAULT_TYPE_USER_MODE_PANIC,
	FAULT_TYPE_USER_MODE_VFP,
	FAULT_TYPE_PAGEABLE,
	FAULT_TYPE_IGNORE,
	FAULT_TYPE_EXTERNAL_ABORT,
};

#ifdef CFG_UNWIND

#ifdef ARM32
/*
 * Kernel mode unwind (32-bit execution state).
 */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_arm32 state = { };
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp = 0;
	uint32_t lr = 0;

	assert(!abort_is_user_exception(ai));

	if (mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(&state, thread_stack_start(), thread_stack_size());
}
#endif /* ARM32 */

#ifdef ARM64
/* Kernel mode unwind (64-bit execution state) */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_arm64 state = {
		.pc = ai->regs->elr,
		.fp = ai->regs->x29,
	};

	print_stack_arm64(&state, thread_stack_start(), thread_stack_size());
}
#endif /*ARM64*/

#else /* CFG_UNWIND */
static void __print_stack_unwind(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

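/* Return a short name for the abort type, used when formatting log output */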
static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

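/*
 * Return a human readable description of the MMU fault, or an empty string
 * when @fault_descr does not apply to the abort type.
 */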
static __maybe_unused const char *fault_to_str(uint32_t abort_type,
					       uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	case CORE_MMU_FAULT_TAG_CHECK:
		return " (tag check fault)";
	case CORE_MMU_FAULT_SYNC_EXTERNAL:
		return " (Synchronous external abort)";
	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		return " (Asynchronous external abort)";
	default:
		return "";
	}
}

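/*
 * Dump the abort type, the faulting address and the complete register state
 * to the console.
 */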
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
	__maybe_unused size_t core_pos = 0;
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp = 0;
	__maybe_unused uint32_t lr = 0;

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
		core_pos = thread_get_tsd()->abort_core;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
		core_pos = get_core_pos();
	}
#endif /*ARM32*/
#ifdef ARM64
	if (abort_is_user_exception(ai))
		core_pos = thread_get_tsd()->abort_core;
	else
		core_pos = get_core_pos();
#endif /*ARM64*/

	EMSG_RAW("");
	if (IS_ENABLED(CFG_MEMTAG))
		EMSG_RAW("%s %s-abort at address 0x%" PRIxVA
			 " [tagged 0x%" PRIxVA "]%s", ctx,
			 abort_type_to_str(ai->abort_type),
			 memtag_strip_tag_vaddr((void *)ai->va), ai->va,
			 fault_to_str(ai->abort_type, ai->fault_descr));
	else
		EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s", ctx,
			 abort_type_to_str(ai->abort_type), ai->va,
			 fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu cpsr 0x%08x",
		 core_pos, ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x ttbr0 0x%08" PRIx64 " ttbr1 0x%08" PRIx64
		 " cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu cpsr 0x%08x",
		 core_pos, (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0 %016" PRIx64 " x1 %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2 %016" PRIx64 " x3 %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4 %016" PRIx64 " x5 %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6 %016" PRIx64 " x7 %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8 %016" PRIx64 " x9 %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

/*
 * Print abort info and (optionally) stack dump to the console
 * @ai kernel-mode abort info.
 * @stack_dump true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	assert(!abort_is_user_exception(ai));

	__print_abort_info(ai, "Core");

	if (stack_dump) {
		trace_printf_helper_raw(TRACE_ERROR, true,
					"TEE load address @ %#"PRIxVA,
					VCORE_START_VA);
		__print_stack_unwind(ai);
	}
}

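/* Print the abort information without a stack trace */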
void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

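/* Print the abort information together with an unwound stack trace */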
void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

/* This function must be called from a normal thread */
void abort_print_current_ts(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct abort_info ai = { };
	struct ts_session *s = ts_get_current_session();

	ai.abort_type = tsd->abort_type;
	ai.fault_descr = tsd->abort_descr;
	ai.va = tsd->abort_va;
	ai.pc = tsd->abort_regs.elr;
	ai.regs = &tsd->abort_regs;

	if (ai.abort_type != ABORT_TYPE_USER_MODE_PANIC)
		__print_abort_info(&ai, "User mode");

	s->ctx->ops->dump_state(s->ctx);

#if defined(CFG_FTRACE_SUPPORT)
	if (s->ctx->ops->dump_ftrace)
		s->ctx->ops->dump_ftrace(s->ctx);
#endif
}

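/*
 * Save the abort information in the thread specific data so that it can be
 * printed later from a normal thread context, see abort_print_current_ts().
 */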
static void save_abort_info_in_tsd(struct abort_info *ai)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	tsd->abort_type = ai->abort_type;
	tsd->abort_descr = ai->fault_descr;
	tsd->abort_va = ai->va;
	tsd->abort_regs = *ai->regs;
	tsd->abort_core = get_core_pos();
}

#ifdef ARM32
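/*
 * Fill in @ai from the banked fault status/address registers (DFSR/DFAR or
 * IFSR/IFAR) depending on the abort type.
 */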
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
			   struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
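/*
 * Classify the abort from the exception class in ESR_EL1 and record the
 * faulting address (FAR_EL1) and the PC.
 */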
static void set_abort_info(uint32_t abort_type __unused,
			   struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
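/*
 * Rewrite the saved exception return state so that the exception return
 * lands in thread_unwind_user_mode() in SVC mode, with
 * TEE_ERROR_TARGET_DEAD as the result passed back to TEE Core.
 */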
static void handle_user_mode_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
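/*
 * Rewrite the saved exception return state so that the exception return
 * lands in thread_unwind_user_mode() at EL1, using the kernel stack pointer
 * saved when the thread entered user mode.
 */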
static void handle_user_mode_panic(struct abort_info *ai)
{
	struct thread_ctx *tc __maybe_unused = NULL;
	uint32_t daif = 0;
	uint32_t pan_bit = 0;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

#if defined(CFG_CORE_PAUTH)
	/*
	 * We're going to return to the privileged core thread, update the
	 * APIA key to match the key used by the thread.
	 */
	tc = threads + thread_get_id();
	ai->regs->apiakey_hi = tc->keys.apia_hi;
	ai->regs->apiakey_lo = tc->keys.apia_lo;
#endif

	if (IS_ENABLED(CFG_PAN) && feat_pan_implemented() && read_pan())
		pan_bit = SPSR_64_PAN;
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif) |
			 pan_bit;
}
#endif /*ARM64*/

#ifdef CFG_WITH_VFP
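/*
 * Lazy VFP enable: turn on VFP for the current user mode context so that the
 * trapped access can be retried when execution resumes.
 */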
static void handle_user_mode_vfp(void)
{
	struct ts_session *s = ts_get_current_session();

	thread_user_enable_vfp(&to_user_mode_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA
#ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/
#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/

#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
#ifdef ARM32
static bool is_vfp_fault(struct abort_info *ai)
{
	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/*
	 * Not entirely accurate, but if it's a truly undefined instruction
	 * we'll end up in this function again, except this time
	 * vfp_is_enabled() will be true so we'll return false.
	 */
	return true;
}
#endif /*ARM32*/

#ifdef ARM64
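/*
 * On AArch64 a trapped FP/SIMD access is identified by the exception class
 * in ESR_EL1.
 */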
static bool is_vfp_fault(struct abort_info *ai)
{
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_FP_ASIMD:
	case ESR_EC_AARCH32_FP:
	case ESR_EC_AARCH64_FP:
		return true;
	default:
		return false;
	}
}
#endif /*ARM64*/
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

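/*
 * The WnR (write not read) bit tells whether a data abort was caused by a
 * write access: bit 11 in the ARM32 DFSR, bit 6 in the AArch64 ESR ISS.
 */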
bool abort_is_write_fault(struct abort_info *ai)
{
#ifdef ARM32
	unsigned int write_not_read = 11;
#endif
#ifdef ARM64
	unsigned int write_not_read = 6;
#endif

	return ai->abort_type == ABORT_TYPE_DATA &&
	       (ai->fault_descr & BIT(write_not_read));
}

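/*
 * Map the abort onto a coarse fault type. Faults that can never be recovered
 * from in kernel mode cause a panic directly from here.
 */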
static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_MODE_VFP;
#ifndef CFG_WITH_PAGER
		return FAULT_TYPE_USER_MODE_PANIC;
#endif
	}

	if (thread_is_from_abort_mode()) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
	case CORE_MMU_FAULT_SYNC_EXTERNAL:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort]%s", fault_to_str(ai->abort_type,
					       ai->fault_descr));
		return FAULT_TYPE_EXTERNAL_ABORT;

	case CORE_MMU_FAULT_TAG_CHECK:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] Tag check fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_OTHER:
	default:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

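/*
 * Common abort handler entry, called with the register state that was saved
 * at exception entry in @regs.
 */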
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_MODE_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		save_abort_info_in_tsd(&ai);
		vfp_disable();
		handle_user_mode_panic(&ai);
		break;
	case FAULT_TYPE_EXTERNAL_ABORT:
#ifdef CFG_EXTERNAL_ABORT_PLAT_HANDLER
		/* Allow platform-specific handling */
		plat_external_abort_handler(&ai);
#endif
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_MODE_VFP:
		handle_user_mode_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		if (thread_get_id_may_fail() < 0) {
			abort_print_error(&ai);
			panic("abort outside thread context");
		}
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			if (!abort_is_user_exception(&ai)) {
				abort_print_error(&ai);
				panic("unhandled pageable abort");
			}
			DMSG("[abort] abort in User mode (TA will panic)");
			save_abort_info_in_tsd(&ai);
			vfp_disable();
			handle_user_mode_panic(&ai);
		}
		break;
	}
}