xref: /optee_os/core/arch/arm/kernel/abort.c (revision c3deb3d6f3b13d0e17fc9efe5880aec039e47594)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2022, Linaro Limited
 */

#include <arm.h>
#include <kernel/abort.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx.h>
#include <memtag.h>
#include <mm/core_mmu.h>
#include <mm/tee_pager.h>
#include <trace.h>
#include <unw/unwind.h>

enum fault_type {
	FAULT_TYPE_USER_MODE_PANIC,
	FAULT_TYPE_USER_MODE_VFP,
	FAULT_TYPE_PAGEABLE,
	FAULT_TYPE_IGNORE,
	FAULT_TYPE_EXTERNAL_ABORT,
};

#ifdef CFG_UNWIND

#ifdef ARM32
/*
 * Kernel mode unwind (32-bit execution state).
 */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_arm32 state = { };
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp = 0;
	uint32_t lr = 0;

	assert(!abort_is_user_exception(ai));

	if (mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(&state, thread_stack_start(), thread_stack_size());
}
#endif /* ARM32 */

#ifdef ARM64
/* Kernel mode unwind (64-bit execution state) */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_arm64 state = {
		.pc = ai->regs->elr,
		.fp = ai->regs->x29,
	};

	print_stack_arm64(&state, thread_stack_start(), thread_stack_size());
}
#endif /*ARM64*/

#else /* CFG_UNWIND */
static void __print_stack_unwind(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

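/* Return the printable name of an abort type ("data", "prefetch" or "undef") */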
static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

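/*
 * Translate the MMU fault code in @fault_descr into a short description
 * that is appended to the abort trace message.
 */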
static __maybe_unused const char *fault_to_str(uint32_t abort_type,
			uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	case CORE_MMU_FAULT_TAG_CHECK:
		return " (tag check fault)";
	case CORE_MMU_FAULT_SYNC_EXTERNAL:
		return " (Synchronous external abort)";
	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		return " (Asynchronous external abort)";
	default:
		return "";
	}
}

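/*
 * Dump the faulting address, the fault status register and the full
 * register state to the console. @ctx is "Core" or "User mode" depending
 * on where the abort was taken.
 */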
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
	__maybe_unused size_t core_pos = 0;
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp = 0;
	__maybe_unused uint32_t lr = 0;

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
		core_pos = thread_get_tsd()->abort_core;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
		core_pos = get_core_pos();
	}
#endif /*ARM32*/
#ifdef ARM64
	if (abort_is_user_exception(ai))
		core_pos = thread_get_tsd()->abort_core;
	else
		core_pos = get_core_pos();
#endif /*ARM64*/

	EMSG_RAW("");
	if (IS_ENABLED(CFG_MEMTAG))
		EMSG_RAW("%s %s-abort at address 0x%" PRIxVA
			 " [tagged 0x%" PRIxVA "]%s", ctx,
			 abort_type_to_str(ai->abort_type),
			 memtag_strip_tag_vaddr((void *)ai->va), ai->va,
			 fault_to_str(ai->abort_type, ai->fault_descr));
	else
		EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s", ctx,
			 abort_type_to_str(ai->abort_type), ai->va,
			 fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 core_pos, ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 core_pos, (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

/*
 * Print abort info and (optionally) stack dump to the console
 * @ai kernel-mode abort info.
 * @stack_dump true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	assert(!abort_is_user_exception(ai));

	__print_abort_info(ai, "Core");

	if (stack_dump) {
		trace_printf_helper_raw(TRACE_ERROR, true,
					"TEE load address @ %#"PRIxVA,
					VCORE_START_VA);
		__print_stack_unwind(ai);
	}
}

void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

/* This function must be called from a normal thread */
void abort_print_current_ts(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct abort_info ai = { };
	struct ts_session *s = ts_get_current_session();

	ai.abort_type = tsd->abort_type;
	ai.fault_descr = tsd->abort_descr;
	ai.va = tsd->abort_va;
	ai.pc = tsd->abort_regs.elr;
	ai.regs = &tsd->abort_regs;

	if (ai.abort_type != ABORT_TYPE_USER_MODE_PANIC)
		__print_abort_info(&ai, "User mode");

	s->ctx->ops->dump_state(s->ctx);

#if defined(CFG_FTRACE_SUPPORT)
	if (s->ctx->ops->dump_ftrace) {
		s->fbuf = NULL;
		s->ctx->ops->dump_ftrace(s->ctx);
	}
#endif
}

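/*
 * Stash the abort information in thread-specific data so that it can be
 * reported later by abort_print_current_ts() from a normal thread.
 */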
static void save_abort_info_in_tsd(struct abort_info *ai)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	tsd->abort_type = ai->abort_type;
	tsd->abort_descr = ai->fault_descr;
	tsd->abort_va = ai->va;
	tsd->abort_regs = *ai->regs;
	tsd->abort_core = get_core_pos();
}

#ifdef ARM32
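/*
 * Fill in @ai from the saved registers and from the fault status/address
 * registers (DFSR/DFAR or IFSR/IFAR) matching the abort type.
 */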
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
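/*
 * Fill in @ai from the saved registers; the abort type and fault address
 * are derived from ESR_EL1 and FAR_EL1 rather than from @abort_type.
 */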
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
static void handle_user_mode_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_mode_panic(struct abort_info *ai)
{
	struct thread_ctx *tc __maybe_unused = NULL;
	uint32_t daif = 0;
	uint32_t pan_bit = 0;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

#if defined(CFG_CORE_PAUTH)
	/*
	 * We're going to return to the privileged core thread, update the
	 * APIA key to match the key used by the thread.
	 */
	tc = threads + thread_get_id();
	ai->regs->apiakey_hi = tc->keys.apia_hi;
	ai->regs->apiakey_lo = tc->keys.apia_lo;
#endif

	if (IS_ENABLED(CFG_PAN) && feat_pan_implemented() && read_pan())
		pan_bit = SPSR_64_PAN;
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif) |
			 pan_bit;
}
#endif /*ARM64*/

#ifdef CFG_WITH_VFP
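/*
 * Lazily enable VFP for the current user mode context after a trapped
 * VFP/SIMD instruction.
 */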
static void handle_user_mode_vfp(void)
{
	struct ts_session *s = ts_get_current_session();

	thread_user_enable_vfp(&to_user_mode_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA
#ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/
#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/

#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
#ifdef ARM32
static bool is_vfp_fault(struct abort_info *ai)
{
	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/*
	 * Not entirely accurate, but if it's a truly undefined instruction
	 * we'll end up in this function again, except this time
	 * vfp_is_enabled() will return true, so we'll return false.
	 */
	return true;
}
#endif /*ARM32*/

#ifdef ARM64
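/* True if the exception class in ESR_EL1 is one of the VFP/SIMD related traps */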
static bool is_vfp_fault(struct abort_info *ai)
{
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_FP_ASIMD:
	case ESR_EC_AARCH32_FP:
	case ESR_EC_AARCH64_FP:
		return true;
	default:
		return false;
	}
}
#endif /*ARM64*/
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif  /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

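/*
 * True for data aborts where the write-not-read (WnR) bit in the fault
 * status register reports a write access.
 */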
bool abort_is_write_fault(struct abort_info *ai)
{
#ifdef ARM32
	unsigned int write_not_read = 11;
#endif
#ifdef ARM64
	unsigned int write_not_read = 6;
#endif

	return ai->abort_type == ABORT_TYPE_DATA &&
	       (ai->fault_descr & BIT(write_not_read));
}

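/*
 * Classify the abort so abort_handler() knows whether to panic the TA,
 * enable VFP, hand the fault to the pager or ignore it.
 */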
static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_MODE_VFP;
#ifndef CFG_WITH_PAGER
		return FAULT_TYPE_USER_MODE_PANIC;
#endif
	}

	if (thread_is_from_abort_mode()) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault!  (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault!  (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
	case CORE_MMU_FAULT_SYNC_EXTERNAL:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort]%s", fault_to_str(ai->abort_type,
					       ai->fault_descr));
		return FAULT_TYPE_EXTERNAL_ABORT;

	case CORE_MMU_FAULT_TAG_CHECK:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] Tag check fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_OTHER:
	default:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

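/*
 * Common handler for data aborts, prefetch aborts and undefined
 * instruction exceptions: classify the fault with get_fault_type() and
 * dispatch it accordingly.
 */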
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_MODE_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		save_abort_info_in_tsd(&ai);
		vfp_disable();
		handle_user_mode_panic(&ai);
		break;
	case FAULT_TYPE_EXTERNAL_ABORT:
#ifdef CFG_EXTERNAL_ABORT_PLAT_HANDLER
		/* Allow platform-specific handling */
		plat_external_abort_handler(&ai);
#endif
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_MODE_VFP:
		handle_user_mode_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		if (thread_get_id_may_fail() < 0) {
			abort_print_error(&ai);
			panic("abort outside thread context");
		}
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			if (!abort_is_user_exception(&ai)) {
				abort_print_error(&ai);
				panic("unhandled pageable abort");
			}
			DMSG("[abort] abort in User mode (TA will panic)");
			save_abort_info_in_tsd(&ai);
			vfp_disable();
			handle_user_mode_panic(&ai);
		}
		break;
	}
}