xref: /optee_os/core/arch/arm/kernel/abort.c (revision d1911a85142da16fef5ebdcdac0348d29ce37cd8)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <kernel/abort.h>
8 #include <kernel/ftrace.h>
9 #include <kernel/linker.h>
10 #include <kernel/misc.h>
11 #include <kernel/panic.h>
12 #include <kernel/tee_ta_manager.h>
13 #include <kernel/unwind.h>
14 #include <kernel/user_ta.h>
15 #include <mm/core_mmu.h>
16 #include <mm/mobj.h>
17 #include <mm/tee_pager.h>
18 #include <tee/tee_svc.h>
19 #include <trace.h>
20 
21 #include "thread_private.h"
22 
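/*
 * Classification of an abort as returned by get_fault_type(): the
 * faulting user TA must be panicked, the fault is a VFP trap to be
 * handled by enabling VFP for the TA, the fault may be resolved by the
 * pager, or the fault can safely be ignored.
 */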
23 enum fault_type {
24 	FAULT_TYPE_USER_TA_PANIC,
25 	FAULT_TYPE_USER_TA_VFP,
26 	FAULT_TYPE_PAGEABLE,
27 	FAULT_TYPE_IGNORE,
28 };
29 
30 #ifdef CFG_UNWIND
31 
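/*
 * All outputs are zeroed: no TA exception index table or stack limits
 * are provided to the 32-bit unwinder.
 */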
32 static void get_current_ta_exidx_stack(vaddr_t *exidx, size_t *exidx_sz,
33 				       vaddr_t *stack, size_t *stack_size)
34 {
35 	*exidx = 0;
36 	*exidx_sz = 0;
37 	*stack = 0;
38 	*stack_size = 0;
39 }
40 
41 #ifdef ARM32
42 
43 /*
44  * Kernel or user mode unwind (32-bit execution state).
45  */
46 static void __print_stack_unwind_arm32(struct abort_info *ai)
47 {
48 	struct unwind_state_arm32 state;
49 	vaddr_t exidx;
50 	size_t exidx_sz;
51 	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
52 	uint32_t sp;
53 	uint32_t lr;
54 	vaddr_t stack;
55 	size_t stack_size;
56 	bool kernel_stack;
57 
58 	if (abort_is_user_exception(ai)) {
59 		get_current_ta_exidx_stack(&exidx, &exidx_sz, &stack,
60 					   &stack_size);
61 		if (!exidx) {
62 			EMSG_RAW("Call stack not available");
63 			return;
64 		}
65 		kernel_stack = false;
66 	} else {
67 		exidx = (vaddr_t)__exidx_start;
68 		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
69 		/* Kernel stack */
70 		stack = thread_stack_start();
71 		stack_size = thread_stack_size();
72 		kernel_stack = true;
73 	}
74 
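	/*
	 * USR and SYS share sp/lr, which were saved in the abort frame;
	 * for other modes read the banked sp/lr of the aborted mode.
	 */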
75 	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
76 		sp = ai->regs->usr_sp;
77 		lr = ai->regs->usr_lr;
78 	} else {
79 		sp = read_mode_sp(mode);
80 		lr = read_mode_lr(mode);
81 	}
82 
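	/* Seed the unwind state with the registers captured at the abort */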
83 	memset(&state, 0, sizeof(state));
84 	state.registers[0] = ai->regs->r0;
85 	state.registers[1] = ai->regs->r1;
86 	state.registers[2] = ai->regs->r2;
87 	state.registers[3] = ai->regs->r3;
88 	state.registers[4] = ai->regs->r4;
89 	state.registers[5] = ai->regs->r5;
90 	state.registers[6] = ai->regs->r6;
91 	state.registers[7] = ai->regs->r7;
92 	state.registers[8] = ai->regs->r8;
93 	state.registers[9] = ai->regs->r9;
94 	state.registers[10] = ai->regs->r10;
95 	state.registers[11] = ai->regs->r11;
96 	state.registers[13] = sp;
97 	state.registers[14] = lr;
98 	state.registers[15] = ai->pc;
99 
100 	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz, kernel_stack,
101 			  stack, stack_size);
102 }
103 #else /* ARM32 */
104 
105 static void __print_stack_unwind_arm32(struct abort_info *ai)
106 {
107 	struct unwind_state_arm32 state;
108 	vaddr_t exidx;
109 	size_t exidx_sz;
110 	vaddr_t stack;
111 	size_t stack_size;
112 
113 	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
114 	assert(abort_is_user_exception(ai));
115 
116 	get_current_ta_exidx_stack(&exidx, &exidx_sz, &stack, &stack_size);
117 
118 	memset(&state, 0, sizeof(state));
119 	state.registers[0] = ai->regs->x0;
120 	state.registers[1] = ai->regs->x1;
121 	state.registers[2] = ai->regs->x2;
122 	state.registers[3] = ai->regs->x3;
123 	state.registers[4] = ai->regs->x4;
124 	state.registers[5] = ai->regs->x5;
125 	state.registers[6] = ai->regs->x6;
126 	state.registers[7] = ai->regs->x7;
127 	state.registers[8] = ai->regs->x8;
128 	state.registers[9] = ai->regs->x9;
129 	state.registers[10] = ai->regs->x10;
130 	state.registers[11] = ai->regs->x11;
131 
132 	state.registers[13] = ai->regs->x13;
133 	state.registers[14] = ai->regs->x14;
134 	state.registers[15] = ai->pc;
135 
136 	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz,
137 			  false /*!kernel_stack*/, stack, stack_size);
138 }
139 #endif /* ARM32 */
140 #ifdef ARM64
141 /* Kernel or user mode unwind (64-bit execution state) */
142 static void __print_stack_unwind_arm64(struct abort_info *ai)
143 {
144 	struct unwind_state_arm64 state = { };
145 	bool kernel_stack = false;
146 	uaddr_t stack = 0;
147 	size_t stack_size = 0;
148 
149 	if (abort_is_user_exception(ai)) {
150 		/* User stack */
151 		stack = 0;
152 		stack_size = 0;
153 	} else {
154 		/* Kernel stack */
155 		stack = thread_stack_start();
156 		stack_size = thread_stack_size();
157 		kernel_stack = true;
158 	}
159 
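	/* The 64-bit unwinder starts from the aborted PC and frame pointer */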
160 	state.pc = ai->regs->elr;
161 	state.fp = ai->regs->x29;
162 
163 	print_stack_arm64(TRACE_ERROR, &state, kernel_stack, stack, stack_size);
164 }
165 #else
166 static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
167 {
169 }
170 #endif /*ARM64*/
171 #else /* CFG_UNWIND */
172 static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
173 {
174 }
175 
176 static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
177 {
178 }
179 #endif /* CFG_UNWIND */
180 
181 static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
182 {
183 	if (abort_type == ABORT_TYPE_DATA)
184 		return "data";
185 	if (abort_type == ABORT_TYPE_PREFETCH)
186 		return "prefetch";
187 	return "undef";
188 }
189 
190 static __maybe_unused const char *fault_to_str(uint32_t abort_type,
191 			uint32_t fault_descr)
192 {
193 	/* fault_descr is only valid for data or prefetch abort */
194 	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
195 		return "";
196 
197 	switch (core_mmu_get_fault_type(fault_descr)) {
198 	case CORE_MMU_FAULT_ALIGNMENT:
199 		return " (alignment fault)";
200 	case CORE_MMU_FAULT_TRANSLATION:
201 		return " (translation fault)";
202 	case CORE_MMU_FAULT_READ_PERMISSION:
203 		return " (read permission fault)";
204 	case CORE_MMU_FAULT_WRITE_PERMISSION:
205 		return " (write permission fault)";
206 	default:
207 		return "";
208 	}
209 }
210 
211 static __maybe_unused void
212 __print_abort_info(struct abort_info *ai __maybe_unused,
213 		   const char *ctx __maybe_unused)
214 {
215 	__maybe_unused size_t core_pos = 0;
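	/*
	 * User-mode aborts may be reported later from a normal thread
	 * (abort_print_current_ta()), so use the core number recorded
	 * when the abort was taken; kernel aborts are reported in place.
	 */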
216 #ifdef ARM32
217 	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
218 	__maybe_unused uint32_t sp = 0;
219 	__maybe_unused uint32_t lr = 0;
220 
221 	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
222 		sp = ai->regs->usr_sp;
223 		lr = ai->regs->usr_lr;
224 		core_pos = thread_get_tsd()->abort_core;
225 	} else {
226 		sp = read_mode_sp(mode);
227 		lr = read_mode_lr(mode);
228 		core_pos = get_core_pos();
229 	}
230 #endif /*ARM32*/
231 #ifdef ARM64
232 	if (abort_is_user_exception(ai))
233 		core_pos = thread_get_tsd()->abort_core;
234 	else
235 		core_pos = get_core_pos();
236 #endif /*ARM64*/
237 
238 	EMSG_RAW("");
239 	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
240 		ctx, abort_type_to_str(ai->abort_type), ai->va,
241 		fault_to_str(ai->abort_type, ai->fault_descr));
242 #ifdef ARM32
243 	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
244 		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
245 		 read_contextidr());
246 	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
247 		 core_pos, ai->regs->spsr);
248 	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
249 		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
250 	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
251 		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
252 	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
253 		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
254 	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
255 		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
256 #endif /*ARM32*/
257 #ifdef ARM64
258 	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
259 		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
260 		 read_ttbr1_el1(), read_contextidr_el1());
261 	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
262 		 core_pos, (uint32_t)ai->regs->spsr);
263 	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
264 		 ai->regs->x0, ai->regs->x1);
265 	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
266 		 ai->regs->x2, ai->regs->x3);
267 	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
268 		 ai->regs->x4, ai->regs->x5);
269 	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
270 		 ai->regs->x6, ai->regs->x7);
271 	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
272 		 ai->regs->x8, ai->regs->x9);
273 	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
274 		 ai->regs->x10, ai->regs->x11);
275 	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
276 		 ai->regs->x12, ai->regs->x13);
277 	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
278 		 ai->regs->x14, ai->regs->x15);
279 	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
280 		 ai->regs->x16, ai->regs->x17);
281 	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
282 		 ai->regs->x18, ai->regs->x19);
283 	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
284 		 ai->regs->x20, ai->regs->x21);
285 	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
286 		 ai->regs->x22, ai->regs->x23);
287 	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
288 		 ai->regs->x24, ai->regs->x25);
289 	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
290 		 ai->regs->x26, ai->regs->x27);
291 	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
292 		 ai->regs->x28, ai->regs->x29);
293 	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
294 		 ai->regs->x30, ai->regs->elr);
295 	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
296 #endif /*ARM64*/
297 }
298 
299 /*
300  * Print abort info and (optionally) stack dump to the console
301  * @ai kernel-mode abort info.
302  * @stack_dump true to show a stack trace
303  */
304 static void __abort_print(struct abort_info *ai, bool stack_dump)
305 {
306 	assert(!abort_is_user_exception(ai));
307 
308 	__print_abort_info(ai, "Core");
309 
310 	if (stack_dump) {
311 #if defined(ARM32)
312 		__print_stack_unwind_arm32(ai);
313 #else
314 		__print_stack_unwind_arm64(ai);
315 #endif
316 	}
317 }
318 
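/* Print the abort information without a stack trace */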
319 void abort_print(struct abort_info *ai)
320 {
321 	__abort_print(ai, false);
322 }
323 
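/* Print the abort information followed by a stack trace */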
324 void abort_print_error(struct abort_info *ai)
325 {
326 	__abort_print(ai, true);
327 }
328 
329 /* This function must be called from a normal thread */
330 void abort_print_current_ta(void)
331 {
332 	struct thread_specific_data *tsd = thread_get_tsd();
333 	struct abort_info ai = { };
334 	struct tee_ta_session *s = NULL;
335 
336 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
337 		panic();
338 
339 	ai.abort_type = tsd->abort_type;
340 	ai.fault_descr = tsd->abort_descr;
341 	ai.va = tsd->abort_va;
342 	ai.pc = tsd->abort_regs.elr;
343 	ai.regs = &tsd->abort_regs;
344 
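	/* A TA panic is not a hardware abort: skip the fault dump */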
345 	if (ai.abort_type != ABORT_TYPE_TA_PANIC)
346 		__print_abort_info(&ai, "User TA");
347 
348 	EMSG_RAW("Status of TA %pUl (%p)",
349 		 (void *)&s->ctx->uuid, (void *)s->ctx);
350 	s->ctx->ops->dump_state(s->ctx);
351 
352 	ta_fbuf_dump(s);
353 
354 	if (to_user_ta_ctx(s->ctx)->is_32bit)
355 		__print_stack_unwind_arm32(&ai);
356 	else
357 		__print_stack_unwind_arm64(&ai);
358 }
359 
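/*
 * Save the abort information in thread-specific data so that
 * abort_print_current_ta() can report it later from a normal thread.
 */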
360 static void save_abort_info_in_tsd(struct abort_info *ai)
361 {
362 	struct thread_specific_data *tsd = thread_get_tsd();
363 
364 	tsd->abort_type = ai->abort_type;
365 	tsd->abort_descr = ai->fault_descr;
366 	tsd->abort_va = ai->va;
367 	tsd->abort_regs = *ai->regs;
368 	tsd->abort_core = get_core_pos();
369 }
370 
371 #ifdef ARM32
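/* Read the fault status and address registers matching the abort type */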
372 static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
373 		struct abort_info *ai)
374 {
375 	switch (abort_type) {
376 	case ABORT_TYPE_DATA:
377 		ai->fault_descr = read_dfsr();
378 		ai->va = read_dfar();
379 		break;
380 	case ABORT_TYPE_PREFETCH:
381 		ai->fault_descr = read_ifsr();
382 		ai->va = read_ifar();
383 		break;
384 	default:
385 		ai->fault_descr = 0;
386 		ai->va = regs->elr;
387 		break;
388 	}
389 	ai->abort_type = abort_type;
390 	ai->pc = regs->elr;
391 	ai->regs = regs;
392 }
393 #endif /*ARM32*/
394 
395 #ifdef ARM64
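/* Derive abort type from ESR_EL1 and fault address from FAR_EL1 */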
396 static void set_abort_info(uint32_t abort_type __unused,
397 		struct thread_abort_regs *regs, struct abort_info *ai)
398 {
399 	ai->fault_descr = read_esr_el1();
400 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
401 	case ESR_EC_IABT_EL0:
402 	case ESR_EC_IABT_EL1:
403 		ai->abort_type = ABORT_TYPE_PREFETCH;
404 		ai->va = read_far_el1();
405 		break;
406 	case ESR_EC_DABT_EL0:
407 	case ESR_EC_DABT_EL1:
408 	case ESR_EC_SP_ALIGN:
409 		ai->abort_type = ABORT_TYPE_DATA;
410 		ai->va = read_far_el1();
411 		break;
412 	default:
413 		ai->abort_type = ABORT_TYPE_UNDEF;
414 		ai->va = regs->elr;
415 	}
416 	ai->pc = regs->elr;
417 	ai->regs = regs;
418 }
419 #endif /*ARM64*/
420 
421 #ifdef ARM32
422 static void handle_user_ta_panic(struct abort_info *ai)
423 {
424 	/*
425 	 * It was a user exception, stop user execution and return
426 	 * to TEE Core.
427 	 */
428 	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
429 	ai->regs->r1 = true;
430 	ai->regs->r2 = 0xdeadbeef;
431 	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
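	/*
	 * Return in SVC mode, keeping only the A/I/F exception mask bits
	 * of the saved SPSR.
	 */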
432 	ai->regs->spsr &= CPSR_FIA;
433 	ai->regs->spsr &= ~CPSR_MODE_MASK;
434 	ai->regs->spsr |= CPSR_MODE_SVC;
435 	/* Select Thumb or ARM mode */
436 	if (ai->regs->elr & 1)
437 		ai->regs->spsr |= CPSR_T;
438 	else
439 		ai->regs->spsr &= ~CPSR_T;
440 }
441 #endif /*ARM32*/
442 
443 #ifdef ARM64
444 static void handle_user_ta_panic(struct abort_info *ai)
445 {
446 	uint32_t daif;
447 
448 	/*
449 	 * It was a user exception, stop user execution and return
450 	 * to TEE Core.
451 	 */
452 	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
453 	ai->regs->x1 = true;
454 	ai->regs->x2 = 0xdeadbeef;
455 	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
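	/* Resume on the thread's saved kernel stack pointer */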
456 	ai->regs->sp_el0 = thread_get_saved_thread_sp();
457 
458 	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
459 	/* XXX what about DAIF_D? */
460 	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
461 }
462 #endif /*ARM64*/
463 
464 #ifdef CFG_WITH_VFP
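/* Enable VFP for the current TA so the trapped instruction can be retried */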
465 static void handle_user_ta_vfp(void)
466 {
467 	struct tee_ta_session *s;
468 
469 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
470 		panic();
471 
472 	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
473 }
474 #endif /*CFG_WITH_VFP*/
475 
476 #ifdef CFG_WITH_USER_TA
477 #ifdef ARM32
478 /* Returns true if the exception originated from user mode */
479 bool abort_is_user_exception(struct abort_info *ai)
480 {
481 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
482 }
483 #endif /*ARM32*/
484 
485 #ifdef ARM64
486 /* Returns true if the exception originated from user mode */
487 bool abort_is_user_exception(struct abort_info *ai)
488 {
489 	uint32_t spsr = ai->regs->spsr;
490 
491 	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
492 		return true;
493 	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
494 	    SPSR_64_MODE_EL0)
495 		return true;
496 	return false;
497 }
498 #endif /*ARM64*/
499 #else /*CFG_WITH_USER_TA*/
500 bool abort_is_user_exception(struct abort_info *ai __unused)
501 {
502 	return false;
503 }
504 #endif /*CFG_WITH_USER_TA*/
505 
506 #if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
507 #ifdef ARM32
508 static bool is_vfp_fault(struct abort_info *ai)
509 {
510 	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
511 		return false;
512 
513 	/*
514 	 * Not entirely accurate, but if it's a truly undefined instruction
515 	 * we'll end up in this function again, except this time
516 	 * vfp_is_enabled() will return true, so we'll return false.
517 	 */
518 	return true;
519 }
520 #endif /*ARM32*/
521 
522 #ifdef ARM64
523 static bool is_vfp_fault(struct abort_info *ai)
524 {
525 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
526 	case ESR_EC_FP_ASIMD:
527 	case ESR_EC_AARCH32_FP:
528 	case ESR_EC_AARCH64_FP:
529 		return true;
530 	default:
531 		return false;
532 	}
533 }
534 #endif /*ARM64*/
535 #else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
536 static bool is_vfp_fault(struct abort_info *ai __unused)
537 {
538 	return false;
539 }
540 #endif  /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
541 
542 static enum fault_type get_fault_type(struct abort_info *ai)
543 {
544 	if (abort_is_user_exception(ai)) {
545 		if (is_vfp_fault(ai))
546 			return FAULT_TYPE_USER_TA_VFP;
547 #ifndef CFG_WITH_PAGER
548 		return FAULT_TYPE_USER_TA_PANIC;
549 #endif
550 	}
551 
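	/* An abort taken while already handling an abort is fatal */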
552 	if (thread_is_from_abort_mode()) {
553 		abort_print_error(ai);
554 		panic("[abort] abort in abort handler (trap CPU)");
555 	}
556 
557 	if (ai->abort_type == ABORT_TYPE_UNDEF) {
558 		if (abort_is_user_exception(ai))
559 			return FAULT_TYPE_USER_TA_PANIC;
560 		abort_print_error(ai);
561 		panic("[abort] undefined abort (trap CPU)");
562 	}
563 
564 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
565 	case CORE_MMU_FAULT_ALIGNMENT:
566 		if (abort_is_user_exception(ai))
567 			return FAULT_TYPE_USER_TA_PANIC;
568 		abort_print_error(ai);
569 		panic("[abort] alignment fault!  (trap CPU)");
570 		break;
571 
572 	case CORE_MMU_FAULT_ACCESS_BIT:
573 		if (abort_is_user_exception(ai))
574 			return FAULT_TYPE_USER_TA_PANIC;
575 		abort_print_error(ai);
576 		panic("[abort] access bit fault!  (trap CPU)");
577 		break;
578 
579 	case CORE_MMU_FAULT_DEBUG_EVENT:
580 		if (!abort_is_user_exception(ai))
581 			abort_print(ai);
582 		DMSG("[abort] Ignoring debug event!");
583 		return FAULT_TYPE_IGNORE;
584 
585 	case CORE_MMU_FAULT_TRANSLATION:
586 	case CORE_MMU_FAULT_WRITE_PERMISSION:
587 	case CORE_MMU_FAULT_READ_PERMISSION:
588 		return FAULT_TYPE_PAGEABLE;
589 
590 	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
591 		if (!abort_is_user_exception(ai))
592 			abort_print(ai);
593 		DMSG("[abort] Ignoring async external abort!");
594 		return FAULT_TYPE_IGNORE;
595 
596 	case CORE_MMU_FAULT_OTHER:
597 	default:
598 		if (!abort_is_user_exception(ai))
599 			abort_print(ai);
600 		DMSG("[abort] Unhandled fault!");
601 		return FAULT_TYPE_IGNORE;
602 	}
603 }
604 
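/* Main abort handler, entered from the exception vectors */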
605 void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
606 {
607 	struct abort_info ai;
608 	bool handled;
609 
610 	set_abort_info(abort_type, regs, &ai);
611 
612 	switch (get_fault_type(&ai)) {
613 	case FAULT_TYPE_IGNORE:
614 		break;
615 	case FAULT_TYPE_USER_TA_PANIC:
616 		DMSG("[abort] abort in User mode (TA will panic)");
617 		save_abort_info_in_tsd(&ai);
618 		vfp_disable();
619 		handle_user_ta_panic(&ai);
620 		break;
621 #ifdef CFG_WITH_VFP
622 	case FAULT_TYPE_USER_TA_VFP:
623 		handle_user_ta_vfp();
624 		break;
625 #endif
626 	case FAULT_TYPE_PAGEABLE:
627 	default:
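		/*
		 * Let the pager try to resolve the fault. If it cannot,
		 * a user-mode fault panics the TA and a kernel fault
		 * panics the core.
		 */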
628 		if (thread_get_id_may_fail() < 0) {
629 			abort_print_error(&ai);
630 			panic("abort outside thread context");
631 		}
632 		thread_kernel_save_vfp();
633 		handled = tee_pager_handle_fault(&ai);
634 		thread_kernel_restore_vfp();
635 		if (!handled) {
636 			if (!abort_is_user_exception(&ai)) {
637 				abort_print_error(&ai);
638 				panic("unhandled pageable abort");
639 			}
640 			DMSG("[abort] abort in User mode (TA will panic)");
641 			save_abort_info_in_tsd(&ai);
642 			vfp_disable();
643 			handle_user_ta_panic(&ai);
644 		}
645 		break;
646 	}
647 }
648