xref: /optee_os/core/arch/arm/kernel/abort.c (revision 8e64b181dc1c40f15e8ff691b5c676dfe14631b9)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <kernel/abort.h>
8 #include <kernel/ftrace.h>
9 #include <kernel/linker.h>
10 #include <kernel/misc.h>
11 #include <kernel/panic.h>
12 #include <kernel/tee_ta_manager.h>
13 #include <kernel/unwind.h>
14 #include <kernel/user_ta.h>
15 #include <mm/core_mmu.h>
16 #include <mm/mobj.h>
17 #include <mm/tee_pager.h>
18 #include <tee/tee_svc.h>
19 #include <trace.h>
20 
21 #include "thread_private.h"
22 
/* Classification of an abort, selects the handling strategy in abort_handler() */
enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,	/* Fatal user TA fault: TA will panic */
	FAULT_TYPE_USER_TA_VFP,		/* VFP trap: lazily enable VFP for the TA */
	FAULT_TYPE_PAGEABLE,		/* Candidate for pager fault handling */
	FAULT_TYPE_IGNORE,		/* Abort is ignored (e.g. debug event) */
};
29 
30 #ifdef CFG_UNWIND
31 
/*
 * Fetch the current 32-bit TA's unwind table (.ARM.exidx) location and
 * its stack bounds, for producing a user-mode call stack dump.
 *
 * @exidx      [out] virtual address of the unwind table, 0 if absent
 * @exidx_sz   [out] size in bytes of the unwind table
 * @stack      [out] base address of the TA stack
 * @stack_size [out] size in bytes of the TA stack
 *
 * Panics if no TA session is current on this thread.
 */
static void get_current_ta_exidx_stack(vaddr_t *exidx, size_t *exidx_sz,
				       vaddr_t *stack, size_t *stack_size)
{
	struct tee_ta_session *s;
	struct user_ta_ctx *utc;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	utc = to_user_ta_ctx(s->ctx);

	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
	assert(utc->is_32bit);

	*exidx = utc->exidx_start; /* NULL if TA has no unwind tables */
	if (*exidx)
		*exidx += utc->load_addr; /* relocate to the TA's load address */
	*exidx_sz = utc->exidx_size;

	*stack = utc->stack_addr;
	*stack_size = utc->mobj_stack->size;
}
54 
55 #ifdef ARM32
56 
57 /*
58  * Kernel or user mode unwind (32-bit execution state).
59  */
static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	vaddr_t exidx;
	size_t exidx_sz;
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp;
	uint32_t lr;
	vaddr_t stack;
	size_t stack_size;
	bool kernel_stack;

	/* Select the unwind tables and stack limits of the faulting context */
	if (abort_is_user_exception(ai)) {
		get_current_ta_exidx_stack(&exidx, &exidx_sz, &stack,
					   &stack_size);
		if (!exidx) {
			/* TA was linked without unwind tables */
			EMSG_RAW("Call stack not available");
			return;
		}
		kernel_stack = false;
	} else {
		exidx = (vaddr_t)__exidx_start;
		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
		/* Kernel stack */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
		kernel_stack = true;
	}

	/* USR and SYS share banked SP/LR; other modes need a mode read */
	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	/* Seed the unwinder with the register state at abort time */
	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	/* NOTE(review): r12 (ip) is left zero — confirm the unwinder ignores it */
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz, kernel_stack,
			  stack, stack_size);
}
117 #else /* ARM32 */
118 
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
	struct unwind_state_arm32 state;
	vaddr_t exidx;
	size_t exidx_sz;
	vaddr_t stack;
	size_t stack_size;

	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
	assert(abort_is_user_exception(ai));

	/* NOTE(review): unlike the ARM32 variant there is no !exidx check
	 * here — presumably print_stack_arm32() copes with exidx == 0;
	 * verify before relying on it. */
	get_current_ta_exidx_stack(&exidx, &exidx_sz, &stack, &stack_size);

	/* AArch32 registers r0..r14 live in the low halves of x0..x14 */
	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->x0;
	state.registers[1] = ai->regs->x1;
	state.registers[2] = ai->regs->x2;
	state.registers[3] = ai->regs->x3;
	state.registers[4] = ai->regs->x4;
	state.registers[5] = ai->regs->x5;
	state.registers[6] = ai->regs->x6;
	state.registers[7] = ai->regs->x7;
	state.registers[8] = ai->regs->x8;
	state.registers[9] = ai->regs->x9;
	state.registers[10] = ai->regs->x10;
	state.registers[11] = ai->regs->x11;
	/* NOTE(review): r12 is left zero here too — confirm it is not needed */
	state.registers[13] = ai->regs->x13;
	state.registers[14] = ai->regs->x14;
	state.registers[15] = ai->pc;

	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz,
			  false /*!kernel_stack*/, stack, stack_size);
}
153 #endif /* ARM32 */
154 #ifdef ARM64
155 /* Kernel or user mode unwind (64-bit execution state) */
static void __print_stack_unwind_arm64(struct abort_info *ai)
{
	struct unwind_state_arm64 state = { };
	bool kernel_stack = false;
	uaddr_t stack = 0;
	size_t stack_size = 0;

	/* Select the stack limits of the faulting context */
	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s = NULL;
		struct user_ta_ctx *utc = NULL;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		/* User stack */
		stack = utc->stack_addr;
		stack_size = utc->mobj_stack->size;
	} else {
		/* Kernel stack */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
		kernel_stack = true;
	}

	/* AArch64 unwinds via the frame pointer (x29) chain */
	state.pc = ai->regs->elr;
	state.fp = ai->regs->x29;

	print_stack_arm64(TRACE_ERROR, &state, kernel_stack, stack, stack_size);
}
186 #else
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
	/* 32-bit core: there is no AArch64 execution state to unwind */
}
191 #endif /*ARM64*/
192 #else /* CFG_UNWIND */
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
	/* Stub: CFG_UNWIND disabled, no call stack is printed */
}
196 
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
	/* Stub: CFG_UNWIND disabled, no call stack is printed */
}
200 #endif /* CFG_UNWIND */
201 
202 static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
203 {
204 	if (abort_type == ABORT_TYPE_DATA)
205 		return "data";
206 	if (abort_type == ABORT_TYPE_PREFETCH)
207 		return "prefetch";
208 	return "undef";
209 }
210 
211 static __maybe_unused const char *fault_to_str(uint32_t abort_type,
212 			uint32_t fault_descr)
213 {
214 	/* fault_descr is only valid for data or prefetch abort */
215 	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
216 		return "";
217 
218 	switch (core_mmu_get_fault_type(fault_descr)) {
219 	case CORE_MMU_FAULT_ALIGNMENT:
220 		return " (alignment fault)";
221 	case CORE_MMU_FAULT_TRANSLATION:
222 		return " (translation fault)";
223 	case CORE_MMU_FAULT_READ_PERMISSION:
224 		return " (read permission fault)";
225 	case CORE_MMU_FAULT_WRITE_PERMISSION:
226 		return " (write permission fault)";
227 	default:
228 		return "";
229 	}
230 }
231 
/*
 * Dump abort details and the full register file to the console.
 *
 * @ai	abort information gathered by set_abort_info()
 * @ctx	prefix string naming the faulting context ("Core" or "User TA")
 */
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
	__maybe_unused size_t core_pos = 0;
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp = 0;
	__maybe_unused uint32_t lr = 0;

	/*
	 * For a user-mode abort the CPU number was recorded at abort time
	 * in the TSD (we may run on another core now); otherwise ask the
	 * current core.
	 */
	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
		core_pos = thread_get_tsd()->abort_core;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
		core_pos = get_core_pos();
	}
#endif /*ARM32*/
#ifdef ARM64
	if (abort_is_user_exception(ai))
		core_pos = thread_get_tsd()->abort_core;
	else
		core_pos = get_core_pos();
#endif /*ARM64*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 core_pos, ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 core_pos, (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}
319 
320 /*
321  * Print abort info and (optionally) stack dump to the console
322  * @ai kernel-mode abort info.
323  * @stack_dump true to show a stack trace
324  */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	/* User-mode aborts are reported via abort_print_current_ta() */
	assert(!abort_is_user_exception(ai));

	__print_abort_info(ai, "Core");

	if (stack_dump) {
		/* Unwind using the core's native execution state */
#if defined(ARM32)
		__print_stack_unwind_arm32(ai);
#else
		__print_stack_unwind_arm64(ai);
#endif
	}
}
339 
/* Print kernel abort info without a stack dump (used for ignored faults) */
void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}
344 
/* Print kernel abort info together with a stack dump */
void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}
349 
350 /* This function must be called from a normal thread */
void abort_print_current_ta(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct abort_info ai = { };
	struct tee_ta_session *s = NULL;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	/* Rebuild the abort info from the copy saved in the TSD at abort time */
	ai.abort_type = tsd->abort_type;
	ai.fault_descr = tsd->abort_descr;
	ai.va = tsd->abort_va;
	ai.pc = tsd->abort_regs.elr;
	ai.regs = &tsd->abort_regs;

	/* A deliberate TA panic is not a CPU abort: skip the register dump */
	if (ai.abort_type != ABORT_TYPE_TA_PANIC)
		__print_abort_info(&ai, "User TA");

	EMSG_RAW("Status of TA %pUl (%p)",
		 (void *)&s->ctx->uuid, (void *)s->ctx);
	s->ctx->ops->dump_state(s->ctx);

	/* Dump the TA's ftrace buffer, if any */
	ta_fbuf_dump(s);

	/* Pick the unwinder matching the TA's execution state */
	if (to_user_ta_ctx(s->ctx)->is_32bit)
		__print_stack_unwind_arm32(&ai);
	else
		__print_stack_unwind_arm64(&ai);
}
380 
/*
 * Save the abort state in thread specific data so it can be reported
 * later, from normal thread context, by abort_print_current_ta().
 */
static void save_abort_info_in_tsd(struct abort_info *ai)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	tsd->abort_type = ai->abort_type;
	tsd->abort_descr = ai->fault_descr;
	tsd->abort_va = ai->va;
	tsd->abort_regs = *ai->regs;
	tsd->abort_core = get_core_pos(); /* core the abort was taken on */
}
391 
392 #ifdef ARM32
/*
 * Fill in @ai from the ARM32 fault status/address registers:
 * DFSR/DFAR for data aborts, IFSR/IFAR for prefetch aborts.
 */
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		/* Undefined instruction: no fault status, address is the PC */
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
414 #endif /*ARM32*/
415 
416 #ifdef ARM64
/*
 * Fill in @ai on ARM64. The abort type is derived from the ESR_EL1
 * exception class rather than from @abort_type, hence it is unused.
 */
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		/* Anything else is treated as undefined; FAR is not valid */
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
440 #endif /*ARM64*/
441 
442 #ifdef ARM32
/*
 * Rewrite the saved register state so the exception return lands in
 * thread_unwind_user_mode() with TEE_ERROR_TARGET_DEAD, terminating
 * the faulting TA (32-bit variant).
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	/* Keep only the exception mask bits, then switch to SVC mode */
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
462 #endif /*ARM32*/
463 
464 #ifdef ARM64
/*
 * Rewrite the saved register state so the exception return lands in
 * thread_unwind_user_mode() with TEE_ERROR_TARGET_DEAD, terminating
 * the faulting TA (64-bit variant).
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	/* Preserve the AIF exception masks from the aborted context */
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
483 #endif /*ARM64*/
484 
485 #ifdef CFG_WITH_VFP
/* Lazily enable VFP for the current TA after it trapped on a VFP insn */
static void handle_user_ta_vfp(void)
{
	struct tee_ta_session *s;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
}
495 #endif /*CFG_WITH_VFP*/
496 
497 #ifdef CFG_WITH_USER_TA
498 #ifdef ARM32
499 /* Returns true if the exception originated from user mode */
500 bool abort_is_user_exception(struct abort_info *ai)
501 {
502 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
503 }
504 #endif /*ARM32*/
505 
506 #ifdef ARM64
507 /* Returns true if the exception originated from user mode */
508 bool abort_is_user_exception(struct abort_info *ai)
509 {
510 	uint32_t spsr = ai->regs->spsr;
511 
512 	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
513 		return true;
514 	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
515 	    SPSR_64_MODE_EL0)
516 		return true;
517 	return false;
518 }
519 #endif /*ARM64*/
520 #else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	/* No user TA support: every abort originates from the core */
	return false;
}
525 #endif /*CFG_WITH_USER_TA*/
526 
527 #if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
528 #ifdef ARM32
/*
 * Guess whether an undefined-instruction abort was caused by a VFP
 * instruction executed while VFP was disabled (lazy VFP enablement).
 */
static bool is_vfp_fault(struct abort_info *ai)
{
	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/*
	 * Not entirely accurate, but if it's a truly undefined instruction
	 * we'll end up in this function again, except this time
	 * vfp_is_enabled() will return true so we'll return false.
	 */
	return true;
}
541 #endif /*ARM32*/
542 
543 #ifdef ARM64
544 static bool is_vfp_fault(struct abort_info *ai)
545 {
546 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
547 	case ESR_EC_FP_ASIMD:
548 	case ESR_EC_AARCH32_FP:
549 	case ESR_EC_AARCH64_FP:
550 		return true;
551 	default:
552 		return false;
553 	}
554 }
555 #endif /*ARM64*/
556 #else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	/* No VFP or no user TAs: a VFP trap can never occur */
	return false;
}
561 #endif  /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
562 
563 static enum fault_type get_fault_type(struct abort_info *ai)
564 {
565 	if (abort_is_user_exception(ai)) {
566 		if (is_vfp_fault(ai))
567 			return FAULT_TYPE_USER_TA_VFP;
568 #ifndef CFG_WITH_PAGER
569 		return FAULT_TYPE_USER_TA_PANIC;
570 #endif
571 	}
572 
573 	if (thread_is_from_abort_mode()) {
574 		abort_print_error(ai);
575 		panic("[abort] abort in abort handler (trap CPU)");
576 	}
577 
578 	if (ai->abort_type == ABORT_TYPE_UNDEF) {
579 		if (abort_is_user_exception(ai))
580 			return FAULT_TYPE_USER_TA_PANIC;
581 		abort_print_error(ai);
582 		panic("[abort] undefined abort (trap CPU)");
583 	}
584 
585 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
586 	case CORE_MMU_FAULT_ALIGNMENT:
587 		if (abort_is_user_exception(ai))
588 			return FAULT_TYPE_USER_TA_PANIC;
589 		abort_print_error(ai);
590 		panic("[abort] alignement fault!  (trap CPU)");
591 		break;
592 
593 	case CORE_MMU_FAULT_ACCESS_BIT:
594 		if (abort_is_user_exception(ai))
595 			return FAULT_TYPE_USER_TA_PANIC;
596 		abort_print_error(ai);
597 		panic("[abort] access bit fault!  (trap CPU)");
598 		break;
599 
600 	case CORE_MMU_FAULT_DEBUG_EVENT:
601 		if (!abort_is_user_exception(ai))
602 			abort_print(ai);
603 		DMSG("[abort] Ignoring debug event!");
604 		return FAULT_TYPE_IGNORE;
605 
606 	case CORE_MMU_FAULT_TRANSLATION:
607 	case CORE_MMU_FAULT_WRITE_PERMISSION:
608 	case CORE_MMU_FAULT_READ_PERMISSION:
609 		return FAULT_TYPE_PAGEABLE;
610 
611 	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
612 		if (!abort_is_user_exception(ai))
613 			abort_print(ai);
614 		DMSG("[abort] Ignoring async external abort!");
615 		return FAULT_TYPE_IGNORE;
616 
617 	case CORE_MMU_FAULT_OTHER:
618 	default:
619 		if (!abort_is_user_exception(ai))
620 			abort_print(ai);
621 		DMSG("[abort] Unhandled fault!");
622 		return FAULT_TYPE_IGNORE;
623 	}
624 }
625 
/*
 * Main abort entry point, called from the exception vector with the
 * register state saved at exception entry. Classifies the fault and
 * dispatches: ignore, panic the TA, enable VFP, or let the pager try
 * to resolve it.
 */
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		save_abort_info_in_tsd(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		/* Pager needs a thread context to run in */
		if (thread_get_id_may_fail() < 0) {
			abort_print_error(&ai);
			panic("abort outside thread context");
		}
		/* Protect the VFP state across the pager call */
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			if (!abort_is_user_exception(&ai)) {
				abort_print_error(&ai);
				panic("unhandled pageable abort");
			}
			/* Unresolvable user fault: make the TA panic */
			DMSG("[abort] abort in User mode (TA will panic)");
			save_abort_info_in_tsd(&ai);
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}
669