xref: /optee_os/core/arch/arm/kernel/abort.c (revision a50cb361d9e5735f197ccc87beb0d24af8315369)
1 /*
2  * Copyright (c) 2015, Linaro Limited
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <kernel/abort.h>
29 #include <kernel/misc.h>
30 #include <kernel/tee_ta_manager.h>
31 #include <kernel/panic.h>
32 #include <kernel/user_ta.h>
33 #include <kernel/unwind.h>
34 #include <mm/core_mmu.h>
35 #include <mm/tee_pager.h>
36 #include <tee/tee_svc.h>
37 #include <trace.h>
38 #include <arm.h>
39 
40 enum fault_type {
41 	FAULT_TYPE_USER_TA_PANIC,
42 	FAULT_TYPE_USER_TA_VFP,
43 	FAULT_TYPE_PAGEABLE,
44 	FAULT_TYPE_IGNORE,
45 };
46 
47 #ifdef CFG_CORE_UNWIND
48 #ifdef ARM32
49 static void __print_stack_unwind(struct abort_info *ai)
50 {
51 	struct unwind_state state;
52 
53 	memset(&state, 0, sizeof(state));
54 	state.registers[0] = ai->regs->r0;
55 	state.registers[1] = ai->regs->r1;
56 	state.registers[2] = ai->regs->r2;
57 	state.registers[3] = ai->regs->r3;
58 	state.registers[4] = ai->regs->r4;
59 	state.registers[5] = ai->regs->r5;
60 	state.registers[6] = ai->regs->r6;
61 	state.registers[7] = ai->regs->r7;
62 	state.registers[8] = ai->regs->r8;
63 	state.registers[9] = ai->regs->r9;
64 	state.registers[10] = ai->regs->r10;
65 	state.registers[11] = ai->regs->r11;
66 	state.registers[13] = read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK);
67 	state.registers[14] = read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK);
68 	state.registers[15] = ai->pc;
69 
70 	do {
71 		EMSG_RAW(" pc 0x%08x", state.registers[15]);
72 	} while (unwind_stack(&state));
73 }
74 #endif /*ARM32*/
75 
76 #ifdef ARM64
77 static void __print_stack_unwind(struct abort_info *ai)
78 {
79 	struct unwind_state state;
80 
81 	memset(&state, 0, sizeof(state));
82 	state.pc = ai->regs->elr;
83 	state.fp = ai->regs->x29;
84 
85 	do {
86 		EMSG_RAW("pc  0x%016" PRIx64, state.pc);
87 	} while (unwind_stack(&state));
88 }
89 #endif /*ARM64*/
90 
/* Print a "Call stack:" banner followed by the unwound frames */
static void print_stack_unwind(struct abort_info *ai)
{
	EMSG_RAW("Call stack:");
	__print_stack_unwind(ai);
}
96 #else /*CFG_CORE_UNWIND*/
/* Stack unwinding compiled out (CFG_CORE_UNWIND=n): nothing to print */
static void print_stack_unwind(struct abort_info *ai __unused)
{
}
100 #endif /*CFG_CORE_UNWIND*/
101 
102 static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
103 {
104 	if (abort_type == ABORT_TYPE_DATA)
105 		return "data";
106 	if (abort_type == ABORT_TYPE_PREFETCH)
107 		return "prefetch";
108 	return "undef";
109 }
110 
111 static __maybe_unused const char *fault_to_str(uint32_t abort_type,
112 			uint32_t fault_descr)
113 {
114 	/* fault_descr is only valid for data or prefetch abort */
115 	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
116 		return "";
117 
118 	switch (core_mmu_get_fault_type(fault_descr)) {
119 	case CORE_MMU_FAULT_ALIGNMENT:
120 		return " (alignment fault)";
121 	case CORE_MMU_FAULT_TRANSLATION:
122 		return " (translation fault)";
123 	case CORE_MMU_FAULT_READ_PERMISSION:
124 		return " (read permission fault)";
125 	case CORE_MMU_FAULT_WRITE_PERMISSION:
126 		return " (write permission fault)";
127 	default:
128 		return "";
129 	}
130 }
131 
/*
 * Dump a full register and MMU snapshot for an abort.
 * @ai:  collected abort state (type, VA, fault status, saved registers)
 * @ctx: which context aborted, e.g. "core" or "user TA"
 * Output goes through EMSG_RAW() so the dump is emitted without the
 * usual trace prefix formatting.
 */
static __maybe_unused void print_detailed_abort(
				struct abort_info *ai __maybe_unused,
				const char *ctx __maybe_unused)
{
	EMSG_RAW("\n");
	/* One-line summary: context, abort class, faulting VA, fault decode */
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s\n",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	/* Fault status plus current translation table and context ID regs */
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x\n",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	/* sp/lr are the banked registers of the aborted mode (from SPSR) */
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x\n",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9,
		 read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x\n",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10,
		 read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x\n",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	/* ESR plus current translation table and context ID registers */
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64 "   cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0_el1(), read_ttbr1_el1(),
		 read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	/* Full general purpose register file, two per line */
	EMSG_RAW("x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW("x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW("x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW("x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW("x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW("x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW("x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW("x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW("x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW("x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW("x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW("x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW("x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW("x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW("x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW("x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW("sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}
198 
/*
 * Dump abort details for a user TA abort, including the current TA
 * session state. Compiled to a no-op when TA tracing is disabled.
 */
static void print_user_abort(struct abort_info *ai __maybe_unused)
{
#ifdef CFG_TEE_CORE_TA_TRACE
	print_detailed_abort(ai, "user TA");
	tee_ta_dump_current();
#endif
}
206 
/* Print a detailed core abort report; no-op below INFO trace level */
void abort_print(struct abort_info *ai __maybe_unused)
{
#if (TRACE_LEVEL >= TRACE_INFO)
	print_detailed_abort(ai, "core");
#endif /*TRACE_LEVEL >= TRACE_INFO*/
}
213 
/*
 * Print an abort report for a fatal error, followed by a stack unwind.
 * At INFO trace level or above the full register dump is printed;
 * otherwise a compact one-shot EMSG() summary is used.
 */
void abort_print_error(struct abort_info *ai)
{
#if (TRACE_LEVEL >= TRACE_INFO)
	/* Full verbose dump when the trace level allows it */
	print_detailed_abort(ai, "core");
#else
#ifdef ARM32
	EMSG("%s-abort at 0x%" PRIxVA "\n"
	     "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X\n"
	     "CPUID 0x%x CPSR 0x%x (read from SPSR)",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr(),
	     read_mpidr(), read_spsr());
#endif /*ARM32*/
#ifdef ARM64
	EMSG("%s-abort at 0x%" PRIxVA "\n"
	     "ESR 0x%x PC 0x%x TTBR0 0x%" PRIx64 " CONTEXIDR 0x%X\n"
	     "CPUID 0x%" PRIx64 " CPSR 0x%x (read from SPSR)",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0_el1(),
	     read_contextidr_el1(),
	     read_mpidr_el1(), (uint32_t)ai->regs->spsr);
#endif /*ARM64*/
#endif /*TRACE_LEVEL >= TRACE_INFO*/
	print_stack_unwind(ai);
}
240 
241 #ifdef ARM32
242 static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
243 		struct abort_info *ai)
244 {
245 	switch (abort_type) {
246 	case ABORT_TYPE_DATA:
247 		ai->fault_descr = read_dfsr();
248 		ai->va = read_dfar();
249 		break;
250 	case ABORT_TYPE_PREFETCH:
251 		ai->fault_descr = read_ifsr();
252 		ai->va = read_ifar();
253 		break;
254 	default:
255 		ai->fault_descr = 0;
256 		ai->va = regs->elr;
257 		break;
258 	}
259 	ai->abort_type = abort_type;
260 	ai->pc = regs->elr;
261 	ai->regs = regs;
262 }
263 #endif /*ARM32*/
264 
265 #ifdef ARM64
266 static void set_abort_info(uint32_t abort_type __unused,
267 		struct thread_abort_regs *regs, struct abort_info *ai)
268 {
269 	ai->fault_descr = read_esr_el1();
270 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
271 	case ESR_EC_IABT_EL0:
272 	case ESR_EC_IABT_EL1:
273 		ai->abort_type = ABORT_TYPE_PREFETCH;
274 		ai->va = read_far_el1();
275 		break;
276 	case ESR_EC_DABT_EL0:
277 	case ESR_EC_DABT_EL1:
278 	case ESR_EC_SP_ALIGN:
279 		ai->abort_type = ABORT_TYPE_DATA;
280 		ai->va = read_far_el1();
281 		break;
282 	default:
283 		ai->abort_type = ABORT_TYPE_UNDEF;
284 		ai->va = regs->elr;
285 	}
286 	ai->pc = regs->elr;
287 	ai->regs = regs;
288 }
289 #endif /*ARM64*/
290 
291 #ifdef ARM32
/*
 * Terminate the aborting user TA (ARM32): rewrite the saved abort frame
 * so the exception return resumes in thread_unwind_user_mode() in SVC
 * mode with TEE_ERROR_TARGET_DEAD as the result.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	/* r1/r2: presumably panic flag and panic code arguments to
	 * thread_unwind_user_mode() - confirm against its prototype */
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	/* Build an SVC-mode SPSR from the current CPSR */
	ai->regs->spsr = read_cpsr();
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Keep the F/I/A mask bits the aborted context had */
	ai->regs->spsr &= ~CPSR_FIA;
	ai->regs->spsr |= read_spsr() & CPSR_FIA;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
313 #endif /*ARM32*/
314 
315 #ifdef ARM64
/*
 * Terminate the aborting user TA (ARM64): rewrite the saved abort frame
 * so the exception return lands in thread_unwind_user_mode() at EL1
 * with TEE_ERROR_TARGET_DEAD as the result.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	/* x1/x2: presumably panic flag and panic code arguments to
	 * thread_unwind_user_mode() - confirm against its prototype */
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	/* Resume on the kernel stack saved when entering user mode */
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	/* Preserve the AArch32-layout A/I/F bits of the aborted context */
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
334 #endif /*ARM64*/
335 
336 #ifdef CFG_WITH_VFP
337 static void handle_user_ta_vfp(void)
338 {
339 	struct tee_ta_session *s;
340 
341 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
342 		panic();
343 
344 	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
345 }
346 #endif /*CFG_WITH_VFP*/
347 
348 #ifdef CFG_WITH_USER_TA
349 #ifdef ARM32
350 /* Returns true if the exception originated from user mode */
351 bool abort_is_user_exception(struct abort_info *ai)
352 {
353 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
354 }
355 #endif /*ARM32*/
356 
357 #ifdef ARM64
358 /* Returns true if the exception originated from user mode */
359 bool abort_is_user_exception(struct abort_info *ai)
360 {
361 	uint32_t spsr = ai->regs->spsr;
362 
363 	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
364 		return true;
365 	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
366 	    SPSR_64_MODE_EL0)
367 		return true;
368 	return false;
369 }
370 #endif /*ARM64*/
371 #else /*CFG_WITH_USER_TA*/
/* Without user TA support no abort can come from user mode */
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
376 #endif /*CFG_WITH_USER_TA*/
377 
378 #ifdef ARM32
379 /* Returns true if the exception originated from abort mode */
380 static bool is_abort_in_abort_handler(struct abort_info *ai)
381 {
382 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
383 }
384 #endif /*ARM32*/
385 
386 #ifdef ARM64
/*
 * Returns true if the exception originated from abort mode.
 * AArch64 has no separate abort mode, so this is always false.
 */
static bool is_abort_in_abort_handler(struct abort_info *ai __unused)
{
	return false;
}
392 #endif /*ARM64*/
393 
394 
395 #if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
396 #ifdef ARM32
397 
/*
 * Instruction-encoding masks used to recognize VFP/Advanced SIMD
 * instructions in an undef abort so VFP can be enabled lazily.
 *
 * T32_INSTR() builds a 32-bit image of a Thumb2 instruction from its
 * two halfwords: w1 occupies the low 16 bits, w0 the high 16 bits.
 */
#define T32_INSTR(w1, w0) \
	((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))

/* Thumb2: 32-bit register transfer between core and VFP registers */
#define T32_VTRANS32_MASK	T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
#define T32_VTRANS32_VAL	T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)

/* Thumb2: 64-bit register transfer between core and VFP registers */
#define T32_VTRANS64_MASK	T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
#define T32_VTRANS64_VAL	T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)

/* Thumb2: Advanced SIMD element/structure load/store */
#define T32_VLDST_MASK		T32_INSTR((0xff << 8) | (1 << 4), 0)
#define T32_VLDST_VAL		T32_INSTR( 0xf9 << 8            , 0)

/* Thumb2: VFP/SIMD extension register load/store */
#define T32_VXLDST_MASK		T32_INSTR(0xfc << 8, 7 << 9)
#define T32_VXLDST_VAL		T32_INSTR(0xec << 8, 5 << 9)

/* Thumb2: VFP/SIMD data processing */
#define T32_VPROC_MASK		T32_INSTR(0xef << 8, 0)
#define T32_VPROC_VAL		T32_VPROC_MASK

#define A32_INSTR(x)		((uint32_t)(x))

/* ARM: 32-bit register transfer between core and VFP registers */
#define A32_VTRANS32_MASK	A32_INSTR(SHIFT_U32(0xf, 24) | \
					  SHIFT_U32(7, 9) | BIT32(4))
#define A32_VTRANS32_VAL	A32_INSTR(SHIFT_U32(0xe, 24) | \
					  SHIFT_U32(5, 9) | BIT32(4))

/* ARM: 64-bit register transfer between core and VFP registers */
#define A32_VTRANS64_MASK	A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
#define A32_VTRANS64_VAL	A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))

/* ARM: SIMD element/structure load/store and VFP/SIMD ext load/store */
#define A32_VLDST_MASK		A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
#define A32_VLDST_VAL		A32_INSTR(SHIFT_U32(0xf4, 24))
#define A32_VXLDST_MASK		A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
#define A32_VXLDST_VAL		A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))

/* ARM: VFP/SIMD data processing */
#define A32_VPROC_MASK		A32_INSTR(SHIFT_U32(0x7f, 25))
#define A32_VPROC_VAL		A32_INSTR(SHIFT_U32(0x79, 25))

/*
 * Returns true if the undef abort was caused by executing a VFP/SIMD
 * instruction while VFP was disabled (lazy VFP enabling for user TAs).
 */
static bool is_vfp_fault(struct abort_info *ai)
{
	TEE_Result res;
	uint32_t instr;

	/* Only undef aborts with VFP currently off can be VFP traps */
	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/* Fetch the faulting instruction from user space */
	res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
	if (res != TEE_SUCCESS)
		return false;

	if (ai->regs->spsr & CPSR_T) {
		/* Thumb mode */
		return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
		       ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
		       ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
		       ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
		       ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
	} else {
		/* ARM mode */
		return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
		       ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
		       ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
		       ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
		       ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
	}
}
462 #endif /*ARM32*/
463 
464 #ifdef ARM64
465 static bool is_vfp_fault(struct abort_info *ai)
466 {
467 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
468 	case ESR_EC_FP_ASIMD:
469 	case ESR_EC_AARCH32_FP:
470 	case ESR_EC_AARCH64_FP:
471 		return true;
472 	default:
473 		return false;
474 	}
475 }
476 #endif /*ARM64*/
477 #else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
/* No VFP or no user TA support: never treat an abort as a VFP trap */
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
482 #endif  /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
483 
484 static enum fault_type get_fault_type(struct abort_info *ai)
485 {
486 	if (abort_is_user_exception(ai)) {
487 		if (is_vfp_fault(ai))
488 			return FAULT_TYPE_USER_TA_VFP;
489 #ifndef CFG_WITH_PAGER
490 		return FAULT_TYPE_USER_TA_PANIC;
491 #endif
492 	}
493 
494 	if (is_abort_in_abort_handler(ai)) {
495 		abort_print_error(ai);
496 		panic("[abort] abort in abort handler (trap CPU)");
497 	}
498 
499 	if (ai->abort_type == ABORT_TYPE_UNDEF) {
500 		if (abort_is_user_exception(ai))
501 			return FAULT_TYPE_USER_TA_PANIC;
502 		abort_print_error(ai);
503 		panic("[abort] undefined abort (trap CPU)");
504 	}
505 
506 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
507 	case CORE_MMU_FAULT_ALIGNMENT:
508 		if (abort_is_user_exception(ai))
509 			return FAULT_TYPE_USER_TA_PANIC;
510 		abort_print_error(ai);
511 		panic("[abort] alignement fault!  (trap CPU)");
512 		break;
513 
514 	case CORE_MMU_FAULT_ACCESS_BIT:
515 		if (abort_is_user_exception(ai))
516 			return FAULT_TYPE_USER_TA_PANIC;
517 		abort_print_error(ai);
518 		panic("[abort] access bit fault!  (trap CPU)");
519 		break;
520 
521 	case CORE_MMU_FAULT_DEBUG_EVENT:
522 		abort_print(ai);
523 		DMSG("[abort] Ignoring debug event!");
524 		return FAULT_TYPE_IGNORE;
525 
526 	case CORE_MMU_FAULT_TRANSLATION:
527 	case CORE_MMU_FAULT_WRITE_PERMISSION:
528 	case CORE_MMU_FAULT_READ_PERMISSION:
529 		return FAULT_TYPE_PAGEABLE;
530 
531 	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
532 		abort_print(ai);
533 		DMSG("[abort] Ignoring async external abort!");
534 		return FAULT_TYPE_IGNORE;
535 
536 	case CORE_MMU_FAULT_OTHER:
537 	default:
538 		abort_print(ai);
539 		DMSG("[abort] Unhandled fault!");
540 		return FAULT_TYPE_IGNORE;
541 	}
542 }
543 
/*
 * Entry point called from the assembly abort/exception vector.
 * Collects abort state, classifies the fault and either ignores it,
 * lets the pager try to resolve it, lazily enables VFP for the TA, or
 * terminates the offending user TA.
 */
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		print_user_abort(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		/* Save/restore kernel VFP state around the pager call */
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			/* An unresolved core fault is unrecoverable */
			if (!abort_is_user_exception(&ai)) {
				abort_print_error(&ai);
				panic("unhandled pageable abort");
			}
			/* Unresolved user fault: terminate the TA */
			print_user_abort(&ai);
			DMSG("[abort] abort in User mode (TA will panic)");
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}
583