xref: /optee_os/core/arch/arm/kernel/abort.c (revision b1d7375c01ec8bcbf3561d27425d320afed23bce)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <kernel/abort.h>
8 #include <kernel/linker.h>
9 #include <kernel/misc.h>
10 #include <kernel/panic.h>
11 #include <kernel/tee_ta_manager.h>
12 #include <kernel/unwind.h>
13 #include <kernel/user_ta.h>
14 #include <mm/core_mmu.h>
15 #include <mm/mobj.h>
16 #include <mm/tee_pager.h>
17 #include <tee/tee_svc.h>
18 #include <trace.h>
19 
20 #include "thread_private.h"
21 
/* Classification of an abort, used by abort_handler() to pick a handler */
enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,	/* unrecoverable user TA fault: panic the TA */
	FAULT_TYPE_USER_TA_VFP,		/* TA used VFP while disabled: enable it lazily */
	FAULT_TYPE_PAGEABLE,		/* candidate for tee_pager_handle_fault() */
	FAULT_TYPE_IGNORE,		/* logged, then ignored */
};
28 
29 #ifdef CFG_UNWIND
30 
31 static void get_current_ta_exidx(uaddr_t *exidx, size_t *exidx_sz)
32 {
33 	struct tee_ta_session *s;
34 	struct user_ta_ctx *utc;
35 
36 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
37 		panic();
38 
39 	utc = to_user_ta_ctx(s->ctx);
40 
41 	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
42 	assert(utc->is_32bit);
43 
44 	*exidx = utc->exidx_start; /* NULL if TA has no unwind tables */
45 	if (*exidx)
46 		*exidx += utc->load_addr;
47 	*exidx_sz = utc->exidx_size;
48 }
49 
50 #ifdef ARM32
51 
/*
 * Kernel or user mode unwind (32-bit execution state).
 */
static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	uaddr_t exidx;
	size_t exidx_sz;
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp;
	uint32_t lr;

	/* Pick the unwind tables of the aborter: TA's or the core's own */
	if (abort_is_user_exception(ai)) {
		get_current_ta_exidx(&exidx, &exidx_sz);
	} else {
		exidx = (vaddr_t)__exidx_start;
		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
	}

	/*
	 * USR and SYS use the SP/LR saved in usr_sp/usr_lr; for other
	 * modes the banked SP/LR of the aborted mode are read directly.
	 */
	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	/* Seed the unwinder with the register state at the abort point */
	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	/* NOTE(review): registers[12] (ip/r12) is left zero by the memset */
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz);
}
98 #else /* ARM32 */
99 
100 static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
101 {
102 	struct unwind_state_arm32 state;
103 	uaddr_t exidx;
104 	size_t exidx_sz;
105 
106 	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
107 	assert(abort_is_user_exception(ai));
108 
109 	get_current_ta_exidx(&exidx, &exidx_sz);
110 
111 	memset(&state, 0, sizeof(state));
112 	state.registers[0] = ai->regs->x0;
113 	state.registers[1] = ai->regs->x1;
114 	state.registers[2] = ai->regs->x2;
115 	state.registers[3] = ai->regs->x3;
116 	state.registers[4] = ai->regs->x4;
117 	state.registers[5] = ai->regs->x5;
118 	state.registers[6] = ai->regs->x6;
119 	state.registers[7] = ai->regs->x7;
120 	state.registers[8] = ai->regs->x8;
121 	state.registers[9] = ai->regs->x9;
122 	state.registers[10] = ai->regs->x10;
123 	state.registers[11] = ai->regs->x11;
124 
125 	state.registers[13] = ai->regs->x13;
126 	state.registers[14] = ai->regs->x14;
127 	state.registers[15] = ai->pc;
128 
129 	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz);
130 }
131 #endif /* ARM32 */
132 #ifdef ARM64
/* Kernel or user mode unwind (64-bit execution state) */
static void __print_stack_unwind_arm64(struct abort_info *ai)
{
	struct unwind_state_arm64 state;
	uaddr_t stack;
	size_t stack_size;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		/*
		 * User stack: assumes the TA stack is the first mapped
		 * region (regions[0]) — NOTE(review): verify against the
		 * user TA mapping layout.
		 */
		stack = (uaddr_t)utc->mmu->regions[0].va;
		stack_size = utc->mobj_stack->size;
	} else {
		/* Kernel stack of the current thread */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
	}

	/* Frame-pointer (x29) based unwind starting at the abort location */
	memset(&state, 0, sizeof(state));
	state.pc = ai->regs->elr;
	state.fp = ai->regs->x29;

	print_stack_arm64(TRACE_ERROR, &state, stack, stack_size);
}
163 #else
164 static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
165 {
166 
167 }
168 #endif /*ARM64*/
169 #else /* CFG_UNWIND */
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
	/* Stack unwinding support not compiled in (CFG_UNWIND=n) */
}
173 
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
	/* Stack unwinding support not compiled in (CFG_UNWIND=n) */
}
177 #endif /* CFG_UNWIND */
178 
179 static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
180 {
181 	if (abort_type == ABORT_TYPE_DATA)
182 		return "data";
183 	if (abort_type == ABORT_TYPE_PREFETCH)
184 		return "prefetch";
185 	return "undef";
186 }
187 
188 static __maybe_unused const char *fault_to_str(uint32_t abort_type,
189 			uint32_t fault_descr)
190 {
191 	/* fault_descr is only valid for data or prefetch abort */
192 	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
193 		return "";
194 
195 	switch (core_mmu_get_fault_type(fault_descr)) {
196 	case CORE_MMU_FAULT_ALIGNMENT:
197 		return " (alignment fault)";
198 	case CORE_MMU_FAULT_TRANSLATION:
199 		return " (translation fault)";
200 	case CORE_MMU_FAULT_READ_PERMISSION:
201 		return " (read permission fault)";
202 	case CORE_MMU_FAULT_WRITE_PERMISSION:
203 		return " (write permission fault)";
204 	default:
205 		return "";
206 	}
207 }
208 
/*
 * Dump fault information and the full saved register state of the abort
 * to the error trace. @ctx is a short origin prefix ("Core"/"User TA").
 */
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp;
	__maybe_unused uint32_t lr;

	/* USR/SYS SP/LR come from the saved frame; other modes are banked */
	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}
#endif /*ARM32*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}
287 
/* Whether TEE core itself executes in 32-bit state */
#if defined(ARM32)
static const bool kernel_is32bit = true;
#elif defined(ARM64)
static const bool kernel_is32bit;	/* zero-initialized, i.e. false */
#endif
293 
/*
 * Print abort info and (optionally) stack dump to the console
 * @ai user-mode or kernel-mode abort info. If user mode, the current session
 * must be the one of the TA that caused the abort.
 * @stack_dump true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	bool is_32bit;
	bool paged_ta_abort = false;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		is_32bit = utc->is_32bit;
#ifdef CFG_PAGED_USER_TA
		/*
		 * It is not safe to unwind paged TAs that received an abort,
		 * because we currently don't handle page faults that could
		 * occur when accessing the TA memory (unwind tables for
		 * instance).
		 */
		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
			paged_ta_abort = true;
#endif
		/* No register dump for ABORT_TYPE_TA_PANIC */
		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
			__print_abort_info(ai, "User TA");
		tee_ta_dump_current();
	} else {
		is_32bit = kernel_is32bit;

		__print_abort_info(ai, "Core");
	}

	if (!stack_dump || paged_ta_abort)
		return;

	/* Unwind using the execution state of whatever aborted */
	if (is_32bit)
		__print_stack_unwind_arm32(ai);
	else
		__print_stack_unwind_arm64(ai);
}
341 
342 void abort_print(struct abort_info *ai)
343 {
344 	__abort_print(ai, false);
345 }
346 
347 void abort_print_error(struct abort_info *ai)
348 {
349 	__abort_print(ai, true);
350 }
351 
352 #ifdef ARM32
353 static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
354 		struct abort_info *ai)
355 {
356 	switch (abort_type) {
357 	case ABORT_TYPE_DATA:
358 		ai->fault_descr = read_dfsr();
359 		ai->va = read_dfar();
360 		break;
361 	case ABORT_TYPE_PREFETCH:
362 		ai->fault_descr = read_ifsr();
363 		ai->va = read_ifar();
364 		break;
365 	default:
366 		ai->fault_descr = 0;
367 		ai->va = regs->elr;
368 		break;
369 	}
370 	ai->abort_type = abort_type;
371 	ai->pc = regs->elr;
372 	ai->regs = regs;
373 }
374 #endif /*ARM32*/
375 
376 #ifdef ARM64
/*
 * Fill @ai from the saved abort frame; abort type and fault address are
 * derived from ESR_EL1, so the abort_type parameter is unused on ARM64.
 */
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		/* Instruction abort: fault address in FAR_EL1 */
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		/* Data abort or SP alignment fault: address in FAR_EL1 */
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		/* Every other exception class is treated as undef */
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
400 #endif /*ARM64*/
401 
402 #ifdef ARM32
/*
 * Rewrite the saved exception state so the abort return resumes in
 * thread_unwind_user_mode() in SVC mode with TEE_ERROR_TARGET_DEAD.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	/* r1, r2: further arguments to thread_unwind_user_mode() */
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	/* Keep only the exception mask bits, then switch to SVC mode */
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
422 #endif /*ARM32*/
423 
424 #ifdef ARM64
/*
 * Rewrite the saved exception state so the exception return resumes in
 * thread_unwind_user_mode() at EL1 with TEE_ERROR_TARGET_DEAD.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	/* x1, x2: further arguments to thread_unwind_user_mode() */
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	/* Carry the AIF exception masks over from the aborted context */
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
443 #endif /*ARM64*/
444 
445 #ifdef CFG_WITH_VFP
446 static void handle_user_ta_vfp(void)
447 {
448 	struct tee_ta_session *s;
449 
450 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
451 		panic();
452 
453 	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
454 }
455 #endif /*CFG_WITH_VFP*/
456 
457 #ifdef CFG_WITH_USER_TA
458 #ifdef ARM32
459 /* Returns true if the exception originated from user mode */
460 bool abort_is_user_exception(struct abort_info *ai)
461 {
462 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
463 }
464 #endif /*ARM32*/
465 
466 #ifdef ARM64
467 /* Returns true if the exception originated from user mode */
468 bool abort_is_user_exception(struct abort_info *ai)
469 {
470 	uint32_t spsr = ai->regs->spsr;
471 
472 	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
473 		return true;
474 	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
475 	    SPSR_64_MODE_EL0)
476 		return true;
477 	return false;
478 }
479 #endif /*ARM64*/
480 #else /*CFG_WITH_USER_TA*/
/* Without user TA support no abort can originate from user mode */
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
485 #endif /*CFG_WITH_USER_TA*/
486 
487 #if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
488 #ifdef ARM32
489 
/*
 * Instruction-encoding masks used by is_vfp_fault() to decide whether an
 * Undef abort was caused by a VFP/Advanced SIMD instruction. T32_* match
 * Thumb-2 encodings, A32_* match ARM encodings; each *_MASK selects the
 * fixed bits of an instruction class and *_VAL is the expected pattern.
 */

/* Combine the two Thumb-2 halfwords in their in-memory order */
#define T32_INSTR(w1, w0) \
	((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))

#define T32_VTRANS32_MASK	T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
#define T32_VTRANS32_VAL	T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)

#define T32_VTRANS64_MASK	T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
#define T32_VTRANS64_VAL	T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)

#define T32_VLDST_MASK		T32_INSTR((0xff << 8) | (1 << 4), 0)
#define T32_VLDST_VAL		T32_INSTR( 0xf9 << 8            , 0)

#define T32_VXLDST_MASK		T32_INSTR(0xfc << 8, 7 << 9)
#define T32_VXLDST_VAL		T32_INSTR(0xec << 8, 5 << 9)

#define T32_VPROC_MASK		T32_INSTR(0xef << 8, 0)
#define T32_VPROC_VAL		T32_VPROC_MASK

/* A32 instructions are plain 32-bit words, no halfword swapping needed */
#define A32_INSTR(x)		((uint32_t)(x))

#define A32_VTRANS32_MASK	A32_INSTR(SHIFT_U32(0xf, 24) | \
					  SHIFT_U32(7, 9) | BIT32(4))
#define A32_VTRANS32_VAL	A32_INSTR(SHIFT_U32(0xe, 24) | \
					  SHIFT_U32(5, 9) | BIT32(4))

#define A32_VTRANS64_MASK	A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
#define A32_VTRANS64_VAL	A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))

#define A32_VLDST_MASK		A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
#define A32_VLDST_VAL		A32_INSTR(SHIFT_U32(0xf4, 24))
#define A32_VXLDST_MASK		A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
#define A32_VXLDST_VAL		A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))

#define A32_VPROC_MASK		A32_INSTR(SHIFT_U32(0x7f, 25))
#define A32_VPROC_VAL		A32_INSTR(SHIFT_U32(0x79, 25))
525 
/*
 * Return true if the abort was an Undef caused by executing a VFP or
 * Advanced SIMD instruction while VFP was disabled. The faulting
 * instruction is fetched from user space and matched against the
 * T32_*/A32_* encoding patterns.
 */
static bool is_vfp_fault(struct abort_info *ai)
{
	TEE_Result res;
	uint32_t instr;

	/* Only an Undef abort with VFP currently off can be a VFP trap */
	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/* Fetch the faulting instruction from user memory at ai->pc */
	res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
	if (res != TEE_SUCCESS)
		return false;

	if (ai->regs->spsr & CPSR_T) {
		/* Thumb mode */
		return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
		       ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
		       ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
		       ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
		       ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
	} else {
		/* ARM mode */
		return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
		       ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
		       ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
		       ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
		       ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
	}
}
554 #endif /*ARM32*/
555 
556 #ifdef ARM64
557 static bool is_vfp_fault(struct abort_info *ai)
558 {
559 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
560 	case ESR_EC_FP_ASIMD:
561 	case ESR_EC_AARCH32_FP:
562 	case ESR_EC_AARCH64_FP:
563 		return true;
564 	default:
565 		return false;
566 	}
567 }
568 #endif /*ARM64*/
569 #else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
/* VFP or user TA support disabled: no abort is ever a VFP fault */
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
574 #endif  /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
575 
576 static enum fault_type get_fault_type(struct abort_info *ai)
577 {
578 	if (abort_is_user_exception(ai)) {
579 		if (is_vfp_fault(ai))
580 			return FAULT_TYPE_USER_TA_VFP;
581 #ifndef CFG_WITH_PAGER
582 		return FAULT_TYPE_USER_TA_PANIC;
583 #endif
584 	}
585 
586 	if (thread_is_from_abort_mode()) {
587 		abort_print_error(ai);
588 		panic("[abort] abort in abort handler (trap CPU)");
589 	}
590 
591 	if (ai->abort_type == ABORT_TYPE_UNDEF) {
592 		if (abort_is_user_exception(ai))
593 			return FAULT_TYPE_USER_TA_PANIC;
594 		abort_print_error(ai);
595 		panic("[abort] undefined abort (trap CPU)");
596 	}
597 
598 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
599 	case CORE_MMU_FAULT_ALIGNMENT:
600 		if (abort_is_user_exception(ai))
601 			return FAULT_TYPE_USER_TA_PANIC;
602 		abort_print_error(ai);
603 		panic("[abort] alignement fault!  (trap CPU)");
604 		break;
605 
606 	case CORE_MMU_FAULT_ACCESS_BIT:
607 		if (abort_is_user_exception(ai))
608 			return FAULT_TYPE_USER_TA_PANIC;
609 		abort_print_error(ai);
610 		panic("[abort] access bit fault!  (trap CPU)");
611 		break;
612 
613 	case CORE_MMU_FAULT_DEBUG_EVENT:
614 		abort_print(ai);
615 		DMSG("[abort] Ignoring debug event!");
616 		return FAULT_TYPE_IGNORE;
617 
618 	case CORE_MMU_FAULT_TRANSLATION:
619 	case CORE_MMU_FAULT_WRITE_PERMISSION:
620 	case CORE_MMU_FAULT_READ_PERMISSION:
621 		return FAULT_TYPE_PAGEABLE;
622 
623 	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
624 		abort_print(ai);
625 		DMSG("[abort] Ignoring async external abort!");
626 		return FAULT_TYPE_IGNORE;
627 
628 	case CORE_MMU_FAULT_OTHER:
629 	default:
630 		abort_print(ai);
631 		DMSG("[abort] Unhandled fault!");
632 		return FAULT_TYPE_IGNORE;
633 	}
634 }
635 
/*
 * Main abort entry point, called from the exception vector with the saved
 * register state. Classifies the fault, then either ignores it, panics
 * the current user TA, enables VFP for the TA, or hands the fault to the
 * pager.
 */
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		abort_print_error(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		/* Save/restore kernel VFP state around the pager call */
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			abort_print_error(&ai);
			/* An unresolved kernel-mode fault is fatal */
			if (!abort_is_user_exception(&ai))
				panic("unhandled pageable abort");
			DMSG("[abort] abort in User mode (TA will panic)");
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}
673