xref: /optee_os/core/arch/arm/kernel/abort.c (revision 78b7c7c7653f8bff42fe44d31a79d7f6bbfd4d47)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015, Linaro Limited
4  * All rights reserved.
5  */
6 
7 #include <arm.h>
8 #include <kernel/abort.h>
9 #include <kernel/linker.h>
10 #include <kernel/misc.h>
11 #include <kernel/panic.h>
12 #include <kernel/tee_ta_manager.h>
13 #include <kernel/unwind.h>
14 #include <kernel/user_ta.h>
15 #include <mm/core_mmu.h>
16 #include <mm/mobj.h>
17 #include <mm/tee_pager.h>
18 #include <tee/tee_svc.h>
19 #include <trace.h>
20 
21 #include "thread_private.h"
22 
/*
 * Classification of an abort, used by abort_handler() to select the
 * recovery action.
 */
enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,	/* Unrecoverable user TA fault */
	FAULT_TYPE_USER_TA_VFP,		/* TA used VFP while it was disabled */
	FAULT_TYPE_PAGEABLE,		/* May be resolved by the pager */
	FAULT_TYPE_IGNORE,		/* Logged, then execution resumes */
};
29 
30 #ifdef CFG_UNWIND
31 
/*
 * Look up the .ARM.exidx unwind table of the user TA currently active on
 * this thread.
 *
 * @exidx    [out] virtual address of the table, 0 if the TA has none
 * @exidx_sz [out] size of the table in bytes
 *
 * Panics if no user TA session is active on the current thread.
 */
static void get_current_ta_exidx(uaddr_t *exidx, size_t *exidx_sz)
{
	struct tee_ta_session *s;
	struct user_ta_ctx *utc;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	utc = to_user_ta_ctx(s->ctx);

	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
	assert(utc->is_32bit);

	*exidx = utc->exidx_start; /* NULL if TA has no unwind tables */
	if (*exidx)
		*exidx += utc->load_addr; /* exidx_start is load-relative */
	*exidx_sz = utc->exidx_size;
}
50 
51 #ifdef ARM32
52 
53 /*
54  * Kernel or user mode unwind (32-bit execution state).
55  */
56 static void __print_stack_unwind_arm32(struct abort_info *ai)
57 {
58 	struct unwind_state_arm32 state;
59 	uaddr_t exidx;
60 	size_t exidx_sz;
61 	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
62 	uint32_t sp;
63 	uint32_t lr;
64 
65 	if (abort_is_user_exception(ai)) {
66 		get_current_ta_exidx(&exidx, &exidx_sz);
67 	} else {
68 		exidx = (vaddr_t)__exidx_start;
69 		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
70 	}
71 
72 	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
73 		sp = ai->regs->usr_sp;
74 		lr = ai->regs->usr_lr;
75 	} else {
76 		sp = read_mode_sp(mode);
77 		lr = read_mode_lr(mode);
78 	}
79 
80 	memset(&state, 0, sizeof(state));
81 	state.registers[0] = ai->regs->r0;
82 	state.registers[1] = ai->regs->r1;
83 	state.registers[2] = ai->regs->r2;
84 	state.registers[3] = ai->regs->r3;
85 	state.registers[4] = ai->regs->r4;
86 	state.registers[5] = ai->regs->r5;
87 	state.registers[6] = ai->regs->r6;
88 	state.registers[7] = ai->regs->r7;
89 	state.registers[8] = ai->regs->r8;
90 	state.registers[9] = ai->regs->r9;
91 	state.registers[10] = ai->regs->r10;
92 	state.registers[11] = ai->regs->r11;
93 	state.registers[13] = sp;
94 	state.registers[14] = lr;
95 	state.registers[15] = ai->pc;
96 
97 	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz);
98 }
99 #else /* ARM32 */
100 
101 static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
102 {
103 	struct unwind_state_arm32 state;
104 	uaddr_t exidx;
105 	size_t exidx_sz;
106 
107 	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
108 	assert(abort_is_user_exception(ai));
109 
110 	get_current_ta_exidx(&exidx, &exidx_sz);
111 
112 	memset(&state, 0, sizeof(state));
113 	state.registers[0] = ai->regs->x0;
114 	state.registers[1] = ai->regs->x1;
115 	state.registers[2] = ai->regs->x2;
116 	state.registers[3] = ai->regs->x3;
117 	state.registers[4] = ai->regs->x4;
118 	state.registers[5] = ai->regs->x5;
119 	state.registers[6] = ai->regs->x6;
120 	state.registers[7] = ai->regs->x7;
121 	state.registers[8] = ai->regs->x8;
122 	state.registers[9] = ai->regs->x9;
123 	state.registers[10] = ai->regs->x10;
124 	state.registers[11] = ai->regs->x11;
125 
126 	state.registers[13] = ai->regs->x13;
127 	state.registers[14] = ai->regs->x14;
128 	state.registers[15] = ai->pc;
129 
130 	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz);
131 }
132 #endif /* ARM32 */
133 #ifdef ARM64
134 /* Kernel or user mode unwind (64-bit execution state) */
/*
 * Kernel or user mode unwind (64-bit execution state).
 * Frame-pointer (x29) based unwind starting at the abort location,
 * bounded by the stack of the aborted context.
 */
static void __print_stack_unwind_arm64(struct abort_info *ai)
{
	struct unwind_state_arm64 state;
	uaddr_t stack;
	size_t stack_size;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		/*
		 * User stack.
		 * NOTE(review): assumes mmu->regions[0] maps the stack —
		 * confirm against the user TA mapping layout.
		 */
		stack = (uaddr_t)utc->mmu->regions[0].va;
		stack_size = utc->mobj_stack->size;
	} else {
		/* Kernel stack */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
	}

	memset(&state, 0, sizeof(state));
	state.pc = ai->regs->elr;
	state.fp = ai->regs->x29;

	print_stack_arm64(TRACE_ERROR, &state, stack, stack_size);
}
164 #else
/* 32-bit kernel without ARM64 support: no 64-bit state to unwind */
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{

}
169 #endif /*ARM64*/
170 #else /* CFG_UNWIND */
/* Stack unwinding disabled (CFG_UNWIND=n): no-op */
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
}
174 
/* Stack unwinding disabled (CFG_UNWIND=n): no-op */
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
178 #endif /* CFG_UNWIND */
179 
180 static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
181 {
182 	if (abort_type == ABORT_TYPE_DATA)
183 		return "data";
184 	if (abort_type == ABORT_TYPE_PREFETCH)
185 		return "prefetch";
186 	return "undef";
187 }
188 
/*
 * Map the MMU fault status to a descriptive suffix for the abort banner.
 * Returns an empty string when the fault type carries no extra detail.
 */
static __maybe_unused const char *fault_to_str(uint32_t abort_type,
			uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	default:
		return "";
	}
}
209 
/*
 * Dump the abort type, faulting address and the full register file of
 * the aborted context to the error trace.
 *
 * @ai  abort info collected by set_abort_info()
 * @ctx printable origin of the abort (e.g. "User TA" or "Core")
 */
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp;
	__maybe_unused uint32_t lr;

	/* sp and lr are banked: fetch the copies of the aborted mode */
	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}
#endif /*ARM32*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}
288 
/*
 * Execution state of the core itself; selects the unwinder used for
 * kernel-mode aborts in __abort_print().
 */
#if defined(ARM32)
static const bool kernel_is32bit = true;
#elif defined(ARM64)
static const bool kernel_is32bit;
#endif
294 
/*
 * Print abort info and (optionally) stack dump to the console.
 * @ai user-mode or kernel-mode abort info. If user mode, the current session
 * must be the one of the TA that caused the abort.
 * @stack_dump true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	bool is_32bit;
	bool paged_ta_abort = false;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		is_32bit = utc->is_32bit;
#ifdef CFG_PAGED_USER_TA
		/*
		 * It is not safe to unwind paged TAs that received an abort,
		 * because we currently don't handle page faults that could
		 * occur when accessing the TA memory (unwind tables for
		 * instance).
		 */
		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
			paged_ta_abort = true;
#endif
		/* Skip the register dump for deliberate TA panics */
		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
			__print_abort_info(ai, "User TA");
		tee_ta_dump_current();
	} else {
		is_32bit = kernel_is32bit;

		__print_abort_info(ai, "Core");
	}

	if (!stack_dump || paged_ta_abort)
		return;

	/* Pick the unwinder matching the aborted execution state */
	if (is_32bit)
		__print_stack_unwind_arm32(ai);
	else
		__print_stack_unwind_arm64(ai);
}
342 
/* Print abort info without a stack trace */
void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}
347 
/* Print abort info including a stack trace */
void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}
352 
353 #ifdef ARM32
/*
 * Fill in @ai from the fault status/address registers: DFSR/DFAR for
 * data aborts, IFSR/IFAR for prefetch aborts. Undefined-instruction
 * aborts carry no fault status; the faulting address is the saved pc.
 */
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
375 #endif /*ARM32*/
376 
377 #ifdef ARM64
/*
 * Fill in @ai by decoding ESR_EL1. The abort type supplied by the
 * exception vector is ignored: the ESR exception class distinguishes
 * instruction aborts, data aborts and SP alignment faults itself.
 */
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		/* Anything else is reported as an undefined abort at pc */
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
401 #endif /*ARM64*/
402 
403 #ifdef ARM32
/*
 * Rewrite the saved exception state so that the exception return lands
 * in thread_unwind_user_mode() in SVC mode, with TEE_ERROR_TARGET_DEAD
 * in r0, instead of resuming the faulting TA.
 * NOTE(review): r1/r2 appear to be the "panicked"/panic-code arguments
 * of thread_unwind_user_mode() — confirm against its prototype.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	/* Keep only the F/I/A mask bits, then force SVC mode */
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
423 #endif /*ARM32*/
424 
425 #ifdef ARM64
/*
 * Rewrite the saved exception state so that the exception return lands
 * in thread_unwind_user_mode() at EL1 (on the saved kernel stack), with
 * TEE_ERROR_TARGET_DEAD in x0, instead of resuming the faulting TA.
 * NOTE(review): x1/x2 appear to be the "panicked"/panic-code arguments
 * of thread_unwind_user_mode() — confirm against its prototype.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	/* Carry the AIF mask bits over from the aborted context */
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
444 #endif /*ARM64*/
445 
446 #ifdef CFG_WITH_VFP
447 static void handle_user_ta_vfp(void)
448 {
449 	struct tee_ta_session *s;
450 
451 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
452 		panic();
453 
454 	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
455 }
456 #endif /*CFG_WITH_VFP*/
457 
458 #ifdef CFG_WITH_USER_TA
459 #ifdef ARM32
460 /* Returns true if the exception originated from user mode */
461 bool abort_is_user_exception(struct abort_info *ai)
462 {
463 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
464 }
465 #endif /*ARM32*/
466 
467 #ifdef ARM64
468 /* Returns true if the exception originated from user mode */
469 bool abort_is_user_exception(struct abort_info *ai)
470 {
471 	uint32_t spsr = ai->regs->spsr;
472 
473 	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
474 		return true;
475 	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
476 	    SPSR_64_MODE_EL0)
477 		return true;
478 	return false;
479 }
480 #endif /*ARM64*/
481 #else /*CFG_WITH_USER_TA*/
/* No user TAs compiled in: aborts can only originate from the core */
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
486 #endif /*CFG_WITH_USER_TA*/
487 
488 #if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
489 #ifdef ARM32
490 
/*
 * Mask/value pairs used by is_vfp_fault() to recognize VFP/Advanced
 * SIMD instruction encodings (T32 = Thumb, A32 = ARM). T32_INSTR()
 * packs the two Thumb halfwords as they appear in memory, first
 * halfword in the low 16 bits.
 * NOTE(review): the pairs look like the FP register transfer,
 * load/store and data-processing encoding classes — verify against the
 * ARM Architecture Reference Manual.
 */
#define T32_INSTR(w1, w0) \
	((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))

#define T32_VTRANS32_MASK	T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
#define T32_VTRANS32_VAL	T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)

#define T32_VTRANS64_MASK	T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
#define T32_VTRANS64_VAL	T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)

#define T32_VLDST_MASK		T32_INSTR((0xff << 8) | (1 << 4), 0)
#define T32_VLDST_VAL		T32_INSTR( 0xf9 << 8            , 0)

#define T32_VXLDST_MASK		T32_INSTR(0xfc << 8, 7 << 9)
#define T32_VXLDST_VAL		T32_INSTR(0xec << 8, 5 << 9)

#define T32_VPROC_MASK		T32_INSTR(0xef << 8, 0)
#define T32_VPROC_VAL		T32_VPROC_MASK

#define A32_INSTR(x)		((uint32_t)(x))

#define A32_VTRANS32_MASK	A32_INSTR(SHIFT_U32(0xf, 24) | \
					  SHIFT_U32(7, 9) | BIT32(4))
#define A32_VTRANS32_VAL	A32_INSTR(SHIFT_U32(0xe, 24) | \
					  SHIFT_U32(5, 9) | BIT32(4))

#define A32_VTRANS64_MASK	A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
#define A32_VTRANS64_VAL	A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))

#define A32_VLDST_MASK		A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
#define A32_VLDST_VAL		A32_INSTR(SHIFT_U32(0xf4, 24))
#define A32_VXLDST_MASK		A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
#define A32_VXLDST_VAL		A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))

#define A32_VPROC_MASK		A32_INSTR(SHIFT_U32(0x7f, 25))
#define A32_VPROC_VAL		A32_INSTR(SHIFT_U32(0x79, 25))
526 
/*
 * Returns true if the undefined-instruction trap at @ai->pc was caused
 * by a VFP/SIMD instruction executed while VFP was disabled (and can
 * therefore be fixed by enabling VFP), false otherwise.
 */
static bool is_vfp_fault(struct abort_info *ai)
{
	TEE_Result res;
	uint32_t instr;

	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/* Fetch the faulting instruction from user memory */
	res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
	if (res != TEE_SUCCESS)
		return false;

	if (ai->regs->spsr & CPSR_T) {
		/* Thumb mode */
		return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
		       ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
		       ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
		       ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
		       ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
	} else {
		/* ARM mode */
		return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
		       ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
		       ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
		       ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
		       ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
	}
}
555 #endif /*ARM32*/
556 
557 #ifdef ARM64
558 static bool is_vfp_fault(struct abort_info *ai)
559 {
560 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
561 	case ESR_EC_FP_ASIMD:
562 	case ESR_EC_AARCH32_FP:
563 	case ESR_EC_AARCH64_FP:
564 		return true;
565 	default:
566 		return false;
567 	}
568 }
569 #endif /*ARM64*/
570 #else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
/* VFP or user TA support not compiled in: never a fixable VFP fault */
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
575 #endif  /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
576 
577 static enum fault_type get_fault_type(struct abort_info *ai)
578 {
579 	if (abort_is_user_exception(ai)) {
580 		if (is_vfp_fault(ai))
581 			return FAULT_TYPE_USER_TA_VFP;
582 #ifndef CFG_WITH_PAGER
583 		return FAULT_TYPE_USER_TA_PANIC;
584 #endif
585 	}
586 
587 	if (thread_is_from_abort_mode()) {
588 		abort_print_error(ai);
589 		panic("[abort] abort in abort handler (trap CPU)");
590 	}
591 
592 	if (ai->abort_type == ABORT_TYPE_UNDEF) {
593 		if (abort_is_user_exception(ai))
594 			return FAULT_TYPE_USER_TA_PANIC;
595 		abort_print_error(ai);
596 		panic("[abort] undefined abort (trap CPU)");
597 	}
598 
599 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
600 	case CORE_MMU_FAULT_ALIGNMENT:
601 		if (abort_is_user_exception(ai))
602 			return FAULT_TYPE_USER_TA_PANIC;
603 		abort_print_error(ai);
604 		panic("[abort] alignement fault!  (trap CPU)");
605 		break;
606 
607 	case CORE_MMU_FAULT_ACCESS_BIT:
608 		if (abort_is_user_exception(ai))
609 			return FAULT_TYPE_USER_TA_PANIC;
610 		abort_print_error(ai);
611 		panic("[abort] access bit fault!  (trap CPU)");
612 		break;
613 
614 	case CORE_MMU_FAULT_DEBUG_EVENT:
615 		abort_print(ai);
616 		DMSG("[abort] Ignoring debug event!");
617 		return FAULT_TYPE_IGNORE;
618 
619 	case CORE_MMU_FAULT_TRANSLATION:
620 	case CORE_MMU_FAULT_WRITE_PERMISSION:
621 	case CORE_MMU_FAULT_READ_PERMISSION:
622 		return FAULT_TYPE_PAGEABLE;
623 
624 	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
625 		abort_print(ai);
626 		DMSG("[abort] Ignoring async external abort!");
627 		return FAULT_TYPE_IGNORE;
628 
629 	case CORE_MMU_FAULT_OTHER:
630 	default:
631 		abort_print(ai);
632 		DMSG("[abort] Unhandled fault!");
633 		return FAULT_TYPE_IGNORE;
634 	}
635 }
636 
/*
 * Main abort entry, called from the exception vector with the saved
 * register state of the aborted context. Decodes the fault and either
 * ignores it, hands it to the pager, lazily enables VFP, or panics the
 * offending user TA.
 */
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		abort_print_error(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		/*
		 * NOTE(review): VFP state is saved/restored around the
		 * pager call — presumably the pager may use VFP; confirm.
		 */
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			abort_print_error(&ai);
			/* Kernel faults the pager cannot fix are fatal */
			if (!abort_is_user_exception(&ai))
				panic("unhandled pageable abort");
			DMSG("[abort] abort in User mode (TA will panic)");
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}
674