xref: /optee_os/core/arch/arm/kernel/abort.c (revision aaec75ec87470731e54ff9a1cbf5b72c0d6ee9bd)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <kernel/abort.h>
8 #include <kernel/linker.h>
9 #include <kernel/misc.h>
10 #include <kernel/panic.h>
11 #include <kernel/tee_ta_manager.h>
12 #include <kernel/unwind.h>
13 #include <kernel/user_ta.h>
14 #include <mm/core_mmu.h>
15 #include <mm/mobj.h>
16 #include <mm/tee_pager.h>
17 #include <tee/tee_svc.h>
18 #include <trace.h>
19 
20 #include "thread_private.h"
21 
/*
 * Classification of an abort as decided by get_fault_type() and
 * dispatched on by abort_handler().
 */
enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,	/* fatal user TA fault: TA will be panicked */
	FAULT_TYPE_USER_TA_VFP,		/* user TA trapped on a VFP instruction */
	FAULT_TYPE_PAGEABLE,		/* may be resolved by tee_pager_handle_fault() */
	FAULT_TYPE_IGNORE,		/* logged and otherwise ignored */
};
28 
29 #ifdef CFG_UNWIND
30 
31 static void get_current_ta_exidx(uaddr_t *exidx, size_t *exidx_sz)
32 {
33 	struct tee_ta_session *s;
34 	struct user_ta_ctx *utc;
35 
36 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
37 		panic();
38 
39 	utc = to_user_ta_ctx(s->ctx);
40 
41 	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
42 	assert(utc->is_32bit);
43 
44 	*exidx = utc->exidx_start; /* NULL if TA has no unwind tables */
45 	if (*exidx)
46 		*exidx += utc->load_addr;
47 	*exidx_sz = utc->exidx_size;
48 }
49 
50 #ifdef ARM32
51 
52 /*
53  * Kernel or user mode unwind (32-bit execution state).
54  */
55 static void __print_stack_unwind_arm32(struct abort_info *ai)
56 {
57 	struct unwind_state_arm32 state;
58 	uaddr_t exidx;
59 	size_t exidx_sz;
60 	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
61 	uint32_t sp;
62 	uint32_t lr;
63 
64 	if (abort_is_user_exception(ai)) {
65 		get_current_ta_exidx(&exidx, &exidx_sz);
66 		if (!exidx) {
67 			EMSG_RAW("Call stack not available");
68 			return;
69 		}
70 	} else {
71 		exidx = (vaddr_t)__exidx_start;
72 		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
73 	}
74 
75 	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
76 		sp = ai->regs->usr_sp;
77 		lr = ai->regs->usr_lr;
78 	} else {
79 		sp = read_mode_sp(mode);
80 		lr = read_mode_lr(mode);
81 	}
82 
83 	memset(&state, 0, sizeof(state));
84 	state.registers[0] = ai->regs->r0;
85 	state.registers[1] = ai->regs->r1;
86 	state.registers[2] = ai->regs->r2;
87 	state.registers[3] = ai->regs->r3;
88 	state.registers[4] = ai->regs->r4;
89 	state.registers[5] = ai->regs->r5;
90 	state.registers[6] = ai->regs->r6;
91 	state.registers[7] = ai->regs->r7;
92 	state.registers[8] = ai->regs->r8;
93 	state.registers[9] = ai->regs->r9;
94 	state.registers[10] = ai->regs->r10;
95 	state.registers[11] = ai->regs->r11;
96 	state.registers[13] = sp;
97 	state.registers[14] = lr;
98 	state.registers[15] = ai->pc;
99 
100 	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz);
101 }
102 #else /* ARM32 */
103 
104 static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
105 {
106 	struct unwind_state_arm32 state;
107 	uaddr_t exidx;
108 	size_t exidx_sz;
109 
110 	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
111 	assert(abort_is_user_exception(ai));
112 
113 	get_current_ta_exidx(&exidx, &exidx_sz);
114 
115 	memset(&state, 0, sizeof(state));
116 	state.registers[0] = ai->regs->x0;
117 	state.registers[1] = ai->regs->x1;
118 	state.registers[2] = ai->regs->x2;
119 	state.registers[3] = ai->regs->x3;
120 	state.registers[4] = ai->regs->x4;
121 	state.registers[5] = ai->regs->x5;
122 	state.registers[6] = ai->regs->x6;
123 	state.registers[7] = ai->regs->x7;
124 	state.registers[8] = ai->regs->x8;
125 	state.registers[9] = ai->regs->x9;
126 	state.registers[10] = ai->regs->x10;
127 	state.registers[11] = ai->regs->x11;
128 
129 	state.registers[13] = ai->regs->x13;
130 	state.registers[14] = ai->regs->x14;
131 	state.registers[15] = ai->pc;
132 
133 	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz);
134 }
135 #endif /* ARM32 */
136 #ifdef ARM64
137 /* Kernel or user mode unwind (64-bit execution state) */
138 static void __print_stack_unwind_arm64(struct abort_info *ai)
139 {
140 	struct unwind_state_arm64 state;
141 	uaddr_t stack;
142 	size_t stack_size;
143 
144 	if (abort_is_user_exception(ai)) {
145 		struct tee_ta_session *s;
146 		struct user_ta_ctx *utc;
147 
148 		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
149 			panic();
150 
151 		utc = to_user_ta_ctx(s->ctx);
152 		/* User stack */
153 		stack = utc->stack_addr;
154 		stack_size = utc->mobj_stack->size;
155 	} else {
156 		/* Kernel stack */
157 		stack = thread_stack_start();
158 		stack_size = thread_stack_size();
159 	}
160 
161 	memset(&state, 0, sizeof(state));
162 	state.pc = ai->regs->elr;
163 	state.fp = ai->regs->x29;
164 
165 	print_stack_arm64(TRACE_ERROR, &state, stack, stack_size);
166 }
167 #else
/* No 64-bit frames can exist on a 32-bit-only kernel: nothing to unwind */
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{

}
172 #endif /*ARM64*/
173 #else /* CFG_UNWIND */
/* Stub: stack unwinding is compiled out when CFG_UNWIND is disabled */
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
}
177 
/* Stub: stack unwinding is compiled out when CFG_UNWIND is disabled */
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
181 #endif /* CFG_UNWIND */
182 
183 static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
184 {
185 	if (abort_type == ABORT_TYPE_DATA)
186 		return "data";
187 	if (abort_type == ABORT_TYPE_PREFETCH)
188 		return "prefetch";
189 	return "undef";
190 }
191 
192 static __maybe_unused const char *fault_to_str(uint32_t abort_type,
193 			uint32_t fault_descr)
194 {
195 	/* fault_descr is only valid for data or prefetch abort */
196 	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
197 		return "";
198 
199 	switch (core_mmu_get_fault_type(fault_descr)) {
200 	case CORE_MMU_FAULT_ALIGNMENT:
201 		return " (alignment fault)";
202 	case CORE_MMU_FAULT_TRANSLATION:
203 		return " (translation fault)";
204 	case CORE_MMU_FAULT_READ_PERMISSION:
205 		return " (read permission fault)";
206 	case CORE_MMU_FAULT_WRITE_PERMISSION:
207 		return " (write permission fault)";
208 	default:
209 		return "";
210 	}
211 }
212 
/*
 * Dump the abort header line and the complete saved register file to the
 * console via EMSG_RAW.
 * @ai  abort descriptor built by set_abort_info()
 * @ctx context tag printed first (e.g. "Core" or "User TA")
 */
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp;
	__maybe_unused uint32_t lr;

	/* USR and SYS share banked SP/LR; other modes are read explicitly */
	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}
#endif /*ARM32*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}
291 
/* Compile-time flag: true iff the TEE core itself executes in AArch32 */
#if defined(ARM32)
static const bool kernel_is32bit = true;
#elif defined(ARM64)
static const bool kernel_is32bit;
#endif
297 
298 /*
299  * Print abort info and (optionally) stack dump to the console
300  * @ai user-mode or kernel-mode abort info. If user mode, the current session
301  * must be the one of the TA that caused the abort.
302  * @stack_dump true to show a stack trace
303  */
304 static void __abort_print(struct abort_info *ai, bool stack_dump)
305 {
306 	bool is_32bit;
307 	bool paged_ta_abort = false;
308 
309 	if (abort_is_user_exception(ai)) {
310 		struct tee_ta_session *s;
311 		struct user_ta_ctx *utc;
312 
313 		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
314 			panic();
315 
316 		utc = to_user_ta_ctx(s->ctx);
317 		is_32bit = utc->is_32bit;
318 #ifdef CFG_PAGED_USER_TA
319 		/*
320 		 * It is not safe to unwind paged TAs that received an abort,
321 		 * because we currently don't handle page faults that could
322 		 * occur when accessing the TA memory (unwind tables for
323 		 * instance).
324 		 */
325 		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
326 			paged_ta_abort = true;
327 #endif
328 		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
329 			__print_abort_info(ai, "User TA");
330 		tee_ta_dump_current();
331 	} else {
332 		is_32bit = kernel_is32bit;
333 
334 		__print_abort_info(ai, "Core");
335 	}
336 
337 	if (!stack_dump || paged_ta_abort)
338 		return;
339 
340 	if (is_32bit)
341 		__print_stack_unwind_arm32(ai);
342 	else
343 		__print_stack_unwind_arm64(ai);
344 }
345 
/* Print abort info only (no stack trace), see __abort_print() */
void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}
350 
/* Print abort info together with a stack trace, see __abort_print() */
void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}
355 
356 #ifdef ARM32
/*
 * Fill in @ai from the saved register frame @regs and the AArch32 CP15
 * fault status/address registers matching @abort_type.
 */
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		/* Data abort: DFSR/DFAR hold fault status and address */
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		/* Prefetch abort: IFSR/IFAR are the instruction-side pair */
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		/* Undefined instruction: no fault status, address is the PC */
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
378 #endif /*ARM32*/
379 
380 #ifdef ARM64
/*
 * Fill in @ai from the saved register frame @regs and ESR_EL1/FAR_EL1.
 * The abort type is derived from the ESR exception class rather than
 * from @abort_type, which is ignored on AArch64.
 */
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		/* Instruction abort from EL0 or EL1 */
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		/* Data abort or SP alignment fault from EL0 or EL1 */
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		/* Everything else is reported as undefined at the saved PC */
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
404 #endif /*ARM64*/
405 
406 #ifdef ARM32
/*
 * Rewrite the saved AArch32 frame so that the exception return resumes in
 * thread_unwind_user_mode() (SVC mode) with TEE_ERROR_TARGET_DEAD, ending
 * the TA's execution.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;	/* flags the return as a panic */
	ai->regs->r2 = 0xdeadbeef;	/* panic code reported to the core */
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	/* Keep only the exception mask bits, then switch to SVC mode */
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)	/* bit 0 of the target address = Thumb bit */
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
426 #endif /*ARM32*/
427 
428 #ifdef ARM64
/*
 * Rewrite the saved AArch64 frame so that the exception return resumes in
 * thread_unwind_user_mode() at EL1 (using SP_EL0 restored to the saved
 * thread stack) with TEE_ERROR_TARGET_DEAD, ending the TA's execution.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;	/* flags the return as a panic */
	ai->regs->x2 = 0xdeadbeef;	/* panic code reported to the core */
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	/* Carry the A/I/F mask bits over into the new EL1 PSTATE */
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
447 #endif /*ARM64*/
448 
449 #ifdef CFG_WITH_VFP
450 static void handle_user_ta_vfp(void)
451 {
452 	struct tee_ta_session *s;
453 
454 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
455 		panic();
456 
457 	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
458 }
459 #endif /*CFG_WITH_VFP*/
460 
461 #ifdef CFG_WITH_USER_TA
462 #ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	/* On AArch32 a user origin simply means CPSR mode == USR */
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
468 #endif /*ARM32*/
469 
470 #ifdef ARM64
471 /* Returns true if the exception originated from user mode */
472 bool abort_is_user_exception(struct abort_info *ai)
473 {
474 	uint32_t spsr = ai->regs->spsr;
475 
476 	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
477 		return true;
478 	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
479 	    SPSR_64_MODE_EL0)
480 		return true;
481 	return false;
482 }
483 #endif /*ARM64*/
484 #else /*CFG_WITH_USER_TA*/
/* Without user TA support no exception can originate from user mode */
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
489 #endif /*CFG_WITH_USER_TA*/
490 
491 #if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
492 #ifdef ARM32
493 
/*
 * Thumb2 (T32) and ARM (A32) mask/value pairs used by is_vfp_fault() to
 * recognize VFP/Advanced SIMD instructions: register transfers (VTRANS*),
 * loads/stores (VLDST/VXLDST) and data-processing (VPROC) encodings.
 * T32_INSTR packs the two 16-bit halfwords in the order they are fetched.
 */
#define T32_INSTR(w1, w0) \
	((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))

#define T32_VTRANS32_MASK	T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
#define T32_VTRANS32_VAL	T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)

#define T32_VTRANS64_MASK	T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
#define T32_VTRANS64_VAL	T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)

#define T32_VLDST_MASK		T32_INSTR((0xff << 8) | (1 << 4), 0)
#define T32_VLDST_VAL		T32_INSTR( 0xf9 << 8            , 0)

#define T32_VXLDST_MASK		T32_INSTR(0xfc << 8, 7 << 9)
#define T32_VXLDST_VAL		T32_INSTR(0xec << 8, 5 << 9)

#define T32_VPROC_MASK		T32_INSTR(0xef << 8, 0)
#define T32_VPROC_VAL		T32_VPROC_MASK

#define A32_INSTR(x)		((uint32_t)(x))

#define A32_VTRANS32_MASK	A32_INSTR(SHIFT_U32(0xf, 24) | \
					  SHIFT_U32(7, 9) | BIT32(4))
#define A32_VTRANS32_VAL	A32_INSTR(SHIFT_U32(0xe, 24) | \
					  SHIFT_U32(5, 9) | BIT32(4))

#define A32_VTRANS64_MASK	A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
#define A32_VTRANS64_VAL	A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))

#define A32_VLDST_MASK		A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
#define A32_VLDST_VAL		A32_INSTR(SHIFT_U32(0xf4, 24))
#define A32_VXLDST_MASK		A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
#define A32_VXLDST_VAL		A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))

#define A32_VPROC_MASK		A32_INSTR(SHIFT_U32(0x7f, 25))
#define A32_VPROC_VAL		A32_INSTR(SHIFT_U32(0x79, 25))
529 
/*
 * Return true if the undefined-instruction abort was caused by a VFP/SIMD
 * instruction executed while VFP was disabled (so the fault can be fixed
 * by enabling VFP and retrying), false otherwise.
 */
static bool is_vfp_fault(struct abort_info *ai)
{
	TEE_Result res;
	uint32_t instr;

	/* Only an undef abort with VFP currently off can be a lazy-VFP trap */
	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/* Fetch the faulting instruction from user memory; fail closed */
	res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
	if (res != TEE_SUCCESS)
		return false;

	if (ai->regs->spsr & CPSR_T) {
		/* Thumb mode */
		return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
		       ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
		       ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
		       ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
		       ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
	} else {
		/* ARM mode */
		return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
		       ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
		       ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
		       ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
		       ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
	}
}
558 #endif /*ARM32*/
559 
560 #ifdef ARM64
561 static bool is_vfp_fault(struct abort_info *ai)
562 {
563 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
564 	case ESR_EC_FP_ASIMD:
565 	case ESR_EC_AARCH32_FP:
566 	case ESR_EC_AARCH64_FP:
567 		return true;
568 	default:
569 		return false;
570 	}
571 }
572 #endif /*ARM64*/
573 #else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
/* Without VFP and user TA support no abort can be a lazy-VFP trap */
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
578 #endif  /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
579 
580 static enum fault_type get_fault_type(struct abort_info *ai)
581 {
582 	if (abort_is_user_exception(ai)) {
583 		if (is_vfp_fault(ai))
584 			return FAULT_TYPE_USER_TA_VFP;
585 #ifndef CFG_WITH_PAGER
586 		return FAULT_TYPE_USER_TA_PANIC;
587 #endif
588 	}
589 
590 	if (thread_is_from_abort_mode()) {
591 		abort_print_error(ai);
592 		panic("[abort] abort in abort handler (trap CPU)");
593 	}
594 
595 	if (ai->abort_type == ABORT_TYPE_UNDEF) {
596 		if (abort_is_user_exception(ai))
597 			return FAULT_TYPE_USER_TA_PANIC;
598 		abort_print_error(ai);
599 		panic("[abort] undefined abort (trap CPU)");
600 	}
601 
602 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
603 	case CORE_MMU_FAULT_ALIGNMENT:
604 		if (abort_is_user_exception(ai))
605 			return FAULT_TYPE_USER_TA_PANIC;
606 		abort_print_error(ai);
607 		panic("[abort] alignement fault!  (trap CPU)");
608 		break;
609 
610 	case CORE_MMU_FAULT_ACCESS_BIT:
611 		if (abort_is_user_exception(ai))
612 			return FAULT_TYPE_USER_TA_PANIC;
613 		abort_print_error(ai);
614 		panic("[abort] access bit fault!  (trap CPU)");
615 		break;
616 
617 	case CORE_MMU_FAULT_DEBUG_EVENT:
618 		abort_print(ai);
619 		DMSG("[abort] Ignoring debug event!");
620 		return FAULT_TYPE_IGNORE;
621 
622 	case CORE_MMU_FAULT_TRANSLATION:
623 	case CORE_MMU_FAULT_WRITE_PERMISSION:
624 	case CORE_MMU_FAULT_READ_PERMISSION:
625 		return FAULT_TYPE_PAGEABLE;
626 
627 	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
628 		abort_print(ai);
629 		DMSG("[abort] Ignoring async external abort!");
630 		return FAULT_TYPE_IGNORE;
631 
632 	case CORE_MMU_FAULT_OTHER:
633 	default:
634 		abort_print(ai);
635 		DMSG("[abort] Unhandled fault!");
636 		return FAULT_TYPE_IGNORE;
637 	}
638 }
639 
/*
 * Common abort entry point: build an abort_info from the saved frame,
 * classify it and act accordingly (ignore, panic the TA, enable VFP, or
 * hand the fault to the pager).
 * @abort_type ABORT_TYPE_* as passed from the exception vector
 * @regs       register frame saved by the exception vector
 */
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		abort_print_error(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		/* Kernel VFP state is saved/restored around the pager call */
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			abort_print_error(&ai);
			/* A pageable fault the pager can't fix is fatal for the core */
			if (!abort_is_user_exception(&ai))
				panic("unhandled pageable abort");
			DMSG("[abort] abort in User mode (TA will panic)");
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}
677