xref: /optee_os/core/arch/arm/kernel/abort.c (revision 9d6ac0978c21afae871e675ed95c825cd7c8ec91)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <kernel/abort.h>
8 #include <kernel/linker.h>
9 #include <kernel/misc.h>
10 #include <kernel/panic.h>
11 #include <kernel/tee_ta_manager.h>
12 #include <kernel/unwind.h>
13 #include <kernel/user_ta.h>
14 #include <mm/core_mmu.h>
15 #include <mm/mobj.h>
16 #include <mm/tee_pager.h>
17 #include <tee/tee_svc.h>
18 #include <trace.h>
19 
20 #include "thread_private.h"
21 
/*
 * Classification of an abort, used by abort_handler() to route handling.
 */
enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,	/* Fatal user TA fault: TA will panic */
	FAULT_TYPE_USER_TA_VFP,		/* Lazy VFP enablement for a user TA */
	FAULT_TYPE_PAGEABLE,	/* Translation/permission fault, try the pager */
	FAULT_TYPE_IGNORE,	/* Debug event, async external abort, etc. */
};
28 
29 #ifdef CFG_UNWIND
30 
31 static void get_current_ta_exidx_stack(vaddr_t *exidx, size_t *exidx_sz,
32 				       vaddr_t *stack, size_t *stack_size)
33 {
34 	struct tee_ta_session *s;
35 	struct user_ta_ctx *utc;
36 
37 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
38 		panic();
39 
40 	utc = to_user_ta_ctx(s->ctx);
41 
42 	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
43 	assert(utc->is_32bit);
44 
45 	*exidx = utc->exidx_start; /* NULL if TA has no unwind tables */
46 	if (*exidx)
47 		*exidx += utc->load_addr;
48 	*exidx_sz = utc->exidx_size;
49 
50 	*stack = utc->stack_addr;
51 	*stack_size = utc->mobj_stack->size;
52 }
53 
54 #ifdef ARM32
55 
56 /*
57  * Kernel or user mode unwind (32-bit execution state).
58  */
59 static void __print_stack_unwind_arm32(struct abort_info *ai)
60 {
61 	struct unwind_state_arm32 state;
62 	vaddr_t exidx;
63 	size_t exidx_sz;
64 	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
65 	uint32_t sp;
66 	uint32_t lr;
67 	vaddr_t stack;
68 	size_t stack_size;
69 	bool kernel_stack;
70 
71 	if (abort_is_user_exception(ai)) {
72 		get_current_ta_exidx_stack(&exidx, &exidx_sz, &stack,
73 					   &stack_size);
74 		if (!exidx) {
75 			EMSG_RAW("Call stack not available");
76 			return;
77 		}
78 		kernel_stack = false;
79 	} else {
80 		exidx = (vaddr_t)__exidx_start;
81 		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
82 		/* Kernel stack */
83 		stack = thread_stack_start();
84 		stack_size = thread_stack_size();
85 		kernel_stack = true;
86 	}
87 
88 	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
89 		sp = ai->regs->usr_sp;
90 		lr = ai->regs->usr_lr;
91 	} else {
92 		sp = read_mode_sp(mode);
93 		lr = read_mode_lr(mode);
94 	}
95 
96 	memset(&state, 0, sizeof(state));
97 	state.registers[0] = ai->regs->r0;
98 	state.registers[1] = ai->regs->r1;
99 	state.registers[2] = ai->regs->r2;
100 	state.registers[3] = ai->regs->r3;
101 	state.registers[4] = ai->regs->r4;
102 	state.registers[5] = ai->regs->r5;
103 	state.registers[6] = ai->regs->r6;
104 	state.registers[7] = ai->regs->r7;
105 	state.registers[8] = ai->regs->r8;
106 	state.registers[9] = ai->regs->r9;
107 	state.registers[10] = ai->regs->r10;
108 	state.registers[11] = ai->regs->r11;
109 	state.registers[13] = sp;
110 	state.registers[14] = lr;
111 	state.registers[15] = ai->pc;
112 
113 	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz, kernel_stack,
114 			  stack, stack_size);
115 }
116 #else /* ARM32 */
117 
118 static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
119 {
120 	struct unwind_state_arm32 state;
121 	vaddr_t exidx;
122 	size_t exidx_sz;
123 	vaddr_t stack;
124 	size_t stack_size;
125 
126 	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
127 	assert(abort_is_user_exception(ai));
128 
129 	get_current_ta_exidx_stack(&exidx, &exidx_sz, &stack, &stack_size);
130 
131 	memset(&state, 0, sizeof(state));
132 	state.registers[0] = ai->regs->x0;
133 	state.registers[1] = ai->regs->x1;
134 	state.registers[2] = ai->regs->x2;
135 	state.registers[3] = ai->regs->x3;
136 	state.registers[4] = ai->regs->x4;
137 	state.registers[5] = ai->regs->x5;
138 	state.registers[6] = ai->regs->x6;
139 	state.registers[7] = ai->regs->x7;
140 	state.registers[8] = ai->regs->x8;
141 	state.registers[9] = ai->regs->x9;
142 	state.registers[10] = ai->regs->x10;
143 	state.registers[11] = ai->regs->x11;
144 
145 	state.registers[13] = ai->regs->x13;
146 	state.registers[14] = ai->regs->x14;
147 	state.registers[15] = ai->pc;
148 
149 	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz,
150 			  false /*!kernel_stack*/, stack, stack_size);
151 }
152 #endif /* ARM32 */
153 #ifdef ARM64
154 /* Kernel or user mode unwind (64-bit execution state) */
155 static void __print_stack_unwind_arm64(struct abort_info *ai)
156 {
157 	struct unwind_state_arm64 state = { };
158 	bool kernel_stack = false;
159 	uaddr_t stack = 0;
160 	size_t stack_size = 0;
161 
162 	if (abort_is_user_exception(ai)) {
163 		struct tee_ta_session *s = NULL;
164 		struct user_ta_ctx *utc = NULL;
165 
166 		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
167 			panic();
168 
169 		utc = to_user_ta_ctx(s->ctx);
170 		/* User stack */
171 		stack = utc->stack_addr;
172 		stack_size = utc->mobj_stack->size;
173 	} else {
174 		/* Kernel stack */
175 		stack = thread_stack_start();
176 		stack_size = thread_stack_size();
177 		kernel_stack = true;
178 	}
179 
180 	state.pc = ai->regs->elr;
181 	state.fp = ai->regs->x29;
182 
183 	print_stack_arm64(TRACE_ERROR, &state, kernel_stack, stack, stack_size);
184 }
185 #else
186 static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
187 {
188 
189 }
190 #endif /*ARM64*/
191 #else /* CFG_UNWIND */
/* Stack unwinding compiled out (CFG_UNWIND=n): no-op */
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
}
195 
/* Stack unwinding compiled out (CFG_UNWIND=n): no-op */
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
199 #endif /* CFG_UNWIND */
200 
201 static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
202 {
203 	if (abort_type == ABORT_TYPE_DATA)
204 		return "data";
205 	if (abort_type == ABORT_TYPE_PREFETCH)
206 		return "prefetch";
207 	return "undef";
208 }
209 
210 static __maybe_unused const char *fault_to_str(uint32_t abort_type,
211 			uint32_t fault_descr)
212 {
213 	/* fault_descr is only valid for data or prefetch abort */
214 	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
215 		return "";
216 
217 	switch (core_mmu_get_fault_type(fault_descr)) {
218 	case CORE_MMU_FAULT_ALIGNMENT:
219 		return " (alignment fault)";
220 	case CORE_MMU_FAULT_TRANSLATION:
221 		return " (translation fault)";
222 	case CORE_MMU_FAULT_READ_PERMISSION:
223 		return " (read permission fault)";
224 	case CORE_MMU_FAULT_WRITE_PERMISSION:
225 		return " (write permission fault)";
226 	default:
227 		return "";
228 	}
229 }
230 
/*
 * Dump the faulting context to the console: fault status, fault
 * address, CPU number and the full general purpose register set.
 * @ai  abort info captured by set_abort_info()
 * @ctx short origin label for the first line, e.g. "Core" or "User TA"
 */
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
	__maybe_unused size_t core_pos = 0;
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp = 0;
	__maybe_unused uint32_t lr = 0;

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		/*
		 * Faulting mode was usr/sys: use the saved user sp/lr and
		 * the core number recorded when the abort was taken
		 */
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
		core_pos = thread_get_tsd()->abort_core;
	} else {
		/* Privileged mode: read that mode's banked sp/lr directly */
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
		core_pos = get_core_pos();
	}
#endif /*ARM32*/
#ifdef ARM64
	if (abort_is_user_exception(ai))
		core_pos = thread_get_tsd()->abort_core;
	else
		core_pos = get_core_pos();
#endif /*ARM64*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 core_pos, ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 core_pos, (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}
318 
319 /*
320  * Print abort info and (optionally) stack dump to the console
321  * @ai kernel-mode abort info.
322  * @stack_dump true to show a stack trace
323  */
324 static void __abort_print(struct abort_info *ai, bool stack_dump)
325 {
326 	assert(!abort_is_user_exception(ai));
327 
328 	__print_abort_info(ai, "Core");
329 
330 	if (stack_dump) {
331 #if defined(ARM32)
332 		__print_stack_unwind_arm32(ai);
333 #else
334 		__print_stack_unwind_arm64(ai);
335 #endif
336 	}
337 }
338 
/* Print kernel-mode abort info without a stack trace */
void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}
343 
/* Print kernel-mode abort info including a stack trace */
void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}
348 
/*
 * Report the abort that killed the current TA: context dump, TA dump
 * and a user-mode stack trace. Reads the state previously stashed by
 * save_abort_info_in_tsd(). This function must be called from a normal
 * thread.
 */
void abort_print_current_ta(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct abort_info ai = { };
	struct tee_ta_session *s = NULL;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	/* Rebuild abort info from the copy saved at abort time */
	ai.abort_type = tsd->abort_type;
	ai.fault_descr = tsd->abort_descr;
	ai.va = tsd->abort_va;
	ai.pc = tsd->abort_regs.elr;
	ai.regs = &tsd->abort_regs;

	/* A deliberate TA panic is not a hardware abort: skip the dump */
	if (ai.abort_type != ABORT_TYPE_TA_PANIC)
		__print_abort_info(&ai, "User TA");
	tee_ta_dump_current();

	/* Pick the unwinder matching the TA's execution state */
	if (to_user_ta_ctx(s->ctx)->is_32bit)
		__print_stack_unwind_arm32(&ai);
	else
		__print_stack_unwind_arm64(&ai);
}
374 
375 static void save_abort_info_in_tsd(struct abort_info *ai)
376 {
377 	struct thread_specific_data *tsd = thread_get_tsd();
378 
379 	tsd->abort_type = ai->abort_type;
380 	tsd->abort_descr = ai->fault_descr;
381 	tsd->abort_va = ai->va;
382 	tsd->abort_regs = *ai->regs;
383 	tsd->abort_core = get_core_pos();
384 }
385 
386 #ifdef ARM32
/*
 * Fill in @ai from the AArch32 banked fault status/address registers
 * for an abort of the given @abort_type.
 */
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		/* Data fault status and address registers */
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		/* Instruction fault status and address registers */
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		/* Undefined instruction: the faulting address is the PC */
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
408 #endif /*ARM32*/
409 
410 #ifdef ARM64
/*
 * Fill in @ai by decoding the exception class (EC) field of ESR_EL1.
 * @abort_type from the vector is unused: ESR_EL1 is authoritative here.
 */
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		/* Instruction abort from EL0 or EL1 */
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		/* Data abort or stack pointer alignment fault */
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		/* Any other EC is treated as an undefined instruction */
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
434 #endif /*ARM64*/
435 
436 #ifdef ARM32
/*
 * Rewrite the saved AArch32 context so that exception return resumes
 * in thread_unwind_user_mode() in SVC mode with TEE_ERROR_TARGET_DEAD,
 * instead of going back to the faulting TA.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	/* r1/r2: presumably exit status args for thread_unwind_user_mode —
	 * verify against its prototype */
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	/* Keep only the A/I/F exception mask bits of the saved CPSR */
	ai->regs->spsr &= CPSR_FIA;
	/* Resume in privileged SVC mode */
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode from bit 0 of the target address */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
456 #endif /*ARM32*/
457 
458 #ifdef ARM64
/*
 * Rewrite the saved AArch64 context so that exception return resumes
 * in thread_unwind_user_mode() at EL1 (using SP_EL0 restored to the
 * saved kernel stack) with TEE_ERROR_TARGET_DEAD, instead of going
 * back to the faulting TA.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	/* x1/x2: presumably exit status args for thread_unwind_user_mode —
	 * verify against its prototype */
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	/* Carry the A/I/F masks over from the saved (AArch32-layout) SPSR */
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
477 #endif /*ARM64*/
478 
479 #ifdef CFG_WITH_VFP
480 static void handle_user_ta_vfp(void)
481 {
482 	struct tee_ta_session *s;
483 
484 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
485 		panic();
486 
487 	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
488 }
489 #endif /*CFG_WITH_VFP*/
490 
491 #ifdef CFG_WITH_USER_TA
492 #ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	/* On AArch32 the saved CPSR mode field tells the faulting mode */
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
498 #endif /*ARM32*/
499 
500 #ifdef ARM64
501 /* Returns true if the exception originated from user mode */
502 bool abort_is_user_exception(struct abort_info *ai)
503 {
504 	uint32_t spsr = ai->regs->spsr;
505 
506 	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
507 		return true;
508 	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
509 	    SPSR_64_MODE_EL0)
510 		return true;
511 	return false;
512 }
513 #endif /*ARM64*/
514 #else /*CFG_WITH_USER_TA*/
/* Without user TA support no abort can originate from user mode */
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
519 #endif /*CFG_WITH_USER_TA*/
520 
521 #if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
522 #ifdef ARM32
/* Heuristic: an undef abort while VFP is disabled is taken as a VFP trap */
static bool is_vfp_fault(struct abort_info *ai)
{
	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/*
	 * Not entirely accurate, but if it's a truly undefined instruction
	 * we'll end up in this function again, except this time
	 * vfp_is_enabled() will be true so we'll return false.
	 */
	return true;
}
535 #endif /*ARM32*/
536 
537 #ifdef ARM64
538 static bool is_vfp_fault(struct abort_info *ai)
539 {
540 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
541 	case ESR_EC_FP_ASIMD:
542 	case ESR_EC_AARCH32_FP:
543 	case ESR_EC_AARCH64_FP:
544 		return true;
545 	default:
546 		return false;
547 	}
548 }
549 #endif /*ARM64*/
550 #else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
/* VFP or user TA support compiled out: never a VFP fault */
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
555 #endif  /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
556 
557 static enum fault_type get_fault_type(struct abort_info *ai)
558 {
559 	if (abort_is_user_exception(ai)) {
560 		if (is_vfp_fault(ai))
561 			return FAULT_TYPE_USER_TA_VFP;
562 #ifndef CFG_WITH_PAGER
563 		return FAULT_TYPE_USER_TA_PANIC;
564 #endif
565 	}
566 
567 	if (thread_is_from_abort_mode()) {
568 		abort_print_error(ai);
569 		panic("[abort] abort in abort handler (trap CPU)");
570 	}
571 
572 	if (ai->abort_type == ABORT_TYPE_UNDEF) {
573 		if (abort_is_user_exception(ai))
574 			return FAULT_TYPE_USER_TA_PANIC;
575 		abort_print_error(ai);
576 		panic("[abort] undefined abort (trap CPU)");
577 	}
578 
579 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
580 	case CORE_MMU_FAULT_ALIGNMENT:
581 		if (abort_is_user_exception(ai))
582 			return FAULT_TYPE_USER_TA_PANIC;
583 		abort_print_error(ai);
584 		panic("[abort] alignement fault!  (trap CPU)");
585 		break;
586 
587 	case CORE_MMU_FAULT_ACCESS_BIT:
588 		if (abort_is_user_exception(ai))
589 			return FAULT_TYPE_USER_TA_PANIC;
590 		abort_print_error(ai);
591 		panic("[abort] access bit fault!  (trap CPU)");
592 		break;
593 
594 	case CORE_MMU_FAULT_DEBUG_EVENT:
595 		if (!abort_is_user_exception(ai))
596 			abort_print(ai);
597 		DMSG("[abort] Ignoring debug event!");
598 		return FAULT_TYPE_IGNORE;
599 
600 	case CORE_MMU_FAULT_TRANSLATION:
601 	case CORE_MMU_FAULT_WRITE_PERMISSION:
602 	case CORE_MMU_FAULT_READ_PERMISSION:
603 		return FAULT_TYPE_PAGEABLE;
604 
605 	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
606 		if (!abort_is_user_exception(ai))
607 			abort_print(ai);
608 		DMSG("[abort] Ignoring async external abort!");
609 		return FAULT_TYPE_IGNORE;
610 
611 	case CORE_MMU_FAULT_OTHER:
612 	default:
613 		if (!abort_is_user_exception(ai))
614 			abort_print(ai);
615 		DMSG("[abort] Unhandled fault!");
616 		return FAULT_TYPE_IGNORE;
617 	}
618 }
619 
/*
 * Main abort entry, called from the exception vector with a snapshot
 * of the faulting context. Routes the abort according to
 * get_fault_type(): ignore it, panic the TA, enable VFP lazily, or
 * hand it to the pager.
 */
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		/* Save context first: it's reported later from a thread */
		save_abort_info_in_tsd(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		/* The pager can only run with a valid thread context */
		if (thread_get_id_may_fail() < 0) {
			abort_print_error(&ai);
			panic("abort outside thread context");
		}
		/* Preserve kernel VFP state across the pager call */
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			/* Unresolved kernel fault is fatal ... */
			if (!abort_is_user_exception(&ai)) {
				abort_print_error(&ai);
				panic("unhandled pageable abort");
			}
			/* ... an unresolved user fault kills the TA */
			DMSG("[abort] abort in User mode (TA will panic)");
			save_abort_info_in_tsd(&ai);
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}
663