xref: /optee_os/core/arch/arm/kernel/abort.c (revision e84e1feccbdbd9deae5ad2dea921f4f624e8ad6d)
1 /*
2  * Copyright (c) 2015, Linaro Limited
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <arm.h>
29 #include <kernel/abort.h>
30 #include <kernel/linker.h>
31 #include <kernel/misc.h>
32 #include <kernel/panic.h>
33 #include <kernel/tee_ta_manager.h>
34 #include <kernel/unwind.h>
35 #include <kernel/user_ta.h>
36 #include <mm/core_mmu.h>
37 #include <mm/mobj.h>
38 #include <mm/tee_pager.h>
39 #include <tee/tee_svc.h>
40 #include <trace.h>
41 
42 #include "thread_private.h"
43 
/* Classification of an abort, selects the handling path in abort_handler() */
enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,	/* Fatal user-mode fault: TA will panic */
	FAULT_TYPE_USER_TA_VFP,		/* User TA used VFP while it was disabled */
	FAULT_TYPE_PAGEABLE,		/* Candidate for the pager to resolve */
	FAULT_TYPE_IGNORE,		/* Logged, then execution resumes */
};
50 
51 #ifdef CFG_UNWIND
52 
53 static void get_current_ta_exidx(uaddr_t *exidx, size_t *exidx_sz)
54 {
55 	struct tee_ta_session *s;
56 	struct user_ta_ctx *utc;
57 
58 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
59 		panic();
60 
61 	utc = to_user_ta_ctx(s->ctx);
62 
63 	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
64 	assert(utc->is_32bit);
65 
66 	*exidx = utc->exidx_start; /* NULL if TA has no unwind tables */
67 	if (*exidx)
68 		*exidx += utc->load_addr;
69 	*exidx_sz = utc->exidx_size;
70 }
71 
72 #ifdef ARM32
73 
74 /*
75  * Kernel or user mode unwind (32-bit execution state).
76  */
77 static void __print_stack_unwind_arm32(struct abort_info *ai)
78 {
79 	struct unwind_state_arm32 state;
80 	uaddr_t exidx;
81 	size_t exidx_sz;
82 	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
83 	uint32_t sp;
84 	uint32_t lr;
85 
86 	if (abort_is_user_exception(ai)) {
87 		get_current_ta_exidx(&exidx, &exidx_sz);
88 	} else {
89 		exidx = (vaddr_t)__exidx_start;
90 		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
91 	}
92 
93 	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
94 		sp = ai->regs->usr_sp;
95 		lr = ai->regs->usr_lr;
96 	} else {
97 		sp = read_mode_sp(mode);
98 		lr = read_mode_lr(mode);
99 	}
100 
101 	memset(&state, 0, sizeof(state));
102 	state.registers[0] = ai->regs->r0;
103 	state.registers[1] = ai->regs->r1;
104 	state.registers[2] = ai->regs->r2;
105 	state.registers[3] = ai->regs->r3;
106 	state.registers[4] = ai->regs->r4;
107 	state.registers[5] = ai->regs->r5;
108 	state.registers[6] = ai->regs->r6;
109 	state.registers[7] = ai->regs->r7;
110 	state.registers[8] = ai->regs->r8;
111 	state.registers[9] = ai->regs->r9;
112 	state.registers[10] = ai->regs->r10;
113 	state.registers[11] = ai->regs->r11;
114 	state.registers[13] = sp;
115 	state.registers[14] = lr;
116 	state.registers[15] = ai->pc;
117 
118 	EMSG_RAW("Call stack:");
119 	do {
120 		EMSG_RAW(" 0x%08x", state.registers[15]);
121 	} while (exidx && unwind_stack_arm32(&state, exidx, exidx_sz));
122 }
123 #else /* ARM32 */
124 
125 static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
126 {
127 	struct unwind_state_arm32 state;
128 	uaddr_t exidx;
129 	size_t exidx_sz;
130 
131 	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
132 	assert(abort_is_user_exception(ai));
133 
134 	get_current_ta_exidx(&exidx, &exidx_sz);
135 
136 	memset(&state, 0, sizeof(state));
137 	state.registers[0] = ai->regs->x0;
138 	state.registers[1] = ai->regs->x1;
139 	state.registers[2] = ai->regs->x2;
140 	state.registers[3] = ai->regs->x3;
141 	state.registers[4] = ai->regs->x4;
142 	state.registers[5] = ai->regs->x5;
143 	state.registers[6] = ai->regs->x6;
144 	state.registers[7] = ai->regs->x7;
145 	state.registers[8] = ai->regs->x8;
146 	state.registers[9] = ai->regs->x9;
147 	state.registers[10] = ai->regs->x10;
148 	state.registers[11] = ai->regs->x11;
149 
150 	state.registers[13] = ai->regs->x13;
151 	state.registers[14] = ai->regs->x14;
152 	state.registers[15] = ai->pc;
153 
154 	EMSG_RAW("Call stack:");
155 	do {
156 		EMSG_RAW(" 0x%08x", state.registers[15]);
157 	} while (exidx && unwind_stack_arm32(&state, exidx, exidx_sz));
158 }
159 #endif /* ARM32 */
160 #ifdef ARM64
/*
 * Kernel or user mode unwind (64-bit execution state).
 * Frame-pointer based: walks the chain starting at x29/elr of the aborted
 * context, bounded by the relevant stack area to stop on corrupt chains.
 */
static void __print_stack_unwind_arm64(struct abort_info *ai)
{
	struct unwind_state_arm64 state;
	uaddr_t stack;
	size_t stack_size;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		/*
		 * User stack.
		 * NOTE(review): assumes mmu->regions[0] is the stack region
		 * of the TA mapping — confirm against the TA loader.
		 */
		stack = (uaddr_t)utc->mmu->regions[0].va;
		stack_size = utc->mobj_stack->size;
	} else {
		/* Kernel stack of the current thread */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
	}

	memset(&state, 0, sizeof(state));
	state.pc = ai->regs->elr;
	state.fp = ai->regs->x29;

	EMSG_RAW("Call stack:");
	do {
		EMSG_RAW(" 0x%016" PRIx64, state.pc);
	} while (stack && unwind_stack_arm64(&state, stack, stack_size));
}
194 #else
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
	/* 32-bit kernel: no 64-bit context can exist, nothing to unwind */
}
199 #endif /*ARM64*/
200 #else /* CFG_UNWIND */
/* CFG_UNWIND disabled: stack dumping compiled out */
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
}
204 
/* CFG_UNWIND disabled: stack dumping compiled out */
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
208 #endif /* CFG_UNWIND */
209 
210 static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
211 {
212 	if (abort_type == ABORT_TYPE_DATA)
213 		return "data";
214 	if (abort_type == ABORT_TYPE_PREFETCH)
215 		return "prefetch";
216 	return "undef";
217 }
218 
219 static __maybe_unused const char *fault_to_str(uint32_t abort_type,
220 			uint32_t fault_descr)
221 {
222 	/* fault_descr is only valid for data or prefetch abort */
223 	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
224 		return "";
225 
226 	switch (core_mmu_get_fault_type(fault_descr)) {
227 	case CORE_MMU_FAULT_ALIGNMENT:
228 		return " (alignment fault)";
229 	case CORE_MMU_FAULT_TRANSLATION:
230 		return " (translation fault)";
231 	case CORE_MMU_FAULT_READ_PERMISSION:
232 		return " (read permission fault)";
233 	case CORE_MMU_FAULT_WRITE_PERMISSION:
234 		return " (write permission fault)";
235 	default:
236 		return "";
237 	}
238 }
239 
/*
 * Dump the abort banner and full register state of the aborted context.
 * @ai  abort info, including the saved register frame
 * @ctx label for the banner ("User TA" or "Core")
 */
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp;
	__maybe_unused uint32_t lr;

	/* USR/SYS sp and lr come from the saved frame, other modes are banked */
	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}
#endif /*ARM32*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1())	;
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}
318 
/* Execution state of the core itself (AArch32 vs AArch64 build) */
#if defined(ARM32)
static const bool kernel_is32bit = true;
#elif defined(ARM64)
static const bool kernel_is32bit;	/* static storage: false */
#endif
324 
325 /*
326  * Print abort info and (optionally) stack dump to the console
327  * @ai user-mode or kernel-mode abort info. If user mode, the current session
328  * must be the one of the TA that caused the abort.
329  * @stack_dump true to show a stack trace
330  */
331 static void __abort_print(struct abort_info *ai, bool stack_dump)
332 {
333 	bool is_32bit;
334 	bool paged_ta = false;
335 
336 	if (abort_is_user_exception(ai)) {
337 		struct tee_ta_session *s;
338 		struct user_ta_ctx *utc;
339 
340 		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
341 			panic();
342 
343 		utc = to_user_ta_ctx(s->ctx);
344 		is_32bit = utc->is_32bit;
345 #ifdef CFG_PAGED_USER_TA
346 		/*
347 		 * We don't want to unwind paged TAs, because we currently
348 		 * don't handle page faults that could occur when accessing the
349 		 * TA memory (unwind tables for instance).
350 		 */
351 		paged_ta = true;
352 #endif
353 
354 		__print_abort_info(ai, "User TA");
355 		tee_ta_dump_current();
356 	} else {
357 		is_32bit = kernel_is32bit;
358 
359 		__print_abort_info(ai, "Core");
360 	}
361 
362 	if (!stack_dump || paged_ta)
363 		return;
364 
365 	if (is_32bit)
366 		__print_stack_unwind_arm32(ai);
367 	else
368 		__print_stack_unwind_arm64(ai);
369 }
370 
/* Print abort info only, without a stack trace */
void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}
375 
/* Print abort info including a stack trace (fatal/error path) */
void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}
380 
381 #ifdef ARM32
382 static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
383 		struct abort_info *ai)
384 {
385 	switch (abort_type) {
386 	case ABORT_TYPE_DATA:
387 		ai->fault_descr = read_dfsr();
388 		ai->va = read_dfar();
389 		break;
390 	case ABORT_TYPE_PREFETCH:
391 		ai->fault_descr = read_ifsr();
392 		ai->va = read_ifar();
393 		break;
394 	default:
395 		ai->fault_descr = 0;
396 		ai->va = regs->elr;
397 		break;
398 	}
399 	ai->abort_type = abort_type;
400 	ai->pc = regs->elr;
401 	ai->regs = regs;
402 }
403 #endif /*ARM32*/
404 
405 #ifdef ARM64
406 static void set_abort_info(uint32_t abort_type __unused,
407 		struct thread_abort_regs *regs, struct abort_info *ai)
408 {
409 	ai->fault_descr = read_esr_el1();
410 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
411 	case ESR_EC_IABT_EL0:
412 	case ESR_EC_IABT_EL1:
413 		ai->abort_type = ABORT_TYPE_PREFETCH;
414 		ai->va = read_far_el1();
415 		break;
416 	case ESR_EC_DABT_EL0:
417 	case ESR_EC_DABT_EL1:
418 	case ESR_EC_SP_ALIGN:
419 		ai->abort_type = ABORT_TYPE_DATA;
420 		ai->va = read_far_el1();
421 		break;
422 	default:
423 		ai->abort_type = ABORT_TYPE_UNDEF;
424 		ai->va = regs->elr;
425 	}
426 	ai->pc = regs->elr;
427 	ai->regs = regs;
428 }
429 #endif /*ARM64*/
430 
431 #ifdef ARM32
/*
 * Rewrite the saved frame so that exception return lands in
 * thread_unwind_user_mode() in SVC mode with TEE_ERROR_TARGET_DEAD,
 * ending user execution of the panicking TA.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;	/* "panicked" flag for the unwinder */
	ai->regs->r2 = 0xdeadbeef;	/* panic code marker */
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	/* Keep only the A/I/F exception-mask bits, switch to SVC mode */
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode from bit 0 of the target address */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
451 #endif /*ARM32*/
452 
453 #ifdef ARM64
/*
 * Rewrite the saved frame so that exception return lands in
 * thread_unwind_user_mode() at EL1 with TEE_ERROR_TARGET_DEAD,
 * ending user execution of the panicking TA.
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;	/* "panicked" flag for the unwinder */
	ai->regs->x2 = 0xdeadbeef;	/* panic code marker */
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	/* Carry over the AIF exception masks of the aborted context */
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
472 #endif /*ARM64*/
473 
474 #ifdef CFG_WITH_VFP
475 static void handle_user_ta_vfp(void)
476 {
477 	struct tee_ta_session *s;
478 
479 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
480 		panic();
481 
482 	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
483 }
484 #endif /*CFG_WITH_VFP*/
485 
486 #ifdef CFG_WITH_USER_TA
487 #ifdef ARM32
488 /* Returns true if the exception originated from user mode */
489 bool abort_is_user_exception(struct abort_info *ai)
490 {
491 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
492 }
493 #endif /*ARM32*/
494 
495 #ifdef ARM64
496 /* Returns true if the exception originated from user mode */
497 bool abort_is_user_exception(struct abort_info *ai)
498 {
499 	uint32_t spsr = ai->regs->spsr;
500 
501 	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
502 		return true;
503 	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
504 	    SPSR_64_MODE_EL0)
505 		return true;
506 	return false;
507 }
508 #endif /*ARM64*/
509 #else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	/* No user TA support: aborts can only come from the core itself */
	return false;
}
514 #endif /*CFG_WITH_USER_TA*/
515 
516 #if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
517 #ifdef ARM32
518 
/*
 * Mask/value pairs used to recognize VFP/SIMD instruction encodings at
 * the faulting PC (see is_vfp_fault() below). T32_* match the Thumb-2
 * encodings, A32_* the ARM encodings.
 */

/* Combine the two 16-bit halfwords of a T32 instruction (w1 fetched first) */
#define T32_INSTR(w1, w0) \
	((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))

/* 32-bit register <-> FP register transfers */
#define T32_VTRANS32_MASK	T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
#define T32_VTRANS32_VAL	T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)

/* 64-bit register <-> FP register transfers */
#define T32_VTRANS64_MASK	T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
#define T32_VTRANS64_VAL	T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)

/* FP/SIMD element and structure load/store */
#define T32_VLDST_MASK		T32_INSTR((0xff << 8) | (1 << 4), 0)
#define T32_VLDST_VAL		T32_INSTR( 0xf9 << 8            , 0)

/* FP extension register load/store */
#define T32_VXLDST_MASK		T32_INSTR(0xfc << 8, 7 << 9)
#define T32_VXLDST_VAL		T32_INSTR(0xec << 8, 5 << 9)

/* FP/SIMD data processing */
#define T32_VPROC_MASK		T32_INSTR(0xef << 8, 0)
#define T32_VPROC_VAL		T32_VPROC_MASK

#define A32_INSTR(x)		((uint32_t)(x))

/* 32-bit register <-> FP register transfers */
#define A32_VTRANS32_MASK	A32_INSTR(SHIFT_U32(0xf, 24) | \
					  SHIFT_U32(7, 9) | BIT32(4))
#define A32_VTRANS32_VAL	A32_INSTR(SHIFT_U32(0xe, 24) | \
					  SHIFT_U32(5, 9) | BIT32(4))

/* 64-bit register <-> FP register transfers */
#define A32_VTRANS64_MASK	A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
#define A32_VTRANS64_VAL	A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))

/* FP/SIMD element and structure load/store */
#define A32_VLDST_MASK		A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
#define A32_VLDST_VAL		A32_INSTR(SHIFT_U32(0xf4, 24))
/* FP extension register load/store */
#define A32_VXLDST_MASK		A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
#define A32_VXLDST_VAL		A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))

/* FP/SIMD data processing */
#define A32_VPROC_MASK		A32_INSTR(SHIFT_U32(0x7f, 25))
#define A32_VPROC_VAL		A32_INSTR(SHIFT_U32(0x79, 25))
554 
/*
 * Decide whether an undef abort was caused by a user TA executing a
 * VFP/SIMD instruction while VFP was disabled. Fetches the faulting
 * instruction from user memory and matches it against the encoding
 * masks defined above.
 */
static bool is_vfp_fault(struct abort_info *ai)
{
	TEE_Result res;
	uint32_t instr;

	/* Only undef aborts with VFP currently off can be lazy-VFP traps */
	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/* Read the instruction at the faulting PC from user space */
	res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
	if (res != TEE_SUCCESS)
		return false;

	if (ai->regs->spsr & CPSR_T) {
		/* Thumb mode */
		return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
		       ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
		       ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
		       ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
		       ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
	} else {
		/* ARM mode */
		return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
		       ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
		       ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
		       ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
		       ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
	}
}
583 #endif /*ARM32*/
584 
585 #ifdef ARM64
586 static bool is_vfp_fault(struct abort_info *ai)
587 {
588 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
589 	case ESR_EC_FP_ASIMD:
590 	case ESR_EC_AARCH32_FP:
591 	case ESR_EC_AARCH64_FP:
592 		return true;
593 	default:
594 		return false;
595 	}
596 }
597 #endif /*ARM64*/
598 #else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	/* VFP or user TA support disabled: never a lazy-VFP trap */
	return false;
}
603 #endif  /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
604 
605 static enum fault_type get_fault_type(struct abort_info *ai)
606 {
607 	if (abort_is_user_exception(ai)) {
608 		if (is_vfp_fault(ai))
609 			return FAULT_TYPE_USER_TA_VFP;
610 #ifndef CFG_WITH_PAGER
611 		return FAULT_TYPE_USER_TA_PANIC;
612 #endif
613 	}
614 
615 	if (thread_is_from_abort_mode()) {
616 		abort_print_error(ai);
617 		panic("[abort] abort in abort handler (trap CPU)");
618 	}
619 
620 	if (ai->abort_type == ABORT_TYPE_UNDEF) {
621 		if (abort_is_user_exception(ai))
622 			return FAULT_TYPE_USER_TA_PANIC;
623 		abort_print_error(ai);
624 		panic("[abort] undefined abort (trap CPU)");
625 	}
626 
627 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
628 	case CORE_MMU_FAULT_ALIGNMENT:
629 		if (abort_is_user_exception(ai))
630 			return FAULT_TYPE_USER_TA_PANIC;
631 		abort_print_error(ai);
632 		panic("[abort] alignement fault!  (trap CPU)");
633 		break;
634 
635 	case CORE_MMU_FAULT_ACCESS_BIT:
636 		if (abort_is_user_exception(ai))
637 			return FAULT_TYPE_USER_TA_PANIC;
638 		abort_print_error(ai);
639 		panic("[abort] access bit fault!  (trap CPU)");
640 		break;
641 
642 	case CORE_MMU_FAULT_DEBUG_EVENT:
643 		abort_print(ai);
644 		DMSG("[abort] Ignoring debug event!");
645 		return FAULT_TYPE_IGNORE;
646 
647 	case CORE_MMU_FAULT_TRANSLATION:
648 	case CORE_MMU_FAULT_WRITE_PERMISSION:
649 	case CORE_MMU_FAULT_READ_PERMISSION:
650 		return FAULT_TYPE_PAGEABLE;
651 
652 	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
653 		abort_print(ai);
654 		DMSG("[abort] Ignoring async external abort!");
655 		return FAULT_TYPE_IGNORE;
656 
657 	case CORE_MMU_FAULT_OTHER:
658 	default:
659 		abort_print(ai);
660 		DMSG("[abort] Unhandled fault!");
661 		return FAULT_TYPE_IGNORE;
662 	}
663 }
664 
/*
 * Entry point for data/prefetch/undef aborts, called from the exception
 * vector with the saved register frame @regs. Classifies the fault and
 * either ignores it, panics the TA, enables VFP, or lets the pager
 * resolve it.
 */
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		abort_print_error(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		/* Pager may use VFP internally: save/restore kernel VFP state */
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			abort_print_error(&ai);
			/* Unresolved core fault is fatal for the whole TEE */
			if (!abort_is_user_exception(&ai))
				panic("unhandled pageable abort");
			DMSG("[abort] abort in User mode (TA will panic)");
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}
702