/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm.h>
#include <assert.h>
#include <kernel/abort.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/unwind.h>
#include <kernel/user_ta.h>
#include <kernel/vfp.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <string.h>
#include <tee/tee_svc.h>
#include <trace.h>

#include "thread_private.h"

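/*
 * Classification of an abort: used by abort_handler() to choose between
 * panicking the TA, enabling VFP, asking the pager to resolve the fault or
 * ignoring the event.
 */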
enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,
	FAULT_TYPE_USER_TA_VFP,
	FAULT_TYPE_PAGEABLE,
	FAULT_TYPE_IGNORE,
};

#ifdef CFG_UNWIND

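/*
 * Look up the current TA's .ARM.exidx unwind table: returns its virtual
 * address in @exidx (0 if the TA has no unwind tables) and its size in
 * @exidx_sz.
 */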
static void get_current_ta_exidx(uaddr_t *exidx, size_t *exidx_sz)
{
	struct tee_ta_session *s;
	struct user_ta_ctx *utc;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	utc = to_user_ta_ctx(s->ctx);

	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
	assert(utc->is_32bit);

	*exidx = utc->exidx_start; /* 0 if TA has no unwind tables */
	if (*exidx)
		*exidx += utc->load_addr;
	*exidx_sz = utc->exidx_size;
}

#ifdef ARM32

/*
 * These are set in the linker script. Their addresses will be the start or end
 * of the exception binary search index table (.ARM.exidx section)
 */
extern uint8_t __exidx_start[];
extern uint8_t __exidx_end[];

/*
 * Kernel or user mode unwind (32-bit execution state).
 */
static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	uaddr_t exidx;
	size_t exidx_sz;

	if (abort_is_user_exception(ai)) {
		get_current_ta_exidx(&exidx, &exidx_sz);
	} else {
		exidx = (vaddr_t)__exidx_start;
		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
	}

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;

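	/*
	 * SP and LR are banked per CPU mode on ARM32; read them from the
	 * mode the abort was taken from, as recorded in the saved SPSR.
	 */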
	state.registers[13] = read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK);
	state.registers[14] = read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK);
	state.registers[15] = ai->pc;

	EMSG_RAW("Call stack:");
	do {
		EMSG_RAW(" 0x%08x", state.registers[15]);
	} while (exidx && unwind_stack_arm32(&state, exidx, exidx_sz));
}
#else /* ARM32 */

static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	uaddr_t exidx;
	size_t exidx_sz;

	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
	assert(abort_is_user_exception(ai));

	get_current_ta_exidx(&exidx, &exidx_sz);

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->x0;
	state.registers[1] = ai->regs->x1;
	state.registers[2] = ai->regs->x2;
	state.registers[3] = ai->regs->x3;
	state.registers[4] = ai->regs->x4;
	state.registers[5] = ai->regs->x5;
	state.registers[6] = ai->regs->x6;
	state.registers[7] = ai->regs->x7;
	state.registers[8] = ai->regs->x8;
	state.registers[9] = ai->regs->x9;
	state.registers[10] = ai->regs->x10;
	state.registers[11] = ai->regs->x11;

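	/*
	 * AArch32 user-mode R13 (SP) and R14 (LR) map to X13 and X14 in the
	 * AArch64 register file saved on abort.
	 */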
	state.registers[13] = ai->regs->x13;
	state.registers[14] = ai->regs->x14;
	state.registers[15] = ai->pc;

	EMSG_RAW("Call stack:");
	do {
		EMSG_RAW(" 0x%08x", state.registers[15]);
	} while (exidx && unwind_stack_arm32(&state, exidx, exidx_sz));
}
#endif /* ARM32 */
#ifdef ARM64
/* Kernel or user mode unwind (64-bit execution state) */
static void __print_stack_unwind_arm64(struct abort_info *ai)
{
	struct unwind_state_arm64 state;
	uaddr_t stack;
	size_t stack_size;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		/* User stack */
		stack = (uaddr_t)utc->mmu->regions[0].va;
		stack_size = utc->mobj_stack->size;
	} else {
		/* Kernel stack */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
	}

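	/*
	 * There are no exidx tables in AArch64; unwind_stack_arm64() follows
	 * the chain of x29 frame pointers, checked against the stack bounds
	 * computed above.
	 */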
	memset(&state, 0, sizeof(state));
	state.pc = ai->regs->elr;
	state.fp = ai->regs->x29;

	EMSG_RAW("Call stack:");
	do {
		EMSG_RAW(" 0x%016" PRIx64, state.pc);
	} while (stack && unwind_stack_arm64(&state, stack, stack_size));
}
#else
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
#endif /*ARM64*/
#else /* CFG_UNWIND */
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
}

static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

static __maybe_unused const char *fault_to_str(uint32_t abort_type,
			uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch aborts */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	default:
		return "";
	}
}

static __maybe_unused void __print_abort_info(
				struct abort_info *ai __maybe_unused,
				const char *ctx __maybe_unused)
{
	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9,
		 read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10,
		 read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

#if defined(ARM32)
static const bool kernel_is32bit = true;
#elif defined(ARM64)
static const bool kernel_is32bit;
#endif

/*
 * Print abort info and (optionally) a stack dump to the console.
 * @ai user-mode or kernel-mode abort info. If user mode, the current session
 * must be that of the TA that caused the abort.
 * @stack_dump true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	bool is_32bit;
	bool paged_ta = false;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		is_32bit = utc->is_32bit;
#ifdef CFG_PAGED_USER_TA
		/*
		 * We don't want to unwind paged TAs, because we currently
		 * don't handle page faults that could occur when accessing the
		 * TA memory (unwind tables for instance).
		 */
		paged_ta = true;
#endif

		__print_abort_info(ai, "User TA");
		tee_ta_dump_current();
	} else {
		is_32bit = kernel_is32bit;

		__print_abort_info(ai, "Core");
	}

	if (!stack_dump || paged_ta)
		return;

	if (is_32bit)
		__print_stack_unwind_arm32(ai);
	else
		__print_stack_unwind_arm64(ai);
}

void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

#ifdef ARM32
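/*
 * On ARM32, fault status and faulting address live in separate registers
 * per abort type: DFSR/DFAR for data aborts, IFSR/IFAR for prefetch aborts.
 */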
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
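/*
 * On ARM64 a single syndrome register (ESR_EL1) describes the abort. Its
 * exception class field tells instruction aborts from data aborts, so the
 * abort_type argument from the vector is not needed.
 */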
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
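	/*
	 * r0..r2 are picked up by thread_unwind_user_mode() as the return
	 * code and the two exit status words (panic flag and panic code).
	 */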
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr = read_cpsr();
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	ai->regs->spsr &= ~CPSR_FIA;
	ai->regs->spsr |= read_spsr() & CPSR_FIA;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
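	/* Resume on the kernel stack saved when this thread entered user mode */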
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

#ifdef CFG_WITH_VFP
static void handle_user_ta_vfp(void)
{
	struct tee_ta_session *s;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA
#ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

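	/* Either AArch32 state (only used by user TAs) or AArch64 EL0 */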
	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/
#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/

#ifdef ARM32
/* Returns true if the exception originated from abort mode */
static bool is_abort_in_abort_handler(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
}
#endif /*ARM32*/

#ifdef ARM64
/* AArch64 has no abort mode, so an abort can never originate from it */
static bool is_abort_in_abort_handler(struct abort_info *ai __unused)
{
	return false;
}
#endif /*ARM64*/

#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
#ifdef ARM32

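/*
 * A 32-bit Thumb instruction is stored as two little-endian halfwords.
 * Reading it with a single 32-bit load puts the first halfword in bits
 * 15:0, so the masks below are built with the halfwords swapped.
 */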
#define T32_INSTR(w1, w0) \
	((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))

#define T32_VTRANS32_MASK	T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
#define T32_VTRANS32_VAL	T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)

#define T32_VTRANS64_MASK	T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
#define T32_VTRANS64_VAL	T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)

#define T32_VLDST_MASK		T32_INSTR((0xff << 8) | (1 << 4), 0)
#define T32_VLDST_VAL		T32_INSTR(0xf9 << 8, 0)

#define T32_VXLDST_MASK		T32_INSTR(0xfc << 8, 7 << 9)
#define T32_VXLDST_VAL		T32_INSTR(0xec << 8, 5 << 9)

#define T32_VPROC_MASK		T32_INSTR(0xef << 8, 0)
#define T32_VPROC_VAL		T32_VPROC_MASK

#define A32_INSTR(x)		((uint32_t)(x))

#define A32_VTRANS32_MASK	A32_INSTR(SHIFT_U32(0xf, 24) | \
					  SHIFT_U32(7, 9) | BIT32(4))
#define A32_VTRANS32_VAL	A32_INSTR(SHIFT_U32(0xe, 24) | \
					  SHIFT_U32(5, 9) | BIT32(4))

#define A32_VTRANS64_MASK	A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
#define A32_VTRANS64_VAL	A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))

#define A32_VLDST_MASK		A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
#define A32_VLDST_VAL		A32_INSTR(SHIFT_U32(0xf4, 24))
#define A32_VXLDST_MASK		A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
#define A32_VXLDST_VAL		A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))

#define A32_VPROC_MASK		A32_INSTR(SHIFT_U32(0x7f, 25))
#define A32_VPROC_VAL		A32_INSTR(SHIFT_U32(0x79, 25))

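/*
 * With lazy FP context switching, the first VFP/SIMD instruction a TA
 * executes while VFP is disabled traps as an undefined instruction abort.
 * Decode the faulting instruction to tell such traps from a genuine undef.
 */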
static bool is_vfp_fault(struct abort_info *ai)
{
	TEE_Result res;
	uint32_t instr;

	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
	if (res != TEE_SUCCESS)
		return false;

	if (ai->regs->spsr & CPSR_T) {
		/* Thumb mode */
		return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
		       ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
		       ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
		       ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
		       ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
	} else {
		/* ARM mode */
		return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
		       ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
		       ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
		       ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
		       ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
	}
}
#endif /*ARM32*/

#ifdef ARM64
static bool is_vfp_fault(struct abort_info *ai)
{
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_FP_ASIMD:
	case ESR_EC_AARCH32_FP:
	case ESR_EC_AARCH64_FP:
		return true;
	default:
		return false;
	}
}
#endif /*ARM64*/
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

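/*
 * Classify the abort: user TA panics, VFP traps, faults the pager may be
 * able to resolve, and events to ignore. Fatal kernel conditions panic
 * directly from here.
 */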
static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_TA_VFP;
#ifndef CFG_WITH_PAGER
		return FAULT_TYPE_USER_TA_PANIC;
#endif
	}

	if (is_abort_in_abort_handler(ai)) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		abort_print(ai);
		DMSG("[abort] Ignoring async external abort!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_OTHER:
	default:
		abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

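/*
 * Abort entry point, called from the assembly exception vectors with the
 * saved register state in @regs.
 */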
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		abort_print_error(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			abort_print_error(&ai);
			if (!abort_is_user_exception(&ai))
				panic("unhandled pageable abort");
			DMSG("[abort] abort in User mode (TA will panic)");
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}