xref: /optee_os/core/arch/arm/kernel/abort.c (revision b1469ba0bfd0371eb52bd50f5c52eeda7a8f5f1e)
/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm.h>
#include <assert.h>
#include <kernel/abort.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/unwind.h>
#include <kernel/user_ta.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <string.h>
#include <tee/tee_svc.h>
#include <trace.h>

#include "thread_private.h"

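/*
 * Coarse classification of an abort: get_fault_type() below maps each
 * incoming abort to one of these so abort_handler() can pick a strategy.
 */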
enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,
	FAULT_TYPE_USER_TA_VFP,
	FAULT_TYPE_PAGEABLE,
	FAULT_TYPE_IGNORE,
};

#ifdef CFG_UNWIND

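/*
 * 32-bit (AArch32) binaries carry their unwind information in the
 * .ARM.exidx/.ARM.extab sections defined by the ARM EHABI. The helpers
 * below locate those tables either in the current user TA or in the TEE
 * core image itself.
 */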
static void get_current_ta_exidx(uaddr_t *exidx, size_t *exidx_sz)
{
	struct tee_ta_session *s;
	struct user_ta_ctx *utc;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	utc = to_user_ta_ctx(s->ctx);

	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
	assert(utc->is_32bit);

	*exidx = utc->exidx_start; /* NULL if TA has no unwind tables */
	if (*exidx)
		*exidx += utc->load_addr;
	*exidx_sz = utc->exidx_size;
}

#ifdef ARM32

/*
 * Kernel or user mode unwind (32-bit execution state).
 */
static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	uaddr_t exidx;
	size_t exidx_sz;
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp;
	uint32_t lr;

	if (abort_is_user_exception(ai)) {
		get_current_ta_exidx(&exidx, &exidx_sz);
	} else {
		exidx = (vaddr_t)__exidx_start;
		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
	}

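	/*
	 * USR and SYS share banked registers which are saved in the abort
	 * frame; for any other mode, read the banked SP/LR of that mode
	 * directly.
	 */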
	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
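	/* 13 = SP, 14 = LR, 15 = PC in ARM core register numbering */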
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz);
}
#else /* ARM32 */

static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	uaddr_t exidx;
	size_t exidx_sz;

	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
	assert(abort_is_user_exception(ai));

	get_current_ta_exidx(&exidx, &exidx_sz);

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->x0;
	state.registers[1] = ai->regs->x1;
	state.registers[2] = ai->regs->x2;
	state.registers[3] = ai->regs->x3;
	state.registers[4] = ai->regs->x4;
	state.registers[5] = ai->regs->x5;
	state.registers[6] = ai->regs->x6;
	state.registers[7] = ai->regs->x7;
	state.registers[8] = ai->regs->x8;
	state.registers[9] = ai->regs->x9;
	state.registers[10] = ai->regs->x10;
	state.registers[11] = ai->regs->x11;

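	/*
	 * In the AArch64 view of an AArch32 context, R13 (SP) and R14 (LR)
	 * are mapped to x13 and x14.
	 */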
	state.registers[13] = ai->regs->x13;
	state.registers[14] = ai->regs->x14;
	state.registers[15] = ai->pc;

	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz);
}
#endif /* ARM32 */

#ifdef ARM64
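/*
 * On AArch64 the unwinder follows the x29 frame-pointer chain instead of
 * unwind tables; the stack bounds passed to print_stack_arm64() keep the
 * walk from straying outside the active stack.
 */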
/* Kernel or user mode unwind (64-bit execution state) */
static void __print_stack_unwind_arm64(struct abort_info *ai)
{
	struct unwind_state_arm64 state;
	uaddr_t stack;
	size_t stack_size;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		/* User stack */
		stack = (uaddr_t)utc->mmu->regions[0].va;
		stack_size = utc->mobj_stack->size;
	} else {
		/* Kernel stack */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
	}

	memset(&state, 0, sizeof(state));
	state.pc = ai->regs->elr;
	state.fp = ai->regs->x29;

	print_stack_arm64(TRACE_ERROR, &state, stack, stack_size);
}
#else
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
#endif /*ARM64*/
#else /* CFG_UNWIND */
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
}

static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

static __maybe_unused const char *fault_to_str(uint32_t abort_type,
			uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	default:
		return "";
	}
}

static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp;
	__maybe_unused uint32_t lr;

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}
#endif /*ARM32*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

#if defined(ARM32)
static const bool kernel_is32bit = true;
#elif defined(ARM64)
static const bool kernel_is32bit; /* zero-initialized, i.e. false */
#endif

/*
 * Print abort info and, optionally, a stack dump to the console.
 * @ai user-mode or kernel-mode abort info. For a user-mode abort, the
 * current session must be the session of the TA that caused the abort.
 * @stack_dump true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	bool is_32bit;
	bool paged_ta_abort = false;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		is_32bit = utc->is_32bit;
#ifdef CFG_PAGED_USER_TA
		/*
		 * It is not safe to unwind paged TAs that received an abort,
		 * because we currently don't handle page faults that could
		 * occur when accessing the TA memory (unwind tables for
		 * instance).
		 */
		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
			paged_ta_abort = true;
#endif
		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
			__print_abort_info(ai, "User TA");
		tee_ta_dump_current();
	} else {
		is_32bit = kernel_is32bit;

		__print_abort_info(ai, "Core");
	}

	if (!stack_dump || paged_ta_abort)
		return;

	if (is_32bit)
		__print_stack_unwind_arm32(ai);
	else
		__print_stack_unwind_arm64(ai);
}

void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

#ifdef ARM32
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
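	/* The exception class (EC) in ESR_EL1 tells what kind of abort it was */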
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
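	/* Keep only the A/I/F exception mask bits, then switch to SVC mode */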
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode based on bit 0 of the return address */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

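	/*
	 * Carry the A/I/F mask bits over from the aborted context and
	 * resume in EL1 using SP_EL0.
	 */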
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

#ifdef CFG_WITH_VFP
static void handle_user_ta_vfp(void)
{
	struct tee_ta_session *s;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA
#ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

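	/* Exception taken from AArch32 state, or from AArch64 EL0 */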
	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/
#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/

#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
#ifdef ARM32

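/*
 * A 32-bit Thumb-2 instruction is stored first halfword first, so a
 * little-endian 32-bit load from the PC yields the first halfword in
 * bits [15:0]. T32_INSTR() builds the same layout from the two halfwords
 * so that the masks below can be applied directly to the loaded word.
 */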
#define T32_INSTR(w1, w0) \
	((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))

#define T32_VTRANS32_MASK	T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
#define T32_VTRANS32_VAL	T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)

#define T32_VTRANS64_MASK	T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
#define T32_VTRANS64_VAL	T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)

#define T32_VLDST_MASK		T32_INSTR((0xff << 8) | (1 << 4), 0)
#define T32_VLDST_VAL		T32_INSTR( 0xf9 << 8            , 0)

#define T32_VXLDST_MASK		T32_INSTR(0xfc << 8, 7 << 9)
#define T32_VXLDST_VAL		T32_INSTR(0xec << 8, 5 << 9)

#define T32_VPROC_MASK		T32_INSTR(0xef << 8, 0)
#define T32_VPROC_VAL		T32_VPROC_MASK

#define A32_INSTR(x)		((uint32_t)(x))

#define A32_VTRANS32_MASK	A32_INSTR(SHIFT_U32(0xf, 24) | \
					  SHIFT_U32(7, 9) | BIT32(4))
#define A32_VTRANS32_VAL	A32_INSTR(SHIFT_U32(0xe, 24) | \
					  SHIFT_U32(5, 9) | BIT32(4))

#define A32_VTRANS64_MASK	A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
#define A32_VTRANS64_VAL	A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))

#define A32_VLDST_MASK		A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
#define A32_VLDST_VAL		A32_INSTR(SHIFT_U32(0xf4, 24))
#define A32_VXLDST_MASK		A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
#define A32_VXLDST_VAL		A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))

#define A32_VPROC_MASK		A32_INSTR(SHIFT_U32(0x7f, 25))
#define A32_VPROC_VAL		A32_INSTR(SHIFT_U32(0x79, 25))

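/*
 * Lazy VFP enabling: an Undef exception taken while VFP is disabled is a
 * VFP fault if the faulting instruction matches one of the VFP/Advanced
 * SIMD encoding patterns above.
 */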
static bool is_vfp_fault(struct abort_info *ai)
{
	TEE_Result res;
	uint32_t instr;

	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
	if (res != TEE_SUCCESS)
		return false;

	if (ai->regs->spsr & CPSR_T) {
		/* Thumb mode */
		return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
		       ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
		       ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
		       ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
		       ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
	} else {
		/* ARM mode */
		return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
		       ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
		       ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
		       ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
		       ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
	}
}
#endif /*ARM32*/

#ifdef ARM64
static bool is_vfp_fault(struct abort_info *ai)
{
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_FP_ASIMD:
	case ESR_EC_AARCH32_FP:
	case ESR_EC_AARCH64_FP:
		return true;
	default:
		return false;
	}
}
#endif /*ARM64*/
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

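/*
 * Classify the abort. Translation and permission faults are reported as
 * FAULT_TYPE_PAGEABLE so the pager can try to resolve them; unrecoverable
 * user-mode faults panic the TA, unrecoverable kernel faults trap the CPU.
 */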
static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_TA_VFP;
#ifndef CFG_WITH_PAGER
		return FAULT_TYPE_USER_TA_PANIC;
#endif
	}

	if (thread_is_from_abort_mode()) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		abort_print(ai);
		DMSG("[abort] Ignoring async external abort!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_OTHER:
	default:
		abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

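/*
 * Entry point for data aborts, prefetch aborts and undefined instruction
 * exceptions, once the register frame has been saved.
 */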
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		abort_print_error(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
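		/* Preserve the kernel VFP context across the pager call */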
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			abort_print_error(&ai);
			if (!abort_is_user_exception(&ai))
				panic("unhandled pageable abort");
			DMSG("[abort] abort in User mode (TA will panic)");
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}