xref: /optee_os/core/arch/arm/kernel/abort.c (revision 1bb929836182ecb96d2d9d268daa807c67596396)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm.h>
#include <assert.h>
#include <kernel/abort.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/unwind.h>
#include <kernel/user_ta.h>
#include <kernel/vfp.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <string.h>
#include <tee/tee_svc.h>
#include <trace.h>

#include "thread_private.h"

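/*
 * Fault classification used by abort_handler() to decide how a fault is
 * handled:
 * FAULT_TYPE_USER_TA_PANIC  unrecoverable fault in a user TA, make it panic
 * FAULT_TYPE_USER_TA_VFP    user TA used VFP while disabled, enable it lazily
 * FAULT_TYPE_PAGEABLE       fault the pager may be able to resolve
 * FAULT_TYPE_IGNORE         fault is ignored and execution resumes
 */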
enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,
	FAULT_TYPE_USER_TA_VFP,
	FAULT_TYPE_PAGEABLE,
	FAULT_TYPE_IGNORE,
};

#ifdef CFG_UNWIND

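/*
 * Return the .ARM.exidx index table of the current user TA so that its
 * 32-bit call stack can be unwound. The current session must be the one
 * of the TA that caused the abort.
 */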
static void get_current_ta_exidx(uaddr_t *exidx, size_t *exidx_sz)
{
	struct tee_ta_session *s;
	struct user_ta_ctx *utc;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	utc = to_user_ta_ctx(s->ctx);

	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
	assert(utc->is_32bit);

	*exidx = utc->exidx_start; /* NULL if TA has no unwind tables */
	if (*exidx)
		*exidx += utc->load_addr;
	*exidx_sz = utc->exidx_size;
}

#ifdef ARM32

/*
 * Kernel or user mode unwind (32-bit execution state).
 */
static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	uaddr_t exidx;
	size_t exidx_sz;
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp;
	uint32_t lr;

	if (abort_is_user_exception(ai)) {
		get_current_ta_exidx(&exidx, &exidx_sz);
	} else {
		exidx = (vaddr_t)__exidx_start;
		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
	}

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz);
}
#else /* ARM32 */

static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	uaddr_t exidx;
	size_t exidx_sz;

	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
	assert(abort_is_user_exception(ai));

	get_current_ta_exidx(&exidx, &exidx_sz);

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->x0;
	state.registers[1] = ai->regs->x1;
	state.registers[2] = ai->regs->x2;
	state.registers[3] = ai->regs->x3;
	state.registers[4] = ai->regs->x4;
	state.registers[5] = ai->regs->x5;
	state.registers[6] = ai->regs->x6;
	state.registers[7] = ai->regs->x7;
	state.registers[8] = ai->regs->x8;
	state.registers[9] = ai->regs->x9;
	state.registers[10] = ai->regs->x10;
	state.registers[11] = ai->regs->x11;
	state.registers[13] = ai->regs->x13;
	state.registers[14] = ai->regs->x14;
	state.registers[15] = ai->pc;

	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz);
}
#endif /* ARM32 */
#ifdef ARM64
/* Kernel or user mode unwind (64-bit execution state) */
static void __print_stack_unwind_arm64(struct abort_info *ai)
{
	struct unwind_state_arm64 state;
	uaddr_t stack;
	size_t stack_size;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		/* User stack */
		stack = (uaddr_t)utc->mmu->regions[0].va;
		stack_size = utc->mobj_stack->size;
	} else {
		/* Kernel stack */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
	}

	memset(&state, 0, sizeof(state));
	state.pc = ai->regs->elr;
	state.fp = ai->regs->x29;

	print_stack_arm64(TRACE_ERROR, &state, stack, stack_size);
}
#else
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
#endif /*ARM64*/
#else /* CFG_UNWIND */
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
}

static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

static __maybe_unused const char *fault_to_str(uint32_t abort_type,
					       uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	default:
		return "";
	}
}

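/* Dump the abort type, faulting address and saved register state */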
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp;
	__maybe_unused uint32_t lr;

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}
#endif /*ARM32*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		 ctx, abort_type_to_str(ai->abort_type), ai->va,
		 fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

#if defined(ARM32)
static const bool kernel_is32bit = true;
#elif defined(ARM64)
static const bool kernel_is32bit;
#endif

/*
 * Print abort info and (optionally) a stack dump to the console
 * @ai user-mode or kernel-mode abort info. If user mode, the current session
 * must be that of the TA that caused the abort.
 * @stack_dump true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	bool is_32bit;
	bool paged_ta_abort = false;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		is_32bit = utc->is_32bit;
#ifdef CFG_PAGED_USER_TA
		/*
		 * It is not safe to unwind paged TAs that received an abort,
		 * because we currently don't handle page faults that could
		 * occur when accessing the TA memory (unwind tables for
		 * instance).
		 */
		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
			paged_ta_abort = true;
#endif
		if (ai->abort_type != ABORT_TYPE_TA_PANIC)
			__print_abort_info(ai, "User TA");
		tee_ta_dump_current();
	} else {
		is_32bit = kernel_is32bit;

		__print_abort_info(ai, "Core");
	}

	if (!stack_dump || paged_ta_abort)
		return;

	if (is_32bit)
		__print_stack_unwind_arm32(ai);
	else
		__print_stack_unwind_arm64(ai);
}

void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

#ifdef ARM32
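/*
 * Fill in @ai from the ARM32 fault status and address registers:
 * DFSR/DFAR for data aborts, IFSR/IFAR for prefetch aborts.
 */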
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
			   struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
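/*
 * Fill in @ai by decoding the exception class of ESR_EL1. The faulting
 * address for instruction and data aborts is read from FAR_EL1.
 */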
static void set_abort_info(uint32_t abort_type __unused,
			   struct thread_abort_regs *regs,
			   struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
		break;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

#ifdef CFG_WITH_VFP
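/*
 * Enable VFP for the current user TA; the instruction that trapped is
 * then retried when the TA is resumed.
 */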
static void handle_user_ta_vfp(void)
{
	struct tee_ta_session *s;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA
#ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/
#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/

#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
#ifdef ARM32

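/*
 * Encoding masks used to recognize VFP/Advanced SIMD instructions
 * (register transfers, loads/stores and data-processing) in Thumb
 * (T32_*) and ARM (A32_*) state. T32_INSTR() places the first halfword
 * in the low 16 bits, matching how the instruction is read from memory.
 */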
#define T32_INSTR(w1, w0) \
	((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))

#define T32_VTRANS32_MASK	T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
#define T32_VTRANS32_VAL	T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)

#define T32_VTRANS64_MASK	T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
#define T32_VTRANS64_VAL	T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)

#define T32_VLDST_MASK		T32_INSTR((0xff << 8) | (1 << 4), 0)
#define T32_VLDST_VAL		T32_INSTR(0xf9 << 8, 0)

#define T32_VXLDST_MASK		T32_INSTR(0xfc << 8, 7 << 9)
#define T32_VXLDST_VAL		T32_INSTR(0xec << 8, 5 << 9)

#define T32_VPROC_MASK		T32_INSTR(0xef << 8, 0)
#define T32_VPROC_VAL		T32_VPROC_MASK

#define A32_INSTR(x)		((uint32_t)(x))

#define A32_VTRANS32_MASK	A32_INSTR(SHIFT_U32(0xf, 24) | \
					  SHIFT_U32(7, 9) | BIT32(4))
#define A32_VTRANS32_VAL	A32_INSTR(SHIFT_U32(0xe, 24) | \
					  SHIFT_U32(5, 9) | BIT32(4))

#define A32_VTRANS64_MASK	A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
#define A32_VTRANS64_VAL	A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))

#define A32_VLDST_MASK		A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
#define A32_VLDST_VAL		A32_INSTR(SHIFT_U32(0xf4, 24))
#define A32_VXLDST_MASK		A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
#define A32_VXLDST_VAL		A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))

#define A32_VPROC_MASK		A32_INSTR(SHIFT_U32(0x7f, 25))
#define A32_VPROC_VAL		A32_INSTR(SHIFT_U32(0x79, 25))

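/*
 * Returns true if the undefined instruction abort was caused by executing
 * a VFP/Advanced SIMD instruction while VFP was disabled.
 */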
static bool is_vfp_fault(struct abort_info *ai)
{
	TEE_Result res;
	uint32_t instr;

	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
	if (res != TEE_SUCCESS)
		return false;

	if (ai->regs->spsr & CPSR_T) {
		/* Thumb mode */
		return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
		       ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
		       ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
		       ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
		       ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
	} else {
		/* ARM mode */
		return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
		       ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
		       ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
		       ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
		       ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
	}
}
#endif /*ARM32*/

#ifdef ARM64
static bool is_vfp_fault(struct abort_info *ai)
{
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_FP_ASIMD:
	case ESR_EC_AARCH32_FP:
	case ESR_EC_AARCH64_FP:
		return true;
	default:
		return false;
	}
}
#endif /*ARM64*/
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

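/*
 * Classify the abort described by @ai. Does not return for unrecoverable
 * kernel faults: those print the abort info and panic.
 */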
static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_TA_VFP;
#ifndef CFG_WITH_PAGER
		return FAULT_TYPE_USER_TA_PANIC;
#endif
	}

	if (thread_is_from_abort_mode()) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault!  (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault!  (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		abort_print(ai);
		DMSG("[abort] Ignoring async external abort!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_OTHER:
	default:
		abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

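/*
 * Main abort handler: classify the fault and either ignore it, enable
 * VFP, let the pager resolve it, or make the offending user TA panic.
 */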
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		abort_print_error(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			abort_print_error(&ai);
			if (!abort_is_user_exception(&ai))
				panic("unhandled pageable abort");
			DMSG("[abort] abort in User mode (TA will panic)");
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}