xref: /optee_os/core/arch/arm/kernel/abort.c (revision 0014a941f0d7d8bd61fbe6dd9977b902529bf803)
/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm.h>
#include <assert.h>
#include <kernel/abort.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/unwind.h>
#include <kernel/user_ta.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <string.h>
#include <tee/tee_svc.h>
#include <trace.h>

#include "thread_private.h"

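/*
 * Classification of an abort, as computed by get_fault_type() below and
 * acted on by abort_handler(): panic the TA, lazily enable VFP for the TA,
 * hand the fault to the pager, or log it and carry on.
 */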
enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,
	FAULT_TYPE_USER_TA_VFP,
	FAULT_TYPE_PAGEABLE,
	FAULT_TYPE_IGNORE,
};

#ifdef CFG_UNWIND

static void get_current_ta_exidx(uaddr_t *exidx, size_t *exidx_sz)
{
	struct tee_ta_session *s;
	struct user_ta_ctx *utc;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	utc = to_user_ta_ctx(s->ctx);

	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
	assert(utc->is_32bit);

	*exidx = utc->exidx_start; /* NULL if TA has no unwind tables */
	if (*exidx)
		*exidx += utc->load_addr;
	*exidx_sz = utc->exidx_size;
}

#ifdef ARM32

/*
 * Kernel or user mode unwind (32-bit execution state).
 */
static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	uaddr_t exidx;
	size_t exidx_sz;

	if (abort_is_user_exception(ai)) {
		get_current_ta_exidx(&exidx, &exidx_sz);
	} else {
		exidx = (vaddr_t)__exidx_start;
		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
	}

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;

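	/* sp and lr are banked: read them from the mode the abort came from */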
	state.registers[13] = read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK);
	state.registers[14] = read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK);
	state.registers[15] = ai->pc;

	EMSG_RAW("Call stack:");
	do {
		EMSG_RAW(" 0x%08x", state.registers[15]);
	} while (exidx && unwind_stack_arm32(&state, exidx, exidx_sz));
}
#else /* ARM32 */

static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	uaddr_t exidx;
	size_t exidx_sz;

	/* 64-bit kernel, hence 32-bit unwind must be for user mode */
	assert(abort_is_user_exception(ai));

	get_current_ta_exidx(&exidx, &exidx_sz);

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->x0;
	state.registers[1] = ai->regs->x1;
	state.registers[2] = ai->regs->x2;
	state.registers[3] = ai->regs->x3;
	state.registers[4] = ai->regs->x4;
	state.registers[5] = ai->regs->x5;
	state.registers[6] = ai->regs->x6;
	state.registers[7] = ai->regs->x7;
	state.registers[8] = ai->regs->x8;
	state.registers[9] = ai->regs->x9;
	state.registers[10] = ai->regs->x10;
	state.registers[11] = ai->regs->x11;

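	/* The AArch32 sp (r13) and lr (r14) of the TA are mapped to x13/x14 */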
	state.registers[13] = ai->regs->x13;
	state.registers[14] = ai->regs->x14;
	state.registers[15] = ai->pc;

	EMSG_RAW("Call stack:");
	do {
		EMSG_RAW(" 0x%08x", state.registers[15]);
	} while (exidx && unwind_stack_arm32(&state, exidx, exidx_sz));
}
#endif /* ARM32 */
#ifdef ARM64
/* Kernel or user mode unwind (64-bit execution state) */
static void __print_stack_unwind_arm64(struct abort_info *ai)
{
	struct unwind_state_arm64 state;
	uaddr_t stack;
	size_t stack_size;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		/* User stack */
		stack = (uaddr_t)utc->mmu->regions[0].va;
		stack_size = utc->mobj_stack->size;
	} else {
		/* Kernel stack */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
	}

	memset(&state, 0, sizeof(state));
	state.pc = ai->regs->elr;
	state.fp = ai->regs->x29;

	EMSG_RAW("Call stack:");
	do {
		EMSG_RAW(" 0x%016" PRIx64, state.pc);
	} while (stack && unwind_stack_arm64(&state, stack, stack_size));
}
#else
static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
#endif /*ARM64*/
#else /* CFG_UNWIND */
static void __print_stack_unwind_arm32(struct abort_info *ai __unused)
{
}

static void __print_stack_unwind_arm64(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

static __maybe_unused const char *fault_to_str(uint32_t abort_type,
			uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	default:
		return "";
	}
}

static __maybe_unused void __print_abort_info(
				struct abort_info *ai __maybe_unused,
				const char *ctx __maybe_unused)
{
	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9,
		 read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10,
		 read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

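/* The execution state of the core itself is fixed at build time */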
#if defined(ARM32)
static const bool kernel_is32bit = true;
#elif defined(ARM64)
static const bool kernel_is32bit;
#endif

/*
 * Print abort info and (optionally) a stack dump to the console.
 * @ai user-mode or kernel-mode abort info. In the user-mode case, the
 * current session must be that of the TA that caused the abort.
 * @stack_dump true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	bool is_32bit;
	bool paged_ta = false;

	if (abort_is_user_exception(ai)) {
		struct tee_ta_session *s;
		struct user_ta_ctx *utc;

		if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
			panic();

		utc = to_user_ta_ctx(s->ctx);
		is_32bit = utc->is_32bit;
#ifdef CFG_PAGED_USER_TA
		/*
		 * We don't want to unwind paged TAs, because we currently
		 * don't handle page faults that could occur when accessing the
		 * TA memory (unwind tables for instance).
		 */
		paged_ta = true;
#endif

		__print_abort_info(ai, "User TA");
		tee_ta_dump_current();
	} else {
		is_32bit = kernel_is32bit;

		__print_abort_info(ai, "Core");
	}

	if (!stack_dump || paged_ta)
		return;

	if (is_32bit)
		__print_stack_unwind_arm32(ai);
	else
		__print_stack_unwind_arm64(ai);
}

void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

#ifdef ARM32
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

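/*
 * On ARM64 the abort type passed by the exception vector isn't needed:
 * type and faulting address are derived from ESR_EL1 and FAR_EL1 instead.
 */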
#ifdef ARM64
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
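	/*
	 * r0..r2 become the arguments of thread_unwind_user_mode(): the
	 * return code and the two exit status words reported for the
	 * panicking TA.
	 */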
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr = read_cpsr();
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	ai->regs->spsr &= ~CPSR_FIA;
	ai->regs->spsr |= read_spsr() & CPSR_FIA;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
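	/* Resume on the kernel stack saved when the thread entered user mode */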
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

#ifdef CFG_WITH_VFP
static void handle_user_ta_vfp(void)
{
	struct tee_ta_session *s;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA
#ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/
#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/

#ifdef ARM32
/* Returns true if the exception originated from abort mode */
static bool is_abort_in_abort_handler(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
}
#endif /*ARM32*/

#ifdef ARM64
/*
 * AArch64 has no dedicated abort mode, so a recursive abort can't be
 * detected this way; always report false.
 */
static bool is_abort_in_abort_handler(struct abort_info *ai __unused)
{
	return false;
}
#endif /*ARM64*/

#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
#ifdef ARM32

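/*
 * Mask/value pairs matching VFP/Advanced SIMD instruction encodings, used
 * to recognize a TA touching the FPU while VFP is disabled. Note that
 * T32_INSTR() packs the two Thumb halfwords the way a little-endian 32-bit
 * load from the instruction stream sees them: the first halfword in bits
 * [15:0], the second in bits [31:16].
 */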
#define T32_INSTR(w1, w0) \
	((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))

#define T32_VTRANS32_MASK	T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
#define T32_VTRANS32_VAL	T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)

#define T32_VTRANS64_MASK	T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
#define T32_VTRANS64_VAL	T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)

#define T32_VLDST_MASK		T32_INSTR((0xff << 8) | (1 << 4), 0)
#define T32_VLDST_VAL		T32_INSTR(0xf9 << 8, 0)

#define T32_VXLDST_MASK		T32_INSTR(0xfc << 8, 7 << 9)
#define T32_VXLDST_VAL		T32_INSTR(0xec << 8, 5 << 9)

#define T32_VPROC_MASK		T32_INSTR(0xef << 8, 0)
#define T32_VPROC_VAL		T32_VPROC_MASK

#define A32_INSTR(x)		((uint32_t)(x))

#define A32_VTRANS32_MASK	A32_INSTR(SHIFT_U32(0xf, 24) | \
					  SHIFT_U32(7, 9) | BIT32(4))
#define A32_VTRANS32_VAL	A32_INSTR(SHIFT_U32(0xe, 24) | \
					  SHIFT_U32(5, 9) | BIT32(4))

#define A32_VTRANS64_MASK	A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
#define A32_VTRANS64_VAL	A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))

#define A32_VLDST_MASK		A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
#define A32_VLDST_VAL		A32_INSTR(SHIFT_U32(0xf4, 24))
#define A32_VXLDST_MASK		A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
#define A32_VXLDST_VAL		A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))

#define A32_VPROC_MASK		A32_INSTR(SHIFT_U32(0x7f, 25))
#define A32_VPROC_VAL		A32_INSTR(SHIFT_U32(0x79, 25))

static bool is_vfp_fault(struct abort_info *ai)
{
	TEE_Result res;
	uint32_t instr;

	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
	if (res != TEE_SUCCESS)
		return false;

	if (ai->regs->spsr & CPSR_T) {
		/* Thumb mode */
		return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
		       ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
		       ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
		       ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
		       ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
	} else {
		/* ARM mode */
		return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
		       ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
		       ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
		       ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
		       ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
	}
}
#endif /*ARM32*/

#ifdef ARM64
static bool is_vfp_fault(struct abort_info *ai)
{
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_FP_ASIMD:
	case ESR_EC_AARCH32_FP:
	case ESR_EC_AARCH64_FP:
		return true;
	default:
		return false;
	}
}
#endif /*ARM64*/
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_TA_VFP;
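		/*
		 * Without a pager any remaining user-mode fault is fatal
		 * for the TA; with CFG_WITH_PAGER it may be a legitimate
		 * page fault, so keep classifying.
		 */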
#ifndef CFG_WITH_PAGER
		return FAULT_TYPE_USER_TA_PANIC;
#endif
	}

	if (is_abort_in_abort_handler(ai)) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_TA_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		abort_print(ai);
		DMSG("[abort] Ignoring async external abort!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_OTHER:
	default:
		abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		abort_print_error(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
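		/*
		 * Let the pager try to resolve the fault; if it can't, a
		 * kernel-mode abort is fatal while a user-mode one makes
		 * the TA panic.
		 */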
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			abort_print_error(&ai);
			if (!abort_is_user_exception(&ai))
				panic("unhandled pageable abort");
			DMSG("[abort] abort in User mode (TA will panic)");
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}
699