xref: /optee_os/core/arch/arm/kernel/abort.c (revision 23cf8945c70e1df9ef878eb1a6c9e204c04f9ea9)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <kernel/abort.h>
8 #include <kernel/ftrace.h>
9 #include <kernel/linker.h>
10 #include <kernel/misc.h>
11 #include <kernel/panic.h>
12 #include <kernel/tee_ta_manager.h>
13 #include <kernel/unwind.h>
14 #include <kernel/user_ta.h>
15 #include <mm/core_mmu.h>
16 #include <mm/mobj.h>
17 #include <mm/tee_pager.h>
18 #include <tee/tee_svc.h>
19 #include <trace.h>
20 
21 #include "thread_private.h"
22 
/*
 * Classification of an abort, as returned by get_fault_type() and
 * dispatched on by abort_handler().
 */
enum fault_type {
	FAULT_TYPE_USER_TA_PANIC,	/* Unrecoverable user TA fault: TA will panic */
	FAULT_TYPE_USER_TA_VFP,		/* User TA VFP trap: handled by enabling VFP */
	FAULT_TYPE_PAGEABLE,		/* May be resolvable by tee_pager_handle_fault() */
	FAULT_TYPE_IGNORE,		/* Logged (if at all) and otherwise ignored */
};
29 
30 #ifdef CFG_UNWIND
31 
32 #ifdef ARM32
33 static void get_current_ta_exidx_stack(vaddr_t *exidx, size_t *exidx_sz,
34 				       vaddr_t *stack, size_t *stack_size)
35 {
36 	*exidx = 0;
37 	*exidx_sz = 0;
38 	*stack = 0;
39 	*stack_size = 0;
40 }
41 
/*
 * Kernel or user mode unwind (32-bit execution state).
 *
 * Rebuilds the register state at the abort point from @ai and prints a
 * backtrace with print_stack_arm32(), using the ARM EHABI exception-index
 * (exidx) tables to drive the unwind.
 */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	vaddr_t exidx;
	size_t exidx_sz;
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp;
	uint32_t lr;
	vaddr_t stack;
	size_t stack_size;
	bool kernel_stack;

	if (abort_is_user_exception(ai)) {
		get_current_ta_exidx_stack(&exidx, &exidx_sz, &stack,
					   &stack_size);
		if (!exidx) {
			/* No unwind tables for the TA: nothing we can do */
			EMSG_RAW("Call stack not available");
			return;
		}
		kernel_stack = false;
	} else {
		/* Core's own .ARM.exidx section, delimited by linker symbols */
		exidx = (vaddr_t)__exidx_start;
		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
		/* Kernel stack */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
		kernel_stack = true;
	}

	/*
	 * USR/SYS sp and lr were saved in the abort frame; for other modes
	 * read the mode-specific (banked) sp/lr directly.
	 */
	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	/* Seed the unwinder with the full register file at the abort */
	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz, kernel_stack,
			  stack, stack_size);
}
102 #endif /* ARM32 */
103 
104 #ifdef ARM64
105 /* Kernel or user mode unwind (64-bit execution state) */
106 static void __print_stack_unwind(struct abort_info *ai)
107 {
108 	struct unwind_state_arm64 state = { };
109 	bool kernel_stack = false;
110 	uaddr_t stack = 0;
111 	size_t stack_size = 0;
112 
113 	if (abort_is_user_exception(ai)) {
114 		/* User stack */
115 		stack = 0;
116 		stack_size = 0;
117 	} else {
118 		/* Kernel stack */
119 		stack = thread_stack_start();
120 		stack_size = thread_stack_size();
121 		kernel_stack = true;
122 	}
123 
124 	state.pc = ai->regs->elr;
125 	state.fp = ai->regs->x29;
126 
127 	print_stack_arm64(TRACE_ERROR, &state, kernel_stack, stack, stack_size);
128 }
129 #endif /*ARM64*/
130 
131 #else /* CFG_UNWIND */
/* Stack unwinding compiled out (CFG_UNWIND=n): nothing to print */
static void __print_stack_unwind(struct abort_info *ai __unused)
{
}
135 #endif /* CFG_UNWIND */
136 
137 static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
138 {
139 	if (abort_type == ABORT_TYPE_DATA)
140 		return "data";
141 	if (abort_type == ABORT_TYPE_PREFETCH)
142 		return "prefetch";
143 	return "undef";
144 }
145 
146 static __maybe_unused const char *fault_to_str(uint32_t abort_type,
147 			uint32_t fault_descr)
148 {
149 	/* fault_descr is only valid for data or prefetch abort */
150 	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
151 		return "";
152 
153 	switch (core_mmu_get_fault_type(fault_descr)) {
154 	case CORE_MMU_FAULT_ALIGNMENT:
155 		return " (alignment fault)";
156 	case CORE_MMU_FAULT_TRANSLATION:
157 		return " (translation fault)";
158 	case CORE_MMU_FAULT_READ_PERMISSION:
159 		return " (read permission fault)";
160 	case CORE_MMU_FAULT_WRITE_PERMISSION:
161 		return " (write permission fault)";
162 	default:
163 		return "";
164 	}
165 }
166 
/*
 * Dump the abort summary and full register file to the console.
 * @ai  abort information collected by set_abort_info()
 * @ctx short context label printed first (e.g. "Core" or "User TA")
 */
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
	__maybe_unused size_t core_pos = 0;
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp = 0;
	__maybe_unused uint32_t lr = 0;

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		/*
		 * User mode abort: sp/lr come from the saved frame and the
		 * core number from the TSD copy made at abort time.
		 */
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
		core_pos = thread_get_tsd()->abort_core;
	} else {
		/* Kernel mode: read the mode-specific sp/lr directly */
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
		core_pos = get_core_pos();
	}
#endif /*ARM32*/
#ifdef ARM64
	if (abort_is_user_exception(ai))
		core_pos = thread_get_tsd()->abort_core;
	else
		core_pos = get_core_pos();
#endif /*ARM64*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	/* Fault status, translation table bases and context ID */
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 core_pos, ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	/* Exception syndrome, translation table bases and context ID */
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64
		 "   cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x",
		 core_pos, (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}
254 
/*
 * Print abort info and (optionally) a stack dump to the console.
 * @ai kernel-mode abort info
 * @stack_dump true to also show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	/* Only core (kernel) aborts are printed through this path */
	assert(!abort_is_user_exception(ai));

	__print_abort_info(ai, "Core");
	if (!stack_dump)
		return;

	__print_stack_unwind(ai);
}
269 
/* Print abort info without a stack trace */
void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}
274 
/* Print abort info followed by a stack trace */
void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}
279 
/* This function must be called from a normal thread */
void abort_print_current_ta(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct abort_info ai = { };
	struct tee_ta_session *s = NULL;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	/* Rebuild abort info from the copy saved by save_abort_info_in_tsd() */
	ai.abort_type = tsd->abort_type;
	ai.fault_descr = tsd->abort_descr;
	ai.va = tsd->abort_va;
	ai.pc = tsd->abort_regs.elr;
	ai.regs = &tsd->abort_regs;

	/* Skip the register dump for deliberate TA panics */
	if (ai.abort_type != ABORT_TYPE_TA_PANIC)
		__print_abort_info(&ai, "User TA");

	/* Let the TA context dump its own state (mappings, call stack, ...) */
	EMSG_RAW("Status of TA %pUl (%p)",
		 (void *)&s->ctx->uuid, (void *)s->ctx);
	s->ctx->ops->dump_state(s->ctx);

	ta_fbuf_dump(s);
}
305 
306 static void save_abort_info_in_tsd(struct abort_info *ai)
307 {
308 	struct thread_specific_data *tsd = thread_get_tsd();
309 
310 	tsd->abort_type = ai->abort_type;
311 	tsd->abort_descr = ai->fault_descr;
312 	tsd->abort_va = ai->va;
313 	tsd->abort_regs = *ai->regs;
314 	tsd->abort_core = get_core_pos();
315 }
316 
317 #ifdef ARM32
318 static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
319 		struct abort_info *ai)
320 {
321 	switch (abort_type) {
322 	case ABORT_TYPE_DATA:
323 		ai->fault_descr = read_dfsr();
324 		ai->va = read_dfar();
325 		break;
326 	case ABORT_TYPE_PREFETCH:
327 		ai->fault_descr = read_ifsr();
328 		ai->va = read_ifar();
329 		break;
330 	default:
331 		ai->fault_descr = 0;
332 		ai->va = regs->elr;
333 		break;
334 	}
335 	ai->abort_type = abort_type;
336 	ai->pc = regs->elr;
337 	ai->regs = regs;
338 }
339 #endif /*ARM32*/
340 
341 #ifdef ARM64
342 static void set_abort_info(uint32_t abort_type __unused,
343 		struct thread_abort_regs *regs, struct abort_info *ai)
344 {
345 	ai->fault_descr = read_esr_el1();
346 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
347 	case ESR_EC_IABT_EL0:
348 	case ESR_EC_IABT_EL1:
349 		ai->abort_type = ABORT_TYPE_PREFETCH;
350 		ai->va = read_far_el1();
351 		break;
352 	case ESR_EC_DABT_EL0:
353 	case ESR_EC_DABT_EL1:
354 	case ESR_EC_SP_ALIGN:
355 		ai->abort_type = ABORT_TYPE_DATA;
356 		ai->va = read_far_el1();
357 		break;
358 	default:
359 		ai->abort_type = ABORT_TYPE_UNDEF;
360 		ai->va = regs->elr;
361 	}
362 	ai->pc = regs->elr;
363 	ai->regs = regs;
364 }
365 #endif /*ARM64*/
366 
367 #ifdef ARM32
/*
 * Redirect the aborted user TA so that, on exception return, execution
 * resumes in thread_unwind_user_mode() in SVC mode instead of at the
 * faulting instruction (32-bit execution state).
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	/*
	 * NOTE(review): r0..r2 presumably become the arguments of
	 * thread_unwind_user_mode() (result, panic flag, panic code) —
	 * confirm against its prototype.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	/* Keep only the F/I/A exception mask bits of the saved CPSR */
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
387 #endif /*ARM32*/
388 
389 #ifdef ARM64
/*
 * Redirect the aborted user TA so that, on exception return, execution
 * resumes in thread_unwind_user_mode() at EL1 instead of at the faulting
 * instruction (64-bit execution state).
 */
static void handle_user_ta_panic(struct abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	/*
	 * NOTE(review): x0..x2 presumably become the arguments of
	 * thread_unwind_user_mode() (result, panic flag, panic code) —
	 * confirm against its prototype.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	/* Switch back to the kernel stack pointer saved at user mode entry */
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	/* Carry over the A/I/F mask bits; return to EL1 using SP_EL0 */
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
408 #endif /*ARM64*/
409 
410 #ifdef CFG_WITH_VFP
411 static void handle_user_ta_vfp(void)
412 {
413 	struct tee_ta_session *s;
414 
415 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
416 		panic();
417 
418 	thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
419 }
420 #endif /*CFG_WITH_VFP*/
421 
422 #ifdef CFG_WITH_USER_TA
423 #ifdef ARM32
424 /* Returns true if the exception originated from user mode */
425 bool abort_is_user_exception(struct abort_info *ai)
426 {
427 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
428 }
429 #endif /*ARM32*/
430 
431 #ifdef ARM64
432 /* Returns true if the exception originated from user mode */
433 bool abort_is_user_exception(struct abort_info *ai)
434 {
435 	uint32_t spsr = ai->regs->spsr;
436 
437 	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
438 		return true;
439 	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
440 	    SPSR_64_MODE_EL0)
441 		return true;
442 	return false;
443 }
444 #endif /*ARM64*/
445 #else /*CFG_WITH_USER_TA*/
/* No user TA support: aborts can never originate from user mode */
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
450 #endif /*CFG_WITH_USER_TA*/
451 
452 #if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
453 #ifdef ARM32
454 static bool is_vfp_fault(struct abort_info *ai)
455 {
456 	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
457 		return false;
458 
459 	/*
460 	 * Not entirely accurate, but if it's a truly undefined instruction
461 	 * we'll end up in this function again, except this time
462 	 * vfp_is_enabled() so we'll return false.
463 	 */
464 	return true;
465 }
466 #endif /*ARM32*/
467 
468 #ifdef ARM64
469 static bool is_vfp_fault(struct abort_info *ai)
470 {
471 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
472 	case ESR_EC_FP_ASIMD:
473 	case ESR_EC_AARCH32_FP:
474 	case ESR_EC_AARCH64_FP:
475 		return true;
476 	default:
477 		return false;
478 	}
479 }
480 #endif /*ARM64*/
481 #else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
/* VFP or user TA support compiled out: never a VFP fault */
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
486 #endif  /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
487 
488 static enum fault_type get_fault_type(struct abort_info *ai)
489 {
490 	if (abort_is_user_exception(ai)) {
491 		if (is_vfp_fault(ai))
492 			return FAULT_TYPE_USER_TA_VFP;
493 #ifndef CFG_WITH_PAGER
494 		return FAULT_TYPE_USER_TA_PANIC;
495 #endif
496 	}
497 
498 	if (thread_is_from_abort_mode()) {
499 		abort_print_error(ai);
500 		panic("[abort] abort in abort handler (trap CPU)");
501 	}
502 
503 	if (ai->abort_type == ABORT_TYPE_UNDEF) {
504 		if (abort_is_user_exception(ai))
505 			return FAULT_TYPE_USER_TA_PANIC;
506 		abort_print_error(ai);
507 		panic("[abort] undefined abort (trap CPU)");
508 	}
509 
510 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
511 	case CORE_MMU_FAULT_ALIGNMENT:
512 		if (abort_is_user_exception(ai))
513 			return FAULT_TYPE_USER_TA_PANIC;
514 		abort_print_error(ai);
515 		panic("[abort] alignement fault!  (trap CPU)");
516 		break;
517 
518 	case CORE_MMU_FAULT_ACCESS_BIT:
519 		if (abort_is_user_exception(ai))
520 			return FAULT_TYPE_USER_TA_PANIC;
521 		abort_print_error(ai);
522 		panic("[abort] access bit fault!  (trap CPU)");
523 		break;
524 
525 	case CORE_MMU_FAULT_DEBUG_EVENT:
526 		if (!abort_is_user_exception(ai))
527 			abort_print(ai);
528 		DMSG("[abort] Ignoring debug event!");
529 		return FAULT_TYPE_IGNORE;
530 
531 	case CORE_MMU_FAULT_TRANSLATION:
532 	case CORE_MMU_FAULT_WRITE_PERMISSION:
533 	case CORE_MMU_FAULT_READ_PERMISSION:
534 		return FAULT_TYPE_PAGEABLE;
535 
536 	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
537 		if (!abort_is_user_exception(ai))
538 			abort_print(ai);
539 		DMSG("[abort] Ignoring async external abort!");
540 		return FAULT_TYPE_IGNORE;
541 
542 	case CORE_MMU_FAULT_OTHER:
543 	default:
544 		if (!abort_is_user_exception(ai))
545 			abort_print(ai);
546 		DMSG("[abort] Unhandled fault!");
547 		return FAULT_TYPE_IGNORE;
548 	}
549 }
550 
/*
 * Common abort entry point, called with the register state saved at the
 * abort. Classifies the fault and either ignores it, makes the user TA
 * panic, enables VFP for the TA, asks the pager to resolve it, or panics
 * the core.
 */
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_TA_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		/* Save details for later reporting from thread context */
		save_abort_info_in_tsd(&ai);
		vfp_disable();
		handle_user_ta_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_TA_VFP:
		handle_user_ta_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		/* Paging can only be done from a thread context */
		if (thread_get_id_may_fail() < 0) {
			abort_print_error(&ai);
			panic("abort outside thread context");
		}
		/* Save/restore the kernel VFP state around the pager call */
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			/* Unresolved kernel fault is fatal */
			if (!abort_is_user_exception(&ai)) {
				abort_print_error(&ai);
				panic("unhandled pageable abort");
			}
			/* Unresolved user fault: make the TA panic */
			DMSG("[abort] abort in User mode (TA will panic)");
			save_abort_info_in_tsd(&ai);
			vfp_disable();
			handle_user_ta_panic(&ai);
		}
		break;
	}
}
594