xref: /optee_os/core/arch/riscv/kernel/abort.c (revision 32b3180828fa15a49ccc86ecb4be9d274c140c89)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 NXP
 * Copyright (c) 2015-2022, Linaro Limited
 */

#include <assert.h>
#include <kernel/abort.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <riscv.h>
#include <tee/tee_svc.h>
#include <trace.h>
#include <unw/unwind.h>

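/*
 * How a trap reported to abort_handler() is dealt with: user-mode faults
 * either panic the TA or lazily enable the FPU, translation and
 * permission faults are treated as page faults, and the remaining
 * reported-but-recoverable cases are ignored.
 */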
enum fault_type {
	FAULT_TYPE_USER_MODE_PANIC,
	FAULT_TYPE_USER_MODE_VFP,
	FAULT_TYPE_PAGE_FAULT,
	FAULT_TYPE_IGNORE,
};

#ifdef CFG_UNWIND

/* Kernel mode unwind */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_riscv state = {
		.fp = ai->regs->s0,
		.pc = ai->regs->epc,
	};

	print_stack_riscv(&state, thread_stack_start(), thread_stack_size());
}

#else /* CFG_UNWIND */
static void __print_stack_unwind(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

static __maybe_unused const char *
fault_to_str(uint32_t abort_type, uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	case CORE_MMU_FAULT_TAG_CHECK:
		return " (tag check fault)";
	default:
		return "";
	}
}

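/*
 * Dump the trap CSRs (cause, epc, tval, status) and the full
 * general-purpose register file of the aborted context to the console.
 */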
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
	__maybe_unused size_t core_pos = 0;

	if (abort_is_user_exception(ai))
		core_pos = thread_get_tsd()->abort_core;
	else
		core_pos = get_core_pos();

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		 ctx, abort_type_to_str(ai->abort_type), ai->va,
		 fault_to_str(ai->abort_type, ai->fault_descr));
	EMSG_RAW("cpu\t#%zu", core_pos);
	EMSG_RAW("cause\t%016" PRIxPTR " epc\t%016" PRIxPTR,
		 ai->regs->cause, ai->regs->epc);
	EMSG_RAW("tval\t%016" PRIxPTR " status\t%016" PRIxPTR,
		 ai->regs->tval, ai->regs->status);
	EMSG_RAW("ra\t%016" PRIxPTR " sp\t%016" PRIxPTR,
		 ai->regs->ra, ai->regs->sp);
	EMSG_RAW("gp\t%016" PRIxPTR " tp\t%016" PRIxPTR,
		 ai->regs->gp, ai->regs->tp);
	EMSG_RAW("t0\t%016" PRIxPTR " t1\t%016" PRIxPTR,
		 ai->regs->t0, ai->regs->t1);
	EMSG_RAW("t2\t%016" PRIxPTR " s0\t%016" PRIxPTR,
		 ai->regs->t2, ai->regs->s0);
	EMSG_RAW("s1\t%016" PRIxPTR " a0\t%016" PRIxPTR,
		 ai->regs->s1, ai->regs->a0);
	EMSG_RAW("a1\t%016" PRIxPTR " a2\t%016" PRIxPTR,
		 ai->regs->a1, ai->regs->a2);
	EMSG_RAW("a3\t%016" PRIxPTR " a4\t%016" PRIxPTR,
		 ai->regs->a3, ai->regs->a4);
	EMSG_RAW("a5\t%016" PRIxPTR, ai->regs->a5);
	EMSG_RAW("a6\t%016" PRIxPTR " a7\t%016" PRIxPTR,
		 ai->regs->a6, ai->regs->a7);
	EMSG_RAW("s2\t%016" PRIxPTR " s3\t%016" PRIxPTR,
		 ai->regs->s2, ai->regs->s3);
	EMSG_RAW("s4\t%016" PRIxPTR " s5\t%016" PRIxPTR,
		 ai->regs->s4, ai->regs->s5);
	EMSG_RAW("s6\t%016" PRIxPTR " s7\t%016" PRIxPTR,
		 ai->regs->s6, ai->regs->s7);
	EMSG_RAW("s8\t%016" PRIxPTR " s9\t%016" PRIxPTR,
		 ai->regs->s8, ai->regs->s9);
	EMSG_RAW("s10\t%016" PRIxPTR " s11\t%016" PRIxPTR,
		 ai->regs->s10, ai->regs->s11);
	EMSG_RAW("t3\t%016" PRIxPTR " t4\t%016" PRIxPTR,
		 ai->regs->t3, ai->regs->t4);
	EMSG_RAW("t5\t%016" PRIxPTR " t6\t%016" PRIxPTR,
		 ai->regs->t5, ai->regs->t6);
}

/*
 * Print abort info and (optionally) a stack dump to the console
 * @ai		kernel-mode abort info
 * @stack_dump	true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	assert(!abort_is_user_exception(ai));

	__print_abort_info(ai, "Core");

	if (stack_dump) {
		trace_printf_helper_raw(TRACE_ERROR, true,
					"TEE load address @ %#"PRIxVA,
					VCORE_START_VA);
		__print_stack_unwind(ai);
	}
}

void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

/* This function must be called from a normal thread */
void abort_print_current_ts(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct abort_info ai = { };
	struct ts_session *s = ts_get_current_session();

	ai.abort_type = tsd->abort_type;
	ai.fault_descr = tsd->abort_descr;
	ai.va = tsd->abort_va;
	ai.pc = tsd->abort_regs.epc;
	ai.regs = &tsd->abort_regs;

	if (ai.abort_type != ABORT_TYPE_USER_MODE_PANIC)
		__print_abort_info(&ai, "User mode");

	s->ctx->ops->dump_state(s->ctx);

#if defined(CFG_FTRACE_SUPPORT)
	if (s->ctx->ops->dump_ftrace) {
		s->fbuf = NULL;
		s->ctx->ops->dump_ftrace(s->ctx);
	}
#endif
}

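/*
 * Stash the abort details in thread-specific data so they can be
 * reported later via abort_print_current_ts() once execution has
 * returned to a normal thread context.
 */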
static void save_abort_info_in_tsd(struct abort_info *ai)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	tsd->abort_type = ai->abort_type;
	tsd->abort_descr = ai->fault_descr;
	tsd->abort_va = ai->va;
	tsd->abort_regs = *ai->regs;
	tsd->abort_core = get_core_pos();
}

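/*
 * Translate the RISC-V trap CSR values saved in the abort register
 * frame (cause, tval, epc) into the generic struct abort_info:
 * instruction fetch faults map to ABORT_TYPE_PREFETCH, load/store
 * faults to ABORT_TYPE_DATA, and anything else to ABORT_TYPE_UNDEF.
 */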
static void set_abort_info(uint32_t abort_type __unused,
			   struct thread_abort_regs *regs,
			   struct abort_info *ai)
{
	ai->fault_descr = regs->cause;
	switch (ai->fault_descr) {
	case CAUSE_MISALIGNED_FETCH:
	case CAUSE_FETCH_ACCESS:
	case CAUSE_FETCH_PAGE_FAULT:
	case CAUSE_FETCH_GUEST_PAGE_FAULT:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		break;
	case CAUSE_MISALIGNED_LOAD:
	case CAUSE_LOAD_ACCESS:
	case CAUSE_MISALIGNED_STORE:
	case CAUSE_STORE_ACCESS:
	case CAUSE_LOAD_PAGE_FAULT:
	case CAUSE_STORE_PAGE_FAULT:
	case CAUSE_LOAD_GUEST_PAGE_FAULT:
	case CAUSE_STORE_GUEST_PAGE_FAULT:
		ai->abort_type = ABORT_TYPE_DATA;
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
	}

	ai->va = regs->tval;
	ai->pc = regs->epc;
	ai->regs = regs;
}

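/*
 * Stop user mode (TA) execution: report TEE_ERROR_TARGET_DEAD and
 * unwind back to thread_unwind_user_mode() on the kernel stack that
 * was saved when user mode was entered.
 */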
static void handle_user_mode_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->a0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->a1 = true;
	ai->regs->a2 = 0xdeadbeef;
	ai->regs->ra = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp = thread_get_saved_thread_sp();
	ai->regs->status = read_csr(CSR_XSTATUS);

	thread_exit_user_mode(ai->regs->a0, ai->regs->a1, ai->regs->a2,
			      ai->regs->a3, ai->regs->sp, ai->regs->ra,
			      ai->regs->status);
}

#ifdef CFG_WITH_VFP
static void handle_user_mode_vfp(void)
{
	struct ts_session *s = ts_get_current_session();

	thread_user_enable_vfp(&to_user_mode_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA

/*
 * Returns true if the exception originated from user mode, i.e. when
 * xstatus.SPP (the privilege level prior to the trap) is 0 (U-mode).
 */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->status & CSR_XSTATUS_SPP) == 0;
}

#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/

#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
static bool is_vfp_fault(struct abort_info *ai)
{
	/*
	 * TODO: detect user mode faults caused by floating-point (VFP)
	 * access so the FPU can be enabled lazily. Not implemented yet,
	 * so no fault is ever classified as a VFP fault.
	 */
	return false;
}
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

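/*
 * Decide how this trap is handled. User-mode faults either terminate
 * the TA or, with CFG_WITH_VFP, lazily enable the FPU. Kernel-mode
 * translation and permission faults are reported as page faults;
 * anything unexpected in kernel mode is fatal and ends in a panic
 * after the abort has been dumped.
 */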
static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_MODE_VFP;
		return FAULT_TYPE_USER_MODE_PANIC;
	}

	if (thread_is_from_abort_mode()) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGE_FAULT;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring async external abort!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TAG_CHECK:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] Tag check fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_OTHER:
	default:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

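/*
 * Entry point for aborts, called from the trap handling code with the
 * register frame saved at trap entry. Faults from user mode kill the
 * current TA; unexpected faults in kernel mode panic the core.
 */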
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_MODE_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		save_abort_info_in_tsd(&ai);
#ifdef CFG_WITH_VFP
		vfp_disable();
#endif
		handle_user_mode_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_MODE_VFP:
		handle_user_mode_vfp();
		break;
#endif
	case FAULT_TYPE_PAGE_FAULT:
	default:
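		/*
		 * Page faults (and anything left unclassified) are
		 * fatal here: a fault in kernel mode panics the core,
		 * a fault in user mode kills the offending TA.
		 */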
		if (thread_get_id_may_fail() < 0) {
			abort_print_error(&ai);
			panic("abort outside thread context");
		}

		if (!abort_is_user_exception(&ai)) {
			abort_print_error(&ai);
			panic("unhandled page fault abort");
		}
		DMSG("[abort] abort in User mode (TA will panic)");
		save_abort_info_in_tsd(&ai);
#ifdef CFG_WITH_VFP
		vfp_disable();
#endif
		handle_user_mode_panic(&ai);
		break;
	}
}