xref: /optee_os/core/arch/riscv/kernel/abort.c (revision cb5f271c1eaed4c18fd26873f152afc0590b0413)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright 2022-2023 NXP
4  * Copyright (c) 2015-2022, Linaro Limited
5  */
6 
7 #include <kernel/abort.h>
8 #include <kernel/linker.h>
9 #include <kernel/misc.h>
10 #include <kernel/panic.h>
11 #include <kernel/tee_ta_manager.h>
12 #include <kernel/thread_private.h>
13 #include <kernel/user_mode_ctx.h>
14 #include <mm/core_mmu.h>
15 #include <mm/mobj.h>
16 #include <riscv.h>
17 #include <tee/tee_svc.h>
18 #include <trace.h>
19 #include <unw/unwind.h>
20 
/*
 * Classification of an abort, as computed by get_fault_type() and
 * consumed by abort_handler().
 */
enum fault_type {
	FAULT_TYPE_USER_MODE_PANIC,	/* user-mode fault: the TA is panicked */
	FAULT_TYPE_USER_MODE_VFP,	/* user-mode VFP access: enable VFP lazily */
	FAULT_TYPE_PAGE_FAULT,		/* translation/read/write permission fault */
	FAULT_TYPE_IGNORE,		/* already reported; resume execution */
};
27 
#ifdef CFG_UNWIND

/* Kernel mode unwind */
static void __print_stack_unwind(struct abort_info *ai)
{
	/*
	 * Seed the unwinder with the aborted context: frame pointer (s0),
	 * stack pointer and faulting program counter (epc).
	 */
	struct unwind_state_rv state = {
		.fp = ai->regs->s0,
		.sp = ai->regs->sp,
		.pc = ai->regs->epc,
	};

	print_stack_rv(&state, thread_stack_start(), thread_stack_size());
}

#else /* CFG_UNWIND */
/* Stub: stack unwinding compiled out */
static void __print_stack_unwind(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */
47 
48 static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
49 {
50 	if (abort_type == ABORT_TYPE_DATA)
51 		return "data";
52 	if (abort_type == ABORT_TYPE_PREFETCH)
53 		return "prefetch";
54 	return "undef";
55 }
56 
57 static __maybe_unused const char *
58 fault_to_str(uint32_t abort_type, uint32_t fault_descr)
59 {
60 	/* fault_descr is only valid for data or prefetch abort */
61 	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
62 		return "";
63 
64 	switch (core_mmu_get_fault_type(fault_descr)) {
65 	case CORE_MMU_FAULT_ALIGNMENT:
66 		return " (alignment fault)";
67 	case CORE_MMU_FAULT_TRANSLATION:
68 		return " (translation fault)";
69 	case CORE_MMU_FAULT_READ_PERMISSION:
70 		return " (read permission fault)";
71 	case CORE_MMU_FAULT_WRITE_PERMISSION:
72 		return " (write permission fault)";
73 	case CORE_MMU_FAULT_TAG_CHECK:
74 		return " (tag check fault)";
75 	default:
76 		return "";
77 	}
78 }
79 
80 static __maybe_unused void
81 __print_abort_info(struct abort_info *ai __maybe_unused,
82 		   const char *ctx __maybe_unused)
83 {
84 	__maybe_unused size_t core_pos = 0;
85 
86 	if (abort_is_user_exception(ai))
87 		core_pos = thread_get_tsd()->abort_core;
88 	else
89 		core_pos = get_core_pos();
90 
91 	EMSG_RAW("");
92 	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
93 		 ctx, abort_type_to_str(ai->abort_type), ai->va,
94 		 fault_to_str(ai->abort_type, ai->fault_descr));
95 	EMSG_RAW("cpu\t#%zu", core_pos);
96 	EMSG_RAW("cause\t%016" PRIxPTR " epc\t%016" PRIxPTR,
97 		 ai->regs->cause, ai->regs->epc);
98 	EMSG_RAW("tval\t%016" PRIxPTR " status\t%016" PRIxPTR,
99 		 ai->regs->tval, ai->regs->status);
100 	EMSG_RAW("ra\t%016" PRIxPTR " sp\t%016" PRIxPTR,
101 		 ai->regs->ra, ai->regs->sp);
102 	EMSG_RAW("gp\t%016" PRIxPTR " tp\t%016" PRIxPTR,
103 		 ai->regs->gp, ai->regs->tp);
104 	EMSG_RAW("t0\t%016" PRIxPTR " t1\t%016" PRIxPTR,
105 		 ai->regs->t0, ai->regs->t1);
106 	EMSG_RAW("t2\t%016" PRIxPTR " s0\t%016" PRIxPTR,
107 		 ai->regs->t2, ai->regs->s0);
108 	EMSG_RAW("s1\t%016" PRIxPTR " a0\t%016" PRIxPTR,
109 		 ai->regs->s1, ai->regs->a0);
110 	EMSG_RAW("a1\t%016" PRIxPTR " a2\t%016" PRIxPTR,
111 		 ai->regs->a1, ai->regs->a2);
112 	EMSG_RAW("a3\t%016" PRIxPTR " a4\t%016" PRIxPTR,
113 		 ai->regs->a3, ai->regs->a4);
114 	EMSG_RAW("a5\t%016" PRIxPTR " a5\t%016" PRIxPTR,
115 		 ai->regs->a5, ai->regs->a5);
116 	EMSG_RAW("a6\t%016" PRIxPTR " a7\t%016" PRIxPTR,
117 		 ai->regs->a6, ai->regs->a7);
118 	EMSG_RAW("s2\t%016" PRIxPTR " s3\t%016" PRIxPTR,
119 		 ai->regs->s2, ai->regs->s3);
120 	EMSG_RAW("s4\t%016" PRIxPTR " s5\t%016" PRIxPTR,
121 		 ai->regs->s4, ai->regs->s5);
122 	EMSG_RAW("s6\t%016" PRIxPTR " s7\t%016" PRIxPTR,
123 		 ai->regs->s6, ai->regs->s7);
124 	EMSG_RAW("s8\t%016" PRIxPTR " s9\t%016" PRIxPTR,
125 		 ai->regs->s8, ai->regs->s9);
126 	EMSG_RAW("s10\t%016" PRIxPTR " s11\t%016" PRIxPTR,
127 		 ai->regs->s10, ai->regs->s11);
128 	EMSG_RAW("t3\t%016" PRIxPTR " t4\t%016" PRIxPTR,
129 		 ai->regs->t3, ai->regs->t4);
130 	EMSG_RAW("t5\t%016" PRIxPTR " t6\t%016" PRIxPTR,
131 		 ai->regs->t5, ai->regs->t6);
132 }
133 
134 /*
135  * Print abort info and (optionally) stack dump to the console
136  * @ai kernel-mode abort info.
137  * @stack_dump true to show a stack trace
138  */
139 static void __abort_print(struct abort_info *ai, bool stack_dump)
140 {
141 	assert(!abort_is_user_exception(ai));
142 
143 	__print_abort_info(ai, "Core");
144 
145 	if (stack_dump) {
146 		trace_printf_helper_raw(TRACE_ERROR, true,
147 					"TEE load address @ %#"PRIxVA,
148 					VCORE_START_VA);
149 		__print_stack_unwind(ai);
150 	}
151 }
152 
/* Print kernel-mode abort info without a stack trace */
void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}
157 
/* Print kernel-mode abort info including a stack trace */
void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}
162 
163 /* This function must be called from a normal thread */
164 void abort_print_current_ts(void)
165 {
166 	struct thread_specific_data *tsd = thread_get_tsd();
167 	struct abort_info ai = { };
168 	struct ts_session *s = ts_get_current_session();
169 
170 	ai.abort_type = tsd->abort_type;
171 	ai.fault_descr = tsd->abort_descr;
172 	ai.va = tsd->abort_va;
173 	ai.pc = tsd->abort_regs.epc;
174 	ai.regs = &tsd->abort_regs;
175 
176 	if (ai.abort_type != ABORT_TYPE_USER_MODE_PANIC)
177 		__print_abort_info(&ai, "User mode");
178 
179 	s->ctx->ops->dump_state(s->ctx);
180 
181 #if defined(CFG_FTRACE_SUPPORT)
182 	if (s->ctx->ops->dump_ftrace) {
183 		s->fbuf = NULL;
184 		s->ctx->ops->dump_ftrace(s->ctx);
185 	}
186 #endif
187 }
188 
189 static void save_abort_info_in_tsd(struct abort_info *ai)
190 {
191 	struct thread_specific_data *tsd = thread_get_tsd();
192 
193 	tsd->abort_type = ai->abort_type;
194 	tsd->abort_descr = ai->fault_descr;
195 	tsd->abort_va = ai->va;
196 	tsd->abort_regs = *ai->regs;
197 	tsd->abort_core = get_core_pos();
198 }
199 
200 static void set_abort_info(uint32_t abort_type __unused,
201 			   struct thread_abort_regs *regs,
202 			   struct abort_info *ai)
203 {
204 	ai->fault_descr = regs->cause;
205 	switch (ai->fault_descr) {
206 	case CAUSE_MISALIGNED_FETCH:
207 	case CAUSE_FETCH_ACCESS:
208 	case CAUSE_FETCH_PAGE_FAULT:
209 	case CAUSE_FETCH_GUEST_PAGE_FAULT:
210 		ai->abort_type = ABORT_TYPE_PREFETCH;
211 		break;
212 	case CAUSE_MISALIGNED_LOAD:
213 	case CAUSE_LOAD_ACCESS:
214 	case CAUSE_MISALIGNED_STORE:
215 	case CAUSE_STORE_ACCESS:
216 	case CAUSE_LOAD_PAGE_FAULT:
217 	case CAUSE_STORE_PAGE_FAULT:
218 	case CAUSE_LOAD_GUEST_PAGE_FAULT:
219 	case CAUSE_STORE_GUEST_PAGE_FAULT:
220 		ai->abort_type = ABORT_TYPE_DATA;
221 		break;
222 	default:
223 		ai->abort_type = ABORT_TYPE_UNDEF;
224 	}
225 
226 	ai->va = regs->tval;
227 	ai->pc = regs->epc;
228 	ai->regs = regs;
229 }
230 
231 static void handle_user_mode_panic(struct abort_info *ai)
232 {
233 	/*
234 	 * It was a user exception, stop user execution and return
235 	 * to TEE Core.
236 	 */
237 	ai->regs->a0 = TEE_ERROR_TARGET_DEAD;
238 	ai->regs->a1 = true;
239 	ai->regs->a2 = 0xdeadbeef;
240 	ai->regs->ra = (vaddr_t)thread_unwind_user_mode;
241 	ai->regs->sp = thread_get_saved_thread_sp();
242 	ai->regs->status = read_csr(CSR_XSTATUS);
243 
244 	thread_exit_user_mode(ai->regs->a0, ai->regs->a1, ai->regs->a2,
245 			      ai->regs->a3, ai->regs->sp, ai->regs->ra,
246 			      ai->regs->status);
247 }
248 
#ifdef CFG_WITH_VFP
/* Lazily enable VFP for the current user-mode context */
static void handle_user_mode_vfp(void)
{
	struct ts_session *s = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(s->ctx);

	thread_user_enable_vfp(&uctx->vfp);
}
#endif /*CFG_WITH_VFP*/
257 
258 #ifdef CFG_WITH_USER_TA
259 
260 /* Returns true if the exception originated from user mode */
261 bool abort_is_user_exception(struct abort_info *ai)
262 {
263 	return (ai->regs->status & CSR_XSTATUS_SPP) == 0;
264 }
265 
266 #else /*CFG_WITH_USER_TA*/
267 bool abort_is_user_exception(struct abort_info *ai __unused)
268 {
269 	return false;
270 }
271 #endif /*CFG_WITH_USER_TA*/
272 
#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
/*
 * TODO: stub — VFP fault detection is not implemented for RISC-V yet,
 * so lazy VFP enabling (FAULT_TYPE_USER_MODE_VFP) never triggers and
 * user-mode faults always panic the TA instead.
 */
static bool is_vfp_fault(struct abort_info *ai)
{
	/* Implement */
	return false;
}
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif  /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
285 
/*
 * Classify an abort for abort_handler().  Unrecoverable kernel-mode
 * faults are printed and panic right here; everything else is returned
 * as an enum fault_type for the caller to dispatch on.
 */
static enum fault_type get_fault_type(struct abort_info *ai)
{
	/* User-mode faults either enable VFP lazily or panic the TA */
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_MODE_VFP;
		return FAULT_TYPE_USER_MODE_PANIC;
	}

	/* A nested abort while already handling an abort is fatal */
	if (thread_is_from_abort_mode()) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	/*
	 * NOTE(review): from here on the user-mode case was handled above,
	 * so the abort_is_user_exception() checks below can never return
	 * true — kept byte-identical; consider removing them.
	 */
	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault!  (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault!  (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	/* Recoverable MMU faults: let the caller handle the page fault */
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGE_FAULT;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring async external abort!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TAG_CHECK:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] Tag check fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_OTHER:
	default:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}
353 
354 void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
355 {
356 	struct abort_info ai;
357 
358 	set_abort_info(abort_type, regs, &ai);
359 
360 	switch (get_fault_type(&ai)) {
361 	case FAULT_TYPE_IGNORE:
362 		break;
363 	case FAULT_TYPE_USER_MODE_PANIC:
364 		DMSG("[abort] abort in User mode (TA will panic)");
365 		save_abort_info_in_tsd(&ai);
366 #ifdef CFG_WITH_VFP
367 		vfp_disable();
368 #endif
369 		handle_user_mode_panic(&ai);
370 		break;
371 #ifdef CFG_WITH_VFP
372 	case FAULT_TYPE_USER_MODE_VFP:
373 		handle_user_mode_vfp();
374 		break;
375 #endif
376 	case FAULT_TYPE_PAGE_FAULT:
377 	default:
378 		if (thread_get_id_may_fail() < 0) {
379 			abort_print_error(&ai);
380 			panic("abort outside thread context");
381 		}
382 
383 		if (!abort_is_user_exception(&ai)) {
384 			abort_print_error(&ai);
385 			panic("unhandled page fault abort");
386 		}
387 		DMSG("[abort] abort in User mode (TA will panic)");
388 		save_abort_info_in_tsd(&ai);
389 #ifdef CFG_WITH_VFP
390 		vfp_disable();
391 #endif
392 		handle_user_mode_panic(&ai);
393 		break;
394 	}
395 }
396