// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 NXP
 * Copyright (c) 2015-2022, Linaro Limited
 */

#include <kernel/abort.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <riscv.h>
#include <tee/tee_svc.h>
#include <trace.h>
#include <unw/unwind.h>

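/*
 * Fault classification used by abort_handler() to decide how to react to
 * an exception: panic the faulting TA, enable the FPU, treat it as a page
 * fault, or ignore it.
 */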
enum fault_type {
	FAULT_TYPE_USER_MODE_PANIC,
	FAULT_TYPE_USER_MODE_VFP,
	FAULT_TYPE_PAGE_FAULT,
	FAULT_TYPE_IGNORE,
};

#ifdef CFG_UNWIND

/* Kernel mode unwind */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_riscv state = {
		.fp = ai->regs->s0,
		.pc = ai->regs->epc,
	};

	print_stack_riscv(&state, thread_stack_start(), thread_stack_size());
}

#else /* CFG_UNWIND */
static void __print_stack_unwind(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

static __maybe_unused const char *
fault_to_str(uint32_t abort_type, uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	case CORE_MMU_FAULT_TAG_CHECK:
		return " (tag check fault)";
	default:
		return "";
	}
}

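/*
 * Dump the saved exception state (cause, epc, tval, status and all general
 * purpose registers) for the aborting context. @ctx is a short prefix such
 * as "Core" or "User mode" identifying where the abort happened.
 */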
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
	__maybe_unused size_t core_pos = 0;

	if (abort_is_user_exception(ai))
		core_pos = thread_get_tsd()->abort_core;
	else
		core_pos = get_core_pos();

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		 ctx, abort_type_to_str(ai->abort_type), ai->va,
		 fault_to_str(ai->abort_type, ai->fault_descr));
	EMSG_RAW("cpu\t#%zu", core_pos);
	EMSG_RAW("cause\t%016" PRIxPTR " epc\t%016" PRIxPTR,
		 ai->regs->cause, ai->regs->epc);
	EMSG_RAW("tval\t%016" PRIxPTR " status\t%016" PRIxPTR,
		 ai->regs->tval, ai->regs->status);
	EMSG_RAW("ra\t%016" PRIxPTR " sp\t%016" PRIxPTR,
		 ai->regs->ra, ai->regs->sp);
	EMSG_RAW("gp\t%016" PRIxPTR " tp\t%016" PRIxPTR,
		 ai->regs->gp, ai->regs->tp);
	EMSG_RAW("t0\t%016" PRIxPTR " t1\t%016" PRIxPTR,
		 ai->regs->t0, ai->regs->t1);
	EMSG_RAW("t2\t%016" PRIxPTR " s0\t%016" PRIxPTR,
		 ai->regs->t2, ai->regs->s0);
	EMSG_RAW("s1\t%016" PRIxPTR " a0\t%016" PRIxPTR,
		 ai->regs->s1, ai->regs->a0);
	EMSG_RAW("a1\t%016" PRIxPTR " a2\t%016" PRIxPTR,
		 ai->regs->a1, ai->regs->a2);
	EMSG_RAW("a3\t%016" PRIxPTR " a4\t%016" PRIxPTR,
		 ai->regs->a3, ai->regs->a4);
113 EMSG_RAW("a5\t%016" PRIxPTR " a5\t%016" PRIxPTR,
114 ai->regs->a5, ai->regs->a5);
115 EMSG_RAW("a6\t%016" PRIxPTR " a7\t%016" PRIxPTR,
116 ai->regs->a6, ai->regs->a7);
117 EMSG_RAW("s2\t%016" PRIxPTR " s3\t%016" PRIxPTR,
118 ai->regs->s2, ai->regs->s3);
119 EMSG_RAW("s4\t%016" PRIxPTR " s5\t%016" PRIxPTR,
120 ai->regs->s4, ai->regs->s5);
121 EMSG_RAW("s6\t%016" PRIxPTR " s7\t%016" PRIxPTR,
122 ai->regs->s6, ai->regs->s7);
123 EMSG_RAW("s8\t%016" PRIxPTR " s9\t%016" PRIxPTR,
124 ai->regs->s8, ai->regs->s9);
125 EMSG_RAW("s10\t%016" PRIxPTR " s11\t%016" PRIxPTR,
126 ai->regs->s10, ai->regs->s11);
127 EMSG_RAW("t3\t%016" PRIxPTR " t4\t%016" PRIxPTR,
128 ai->regs->t3, ai->regs->t4);
129 EMSG_RAW("t5\t%016" PRIxPTR " t6\t%016" PRIxPTR,
130 ai->regs->t5, ai->regs->t6);
131 }
132
133 /*
134 * Print abort info and (optionally) stack dump to the console
135 * @ai kernel-mode abort info.
136 * @stack_dump true to show a stack trace
137 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	assert(!abort_is_user_exception(ai));

	__print_abort_info(ai, "Core");

	if (stack_dump) {
		trace_printf_helper_raw(TRACE_ERROR, true,
					"TEE load address @ %#"PRIxVA,
					VCORE_START_VA);
		__print_stack_unwind(ai);
	}
}

void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

/* This function must be called from a normal thread */
void abort_print_current_ts(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct abort_info ai = { };
	struct ts_session *s = ts_get_current_session();

	ai.abort_type = tsd->abort_type;
	ai.fault_descr = tsd->abort_descr;
	ai.va = tsd->abort_va;
	ai.pc = tsd->abort_regs.epc;
	ai.regs = &tsd->abort_regs;

	if (ai.abort_type != ABORT_TYPE_USER_MODE_PANIC)
		__print_abort_info(&ai, "User mode");

	s->ctx->ops->dump_state(s->ctx);

#if defined(CFG_FTRACE_SUPPORT)
	if (s->ctx->ops->dump_ftrace) {
		s->fbuf = NULL;
		s->ctx->ops->dump_ftrace(s->ctx);
	}
#endif
}

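/*
 * Stash the abort information in thread specific data so that
 * abort_print_current_ts() can report it later from a normal thread
 * context, after user mode execution has been stopped.
 */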
static void save_abort_info_in_tsd(struct abort_info *ai)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	tsd->abort_type = ai->abort_type;
	tsd->abort_descr = ai->fault_descr;
	tsd->abort_va = ai->va;
	tsd->abort_regs = *ai->regs;
	tsd->abort_core = get_core_pos();
}

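/*
 * Translate the trap cause in the saved register frame into an abort type:
 * instruction fetch related causes become prefetch aborts, load/store
 * related causes become data aborts and everything else is classified as
 * undefined. The faulting virtual address is taken from xtval.
 */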
static void set_abort_info(uint32_t abort_type __unused,
			   struct thread_abort_regs *regs,
			   struct abort_info *ai)
{
	ai->fault_descr = regs->cause;
	switch (ai->fault_descr) {
	case CAUSE_MISALIGNED_FETCH:
	case CAUSE_FETCH_ACCESS:
	case CAUSE_FETCH_PAGE_FAULT:
	case CAUSE_FETCH_GUEST_PAGE_FAULT:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		break;
	case CAUSE_MISALIGNED_LOAD:
	case CAUSE_LOAD_ACCESS:
	case CAUSE_MISALIGNED_STORE:
	case CAUSE_STORE_ACCESS:
	case CAUSE_LOAD_PAGE_FAULT:
	case CAUSE_STORE_PAGE_FAULT:
	case CAUSE_LOAD_GUEST_PAGE_FAULT:
	case CAUSE_STORE_GUEST_PAGE_FAULT:
		ai->abort_type = ABORT_TYPE_DATA;
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
	}

	ai->va = regs->tval;
	ai->pc = regs->epc;
	ai->regs = regs;
}

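/*
 * Rewrite the saved register frame so that the trap return resumes in
 * thread_unwind_user_mode() on the saved kernel stack instead of in the
 * faulting TA. a0..a2 become the arguments reported back to TEE Core
 * (TEE_ERROR_TARGET_DEAD plus the panic flag and panic code).
 */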
static void handle_user_mode_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->a0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->a1 = true;
	ai->regs->a2 = 0xdeadbeef;
	ai->regs->epc = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp = thread_get_saved_thread_sp();
	ai->regs->status = xstatus_for_xret(true, PRV_S);
	ai->regs->ie = 0;
}

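/* Re-enable the FPU for the current user mode context after an FPU trap */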
#ifdef CFG_WITH_VFP
static void handle_user_mode_vfp(void)
{
	struct ts_session *s = ts_get_current_session();

	thread_user_enable_vfp(&to_user_mode_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA

/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->status & CSR_XSTATUS_SPP) == 0;
}

#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/

#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
static bool is_vfp_fault(struct abort_info *ai)
{
	/* TODO: implement FPU/VFP fault detection */
	return false;
}
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

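/*
 * Classify the abort for abort_handler(): user mode faults become TA
 * panics (or an FPU trap), translation and permission faults are reported
 * as page faults, unrecoverable kernel mode faults panic the core right
 * here and the remaining events are ignored.
 */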
static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_MODE_VFP;
		return FAULT_TYPE_USER_MODE_PANIC;
	}

	if (thread_is_from_abort_mode()) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGE_FAULT;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring async external abort!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TAG_CHECK:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] Tag check fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_OTHER:
	default:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

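/*
 * Main abort handler, called from the trap handling code with the
 * register frame saved at exception entry.
 */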
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_MODE_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		save_abort_info_in_tsd(&ai);
#ifdef CFG_WITH_VFP
		vfp_disable();
#endif
		handle_user_mode_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_MODE_VFP:
		handle_user_mode_vfp();
		break;
#endif
	case FAULT_TYPE_PAGE_FAULT:
	default:
		if (thread_get_id_may_fail() < 0) {
			abort_print_error(&ai);
			panic("abort outside thread context");
		}

		if (!abort_is_user_exception(&ai)) {
			abort_print_error(&ai);
			panic("unhandled page fault abort");
		}
		DMSG("[abort] abort in User mode (TA will panic)");
		save_abort_info_in_tsd(&ai);
#ifdef CFG_WITH_VFP
		vfp_disable();
#endif
		handle_user_mode_panic(&ai);
		break;
	}
}