xref: /optee_os/core/kernel/trace_ext.c (revision 3ad46cddcc42fa9d09cdf37d3dac86c773da6d15)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, Linaro Limited
4  */
5 #include <stdbool.h>
6 #include <trace.h>
7 #include <console.h>
8 #include <config.h>
9 #include <kernel/misc.h>
10 #include <kernel/spinlock.h>
11 #include <kernel/thread.h>
12 #include <kernel/virtualization.h>
13 #include <mm/core_mmu.h>
14 
/* Prefix emitted in front of every trace line produced by this core */
const char trace_ext_prefix[] = "TC";
/* Current runtime trace verbosity; lives in nexus data for virtualization */
int trace_level __nex_data = TRACE_LEVEL;
/* Serializes console output when CFG_CONSOLE_MASK_INTERRUPTS is enabled */
static unsigned int puts_lock __nex_bss = SPINLOCK_UNLOCK;

/*
 * Atomic flags for sequencing concurrent messages
 * when CFG_CONSOLE_MASK_INTERRUPTS is disabled.
 * itr_trace_busy tracks context always uninterruptible
 * (when thread_is_in_normal_mode() returns false).
 */
static unsigned int thread_trace_busy;
static unsigned int itr_trace_busy;
27 
get_busy_state(void)28 static unsigned int *get_busy_state(void)
29 {
30 	if (thread_is_in_normal_mode())
31 		return &thread_trace_busy;
32 	else
33 		return &itr_trace_busy;
34 }
35 
/*
 * Serialize trace output against other CPUs/contexts before printing.
 * Returns true if another message was (or may have been) in progress;
 * the caller then prefixes its output with '*' to flag possible
 * interleaving/delay. On return with CFG_CONSOLE_MASK_INTERRUPTS,
 * *itr_status holds the saved exception mask to restore later.
 */
static bool wait_if_trace_contended(uint32_t *itr_status)
{
	bool was_contended = false;

	if (IS_ENABLED(CFG_CONSOLE_MASK_INTERRUPTS)) {
		/* Block all exceptions, then take the global console lock */
		*itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);

		if (cpu_mmu_enabled() && !cpu_spin_trylock(&puts_lock)) {
			was_contended = true;
			cpu_spin_lock_no_dldetect(&puts_lock);
		}
	} else if (cpu_mmu_enabled()) {
		unsigned int trace_not_busy = 0;

		/* Don't mix thread traces, don't mix interrupt traces */
		while (!atomic_cas_uint(get_busy_state(), &trace_not_busy, 1)) {
			/* CAS overwrote trace_not_busy on failure; reset it */
			trace_not_busy = 0;
			was_contended = true;
		}

		/* Don't emit a thread trace inside an interrupt trace */
		if (thread_is_in_normal_mode())
			while (atomic_load_uint(&itr_trace_busy))
				was_contended = true;
	}

	return was_contended;
}
64 
/*
 * Undo wait_if_trace_contended(): drop the console lock (or clear the
 * busy flag for this context) and restore the saved exception mask.
 */
static void release_trace_contention(uint32_t itr_status)
{
	if (!IS_ENABLED(CFG_CONSOLE_MASK_INTERRUPTS)) {
		if (cpu_mmu_enabled())
			atomic_store_uint(get_busy_state(), 0);
		return;
	}

	/* Release the lock before re-enabling exceptions */
	if (cpu_mmu_enabled())
		cpu_spin_unlock(&puts_lock);

	thread_unmask_exceptions(itr_status);
}
76 
/*
 * Weak default hook called for every trace string before it goes to the
 * console; platforms may override it to mirror traces to another sink.
 * The default implementation does nothing.
 */
void __weak plat_trace_ext_puts(const char *str __unused)
{
}
80 
trace_ext_puts(const char * str)81 void trace_ext_puts(const char *str)
82 {
83 	bool was_contended = false;
84 	uint32_t itr_status = 0;
85 	const char *p = NULL;
86 
87 	was_contended = wait_if_trace_contended(&itr_status);
88 
89 	plat_trace_ext_puts(str);
90 
91 	console_flush();
92 
93 	if (was_contended)
94 		console_putc('*');
95 
96 	for (p = str; *p; p++)
97 		console_putc(*p);
98 
99 	console_flush();
100 
101 	release_trace_contention(itr_status);
102 }
103 
/*
 * Thread ID reported in trace headers; thread_get_id_may_fail()
 * presumably returns a sentinel when no thread context is active —
 * NOTE(review): confirm against kernel/thread.h.
 */
int trace_ext_get_thread_id(void)
{
	return thread_get_id_may_fail();
}
108 
trace_ext_get_core_id(void)109 int trace_ext_get_core_id(void)
110 {
111 	/* If foreign interrupts aren't masked we report invalid core ID */
112 	if (thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR)
113 		return get_core_pos();
114 	else
115 		return -1;
116 }
117 
/* Guest (virtualization partition) ID reported in trace headers */
int trace_ext_get_guest_id(void)
{
	return virt_get_current_guest_id();
}
122