// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (C) 2019, Linaro Limited
 */

/*
 * APIs defined in this file are required to use the __noprof attribute to
 * avoid any circular dependency during profiling. This requirement also
 * prohibits these APIs from using standard library APIs, as those can be
 * profiled too.
 */

#include <assert.h>
#include <types_ext.h>
#include <user_ta_header.h>
#if defined(__KERNEL__)
#if defined(ARM32) || defined(ARM64)
#include <arm.h>
#elif defined(RV32) || defined(RV64)
#include <riscv.h>
#endif
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <mm/core_mmu.h>
#else
#if defined(ARM32) || defined(ARM64)
#include <arm_user_sysreg.h>
#elif defined(RV32) || defined(RV64)
#include <riscv_user_sysreg.h>
#endif
#include <setjmp.h>
#include <utee_syscalls.h>
#endif
#include "ftrace.h"

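/*
 * Return the ftrace buffer to log into, or NULL if tracing is not possible.
 * In the kernel this is the buffer of the current user TA session, provided
 * the thread runs in normal mode with an active user mapping, the TA hasn't
 * panicked, and syscall tracing is enabled and not suspended. In user mode
 * the TA-local buffer is always used.
 */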
static __noprof struct ftrace_buf *get_fbuf(void)
{
#if defined(__KERNEL__)
	struct ts_session *s = NULL;
	struct thread_specific_data *tsd = NULL;

	if (!thread_is_in_normal_mode())
		return NULL;

	if (!(core_mmu_user_va_range_is_defined() &&
	      core_mmu_user_mapping_is_active()))
		return NULL;

	tsd = thread_get_tsd();
	s = TAILQ_FIRST(&tsd->sess_stack);

	if (!s || tsd->ctx != s->ctx)
		return NULL;

	if (!is_ta_ctx(s->ctx) || to_ta_ctx(s->ctx)->panicked)
		return NULL;

	if (s->fbuf && s->fbuf->syscall_trace_enabled &&
	    !s->fbuf->syscall_trace_suspended)
		return s->fbuf;
	else
		return NULL;
#else
	return &__ftrace_buf_start;
#endif
}

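/*
 * Append one 64-bit element to the trace buffer: the level goes in the
 * topmost byte and the value (a PC or a duration) in the lower 56 bits.
 * The buffer is used as a ring buffer; the overflow flag is set when it
 * wraps around.
 */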
static void __noprof add_elem(struct ftrace_buf *fbuf, uint8_t level,
			       uint64_t val)
{
	uint64_t *elem = NULL;
	size_t idx = fbuf->curr_idx;

	/* Make sure the topmost byte doesn't contain useful information */
	assert(!(val >> 56));

	elem = (uint64_t *)((vaddr_t)fbuf + fbuf->buf_off) + idx;
	*elem = SHIFT_U64(level, 56) | val;

	idx++;
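	/* Wrap to the start of the buffer when the next element wouldn't fit */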
	if ((idx + 1) * sizeof(*elem) > fbuf->max_size) {
		idx = 0;
		fbuf->overflow = true;
	}

	fbuf->curr_idx = idx;
}

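/*
 * Called from the function-entry instrumentation hook: log the entry PC at
 * the current call depth, save the real return address and the entry time
 * on the return stack, and divert *lr so that ftrace_return() runs when the
 * function exits.
 */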
void __noprof ftrace_enter(unsigned long pc, unsigned long *lr)
{
	uint64_t now = barrier_read_counter_timer();
	struct ftrace_buf *fbuf = get_fbuf();

	if (!fbuf || !fbuf->buf_off || !fbuf->max_size)
		return;

	add_elem(fbuf, fbuf->ret_idx + 1, pc);

	if (fbuf->ret_idx < FTRACE_RETFUNC_DEPTH) {
		fbuf->ret_stack[fbuf->ret_idx] = *lr;
		fbuf->begin_time[fbuf->ret_idx] = now;
		fbuf->ret_idx++;
	} else {
		/*
		 * This scenario isn't expected as the function call depth
		 * shouldn't exceed FTRACE_RETFUNC_DEPTH.
		 */
#if defined(__KERNEL__)
		panic();
#else
		_utee_panic(0);
#endif
	}

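	/* Divert the return to the __ftrace_return trampoline to trace the exit */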
	*lr = (unsigned long)&__ftrace_return;
}

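/*
 * Called via __ftrace_return when an instrumented function exits: pop the
 * return stack, log the time spent in the function and hand back the
 * original return address to resume at.
 */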
unsigned long __noprof ftrace_return(void)
{
	uint64_t now = barrier_read_counter_timer();
	struct ftrace_buf *fbuf = get_fbuf();
	uint64_t start = 0;
	uint64_t elapsed = 0;

	/* Check for valid return index */
	if (!fbuf || !fbuf->ret_idx || fbuf->ret_idx > FTRACE_RETFUNC_DEPTH)
		return 0;

	fbuf->ret_idx--;
	start = fbuf->begin_time[fbuf->ret_idx];
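	/* Convert the elapsed counter ticks to nanoseconds */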
	elapsed = (now - start) * 1000000000 / read_cntfrq();
	add_elem(fbuf, 0, elapsed);

	return fbuf->ret_stack[fbuf->ret_idx];
}

#if !defined(__KERNEL__)
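/*
 * Keep the return stack consistent across setjmp()/longjmp() in user TAs:
 * ftrace_setjmp() records the current return-stack depth and
 * ftrace_longjmp() unwinds back to it, emitting a duration entry for every
 * frame the jump skips.
 */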
void __noprof ftrace_longjmp(unsigned int *ret_idx)
{
	while (__ftrace_buf_start.ret_idx > *ret_idx)
		ftrace_return();
}

void __noprof ftrace_setjmp(unsigned int *ret_idx)
{
	*ret_idx = __ftrace_buf_start.ret_idx;
}
#endif