xref: /optee_os/lib/libutils/ext/ftrace/ftrace.c (revision 32b3180828fa15a49ccc86ecb4be9d274c140c89)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (C) 2019, Linaro Limited
 */

/*
 * The APIs defined in this file must carry the __noprof attribute to
 * avoid any circular dependency during profiling. For the same reason
 * they must not call standard library APIs, since those may be
 * profiled as well.
 */

#include <assert.h>
#include <types_ext.h>
#include <user_ta_header.h>
#if defined(__KERNEL__)
#if defined(ARM32) || defined(ARM64)
#include <arm.h>
#elif defined(RV32) || defined(RV64)
#include <riscv.h>
#endif
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <mm/core_mmu.h>
#else
#if defined(ARM32) || defined(ARM64)
#include <arm_user_sysreg.h>
#elif defined(RV32) || defined(RV64)
#include <riscv_user_sysreg.h>
#endif
#include <setjmp.h>
#include <utee_syscalls.h>
#endif
#include "ftrace.h"

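/*
 * Pick the ftrace buffer for the current context. In the kernel this is the
 * buffer of the TA session on top of the current thread's session stack,
 * returned only when the user mapping is active and syscall tracing is
 * enabled and not suspended. In user mode it is the buffer linked into the
 * ELF as __ftrace_buf_start.
 */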
static __noprof struct ftrace_buf *get_fbuf(void)
{
#if defined(__KERNEL__)
	short int ct = thread_get_id_may_fail();
	struct ts_session *s = NULL;
	struct thread_specific_data *tsd = NULL;

	if (ct == -1)
		return NULL;

	if (!(core_mmu_user_va_range_is_defined() &&
	      core_mmu_user_mapping_is_active()))
		return NULL;

	tsd = thread_get_tsd();
	s = TAILQ_FIRST(&tsd->sess_stack);

	if (!s || tsd->ctx != s->ctx)
		return NULL;

	if (s->fbuf && s->fbuf->syscall_trace_enabled &&
	    !s->fbuf->syscall_trace_suspended)
		return s->fbuf;
	else
		return NULL;
#else
	return &__ftrace_buf_start;
#endif
}

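/*
 * Append one 64-bit element to the trace buffer: the level goes in the
 * topmost byte and the value in the lower 56 bits. The element array starts
 * buf_off bytes from the ftrace_buf header.
 */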
static void __noprof add_elem(struct ftrace_buf *fbuf, uint8_t level,
			       uint64_t val)
{
	uint64_t *elem = NULL;
	size_t idx = fbuf->curr_idx;

	/* Make sure the topmost byte doesn't contain useful information */
	assert(!(val >> 56));

	elem = (uint64_t *)((vaddr_t)fbuf + fbuf->buf_off) + idx;
	*elem = SHIFT_U64(level, 56) | val;

	idx++;
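	/* Wrap to the start and flag the overflow once the next element would not fit */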
	if ((idx + 1) * sizeof(*elem) > fbuf->max_size) {
		idx = 0;
		fbuf->overflow = true;
	}

	fbuf->curr_idx = idx;
}

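/*
 * Function entry hook: log the entry address, save the original return
 * address and the entry timestamp, then divert the return path through
 * __ftrace_return so that ftrace_return() runs when the function exits.
 */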
void __noprof ftrace_enter(unsigned long pc, unsigned long *lr)
{
	uint64_t now = barrier_read_counter_timer();
	struct ftrace_buf *fbuf = get_fbuf();

	if (!fbuf || !fbuf->buf_off || !fbuf->max_size)
		return;

	add_elem(fbuf, fbuf->ret_idx + 1, pc);

	if (fbuf->ret_idx < FTRACE_RETFUNC_DEPTH) {
		fbuf->ret_stack[fbuf->ret_idx] = *lr;
		fbuf->begin_time[fbuf->ret_idx] = now;
		fbuf->ret_idx++;
	} else {
		/*
		 * This scenario isn't expected as function call depth
		 * shouldn't be more than FTRACE_RETFUNC_DEPTH.
		 */
#if defined(__KERNEL__)
		panic();
#else
		_utee_panic(0);
#endif
	}

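	/* Hook the return: the function will return to __ftrace_return instead */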
	*lr = (unsigned long)&__ftrace_return;
}

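/*
 * Function exit hook: pop the state saved by ftrace_enter(), log the time
 * spent in the function and hand back the original return address so
 * execution can resume at the right place.
 */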
unsigned long __noprof ftrace_return(void)
{
	uint64_t now = barrier_read_counter_timer();
	struct ftrace_buf *fbuf = get_fbuf();
	uint64_t start = 0;
	uint64_t elapsed = 0;

	/* Check for valid return index */
	if (!fbuf || !fbuf->ret_idx || fbuf->ret_idx > FTRACE_RETFUNC_DEPTH)
		return 0;

	fbuf->ret_idx--;
	start = fbuf->begin_time[fbuf->ret_idx];
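	/* Convert elapsed counter ticks to nanoseconds */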
	elapsed = (now - start) * 1000000000 / read_cntfrq();
	add_elem(fbuf, 0, elapsed);

	return fbuf->ret_stack[fbuf->ret_idx];
}

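/*
 * User-mode helpers to keep the return stack consistent across
 * setjmp()/longjmp(): ftrace_setjmp() records the current depth and
 * ftrace_longjmp() replays the pending returns down to that depth.
 */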
#if !defined(__KERNEL__)
void __noprof ftrace_longjmp(unsigned int *ret_idx)
{
	while (__ftrace_buf_start.ret_idx > *ret_idx)
		ftrace_return();
}

void __noprof ftrace_setjmp(unsigned int *ret_idx)
{
	*ret_idx = __ftrace_buf_start.ret_idx;
}
#endif