// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 * Copyright (c) 2022-2023, Arm Limited
 */

#include <assert.h>
#include <ldelf.h>
#include <malloc.h>
#include <printk.h>
#include <string.h>
#include <sys/queue.h>
#include <tee_api_types.h>
#include <trace.h>
#include <types_ext.h>
#include <util.h>

#include "dl.h"
#include "ftrace.h"
#include "sys.h"
#include "ta_elf.h"

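/*
 * Zero-initialized pool backing ldelf's own heap; mapped and registered
 * with malloc_add_pool() at the start of ldelf() below.
 */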
static size_t mpool_size = 4 * SMALL_PAGE_SIZE;
static vaddr_t mpool_base;

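/* Print callback used when dumping TA state: forward to the error trace */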
static void __printf(2, 0) print_to_console(void *pctx __unused,
					    const char *fmt, va_list ap)
{
	trace_vprintf(NULL, 0, TRACE_ERROR, true, fmt, ap);
}

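/*
 * Exported to TEE core as the dump entry point (see arg->dump_entry in
 * ldelf() below): prints the TA status, its memory mappings and a stack
 * trace, then returns to the core.
 */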
static void __noreturn __maybe_unused dump_ta_state(struct dump_entry_arg *arg)
{
	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);

	assert(elf && elf->is_main);
	EMSG_RAW("Status of TA %pUl", (void *)&elf->uuid);
#if defined(ARM32) || defined(ARM64)
	EMSG_RAW(" arch: %s", elf->is_32bit ? "arm" : "aarch64");
#elif defined(RV32) || defined(RV64)
	EMSG_RAW(" arch: %s", elf->is_32bit ? "riscv32" : "riscv64");
#endif

	ta_elf_print_mappings(NULL, print_to_console, &main_elf_queue,
			      arg->num_maps, arg->maps, mpool_base);

#if defined(ARM32) || defined(ARM64)
	if (arg->is_32bit)
		ta_elf_stack_trace_a32(arg->arm32.regs);
	else
		ta_elf_stack_trace_a64(arg->arm64.fp, arg->arm64.sp,
				       arg->arm64.pc);
#elif defined(RV32) || defined(RV64)
	ta_elf_stack_trace_riscv(arg->rv.fp, arg->rv.pc);
#endif

	sys_return_cleanup();
}

#ifdef CFG_FTRACE_SUPPORT
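/*
 * Accumulates formatted output in a caller supplied buffer; @ret keeps
 * counting even when the buffer is absent or full so that the required
 * size can be reported back.
 */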
struct print_buf_ctx {
	char *buf;
	size_t blen;
	size_t ret;
};

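/* Print callback for ta_elf_print_mappings(): format into the context buffer */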
static void __printf(2, 0) print_to_pbuf(void *pctx, const char *fmt,
					 va_list ap)
{
	struct print_buf_ctx *pbuf = pctx;
	char *buf = NULL;
	size_t blen = 0;
	int ret = 0;

	if (pbuf->buf && pbuf->blen > pbuf->ret) {
		buf = pbuf->buf + pbuf->ret;
		blen = pbuf->blen - pbuf->ret;
	}

	ret = vsnprintk(buf, blen, fmt, ap);
	assert(ret >= 0);

	pbuf->ret += ret;
}

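/* Copy callback for ftrace_copy_buf(): append raw bytes to the context buffer */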
static void copy_to_pbuf(void *pctx, void *b, size_t bl)
{
	struct print_buf_ctx *pbuf = pctx;
	char *buf = NULL;
	size_t blen = 0;

	if (pbuf->buf && pbuf->blen > pbuf->ret) {
		buf = pbuf->buf + pbuf->ret;
		blen = pbuf->blen - pbuf->ret;
		memcpy(buf, b, MIN(blen, bl));
	}

	pbuf->ret += bl;
}

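/*
 * Exported to TEE core as the ftrace entry point (see arg->ftrace_entry in
 * ldelf() below): copies the ftrace buffer (preceded by the memory mappings
 * on a new dump) into @buf and reports the size needed or written in *blen.
 */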
static void __noreturn ftrace_dump(void *buf, size_t *blen)
{
	struct print_buf_ctx pbuf = { .buf = buf, .blen = *blen };

	/* Only print the header when this is a new dump */
	if (!ftrace_get_dump_id())
		ta_elf_print_mappings(&pbuf, print_to_pbuf, &main_elf_queue,
				      0, NULL, mpool_base);
	ftrace_copy_buf(&pbuf, copy_to_pbuf);
	/*
	 * Reset the buffer after the dump only if this is the actual write.
	 * The OS may call this function with buf == NULL in order to get
	 * the length required to write the ftrace data.
	 */
	if (buf)
		ftrace_reset_buf();
	*blen = pbuf.ret;
	sys_return_cleanup();
}
#endif

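/*
 * Exported to TEE core as the dynamic linking entry point (see arg->dl_entry
 * in ldelf() below): dispatches dlopen()/dlsym() requests to the handlers
 * declared in dl.h.
 */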
static void __noreturn dl_entry(struct dl_entry_arg *arg)
{
	switch (arg->cmd) {
	case LDELF_DL_ENTRY_DLOPEN:
		arg->ret = dlopen_entry(arg);
		break;
	case LDELF_DL_ENTRY_DLSYM:
		arg->ret = dlsym_entry(arg);
		break;
	default:
		arg->ret = TEE_ERROR_NOT_SUPPORTED;
	}

	sys_return_cleanup();
}

/*
 * ldelf() - Loads ELF into memory
 * @arg:	Argument passed to/from TEE Core
 *
 * Only called from assembly
 */
void __noreturn ldelf(struct ldelf_arg *arg);
void ldelf(struct ldelf_arg *arg)
{
	TEE_Result res = TEE_SUCCESS;
	struct ta_elf *elf = NULL;

	DMSG("Loading TS %pUl", (void *)&arg->uuid);
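	/* Map a zero-initialized region and register it as ldelf's heap */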
	res = sys_map_zi(mpool_size, 0, &mpool_base, 0, 0);
	if (res) {
		EMSG("sys_map_zi(%zu): result %"PRIx32, mpool_size, res);
		panic();
	}
	malloc_add_pool((void *)mpool_base, mpool_size);

	/* Load the main binary and get a list of dependencies, if any. */
	ta_elf_load_main(&arg->uuid, &arg->is_32bit, &arg->stack_ptr,
			 &arg->flags);

	/*
	 * Load the dependencies: ta_elf_load_dependency() may add further
	 * external libraries to the list, so the loop ends only when all
	 * dependencies are satisfied.
	 */
	TAILQ_FOREACH(elf, &main_elf_queue, link)
		ta_elf_load_dependency(elf, arg->is_32bit);

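	/* Relocate each ELF and finalize its memory mappings */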
	TAILQ_FOREACH(elf, &main_elf_queue, link) {
		ta_elf_relocate(elf);
		ta_elf_finalize_mappings(elf);
	}

	ta_elf_finalize_load_main(&arg->entry_func, &arg->load_addr);

	arg->ftrace_entry = 0;
#ifdef CFG_FTRACE_SUPPORT
	if (ftrace_init(&arg->fbuf))
		arg->ftrace_entry = (vaddr_t)(void *)ftrace_dump;
#endif

	TAILQ_FOREACH(elf, &main_elf_queue, link)
		DMSG("ELF (%pUl) at %#"PRIxVA,
		     (void *)&elf->uuid, elf->load_addr);

#if TRACE_LEVEL >= TRACE_ERROR
	arg->dump_entry = (vaddr_t)(void *)dump_ta_state;
#else
	arg->dump_entry = 0;
#endif
	arg->dl_entry = (vaddr_t)(void *)dl_entry;

	sys_return_cleanup();
}