// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2023 NXP
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <config.h>
#include <kernel/boot.h>
#include <kernel/misc.h>
#include <kernel/notif.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <optee_msg.h>
#include <tee/entry_fast.h>
#include <tee/optee_abi.h>

#ifdef CFG_CORE_RESERVED_SHM
static void tee_entry_get_shm_config(struct thread_abi_args *args)
{
	args->a0 = OPTEE_ABI_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEEABI cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}
#endif

static void tee_entry_fastcall_l2cc_mutex(struct thread_abi_args *args)
{
	/* Not implemented here, so report an unknown function */
	args->a0 = OPTEE_ABI_RETURN_UNKNOWN_FUNCTION;
}

static void tee_entry_exchange_capabilities(struct thread_abi_args *args)
{
	bool res_shm_en = IS_ENABLED(CFG_CORE_RESERVED_SHM);
	bool dyn_shm_en __maybe_unused = false;

	/*
	 * Currently we ignore OPTEE_ABI_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with shared bit cleared for
	 * uniprocessor systems we'll need to check
	 * OPTEE_ABI_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_ABI_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_ABI_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_ABI_RETURN_OK;
	args->a1 = 0;

	if (res_shm_en)
		args->a1 |= OPTEE_ABI_SEC_CAP_HAVE_RESERVED_SHM;
	IMSG("Reserved shared memory is %sabled", res_shm_en ? "en" : "dis");

#if defined(CFG_CORE_DYN_SHM)
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_ABI_SEC_CAP_DYNAMIC_SHM;
#endif
	IMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		args->a1 |= OPTEE_ABI_SEC_CAP_VIRTUALIZATION;
	IMSG("Normal World virtualization support is %sabled",
	     IS_ENABLED(CFG_NS_VIRTUALIZATION) ? "en" : "dis");

	args->a1 |= OPTEE_ABI_SEC_CAP_MEMREF_NULL;

	if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
		args->a1 |= OPTEE_ABI_SEC_CAP_ASYNC_NOTIF;
		args->a2 = NOTIF_VALUE_MAX;
	}
	IMSG("Asynchronous notifications are %sabled",
	     IS_ENABLED(CFG_CORE_ASYNC_NOTIF) ? "en" : "dis");

	args->a1 |= OPTEE_ABI_SEC_CAP_RPC_ARG;
	args->a3 = THREAD_RPC_MAX_NUM_PARAMS;
}
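
/*
 * A minimal sketch of how a normal world caller might act on the
 * returned capability bits (fast_call() and use_dynamic_shm() are
 * hypothetical names, not part of this ABI):
 *
 *	struct thread_abi_args res = fast_call(OPTEE_ABI_EXCHANGE_CAPABILITIES);
 *
 *	if (res.a0 == OPTEE_ABI_RETURN_OK &&
 *	    (res.a1 & OPTEE_ABI_SEC_CAP_DYNAMIC_SHM))
 *		use_dynamic_shm();
 */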

static void tee_entry_disable_shm_cache(struct thread_abi_args *args)
{
	uint64_t cookie = 0;

	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
		args->a0 = OPTEE_ABI_RETURN_EBUSY;
		return;
	}

	if (!cookie) {
		args->a0 = OPTEE_ABI_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_ABI_RETURN_OK;
	/* Return the 64-bit cookie split across two 32-bit register values */
	args->a1 = cookie >> 32;
	args->a2 = cookie;
}
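
/*
 * The matching reassembly on the caller side is, roughly (res names
 * the returned register values and is illustrative only):
 *
 *	uint64_t cookie = ((uint64_t)res.a1 << 32) | res.a2;
 */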

static void tee_entry_enable_shm_cache(struct thread_abi_args *args)
{
	if (thread_enable_prealloc_rpc_cache())
		args->a0 = OPTEE_ABI_RETURN_OK;
	else
		args->a0 = OPTEE_ABI_RETURN_EBUSY;
}

static void tee_entry_boot_secondary(struct thread_abi_args *args)
{
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	/* Release the core in a1 to the non-secure entry point in a3 */
	if (!boot_core_release(args->a1, (paddr_t)(args->a3)))
		args->a0 = OPTEE_ABI_RETURN_OK;
	else
		args->a0 = OPTEE_ABI_RETURN_EBADCMD;
#else
	args->a0 = OPTEE_ABI_RETURN_ENOTAVAIL;
#endif
}

static void tee_entry_get_thread_count(struct thread_abi_args *args)
{
	args->a0 = OPTEE_ABI_RETURN_OK;
	args->a1 = CFG_NUM_THREADS;
}

#if defined(CFG_NS_VIRTUALIZATION)
static void tee_entry_vm_created(struct thread_abi_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only the hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_ABI_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_created(guest_id))
		args->a0 = OPTEE_ABI_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_ABI_RETURN_OK;
}

static void tee_entry_vm_destroyed(struct thread_abi_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only the hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_ABI_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_destroyed(guest_id))
		args->a0 = OPTEE_ABI_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_ABI_RETURN_OK;
}
#endif

/* Note: this function is weak to let platforms add special handling */
void __weak tee_entry_fast(struct thread_abi_args *args)
{
	__tee_entry_fast(args);
}
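
/*
 * A minimal sketch of such a platform override, assuming a hypothetical
 * platform-specific fast call ID PLAT_ABI_CUSTOM_CALL (not defined in
 * this file); anything the override does not handle itself must still
 * be passed on to __tee_entry_fast():
 *
 *	void tee_entry_fast(struct thread_abi_args *args)
 *	{
 *		if (args->a0 == PLAT_ABI_CUSTOM_CALL) {
 *			args->a0 = OPTEE_ABI_RETURN_OK;
 *			return;
 *		}
 *		__tee_entry_fast(args);
 *	}
 */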

static void get_async_notif_value(struct thread_abi_args *args)
{
	bool value_valid = false;
	bool value_pending = false;

	args->a0 = OPTEE_ABI_RETURN_OK;
	args->a1 = notif_get_value(&value_valid, &value_pending);
	args->a2 = 0;
	if (value_valid)
		args->a2 |= OPTEE_ABI_ASYNC_NOTIF_VALID;
	if (value_pending)
		args->a2 |= OPTEE_ABI_ASYNC_NOTIF_PENDING;
}
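
/*
 * The normal world is expected to keep fetching values while the
 * PENDING bit stays set, along these lines (sketch; fast_call() and
 * handle_value() are hypothetical names):
 *
 *	do {
 *		res = fast_call(OPTEE_ABI_GET_ASYNC_NOTIF_VALUE);
 *		if (res.a2 & OPTEE_ABI_ASYNC_NOTIF_VALID)
 *			handle_value(res.a1);
 *	} while (res.a2 & OPTEE_ABI_ASYNC_NOTIF_PENDING);
 */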

/*
 * If tee_entry_fast() is overridden, it's still supposed to call this
 * function.
 */
void __tee_entry_fast(struct thread_abi_args *args)
{
	switch (args->a0) {
	/* Generic functions */
	case OPTEE_ABI_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_ABI_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_ABI_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_ABI_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_ABI_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific ABI functions */
#ifdef CFG_CORE_RESERVED_SHM
	case OPTEE_ABI_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
#endif
	case OPTEE_ABI_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_ABI_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_ABI_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_ABI_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_ABI_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;
	case OPTEE_ABI_GET_THREAD_COUNT:
		tee_entry_get_thread_count(args);
		break;

#if defined(CFG_NS_VIRTUALIZATION)
	case OPTEE_ABI_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_ABI_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

	case OPTEE_ABI_ENABLE_ASYNC_NOTIF:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
			notif_deliver_atomic_event(NOTIF_EVENT_STARTED, 0);
			args->a0 = OPTEE_ABI_RETURN_OK;
		} else {
			args->a0 = OPTEE_ABI_RETURN_UNKNOWN_FUNCTION;
		}
		break;
	case OPTEE_ABI_GET_ASYNC_NOTIF_VALUE:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF))
			get_async_notif_value(args);
		else
			args->a0 = OPTEE_ABI_RETURN_UNKNOWN_FUNCTION;
		break;

	default:
		args->a0 = OPTEE_ABI_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}

size_t tee_entry_generic_get_api_call_count(void)
{
	/*
	 * All the different calls handled in this file. A target with
	 * additional calls should call this function and add the number
	 * of calls it provides on top.
	 */
	size_t ret = 12;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		ret += 2;

	return ret;
}

void __weak tee_entry_get_api_call_count(struct thread_abi_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}
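
/*
 * A target with extra fast calls would typically override the weak
 * default along these lines (sketch; PLAT_NUM_EXTRA_FAST_CALLS is a
 * hypothetical constant):
 *
 *	void tee_entry_get_api_call_count(struct thread_abi_args *args)
 *	{
 *		args->a0 = tee_entry_generic_get_api_call_count() +
 *			   PLAT_NUM_EXTRA_FAST_CALLS;
 *	}
 */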

void __weak tee_entry_get_api_uuid(struct thread_abi_args *args)
{
	args->a0 = OPTEE_MSG_UID_0;
	args->a1 = OPTEE_MSG_UID_1;
	args->a2 = OPTEE_MSG_UID_2;
	args->a3 = OPTEE_MSG_UID_3;
}

void __weak tee_entry_get_api_revision(struct thread_abi_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}

void __weak tee_entry_get_os_uuid(struct thread_abi_args *args)
{
	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
}

void __weak tee_entry_get_os_revision(struct thread_abi_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}