// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <config.h>
#include <drivers/wdt.h>
#include <kernel/boot.h>
#include <kernel/notif.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <tee/entry_fast.h>

#ifdef CFG_CORE_RESERVED_SHM
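/*
 * Report the physical address, size and caching attributes of the
 * reserved (static) non-secure shared memory region to normal world.
 */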
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}
#endif

#if defined(CFG_SECURE_DATA_PATH) && !defined(CFG_CORE_DYN_PROTMEM)
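/*
 * Report the static protected (Secure Data Path) memory pool to normal
 * world, preferring a platform supplied CFG_TEE_SDP_MEM_BASE over the
 * test pool, or ENOTAVAIL when neither is defined.
 */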
static void tee_entry_get_protmem_config(struct thread_smc_args *args)
{
#if defined(CFG_TEE_SDP_MEM_BASE)
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_TEE_SDP_MEM_BASE;
	args->a2 = CFG_TEE_SDP_MEM_SIZE;
#elif defined(TEE_SDP_TEST_MEM_BASE)
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = TEE_SDP_TEST_MEM_BASE;
	args->a2 = TEE_SDP_TEST_MEM_SIZE;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	args->a1 = 0;
	args->a2 = 0;
#endif
	args->a3 = sizeof(long) * 8;
}
#endif

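/*
 * Handle the L2 cache controller mutex fast calls: get or set the mutex
 * address and enable or disable its use. Only supported on ARM32;
 * elsewhere the function is reported as unknown.
 */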
static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
{
#ifdef ARM32
	TEE_Result ret = TEE_ERROR_NOT_SUPPORTED;
	paddr_t pa = 0;

	switch (args->a1) {
	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
		ret = tee_get_l2cc_mutex(&pa);
		reg_pair_from_64(pa, &args->a2, &args->a3);
		break;
	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
		pa = reg_pair_to_64(args->a2, args->a3);
		ret = tee_set_l2cc_mutex(&pa);
		break;
	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
		ret = tee_enable_l2cc_mutex();
		break;
	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
		ret = tee_disable_l2cc_mutex();
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
		return;
	}

	if (ret == TEE_ERROR_NOT_SUPPORTED)
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
	else if (ret)
		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
#else
	args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
#endif
}

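/*
 * Negotiate capabilities with normal world: a1 holds the normal world
 * capability flags on entry and the secure world capability flags on
 * return. a2 returns the max notification value when async
 * notifications are enabled and a3 the max number of RPC parameters.
 */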
static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	bool res_shm_en = IS_ENABLED(CFG_CORE_RESERVED_SHM);
	bool dyn_shm_en __maybe_unused = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with shared bit cleared for
	 * uniprocessor systems we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = 0;

	if (res_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
	IMSG("Reserved shared memory is %sabled", res_shm_en ? "en" : "dis");

#if defined(CFG_CORE_DYN_SHM)
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif
	IMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		args->a1 |= OPTEE_SMC_SEC_CAP_VIRTUALIZATION;
	IMSG("Normal World virtualization support is %sabled",
	     IS_ENABLED(CFG_NS_VIRTUALIZATION) ? "en" : "dis");

	args->a1 |= OPTEE_SMC_SEC_CAP_MEMREF_NULL;

	if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
		args->a1 |= OPTEE_SMC_SEC_CAP_ASYNC_NOTIF;
		args->a2 = NOTIF_VALUE_MAX;
	}
	IMSG("Asynchronous notifications are %sabled",
	     IS_ENABLED(CFG_CORE_ASYNC_NOTIF) ? "en" : "dis");

	if (IS_ENABLED(CFG_SECURE_DATA_PATH)) {
		if (IS_ENABLED(CFG_CORE_DYN_PROTMEM))
			args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM;
		else
			args->a1 |= OPTEE_SMC_SEC_CAP_PROTMEM;
	}

	args->a1 |= OPTEE_SMC_SEC_CAP_RPC_ARG;
	args->a3 = THREAD_RPC_MAX_NUM_PARAMS;

	if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP))
		args->a1 |= OPTEE_SMC_SEC_CAP_RPMB_PROBE;
}

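/*
 * Hand the preallocated RPC cache shared memory object back to normal
 * world: the 64-bit cookie is returned split across a1/a2, EBUSY if
 * the cache cannot be disabled yet, or ENOTAVAIL if there is no
 * cached object.
 */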
static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
{
	uint64_t cookie;

	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
		return;
	}

	if (!cookie) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = cookie >> 32;
	args->a2 = cookie;
}

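/* Re-enable use of the preallocated RPC cache shared memory object. */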
static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
{
	if (thread_enable_prealloc_rpc_cache())
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
}

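/*
 * Release the secondary core selected by a1 to boot at the entry
 * address passed in a3.
 */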
static void tee_entry_boot_secondary(struct thread_smc_args *args)
{
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	if (!boot_core_release(args->a1, (paddr_t)(args->a3)))
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
#endif
}

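/* Report the configured number of secure world threads, CFG_NUM_THREADS. */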
static void tee_entry_get_thread_count(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_NUM_THREADS;
}

#if defined(CFG_NS_VIRTUALIZATION)
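/*
 * VM lifecycle notifications, only accepted from the hypervisor
 * (client ID in a7 must be HYP_CLNT_ID), with the guest ID in a1.
 */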
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_created(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_destroyed(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}
#endif

/* Note: this function is weak to let platforms add special handling */
void __weak tee_entry_fast(struct thread_smc_args *args)
{
	__tee_entry_fast(args);
}

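/*
 * A minimal sketch of a platform override, assuming a hypothetical
 * platform specific function ID PLAT_SMC_FUNCID handled by
 * plat_handle_smc():
 *
 * void tee_entry_fast(struct thread_smc_args *args)
 * {
 *	if (args->a0 == PLAT_SMC_FUNCID)
 *		plat_handle_smc(args);
 *	else
 *		__tee_entry_fast(args);
 * }
 */

/*
 * Return the current notification value in a1 and flags in a2 telling
 * whether the value is valid and whether more values are pending.
 */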
static void get_async_notif_value(struct thread_smc_args *args)
{
	bool value_valid = false;
	bool value_pending = false;

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = notif_get_value(&value_valid, &value_pending);
	args->a2 = 0;
	if (value_valid)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_VALID;
	if (value_pending)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_PENDING;
}

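/*
 * Forward the call to the secure watchdog handler when one is
 * configured (CFG_WDT_SM_HANDLER).
 */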
static void tee_entry_watchdog(struct thread_smc_args *args)
{
#if defined(CFG_WDT_SM_HANDLER)
	__wdt_sm_handler(args);
#else
	args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
#endif
}

/*
 * If tee_entry_fast() is overridden, it's still supposed to call this
 * function.
 */
void __tee_entry_fast(struct thread_smc_args *args)
{
	switch (args->a0) {

	/* Generic functions */
	case OPTEE_SMC_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_SMC_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_SMC_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific SMC functions */
#ifdef CFG_CORE_RESERVED_SHM
	case OPTEE_SMC_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
#endif
#if defined(CFG_SECURE_DATA_PATH) && !defined(CFG_CORE_DYN_PROTMEM)
	case OPTEE_SMC_GET_PROTMEM_CONFIG:
		tee_entry_get_protmem_config(args);
		break;
#endif
	case OPTEE_SMC_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_SMC_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_SMC_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_SMC_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_SMC_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;
	case OPTEE_SMC_GET_THREAD_COUNT:
		tee_entry_get_thread_count(args);
		break;

#if defined(CFG_NS_VIRTUALIZATION)
	case OPTEE_SMC_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_SMC_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

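	/* Asynchronous notification functions */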
	case OPTEE_SMC_ENABLE_ASYNC_NOTIF:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
			uint16_t g_id = 0;

			if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
				g_id = args->a7;

			notif_deliver_atomic_event(NOTIF_EVENT_STARTED, g_id);

			args->a0 = OPTEE_SMC_RETURN_OK;
		} else {
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		}
		break;
	case OPTEE_SMC_GET_ASYNC_NOTIF_VALUE:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF))
			get_async_notif_value(args);
		else
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;

	/* Watchdog entry if handler ID is defined in TOS range */
	case CFG_WDT_SM_HANDLER_ID:
		tee_entry_watchdog(args);
		break;

	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}

size_t tee_entry_generic_get_api_call_count(void)
{
	/*
	 * All the different calls handled in this file. If the specific
	 * target has additional calls it will call this function and
	 * add the number of calls the target has added.
	 */
	size_t ret = 12;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		ret += 2;

	return ret;
}

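/*
 * A minimal sketch of how a target with, say, two additional fast
 * calls would override this weak function (hypothetical example):
 *
 * void tee_entry_get_api_call_count(struct thread_smc_args *args)
 * {
 *	args->a0 = tee_entry_generic_get_api_call_count() + 2;
 * }
 */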
void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}

void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_UID_0;
	args->a1 = OPTEE_MSG_UID_1;
	args->a2 = OPTEE_MSG_UID_2;
	args->a3 = OPTEE_MSG_UID_3;
}

void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}

void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
}

void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}
404