xref: /optee_os/core/arch/arm/tee/entry_fast.c (revision 89f492f5cb3144e32d48dabc9284a8b2a7430cf6)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <config.h>
#include <drivers/wdt.h>
#include <kernel/boot.h>
#include <kernel/notif.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <tee/entry_fast.h>

#ifdef CFG_CORE_RESERVED_SHM
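/*
 * Report the physical address, size and cacheability of the reserved
 * (static) shared memory pool to the normal world.
 */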
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}
#endif

#ifdef CFG_SECURE_DATA_PATH
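/*
 * Report the protected memory (Secure Data Path) range, if one is
 * configured; a3 carries the native word size in bits.
 */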
static void tee_entry_get_protmem_config(struct thread_smc_args *args)
{
#if defined(CFG_TEE_SDP_MEM_BASE)
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_TEE_SDP_MEM_BASE;
	args->a2 = CFG_TEE_SDP_MEM_SIZE;
#elif defined(TEE_SDP_TEST_MEM_BASE)
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = TEE_SDP_TEST_MEM_BASE;
	args->a2 = TEE_SDP_TEST_MEM_SIZE;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	args->a1 = 0;
	args->a2 = 0;
#endif
	args->a3 = sizeof(long) * 8;
}
#endif

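/*
 * Manage the mutex used to synchronize L2 cache controller maintenance
 * with the normal world. Only relevant on ARM32; elsewhere the function
 * is reported as unknown.
 */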
static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
{
#ifdef ARM32
	TEE_Result ret = TEE_ERROR_NOT_SUPPORTED;
	paddr_t pa = 0;

	switch (args->a1) {
	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
		ret = tee_get_l2cc_mutex(&pa);
		reg_pair_from_64(pa, &args->a2, &args->a3);
		break;
	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
		pa = reg_pair_to_64(args->a2, args->a3);
		ret = tee_set_l2cc_mutex(&pa);
		break;
	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
		ret = tee_enable_l2cc_mutex();
		break;
	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
		ret = tee_disable_l2cc_mutex();
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
		return;
	}

	if (ret == TEE_ERROR_NOT_SUPPORTED)
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
	else if (ret)
		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
#else
	args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
#endif
}

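/*
 * Exchange capabilities with the normal world: a1 carries normal world
 * capability bits on entry and secure world capability bits on return.
 * a2 returns the maximum notification value when asynchronous
 * notifications are enabled and a3 the maximum number of RPC parameters.
 */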
static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	bool res_shm_en = IS_ENABLED(CFG_CORE_RESERVED_SHM);
	bool dyn_shm_en __maybe_unused = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with shared bit cleared for
	 * uniprocessor systems we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = 0;

	if (res_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
	IMSG("Reserved shared memory is %sabled", res_shm_en ? "en" : "dis");

#if defined(CFG_CORE_DYN_SHM)
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif
	IMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		args->a1 |= OPTEE_SMC_SEC_CAP_VIRTUALIZATION;
	IMSG("Normal World virtualization support is %sabled",
	     IS_ENABLED(CFG_NS_VIRTUALIZATION) ? "en" : "dis");

	args->a1 |= OPTEE_SMC_SEC_CAP_MEMREF_NULL;

	if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
		args->a1 |= OPTEE_SMC_SEC_CAP_ASYNC_NOTIF;
		args->a2 = NOTIF_VALUE_MAX;
	}
	IMSG("Asynchronous notifications are %sabled",
	     IS_ENABLED(CFG_CORE_ASYNC_NOTIF) ? "en" : "dis");

	if (IS_ENABLED(CFG_SECURE_DATA_PATH))
		args->a1 |= OPTEE_SMC_SEC_CAP_PROTMEM;

	args->a1 |= OPTEE_SMC_SEC_CAP_RPC_ARG;
	args->a3 = THREAD_RPC_MAX_NUM_PARAMS;

	if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP))
		args->a1 |= OPTEE_SMC_SEC_CAP_RPMB_PROBE;
}

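/*
 * Stop caching the preallocated RPC shared memory object and hand it
 * back to the normal world: the 64-bit cookie identifying it is
 * returned in a1 (high word) and a2 (low word).
 */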
static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
{
	uint64_t cookie;

	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
		return;
	}

	if (!cookie) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = cookie >> 32;
	args->a2 = cookie;
}

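/* Allow the preallocated RPC shared memory object to be cached again. */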
static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
{
	if (thread_enable_prealloc_rpc_cache())
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
}

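/*
 * Release a secondary core held by secure world: a1 holds the core
 * index and a3 the non-secure address the core should enter at.
 */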
static void tee_entry_boot_secondary(struct thread_smc_args *args)
{
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	if (!boot_core_release(args->a1, (paddr_t)(args->a3)))
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
#endif
}

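/* Report how many standard calls can execute in parallel. */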
static void tee_entry_get_thread_count(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_NUM_THREADS;
}

#if defined(CFG_NS_VIRTUALIZATION)
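/*
 * Set up per-guest state when the hypervisor reports that a guest VM
 * has been created: a1 holds the guest id and a7 must identify the
 * hypervisor client.
 */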
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only the hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_created(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

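/* Tear down per-guest state when the hypervisor destroys a guest VM. */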
static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only the hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_destroyed(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}
#endif

/* Note: this function is weak to let platforms add special handling */
void __weak tee_entry_fast(struct thread_smc_args *args)
{
	__tee_entry_fast(args);
}

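/*
 * Retrieve a pending notification value: a1 returns the value and a2
 * flags whether the value is valid and whether more values are pending.
 */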
static void get_async_notif_value(struct thread_smc_args *args)
{
	bool value_valid = false;
	bool value_pending = false;

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = notif_get_value(&value_valid, &value_pending);
	args->a2 = 0;
	if (value_valid)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_VALID;
	if (value_pending)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_PENDING;
}

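/*
 * Forward the call to the secure watchdog handler when one is
 * configured, otherwise report the function as unknown.
 */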
static void tee_entry_watchdog(struct thread_smc_args *args)
{
#if defined(CFG_WDT_SM_HANDLER)
	__wdt_sm_handler(args);
#else
	args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
#endif
}

/*
 * If tee_entry_fast() is overridden, it's still supposed to call this
 * function.
 */
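/*
 * Fast calls run to completion on the calling core: the SMC function
 * identifier arrives in a0 and each handler fills in the result
 * registers before control returns to the normal world.
 */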
void __tee_entry_fast(struct thread_smc_args *args)
{
	switch (args->a0) {

	/* Generic functions */
	case OPTEE_SMC_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_SMC_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_SMC_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific SMC functions */
#ifdef CFG_CORE_RESERVED_SHM
	case OPTEE_SMC_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
#endif
#ifdef CFG_SECURE_DATA_PATH
	case OPTEE_SMC_GET_PROTMEM_CONFIG:
		tee_entry_get_protmem_config(args);
		break;
#endif
	case OPTEE_SMC_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_SMC_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_SMC_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_SMC_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_SMC_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;
	case OPTEE_SMC_GET_THREAD_COUNT:
		tee_entry_get_thread_count(args);
		break;

#if defined(CFG_NS_VIRTUALIZATION)
	case OPTEE_SMC_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_SMC_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

	case OPTEE_SMC_ENABLE_ASYNC_NOTIF:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
			uint16_t g_id = 0;

			if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
				g_id = args->a7;

			notif_deliver_atomic_event(NOTIF_EVENT_STARTED, g_id);

			args->a0 = OPTEE_SMC_RETURN_OK;
		} else {
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		}
		break;
	case OPTEE_SMC_GET_ASYNC_NOTIF_VALUE:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF))
			get_async_notif_value(args);
		else
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;

	/* Watchdog entry, if the handler ID is defined in the Trusted OS range */
	case CFG_WDT_SM_HANDLER_ID:
		tee_entry_watchdog(args);
		break;

	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}

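/*
 * Number of SMC functions handled in this file, exported so that a
 * target adding platform-specific fast calls can report a correct
 * total.
 */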
size_t tee_entry_generic_get_api_call_count(void)
{
	/*
	 * The number of calls handled in this file. A target that
	 * handles additional calls should call this function and add
	 * its own count to the value returned here.
	 */
	size_t ret = 12;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		ret += 2;

	return ret;
}

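/*
 * The entry points below are weak so that a platform can override how
 * the API and OS identities and revisions are reported.
 */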
void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}

void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_UID_0;
	args->a1 = OPTEE_MSG_UID_1;
	args->a2 = OPTEE_MSG_UID_2;
	args->a3 = OPTEE_MSG_UID_3;
}

void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}

void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
}

void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}