xref: /optee_os/core/arch/arm/tee/entry_fast.c (revision ef3bc69c72b8d46493eab724eab6e018423088e1)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <config.h>
8 #include <drivers/wdt.h>
9 #include <kernel/boot.h>
10 #include <kernel/notif.h>
11 #include <kernel/tee_l2cc_mutex.h>
12 #include <kernel/virtualization.h>
13 #include <mm/core_mmu.h>
14 #include <optee_msg.h>
15 #include <sm/optee_smc.h>
16 #include <tee/entry_fast.h>
17 
#ifdef CFG_CORE_RESERVED_SHM
/*
 * Report the reserved non-secure shared memory pool to normal world:
 * physical base address, size and whether the pool is mapped cached.
 */
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
	args->a0 = OPTEE_SMC_RETURN_OK;
}
#endif
28 
29 static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
30 {
31 #ifdef ARM32
32 	TEE_Result ret = TEE_ERROR_NOT_SUPPORTED;
33 	paddr_t pa = 0;
34 
35 	switch (args->a1) {
36 	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
37 		ret = tee_get_l2cc_mutex(&pa);
38 		reg_pair_from_64(pa, &args->a2, &args->a3);
39 		break;
40 	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
41 		pa = reg_pair_to_64(args->a2, args->a3);
42 		ret = tee_set_l2cc_mutex(&pa);
43 		break;
44 	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
45 		ret = tee_enable_l2cc_mutex();
46 		break;
47 	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
48 		ret = tee_disable_l2cc_mutex();
49 		break;
50 	default:
51 		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
52 		return;
53 	}
54 
55 	if (ret == TEE_ERROR_NOT_SUPPORTED)
56 		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
57 	else if (ret)
58 		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
59 	else
60 		args->a0 = OPTEE_SMC_RETURN_OK;
61 #else
62 	args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
63 #endif
64 }
65 
/*
 * Handle OPTEE_SMC_EXCHANGE_CAPABILITIES.
 *
 * On entry args->a1 carries the normal world capability bits. On return:
 * a0 is the SMC status, a1 the secure world capability bits, a2 the
 * maximum async notification value (only when CFG_CORE_ASYNC_NOTIF is
 * enabled) and a3 the maximum number of RPC parameters.
 */
static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	bool res_shm_en = IS_ENABLED(CFG_CORE_RESERVED_SHM);
	bool dyn_shm_en __maybe_unused = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with shared bit cleared for
	 * uniprocessor systems we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	/* Reject any normal world capability bit we don't know about */
	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = 0;

	if (res_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
	IMSG("Reserved shared memory is %sabled", res_shm_en ? "en" : "dis");

#if defined(CFG_CORE_DYN_SHM)
	/* Dynamic SHM requires the non-secure DDR ranges to be known */
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif
	IMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		args->a1 |= OPTEE_SMC_SEC_CAP_VIRTUALIZATION;
	IMSG("Normal World virtualization support is %sabled",
	     IS_ENABLED(CFG_NS_VIRTUALIZATION) ? "en" : "dis");

	/* NULL memory references in parameters are always accepted */
	args->a1 |= OPTEE_SMC_SEC_CAP_MEMREF_NULL;

	if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
		args->a1 |= OPTEE_SMC_SEC_CAP_ASYNC_NOTIF;
		/* a2 tells normal world the largest notification value */
		args->a2 = NOTIF_VALUE_MAX;
	}
	IMSG("Asynchronous notifications are %sabled",
	     IS_ENABLED(CFG_CORE_ASYNC_NOTIF) ? "en" : "dis");

	args->a1 |= OPTEE_SMC_SEC_CAP_RPC_ARG;
	/* a3 tells normal world how many RPC parameters we support */
	args->a3 = THREAD_RPC_MAX_NUM_PARAMS;

	if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP))
		args->a1 |= OPTEE_SMC_SEC_CAP_RPMB_PROBE;
}
124 
125 static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
126 {
127 	uint64_t cookie;
128 
129 	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
130 		args->a0 = OPTEE_SMC_RETURN_EBUSY;
131 		return;
132 	}
133 
134 	if (!cookie) {
135 		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
136 		return;
137 	}
138 
139 	args->a0 = OPTEE_SMC_RETURN_OK;
140 	args->a1 = cookie >> 32;
141 	args->a2 = cookie;
142 }
143 
144 static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
145 {
146 	if (thread_enable_prealloc_rpc_cache())
147 		args->a0 = OPTEE_SMC_RETURN_OK;
148 	else
149 		args->a0 = OPTEE_SMC_RETURN_EBUSY;
150 }
151 
152 static void tee_entry_boot_secondary(struct thread_smc_args *args)
153 {
154 #if defined(CFG_BOOT_SECONDARY_REQUEST)
155 	if (!boot_core_release(args->a1, (paddr_t)(args->a3)))
156 		args->a0 = OPTEE_SMC_RETURN_OK;
157 	else
158 		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
159 #else
160 	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
161 #endif
162 }
163 
164 static void tee_entry_get_thread_count(struct thread_smc_args *args)
165 {
166 	args->a0 = OPTEE_SMC_RETURN_OK;
167 	args->a1 = CFG_NUM_THREADS;
168 }
169 
#if defined(CFG_NS_VIRTUALIZATION)
/* Only the hypervisor client (a7 == HYP_CLNT_ID) may manage guests */
static bool is_hyp_caller(const struct thread_smc_args *args)
{
	return args->a7 == HYP_CLNT_ID;
}

/* Notification from the hypervisor that guest a1 has been created. */
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	if (!is_hyp_caller(args) || virt_guest_created(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

/* Notification from the hypervisor that guest a1 has been destroyed. */
static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	if (!is_hyp_caller(args) || virt_guest_destroyed(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}
#endif
203 
/*
 * Entry point for all fast SMC calls. Declared weak so a platform can
 * override it with special handling; an override is still expected to
 * call __tee_entry_fast() for the generic calls (see comment there).
 */
void __weak tee_entry_fast(struct thread_smc_args *args)
{
	__tee_entry_fast(args);
}
209 
210 static void get_async_notif_value(struct thread_smc_args *args)
211 {
212 	bool value_valid = false;
213 	bool value_pending = false;
214 
215 	args->a0 = OPTEE_SMC_RETURN_OK;
216 	args->a1 = notif_get_value(&value_valid, &value_pending);
217 	args->a2 = 0;
218 	if (value_valid)
219 		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_VALID;
220 	if (value_pending)
221 		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_PENDING;
222 }
223 
/*
 * Forward a watchdog fastcall to the secure watchdog handler when
 * CFG_WDT_SM_HANDLER is enabled; otherwise report the function as
 * unknown.
 */
static void tee_entry_watchdog(struct thread_smc_args *args)
{
#if defined(CFG_WDT_SM_HANDLER)
	__wdt_sm_handler(args);
#else
	args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
#endif
}
232 
/*
 * If tee_entry_fast() is overridden, it's still supposed to call this
 * function.
 *
 * Dispatches on the function ID in a0 and fills in the return registers
 * a0..a3 per call. Unknown IDs yield OPTEE_SMC_RETURN_UNKNOWN_FUNCTION.
 */
void __tee_entry_fast(struct thread_smc_args *args)
{
	switch (args->a0) {

	/* Generic functions */
	case OPTEE_SMC_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_SMC_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_SMC_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific SMC functions */
#ifdef CFG_CORE_RESERVED_SHM
	case OPTEE_SMC_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
#endif
	case OPTEE_SMC_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_SMC_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_SMC_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_SMC_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_SMC_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;
	case OPTEE_SMC_GET_THREAD_COUNT:
		tee_entry_get_thread_count(args);
		break;

	/* Hypervisor guest lifecycle notifications */
#if defined(CFG_NS_VIRTUALIZATION)
	case OPTEE_SMC_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_SMC_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

	/* Asynchronous notification support */
	case OPTEE_SMC_ENABLE_ASYNC_NOTIF:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
			/* Tell waiters normal world is ready for events */
			notif_deliver_atomic_event(NOTIF_EVENT_STARTED, 0);
			args->a0 = OPTEE_SMC_RETURN_OK;
		} else {
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		}
		break;
	case OPTEE_SMC_GET_ASYNC_NOTIF_VALUE:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF))
			get_async_notif_value(args);
		else
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;

	/* Watchdog entry if handler ID is defined in TOS range */
	case CFG_WDT_SM_HANDLER_ID:
		tee_entry_watchdog(args);
		break;

	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}
317 
318 size_t tee_entry_generic_get_api_call_count(void)
319 {
320 	/*
321 	 * All the different calls handled in this file. If the specific
322 	 * target has additional calls it will call this function and
323 	 * add the number of calls the target has added.
324 	 */
325 	size_t ret = 12;
326 
327 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
328 		ret += 2;
329 
330 	return ret;
331 }
332 
/*
 * Return in a0 the number of implemented SMC calls. Weak so a target
 * with extra calls can override and add its own count.
 */
void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}
337 
338 void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
339 {
340 	args->a0 = OPTEE_MSG_UID_0;
341 	args->a1 = OPTEE_MSG_UID_1;
342 	args->a2 = OPTEE_MSG_UID_2;
343 	args->a3 = OPTEE_MSG_UID_3;
344 }
345 
/* Return the OPTEE_MSG API revision: major in a0, minor in a1. */
void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}
351 
352 void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
353 {
354 	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
355 	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
356 	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
357 	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
358 }
359 
/*
 * Return the OP-TEE OS revision: major in a0, minor in a1 and the
 * build's git commit identifier in a2.
 */
void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}
366