xref: /optee_os/core/arch/arm/tee/entry_fast.c (revision 6cfa381e534b362afbd103f526b132048e54ba47)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <config.h>
#include <drivers/wdt.h>
#include <kernel/boot.h>
#include <kernel/misc.h>
#include <kernel/notif.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <sm/watchdog_smc.h>
#include <tee/entry_fast.h>

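/*
 * OPTEE_SMC_GET_SHM_CONFIG handler, only built with CFG_CORE_RESERVED_SHM:
 * reports the physical address and size of the reserved (static) non-secure
 * shared memory area and whether it is mapped as cached.
 */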
#ifdef CFG_CORE_RESERVED_SHM
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}
#endif

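/*
 * OPTEE_SMC_L2CC_MUTEX handler: lets normal world get or set the physical
 * address of the mutex used to coordinate L2 cache controller maintenance
 * with secure world, and enable or disable use of that mutex. The address
 * is passed as a 64-bit value split over a2/a3. Only implemented for ARM32;
 * otherwise the call reports OPTEE_SMC_RETURN_UNKNOWN_FUNCTION.
 */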
static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
{
	TEE_Result ret;
#ifdef ARM32
	paddr_t pa = 0;

	switch (args->a1) {
	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
		ret = tee_get_l2cc_mutex(&pa);
		reg_pair_from_64(pa, &args->a2, &args->a3);
		break;
	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
		pa = reg_pair_to_64(args->a2, args->a3);
		ret = tee_set_l2cc_mutex(&pa);
		break;
	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
		ret = tee_enable_l2cc_mutex();
		break;
	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
		ret = tee_disable_l2cc_mutex();
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
		return;
	}
#else
	ret = TEE_ERROR_NOT_SUPPORTED;
#endif
	if (ret == TEE_ERROR_NOT_SUPPORTED)
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
	else if (ret)
		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

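/*
 * OPTEE_SMC_EXCHANGE_CAPABILITIES handler: takes the normal world
 * capability bits in a1 and reports the secure world capabilities
 * (reserved/dynamic shared memory, virtualization, NULL memory references,
 * asynchronous notifications and RPC arguments) in a1, together with the
 * maximum notification value in a2 and the maximum number of RPC
 * parameters in a3.
 */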
static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	bool res_shm_en = IS_ENABLED(CFG_CORE_RESERVED_SHM);
	bool dyn_shm_en __maybe_unused = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with shared bit cleared for
	 * uniprocessor systems we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = 0;

	if (res_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
	IMSG("Reserved shared memory is %sabled", res_shm_en ? "en" : "dis");

#if defined(CFG_CORE_DYN_SHM)
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif
	IMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		args->a1 |= OPTEE_SMC_SEC_CAP_VIRTUALIZATION;
	IMSG("Normal World virtualization support is %sabled",
	     IS_ENABLED(CFG_NS_VIRTUALIZATION) ? "en" : "dis");

	args->a1 |= OPTEE_SMC_SEC_CAP_MEMREF_NULL;

	if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
		args->a1 |= OPTEE_SMC_SEC_CAP_ASYNC_NOTIF;
		args->a2 = NOTIF_VALUE_MAX;
	}
	IMSG("Asynchronous notifications are %sabled",
	     IS_ENABLED(CFG_CORE_ASYNC_NOTIF) ? "en" : "dis");

	args->a1 |= OPTEE_SMC_SEC_CAP_RPC_ARG;
	args->a3 = THREAD_RPC_MAX_NUM_PARAMS;
}

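/*
 * OPTEE_SMC_DISABLE_SHM_CACHE handler: disables the cache of preallocated
 * RPC shared memory objects and returns the cookie of one cached object as
 * a 64-bit value split over a1 (high) and a2 (low) so that normal world
 * can free it. Reports OPTEE_SMC_RETURN_EBUSY if the cache cannot be
 * emptied at the moment and OPTEE_SMC_RETURN_ENOTAVAIL once it is empty.
 */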
static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
{
	uint64_t cookie;

	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
		return;
	}

	if (!cookie) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = cookie >> 32;
	args->a2 = cookie;
}

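/*
 * OPTEE_SMC_ENABLE_SHM_CACHE handler: re-enables the cache of preallocated
 * RPC shared memory objects, or reports OPTEE_SMC_RETURN_EBUSY if that is
 * not currently possible.
 */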
static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
{
	if (thread_enable_prealloc_rpc_cache())
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
}

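/*
 * OPTEE_SMC_BOOT_SECONDARY handler: releases the secondary core selected
 * by a1 to boot at the non-secure entry address passed in a3, reporting
 * OPTEE_SMC_RETURN_EBADCMD on failure. Without CFG_BOOT_SECONDARY_REQUEST
 * the call reports OPTEE_SMC_RETURN_ENOTAVAIL.
 */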
static void tee_entry_boot_secondary(struct thread_smc_args *args)
{
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	if (!boot_core_release(args->a1, (paddr_t)(args->a3)))
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
#endif
}

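/*
 * OPTEE_SMC_GET_THREAD_COUNT handler: reports the number of secure threads
 * (CFG_NUM_THREADS) available for standard calls.
 */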
static void tee_entry_get_thread_count(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_NUM_THREADS;
}

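/*
 * OPTEE_SMC_VM_CREATED handler, only built with CFG_NS_VIRTUALIZATION:
 * tells OP-TEE that the hypervisor has created the guest identified by a1
 * so that per-guest state can be set up. Rejected with
 * OPTEE_SMC_RETURN_ENOTAVAIL unless issued by the hypervisor client
 * (a7 == HYP_CLNT_ID) or if setting up the guest fails.
 */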
#if defined(CFG_NS_VIRTUALIZATION)
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_created(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

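/*
 * OPTEE_SMC_VM_DESTROYED handler: tells OP-TEE that the guest identified
 * by a1 has been destroyed so its resources can be released. As above,
 * only the hypervisor client may issue this request.
 */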
static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_destroyed(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}
#endif

/* Note: this function is weak to let platforms add special handling */
void __weak tee_entry_fast(struct thread_smc_args *args)
{
	__tee_entry_fast(args);
}

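/*
 * Illustration only, not part of the dispatch in this file: a platform
 * needing special handling can provide a strong tee_entry_fast() in its
 * platform code and forward everything it does not consume to
 * __tee_entry_fast(). The function ID and helper below are hypothetical.
 *
 *	void tee_entry_fast(struct thread_smc_args *args)
 *	{
 *		if (args->a0 == PLAT_SMC_FUNCID_FOO) {
 *			plat_handle_foo(args);
 *			return;
 *		}
 *		__tee_entry_fast(args);
 *	}
 */

/*
 * Helper for OPTEE_SMC_GET_ASYNC_NOTIF_VALUE: returns the next
 * notification value in a1 and flags in a2 telling whether the value is
 * valid and whether more values are pending.
 */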
static void get_async_notif_value(struct thread_smc_args *args)
{
	bool value_valid = false;
	bool value_pending = false;

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = notif_get_value(&value_valid, &value_pending);
	args->a2 = 0;
	if (value_valid)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_VALID;
	if (value_pending)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_PENDING;
}

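/*
 * Handler for the secure watchdog fast call (CFG_WDT_SM_HANDLER_ID):
 * forwards the request to the watchdog SMC handler when
 * CFG_WDT_SM_HANDLER is enabled, otherwise reports an unknown function.
 */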
static void tee_entry_watchdog(struct thread_smc_args *args)
{
#if defined(CFG_WDT_SM_HANDLER)
	__wdt_sm_handler(args);
#else
	args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
#endif
}

/*
 * If tee_entry_fast() is overridden, it's still supposed to call this
 * function.
 */
void __tee_entry_fast(struct thread_smc_args *args)
{
	switch (args->a0) {

	/* Generic functions */
	case OPTEE_SMC_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_SMC_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_SMC_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific SMC functions */
#ifdef CFG_CORE_RESERVED_SHM
	case OPTEE_SMC_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
#endif
	case OPTEE_SMC_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_SMC_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_SMC_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_SMC_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_SMC_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;
	case OPTEE_SMC_GET_THREAD_COUNT:
		tee_entry_get_thread_count(args);
		break;

#if defined(CFG_NS_VIRTUALIZATION)
	case OPTEE_SMC_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_SMC_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

	case OPTEE_SMC_ENABLE_ASYNC_NOTIF:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
			notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
			args->a0 = OPTEE_SMC_RETURN_OK;
		} else {
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		}
		break;
	case OPTEE_SMC_GET_ASYNC_NOTIF_VALUE:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF))
			get_async_notif_value(args);
		else
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;

	/* Watchdog entry if handler ID is defined in TOS range */
	case CFG_WDT_SM_HANDLER_ID:
		tee_entry_watchdog(args);
		break;

	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}

size_t tee_entry_generic_get_api_call_count(void)
{
	/*
	 * All the different calls handled in this file. If the specific
	 * target has additional calls it will call this function and
	 * add the number of calls the target has added.
	 */
	size_t ret = 12;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		ret += 2;

	return ret;
}

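/*
 * Illustration with a hypothetical number of calls: a platform adding,
 * say, two fast calls of its own would override the weak function below
 * along the lines of:
 *
 *	void tee_entry_get_api_call_count(struct thread_smc_args *args)
 *	{
 *		args->a0 = tee_entry_generic_get_api_call_count() + 2;
 *	}
 */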
void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}

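/*
 * The remaining weak functions report the OP-TEE API UID and revision and
 * the OS UUID and revision. Platforms that need to report different
 * identification values can override them.
 */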
void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_UID_0;
	args->a1 = OPTEE_MSG_UID_1;
	args->a2 = OPTEE_MSG_UID_2;
	args->a3 = OPTEE_MSG_UID_3;
}

void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}

void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
}

void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}