xref: /optee_os/core/arch/arm/tee/entry_fast.c (revision 8dfdf3927214073d034281d479ce65ace7f3a08b)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <config.h>
#include <drivers/wdt.h>
#include <kernel/boot.h>
#include <kernel/notif.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <tee/entry_fast.h>

#ifdef CFG_CORE_RESERVED_SHM
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}
#endif

static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
{
#ifdef ARM32
	TEE_Result ret = TEE_ERROR_NOT_SUPPORTED;
	paddr_t pa = 0;

	switch (args->a1) {
	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
		ret = tee_get_l2cc_mutex(&pa);
		reg_pair_from_64(pa, &args->a2, &args->a3);
		break;
	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
		pa = reg_pair_to_64(args->a2, args->a3);
		ret = tee_set_l2cc_mutex(&pa);
		break;
	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
		ret = tee_enable_l2cc_mutex();
		break;
	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
		ret = tee_disable_l2cc_mutex();
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
		return;
	}

	if (ret == TEE_ERROR_NOT_SUPPORTED)
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
	else if (ret)
		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
#else
	args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
#endif
}

static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	bool res_shm_en = IS_ENABLED(CFG_CORE_RESERVED_SHM);
	bool dyn_shm_en __maybe_unused = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with the shared bit
	 * cleared for uniprocessor systems we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = 0;

	if (res_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
	IMSG("Reserved shared memory is %sabled", res_shm_en ? "en" : "dis");

#if defined(CFG_CORE_DYN_SHM)
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif
	IMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		args->a1 |= OPTEE_SMC_SEC_CAP_VIRTUALIZATION;
	IMSG("Normal World virtualization support is %sabled",
	     IS_ENABLED(CFG_NS_VIRTUALIZATION) ? "en" : "dis");

	args->a1 |= OPTEE_SMC_SEC_CAP_MEMREF_NULL;

	if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
		args->a1 |= OPTEE_SMC_SEC_CAP_ASYNC_NOTIF;
		args->a2 = NOTIF_VALUE_MAX;
	}
	IMSG("Asynchronous notifications are %sabled",
	     IS_ENABLED(CFG_CORE_ASYNC_NOTIF) ? "en" : "dis");

	args->a1 |= OPTEE_SMC_SEC_CAP_RPC_ARG;
	args->a3 = THREAD_RPC_MAX_NUM_PARAMS;

	args->a1 |= OPTEE_SMC_SEC_CAP_RPMB_PROBE;
}
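
/*
 * Minimal sketch, not used by this file: how a hypothetical normal-world
 * client could decode the registers set by tee_entry_exchange_capabilities()
 * above. The guard macro, struct and function names are illustrative
 * assumptions; only the OPTEE_SMC_SEC_CAP_* meanings come from the code
 * above.
 */
#ifdef EXAMPLE_NS_CLIENT_SKETCH
struct example_optee_caps {
	bool dyn_shm;
	bool async_notif;
	unsigned long max_notif_value;	/* valid only with async_notif */
	unsigned long max_rpc_params;	/* valid only with SEC_CAP_RPC_ARG */
};

static bool example_decode_capabilities(const struct thread_smc_args *args,
					struct example_optee_caps *caps)
{
	if (args->a0 != OPTEE_SMC_RETURN_OK)
		return false;

	caps->dyn_shm = args->a1 & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
	caps->async_notif = args->a1 & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF;
	caps->max_notif_value = caps->async_notif ? args->a2 : 0;
	caps->max_rpc_params = (args->a1 & OPTEE_SMC_SEC_CAP_RPC_ARG) ?
			       args->a3 : 0;
	return true;
}
#endif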

static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
{
	uint64_t cookie;

	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
		return;
	}

	if (!cookie) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = cookie >> 32;
	args->a2 = cookie;
}
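
/*
 * Minimal illustrative note: the 64-bit cookie above is returned split as
 * a1 = high 32 bits, a2 = low 32 bits. A hypothetical caller-side helper
 * (name and guard macro are assumptions) could reassemble it like this.
 */
#ifdef EXAMPLE_NS_CLIENT_SKETCH
static uint64_t example_shm_cookie_from_regs(unsigned long a1,
					     unsigned long a2)
{
	return ((uint64_t)(uint32_t)a1 << 32) | (uint32_t)a2;
}
#endif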

static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
{
	if (thread_enable_prealloc_rpc_cache())
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
}

static void tee_entry_boot_secondary(struct thread_smc_args *args)
{
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	if (!boot_core_release(args->a1, (paddr_t)(args->a3)))
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
#endif
}

static void tee_entry_get_thread_count(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_NUM_THREADS;
}

#if defined(CFG_NS_VIRTUALIZATION)
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_created(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_destroyed(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}
#endif

/* Note: this function is weak to let platforms add special handling */
void __weak tee_entry_fast(struct thread_smc_args *args)
{
	__tee_entry_fast(args);
}
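
/*
 * Minimal sketch of such special handling, under an illustrative guard
 * macro that is never defined here: a platform could provide a strong
 * tee_entry_fast() that serves extra vendor-specific fast calls and
 * forwards everything else to __tee_entry_fast(). The function ID
 * EXAMPLE_PLAT_SMC_GET_BOARD_ID and the returned value are made up.
 */
#ifdef EXAMPLE_PLATFORM_OVERRIDE
void tee_entry_fast(struct thread_smc_args *args)
{
	switch (args->a0) {
	case EXAMPLE_PLAT_SMC_GET_BOARD_ID:
		args->a0 = OPTEE_SMC_RETURN_OK;
		args->a1 = 0x1234;	/* made-up board identifier */
		break;
	default:
		/* Keep the generic handling for all standard calls */
		__tee_entry_fast(args);
		break;
	}
}
#endif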

static void get_async_notif_value(struct thread_smc_args *args)
{
	bool value_valid = false;
	bool value_pending = false;

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = notif_get_value(&value_valid, &value_pending);
	args->a2 = 0;
	if (value_valid)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_VALID;
	if (value_pending)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_PENDING;
}

static void tee_entry_watchdog(struct thread_smc_args *args)
{
#if defined(CFG_WDT_SM_HANDLER)
	__wdt_sm_handler(args);
#else
	args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
#endif
}

/*
 * If tee_entry_fast() is overridden, it's still supposed to call this
 * function.
 */
void __tee_entry_fast(struct thread_smc_args *args)
{
	switch (args->a0) {

	/* Generic functions */
	case OPTEE_SMC_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_SMC_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_SMC_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific SMC functions */
#ifdef CFG_CORE_RESERVED_SHM
	case OPTEE_SMC_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
#endif
	case OPTEE_SMC_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_SMC_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_SMC_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_SMC_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_SMC_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;
	case OPTEE_SMC_GET_THREAD_COUNT:
		tee_entry_get_thread_count(args);
		break;

#if defined(CFG_NS_VIRTUALIZATION)
	case OPTEE_SMC_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_SMC_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

	case OPTEE_SMC_ENABLE_ASYNC_NOTIF:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
			notif_deliver_atomic_event(NOTIF_EVENT_STARTED, 0);
			args->a0 = OPTEE_SMC_RETURN_OK;
		} else {
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		}
		break;
	case OPTEE_SMC_GET_ASYNC_NOTIF_VALUE:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF))
			get_async_notif_value(args);
		else
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;

	/* Watchdog entry if handler ID is defined in TOS range */
	case CFG_WDT_SM_HANDLER_ID:
		tee_entry_watchdog(args);
		break;

	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}

size_t tee_entry_generic_get_api_call_count(void)
{
	/*
	 * The number of different calls handled in this file. A specific
	 * target with additional calls should call this function and add
	 * the number of calls it has added itself.
	 */
	size_t ret = 12;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		ret += 2;

	return ret;
}
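
/*
 * Minimal sketch, under an illustrative guard macro that is never defined
 * here: a target with (for example) three extra fast calls could override
 * the weak tee_entry_get_api_call_count() below and add its own count on
 * top of the generic one.
 */
#ifdef EXAMPLE_PLATFORM_OVERRIDE
void tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count() + 3;
}
#endif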

void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}

void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_UID_0;
	args->a1 = OPTEE_MSG_UID_1;
	args->a2 = OPTEE_MSG_UID_2;
	args->a3 = OPTEE_MSG_UID_3;
}

void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}

void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
}

void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}
365