xref: /optee_os/core/arch/arm/tee/entry_fast.c (revision 039e02df2716a0ed886b56e1e07b7ac1d8597228)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <config.h>
8 #include <kernel/boot.h>
9 #include <kernel/misc.h>
10 #include <kernel/notif.h>
11 #include <kernel/tee_l2cc_mutex.h>
12 #include <kernel/virtualization.h>
13 #include <mm/core_mmu.h>
14 #include <optee_msg.h>
15 #include <sm/optee_smc.h>
16 #include <tee/entry_fast.h>
17 
#ifdef CFG_CORE_RESERVED_SHM
/*
 * OPTEE_SMC_GET_SHM_CONFIG handler: reports the reserved non-secure
 * shared memory pool to the normal world — physical start address (a1),
 * size in bytes (a2) and whether the memory is mapped cached (a3).
 */
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}
#endif
28 
/*
 * OPTEE_SMC_L2CC_MUTEX handler: manages the mutex used to coordinate
 * L2 cache controller maintenance with the normal world.  The
 * sub-command is passed in a1: get/set the mutex physical address
 * (64-bit value carried in the a2/a3 register pair) or enable/disable
 * use of the mutex.  Only supported on ARM32 builds; elsewhere the
 * call is reported as an unknown function.
 */
static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
{
	TEE_Result ret;
#ifdef ARM32
	paddr_t pa = 0;

	switch (args->a1) {
	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
		ret = tee_get_l2cc_mutex(&pa);
		/* Hand the mutex PA back split over a2/a3 */
		reg_pair_from_64(pa, &args->a2, &args->a3);
		break;
	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
		/* Normal world supplies the PA split over a2/a3 */
		pa = reg_pair_to_64(args->a2, args->a3);
		ret = tee_set_l2cc_mutex(&pa);
		break;
	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
		ret = tee_enable_l2cc_mutex();
		break;
	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
		ret = tee_disable_l2cc_mutex();
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
		return;
	}
#else
	ret = TEE_ERROR_NOT_SUPPORTED;
#endif
	/* Translate the TEE_Result into the SMC return convention */
	if (ret == TEE_ERROR_NOT_SUPPORTED)
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
	else if (ret)
		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}
64 
/*
 * OPTEE_SMC_EXCHANGE_CAPABILITIES handler: the normal world announces
 * its capabilities in a1; on success this replies with the secure world
 * capability bits in a1, the maximum asynchronous notification value in
 * a2 (only written when CFG_CORE_ASYNC_NOTIF is enabled) and the
 * maximum number of RPC parameters in a3.
 */
static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	/* Only written under CFG_CORE_DYN_SHM, hence __maybe_unused */
	bool dyn_shm_en __maybe_unused = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with shared bit cleared for
	 * uniprocessor systems we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	/* Build the secure world capability mask from scratch */
	args->a1 = 0;
#ifdef CFG_CORE_RESERVED_SHM
	args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
#endif
	if (IS_ENABLED(CFG_VIRTUALIZATION))
		args->a1 |= OPTEE_SMC_SEC_CAP_VIRTUALIZATION;
	args->a1 |= OPTEE_SMC_SEC_CAP_MEMREF_NULL;
	if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
		args->a1 |= OPTEE_SMC_SEC_CAP_ASYNC_NOTIF;
		args->a2 = NOTIF_VALUE_MAX;
	}
	DMSG("Asynchronous notifications are %sabled",
	     IS_ENABLED(CFG_CORE_ASYNC_NOTIF) ? "en" : "dis");

#if defined(CFG_CORE_DYN_SHM)
	/* Advertise dynamic SHM only when non-secure DDR is registered */
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif

	DMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");

	args->a1 |= OPTEE_SMC_SEC_CAP_RPC_ARG;
	args->a3 = THREAD_RPC_MAX_NUM_PARAMS;
}
114 
115 static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
116 {
117 	uint64_t cookie;
118 
119 	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
120 		args->a0 = OPTEE_SMC_RETURN_EBUSY;
121 		return;
122 	}
123 
124 	if (!cookie) {
125 		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
126 		return;
127 	}
128 
129 	args->a0 = OPTEE_SMC_RETURN_OK;
130 	args->a1 = cookie >> 32;
131 	args->a2 = cookie;
132 }
133 
134 static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
135 {
136 	if (thread_enable_prealloc_rpc_cache())
137 		args->a0 = OPTEE_SMC_RETURN_OK;
138 	else
139 		args->a0 = OPTEE_SMC_RETURN_EBUSY;
140 }
141 
142 static void tee_entry_boot_secondary(struct thread_smc_args *args)
143 {
144 #if defined(CFG_BOOT_SECONDARY_REQUEST)
145 	if (!boot_core_release(args->a1, (paddr_t)(args->a3)))
146 		args->a0 = OPTEE_SMC_RETURN_OK;
147 	else
148 		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
149 #else
150 	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
151 #endif
152 }
153 
/*
 * OPTEE_SMC_GET_THREAD_COUNT handler: reports in a1 the number of
 * secure world threads (CFG_NUM_THREADS).
 */
static void tee_entry_get_thread_count(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_NUM_THREADS;
}
159 
#if defined(CFG_VIRTUALIZATION)
/*
 * Both VM lifecycle SMCs are restricted to the hypervisor: its client
 * ID (HYP_CLNT_ID) must be present in a7.
 */
static bool is_hypervisor_call(const struct thread_smc_args *args)
{
	return args->a7 == HYP_CLNT_ID;
}

/*
 * OPTEE_SMC_VM_CREATED handler: notifies the virtualization layer that
 * the guest identified by a1 has been created.  Rejected with
 * ENOTAVAIL when the caller isn't the hypervisor or when the
 * virtualization layer fails the request.
 */
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	if (!is_hypervisor_call(args) || virt_guest_created(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

/*
 * OPTEE_SMC_VM_DESTROYED handler: notifies the virtualization layer
 * that the guest identified by a1 has been destroyed.  Same access and
 * error rules as tee_entry_vm_created().
 */
static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	if (!is_hypervisor_call(args) || virt_guest_destroyed(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}
#endif
193 
/*
 * Entry point for all fast SMCs; simply forwards to the generic
 * dispatcher.  Note: this function is weak to let platforms add
 * special handling.
 */
void __weak tee_entry_fast(struct thread_smc_args *args)
{
	__tee_entry_fast(args);
}
199 
200 static void get_async_notif_value(struct thread_smc_args *args)
201 {
202 	bool value_valid = false;
203 	bool value_pending = false;
204 
205 	args->a0 = OPTEE_SMC_RETURN_OK;
206 	args->a1 = notif_get_value(&value_valid, &value_pending);
207 	args->a2 = 0;
208 	if (value_valid)
209 		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_VALID;
210 	if (value_pending)
211 		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_PENDING;
212 }
213 
/*
 * If tee_entry_fast() is overridden, it's still supposed to call this
 * function.
 *
 * Dispatches a fast SMC on the function ID in a0 and fills in the
 * result registers of @args in place.  Unknown IDs are answered with
 * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION.
 */
void __tee_entry_fast(struct thread_smc_args *args)
{
	switch (args->a0) {

	/* Generic functions */
	case OPTEE_SMC_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_SMC_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_SMC_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific SMC functions */
#ifdef CFG_CORE_RESERVED_SHM
	case OPTEE_SMC_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
#endif
	case OPTEE_SMC_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_SMC_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_SMC_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_SMC_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_SMC_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;
	case OPTEE_SMC_GET_THREAD_COUNT:
		tee_entry_get_thread_count(args);
		break;

#if defined(CFG_VIRTUALIZATION)
	/* VM lifecycle calls, only available with virtualization support */
	case OPTEE_SMC_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_SMC_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

	case OPTEE_SMC_ENABLE_ASYNC_NOTIF:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
			/* Tell waiting drivers that notifications are live */
			notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
			args->a0 = OPTEE_SMC_RETURN_OK;
		} else {
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		}
		break;
	case OPTEE_SMC_GET_ASYNC_NOTIF_VALUE:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF))
			get_async_notif_value(args);
		else
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;

	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}
293 
294 size_t tee_entry_generic_get_api_call_count(void)
295 {
296 	/*
297 	 * All the different calls handled in this file. If the specific
298 	 * target has additional calls it will call this function and
299 	 * add the number of calls the target has added.
300 	 */
301 	size_t ret = 12;
302 
303 	if (IS_ENABLED(CFG_VIRTUALIZATION))
304 		ret += 2;
305 
306 	return ret;
307 }
308 
/*
 * Weak OPTEE_SMC_CALLS_COUNT handler: reports the generic call count
 * in a0.  Platforms overriding this are expected to add their own
 * calls on top of tee_entry_generic_get_api_call_count().
 */
void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}
313 
/*
 * Weak OPTEE_SMC_CALLS_UID handler: reports the OPTEE_MSG API UID as
 * four 32-bit words in a0..a3.
 */
void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_UID_0;
	args->a1 = OPTEE_MSG_UID_1;
	args->a2 = OPTEE_MSG_UID_2;
	args->a3 = OPTEE_MSG_UID_3;
}
321 
/*
 * Weak OPTEE_SMC_CALLS_REVISION handler: reports the OPTEE_MSG API
 * revision — major in a0, minor in a1.
 */
void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}
327 
/*
 * Weak OPTEE_SMC_CALL_GET_OS_UUID handler: reports the OP-TEE OS UUID
 * as four 32-bit words in a0..a3.
 */
void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
}
335 
/*
 * Weak OPTEE_SMC_CALL_GET_OS_REVISION handler: reports the OP-TEE OS
 * revision — major in a0, minor in a1 and the build's git commit
 * identifier (TEE_IMPL_GIT_SHA1) in a2.
 */
void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}
342