xref: /optee_os/core/arch/arm/tee/entry_fast.c (revision 41e5aa8f18c4d48083341ff3df9e75f0c77cf703)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <tee/entry_fast.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <kernel/generic_boot.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/virtualization.h>
#include <kernel/misc.h>
#include <mm/core_mmu.h>

#ifdef CFG_CORE_RESERVED_SHM
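/*
 * Report the reserved (static) shared memory configuration: physical base
 * address, size and whether the region is mapped cached by secure world.
 */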
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}
#endif

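/*
 * Handle the L2 cache controller mutex requests: get or set the mutex
 * address and enable or disable its use. Only implemented for ARM32;
 * otherwise the call is reported as an unknown function.
 */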
static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
{
	TEE_Result ret;
#ifdef ARM32
	paddr_t pa = 0;

	switch (args->a1) {
	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
		ret = tee_get_l2cc_mutex(&pa);
		reg_pair_from_64(pa, &args->a2, &args->a3);
		break;
	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
		pa = reg_pair_to_64(args->a2, args->a3);
		ret = tee_set_l2cc_mutex(&pa);
		break;
	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
		ret = tee_enable_l2cc_mutex();
		break;
	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
		ret = tee_disable_l2cc_mutex();
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
		return;
	}
#else
	ret = TEE_ERROR_NOT_SUPPORTED;
#endif
	if (ret == TEE_ERROR_NOT_SUPPORTED)
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
	else if (ret)
		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

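/*
 * Exchange capabilities with normal world: reject requests carrying
 * unknown non-secure capability bits, then report which shared memory
 * models (reserved and/or dynamic) secure world supports.
 */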
static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	bool dyn_shm_en = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with the shared bit
	 * cleared for uniprocessor systems, we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = 0;
#ifdef CFG_CORE_RESERVED_SHM
	args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
#endif

#if defined(CFG_CORE_DYN_SHM)
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif

	IMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");
}

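/*
 * Release one entry from the preallocated RPC shared memory cache and
 * return its 64-bit cookie split across a1/a2. Reports EBUSY if the cache
 * cannot be disabled at the moment and ENOTAVAIL once it is empty.
 */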
static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
{
	uint64_t cookie;

	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
		return;
	}

	if (!cookie) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = cookie >> 32;
	args->a2 = cookie;
}

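/*
 * Re-enable the preallocated RPC shared memory cache, or report EBUSY if
 * that is not possible right now.
 */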
static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
{
	if (thread_enable_prealloc_rpc_cache())
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
}

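/*
 * Release a secondary core, identified by a1, to start executing at the
 * non-secure entry address passed in a3. Only available when
 * CFG_BOOT_SECONDARY_REQUEST is enabled.
 */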
static void tee_entry_boot_secondary(struct thread_smc_args *args)
{
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	if (!generic_boot_core_release(args->a1, (paddr_t)(args->a3)))
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
#endif
}

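/* Report the number of threads (CFG_NUM_THREADS) OP-TEE core is built with. */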
static void tee_entry_get_thread_count(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_NUM_THREADS;
}

#if defined(CFG_VIRTUALIZATION)
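/*
 * Notification from the hypervisor that a new guest has been created.
 * Requests from any client other than the hypervisor are rejected.
 */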
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = virt_guest_created(guest_id);
}

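/*
 * Notification from the hypervisor that a guest has been destroyed.
 * Requests from any client other than the hypervisor are rejected.
 */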
static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = virt_guest_destroyed(guest_id);
}
#endif

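/*
 * Main dispatcher for fast SMC calls: the function identifier arrives in
 * a0 and the matching handler fills in the results, starting with the
 * return status in a0. Unknown identifiers are answered with
 * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION.
 */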
void tee_entry_fast(struct thread_smc_args *args)
{
	switch (args->a0) {

	/* Generic functions */
	case OPTEE_SMC_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_SMC_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_SMC_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific SMC functions */
#ifdef CFG_CORE_RESERVED_SHM
	case OPTEE_SMC_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
#endif
	case OPTEE_SMC_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_SMC_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_SMC_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_SMC_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_SMC_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;
	case OPTEE_SMC_GET_THREAD_COUNT:
		tee_entry_get_thread_count(args);
		break;

#if defined(CFG_VIRTUALIZATION)
	case OPTEE_SMC_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_SMC_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}

size_t tee_entry_generic_get_api_call_count(void)
{
	/*
	 * All the different calls handled in this file. If the specific
	 * target has additional calls, it will call this function and
	 * add the number of calls the target has added.
	 */
	size_t ret = 12;

#if defined(CFG_VIRTUALIZATION)
	ret += 2;
#endif

	return ret;
}

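/*
 * The generic API queries below are weak so a specific target can
 * override them, for instance when it adds calls on top of the count
 * reported above.
 */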
void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}

void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_UID_0;
	args->a1 = OPTEE_MSG_UID_1;
	args->a2 = OPTEE_MSG_UID_2;
	args->a3 = OPTEE_MSG_UID_3;
}

void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}

void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
}

void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}