// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <tee/entry_fast.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <kernel/generic_boot.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/virtualization.h>
#include <kernel/misc.h>
#include <mm/core_mmu.h>

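/*
 * Return the physical address, size and cacheability of the default
 * (reserved) non-secure shared memory area in a1-a3.
 */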
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}

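/*
 * Handle the L2CC mutex sub-commands passed in a1: get or set the
 * mutex address, enable or disable its use. Only implemented for
 * ARM32; otherwise the call is reported as an unknown function.
 */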
static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
{
	TEE_Result ret;
#ifdef ARM32
	paddr_t pa = 0;

	switch (args->a1) {
	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
		ret = tee_get_l2cc_mutex(&pa);
		reg_pair_from_64(pa, &args->a2, &args->a3);
		break;
	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
		pa = reg_pair_to_64(args->a2, args->a3);
		ret = tee_set_l2cc_mutex(&pa);
		break;
	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
		ret = tee_enable_l2cc_mutex();
		break;
	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
		ret = tee_disable_l2cc_mutex();
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
		return;
	}
#else
	ret = TEE_ERROR_NOT_SUPPORTED;
#endif
	if (ret == TEE_ERROR_NOT_SUPPORTED)
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
	else if (ret)
		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

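/*
 * Report the secure world capabilities: reserved shared memory is
 * always present, dynamic shared memory only when CFG_DYN_SHM_CAP is
 * enabled and non-secure DDR is defined. Unknown non-secure
 * capabilities in a1 are rejected.
 */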
static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	bool dyn_shm_en = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with the shared bit
	 * cleared for uniprocessor systems, we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;

#if defined(CFG_DYN_SHM_CAP)
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif

	IMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");
}

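/*
 * Disable the cache of preallocated RPC shared memory objects and hand
 * one cached object back to normal world: its cookie is returned in a1
 * (high word) and a2 (low word) so normal world can free it.
 */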
static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
{
	uint64_t cookie;

	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
		return;
	}

	if (!cookie) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = cookie >> 32;
	args->a2 = cookie;
}

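/* Re-enable the cache of preallocated RPC shared memory objects. */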
static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
{
	if (thread_enable_prealloc_rpc_cache())
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
}

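/*
 * Release the secondary core selected by a1 so it starts executing at
 * the entry point passed in a3. Only available with
 * CFG_BOOT_SECONDARY_REQUEST.
 */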
static void tee_entry_boot_secondary(struct thread_smc_args *args)
{
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	if (!generic_boot_core_release(args->a1, (paddr_t)(args->a3)))
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
#endif
}

#if defined(CFG_VIRTUALIZATION)
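/*
 * Notify OP-TEE that the hypervisor has created a guest VM. The guest
 * identifier is passed in a1; the request is only accepted from the
 * hypervisor itself (a7 == HYP_CLNT_ID).
 */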
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only the hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = virt_guest_created(guest_id);
}

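/*
 * Notify OP-TEE that the guest VM identified by a1 has been destroyed.
 * As above, only the hypervisor may issue this request.
 */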
static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only the hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = virt_guest_destroyed(guest_id);
}
#endif

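/*
 * Dispatch a fast SMC: route the function ID in a0 to the matching
 * handler above and leave the result in a0 (and a1-a3 where used).
 */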
void tee_entry_fast(struct thread_smc_args *args)
{
	switch (args->a0) {

	/* Generic functions */
	case OPTEE_SMC_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_SMC_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_SMC_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific SMC functions */
	case OPTEE_SMC_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
	case OPTEE_SMC_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_SMC_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_SMC_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_SMC_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_SMC_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;

#if defined(CFG_VIRTUALIZATION)
	case OPTEE_SMC_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_SMC_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}

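/* Base number of fast call functions handled in this file. */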
size_t tee_entry_generic_get_api_call_count(void)
{
	/*
	 * The number of different calls handled in this file. If a
	 * specific target handles additional calls, it should call this
	 * function and add its own number of calls to the result.
	 */
	size_t ret = 11;

#if defined(CFG_VIRTUALIZATION)
	ret += 2;
#endif

	return ret;
}

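/*
 * The default implementations below are weak so that a platform can
 * override them with its own versions.
 */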
void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}

void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_UID_0;
	args->a1 = OPTEE_MSG_UID_1;
	args->a2 = OPTEE_MSG_UID_2;
	args->a3 = OPTEE_MSG_UID_3;
}

void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}

void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
}

void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}