1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2015-2021, Linaro Limited 4 * Copyright (c) 2014, STMicroelectronics International N.V. 5 */ 6 7 #include <config.h> 8 #include <drivers/wdt.h> 9 #include <kernel/boot.h> 10 #include <kernel/notif.h> 11 #include <kernel/tee_l2cc_mutex.h> 12 #include <kernel/virtualization.h> 13 #include <mm/core_mmu.h> 14 #include <optee_msg.h> 15 #include <sm/optee_smc.h> 16 #include <tee/entry_fast.h> 17 18 #ifdef CFG_CORE_RESERVED_SHM 19 static void tee_entry_get_shm_config(struct thread_smc_args *args) 20 { 21 args->a0 = OPTEE_SMC_RETURN_OK; 22 args->a1 = default_nsec_shm_paddr; 23 args->a2 = default_nsec_shm_size; 24 /* Should this be TEESMC cache attributes instead? */ 25 args->a3 = core_mmu_is_shm_cached(); 26 } 27 #endif 28 29 static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args) 30 { 31 #ifdef ARM32 32 TEE_Result ret = TEE_ERROR_NOT_SUPPORTED; 33 paddr_t pa = 0; 34 35 switch (args->a1) { 36 case OPTEE_SMC_L2CC_MUTEX_GET_ADDR: 37 ret = tee_get_l2cc_mutex(&pa); 38 reg_pair_from_64(pa, &args->a2, &args->a3); 39 break; 40 case OPTEE_SMC_L2CC_MUTEX_SET_ADDR: 41 pa = reg_pair_to_64(args->a2, args->a3); 42 ret = tee_set_l2cc_mutex(&pa); 43 break; 44 case OPTEE_SMC_L2CC_MUTEX_ENABLE: 45 ret = tee_enable_l2cc_mutex(); 46 break; 47 case OPTEE_SMC_L2CC_MUTEX_DISABLE: 48 ret = tee_disable_l2cc_mutex(); 49 break; 50 default: 51 args->a0 = OPTEE_SMC_RETURN_EBADCMD; 52 return; 53 } 54 55 if (ret == TEE_ERROR_NOT_SUPPORTED) 56 args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION; 57 else if (ret) 58 args->a0 = OPTEE_SMC_RETURN_EBADADDR; 59 else 60 args->a0 = OPTEE_SMC_RETURN_OK; 61 #else 62 args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION; 63 #endif 64 } 65 66 static void tee_entry_exchange_capabilities(struct thread_smc_args *args) 67 { 68 bool res_shm_en = IS_ENABLED(CFG_CORE_RESERVED_SHM); 69 bool dyn_shm_en __maybe_unused = false; 70 71 /* 72 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR. 73 * 74 * The memory mapping of shared memory is defined as normal 75 * shared memory for SMP systems and normal memory for UP 76 * systems. Currently we map all memory as shared in secure 77 * world. 78 * 79 * When translation tables are created with shared bit cleared for 80 * uniprocessor systems we'll need to check 81 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR. 82 */ 83 84 if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) { 85 /* Unknown capability. */ 86 args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL; 87 return; 88 } 89 90 args->a0 = OPTEE_SMC_RETURN_OK; 91 args->a1 = 0; 92 93 if (res_shm_en) 94 args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM; 95 IMSG("Reserved shared memory is %sabled", res_shm_en ? "en" : "dis"); 96 97 #if defined(CFG_CORE_DYN_SHM) 98 dyn_shm_en = core_mmu_nsec_ddr_is_defined(); 99 if (dyn_shm_en) 100 args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM; 101 #endif 102 IMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis"); 103 104 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) 105 args->a1 |= OPTEE_SMC_SEC_CAP_VIRTUALIZATION; 106 IMSG("Normal World virtualization support is %sabled", 107 IS_ENABLED(CFG_NS_VIRTUALIZATION) ? "en" : "dis"); 108 109 args->a1 |= OPTEE_SMC_SEC_CAP_MEMREF_NULL; 110 111 if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) { 112 args->a1 |= OPTEE_SMC_SEC_CAP_ASYNC_NOTIF; 113 args->a2 = NOTIF_VALUE_MAX; 114 } 115 IMSG("Asynchronous notifications are %sabled", 116 IS_ENABLED(CFG_CORE_ASYNC_NOTIF) ? "en" : "dis"); 117 118 args->a1 |= OPTEE_SMC_SEC_CAP_RPC_ARG; 119 args->a3 = THREAD_RPC_MAX_NUM_PARAMS; 120 } 121 122 static void tee_entry_disable_shm_cache(struct thread_smc_args *args) 123 { 124 uint64_t cookie; 125 126 if (!thread_disable_prealloc_rpc_cache(&cookie)) { 127 args->a0 = OPTEE_SMC_RETURN_EBUSY; 128 return; 129 } 130 131 if (!cookie) { 132 args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL; 133 return; 134 } 135 136 args->a0 = OPTEE_SMC_RETURN_OK; 137 args->a1 = cookie >> 32; 138 args->a2 = cookie; 139 } 140 141 static void tee_entry_enable_shm_cache(struct thread_smc_args *args) 142 { 143 if (thread_enable_prealloc_rpc_cache()) 144 args->a0 = OPTEE_SMC_RETURN_OK; 145 else 146 args->a0 = OPTEE_SMC_RETURN_EBUSY; 147 } 148 149 static void tee_entry_boot_secondary(struct thread_smc_args *args) 150 { 151 #if defined(CFG_BOOT_SECONDARY_REQUEST) 152 if (!boot_core_release(args->a1, (paddr_t)(args->a3))) 153 args->a0 = OPTEE_SMC_RETURN_OK; 154 else 155 args->a0 = OPTEE_SMC_RETURN_EBADCMD; 156 #else 157 args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL; 158 #endif 159 } 160 161 static void tee_entry_get_thread_count(struct thread_smc_args *args) 162 { 163 args->a0 = OPTEE_SMC_RETURN_OK; 164 args->a1 = CFG_NUM_THREADS; 165 } 166 167 #if defined(CFG_NS_VIRTUALIZATION) 168 static void tee_entry_vm_created(struct thread_smc_args *args) 169 { 170 uint16_t guest_id = args->a1; 171 172 /* Only hypervisor can issue this request */ 173 if (args->a7 != HYP_CLNT_ID) { 174 args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL; 175 return; 176 } 177 178 if (virt_guest_created(guest_id)) 179 args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL; 180 else 181 args->a0 = OPTEE_SMC_RETURN_OK; 182 } 183 184 static void tee_entry_vm_destroyed(struct thread_smc_args *args) 185 { 186 uint16_t guest_id = args->a1; 187 188 /* Only hypervisor can issue this request */ 189 if (args->a7 != HYP_CLNT_ID) { 190 args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL; 191 return; 192 } 193 194 if (virt_guest_destroyed(guest_id)) 195 args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL; 196 else 197 args->a0 = OPTEE_SMC_RETURN_OK; 198 } 199 #endif 200 201 /* Note: this function is weak to let platforms add special handling */ 202 void __weak tee_entry_fast(struct thread_smc_args *args) 203 { 204 __tee_entry_fast(args); 205 } 206 207 static void get_async_notif_value(struct thread_smc_args *args) 208 { 209 bool value_valid = false; 210 bool value_pending = false; 211 212 args->a0 = OPTEE_SMC_RETURN_OK; 213 args->a1 = notif_get_value(&value_valid, &value_pending); 214 args->a2 = 0; 215 if (value_valid) 216 args->a2 |= OPTEE_SMC_ASYNC_NOTIF_VALID; 217 if (value_pending) 218 args->a2 |= OPTEE_SMC_ASYNC_NOTIF_PENDING; 219 } 220 221 static void tee_entry_watchdog(struct thread_smc_args *args) 222 { 223 #if defined(CFG_WDT_SM_HANDLER) 224 __wdt_sm_handler(args); 225 #else 226 args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION; 227 #endif 228 } 229 230 /* 231 * If tee_entry_fast() is overridden, it's still supposed to call this 232 * function. 233 */ 234 void __tee_entry_fast(struct thread_smc_args *args) 235 { 236 switch (args->a0) { 237 238 /* Generic functions */ 239 case OPTEE_SMC_CALLS_COUNT: 240 tee_entry_get_api_call_count(args); 241 break; 242 case OPTEE_SMC_CALLS_UID: 243 tee_entry_get_api_uuid(args); 244 break; 245 case OPTEE_SMC_CALLS_REVISION: 246 tee_entry_get_api_revision(args); 247 break; 248 case OPTEE_SMC_CALL_GET_OS_UUID: 249 tee_entry_get_os_uuid(args); 250 break; 251 case OPTEE_SMC_CALL_GET_OS_REVISION: 252 tee_entry_get_os_revision(args); 253 break; 254 255 /* OP-TEE specific SMC functions */ 256 #ifdef CFG_CORE_RESERVED_SHM 257 case OPTEE_SMC_GET_SHM_CONFIG: 258 tee_entry_get_shm_config(args); 259 break; 260 #endif 261 case OPTEE_SMC_L2CC_MUTEX: 262 tee_entry_fastcall_l2cc_mutex(args); 263 break; 264 case OPTEE_SMC_EXCHANGE_CAPABILITIES: 265 tee_entry_exchange_capabilities(args); 266 break; 267 case OPTEE_SMC_DISABLE_SHM_CACHE: 268 tee_entry_disable_shm_cache(args); 269 break; 270 case OPTEE_SMC_ENABLE_SHM_CACHE: 271 tee_entry_enable_shm_cache(args); 272 break; 273 case OPTEE_SMC_BOOT_SECONDARY: 274 tee_entry_boot_secondary(args); 275 break; 276 case OPTEE_SMC_GET_THREAD_COUNT: 277 tee_entry_get_thread_count(args); 278 break; 279 280 #if defined(CFG_NS_VIRTUALIZATION) 281 case OPTEE_SMC_VM_CREATED: 282 tee_entry_vm_created(args); 283 break; 284 case OPTEE_SMC_VM_DESTROYED: 285 tee_entry_vm_destroyed(args); 286 break; 287 #endif 288 289 case OPTEE_SMC_ENABLE_ASYNC_NOTIF: 290 if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) { 291 notif_deliver_atomic_event(NOTIF_EVENT_STARTED); 292 args->a0 = OPTEE_SMC_RETURN_OK; 293 } else { 294 args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION; 295 } 296 break; 297 case OPTEE_SMC_GET_ASYNC_NOTIF_VALUE: 298 if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) 299 get_async_notif_value(args); 300 else 301 args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION; 302 break; 303 304 /* Watchdog entry if handler ID is defined in TOS range */ 305 case CFG_WDT_SM_HANDLER_ID: 306 tee_entry_watchdog(args); 307 break; 308 309 default: 310 args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION; 311 break; 312 } 313 } 314 315 size_t tee_entry_generic_get_api_call_count(void) 316 { 317 /* 318 * All the different calls handled in this file. If the specific 319 * target has additional calls it will call this function and 320 * add the number of calls the target has added. 321 */ 322 size_t ret = 12; 323 324 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) 325 ret += 2; 326 327 return ret; 328 } 329 330 void __weak tee_entry_get_api_call_count(struct thread_smc_args *args) 331 { 332 args->a0 = tee_entry_generic_get_api_call_count(); 333 } 334 335 void __weak tee_entry_get_api_uuid(struct thread_smc_args *args) 336 { 337 args->a0 = OPTEE_MSG_UID_0; 338 args->a1 = OPTEE_MSG_UID_1; 339 args->a2 = OPTEE_MSG_UID_2; 340 args->a3 = OPTEE_MSG_UID_3; 341 } 342 343 void __weak tee_entry_get_api_revision(struct thread_smc_args *args) 344 { 345 args->a0 = OPTEE_MSG_REVISION_MAJOR; 346 args->a1 = OPTEE_MSG_REVISION_MINOR; 347 } 348 349 void __weak tee_entry_get_os_uuid(struct thread_smc_args *args) 350 { 351 args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0; 352 args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1; 353 args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2; 354 args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3; 355 } 356 357 void __weak tee_entry_get_os_revision(struct thread_smc_args *args) 358 { 359 args->a0 = CFG_OPTEE_REVISION_MAJOR; 360 args->a1 = CFG_OPTEE_REVISION_MINOR; 361 args->a2 = TEE_IMPL_GIT_SHA1; 362 } 363