// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

/*
 * Fast-call SMC entry points for OP-TEE core.
 *
 * Each handler receives the SMC argument registers in *args and writes
 * its result back into the same structure: a0 always carries an
 * OPTEE_SMC_RETURN_* status (or a counter/ID for the generic info
 * calls), a1..a3 carry call-specific result values.
 */

#include <config.h>
#include <kernel/boot.h>
#include <kernel/misc.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <tee/entry_fast.h>

#ifdef CFG_CORE_RESERVED_SHM
/*
 * Report the reserved non-secure shared memory configuration:
 * a1 = physical base address, a2 = size in bytes,
 * a3 = whether the region is mapped cached in secure world.
 */
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}
#endif

/*
 * Handle the OPTEE_SMC_L2CC_MUTEX fast call: a1 selects the
 * sub-command (get/set the physical address of the L2 cache controller
 * mutex, or enable/disable its use).  Only implemented on ARM32; on
 * other architectures every sub-command yields
 * TEE_ERROR_NOT_SUPPORTED, which is reported as
 * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION below.
 */
static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
{
	TEE_Result ret;
#ifdef ARM32
	paddr_t pa = 0;

	switch (args->a1) {
	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
		ret = tee_get_l2cc_mutex(&pa);
		/* 64-bit mutex address is returned split across a2/a3 */
		reg_pair_from_64(pa, &args->a2, &args->a3);
		break;
	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
		pa = reg_pair_to_64(args->a2, args->a3);
		ret = tee_set_l2cc_mutex(&pa);
		break;
	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
		ret = tee_enable_l2cc_mutex();
		break;
	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
		ret = tee_disable_l2cc_mutex();
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
		return;
	}
#else
	ret = TEE_ERROR_NOT_SUPPORTED;
#endif
	/* Map the TEE_Result onto the SMC return code convention */
	if (ret == TEE_ERROR_NOT_SUPPORTED)
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
	else if (ret)
		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

/*
 * Negotiate capabilities with the normal world.  On entry a1 holds the
 * normal world capability bits; any bit other than
 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR is rejected with
 * OPTEE_SMC_RETURN_ENOTAVAIL.  On success a1 is rewritten with the
 * secure world capability bits derived from the build configuration.
 */
static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	bool dyn_shm_en __maybe_unused = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with shared bit cleared for
	 * uniprocessor systems we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = 0;
#ifdef CFG_CORE_RESERVED_SHM
	args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
#endif
	if (IS_ENABLED(CFG_VIRTUALIZATION))
		args->a1 |= OPTEE_SMC_SEC_CAP_VIRTUALIZATION;
	args->a1 |= OPTEE_SMC_SEC_CAP_MEMREF_NULL;

#if defined(CFG_CORE_DYN_SHM)
	/* Advertise dynamic SHM only when a non-secure DDR range is known */
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif

	DMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");
}

/*
 * Release the preallocated RPC shared memory cache object.  Returns
 * EBUSY when thread_disable_prealloc_rpc_cache() refuses (presumably
 * because the cache is still in use — confirm against that function),
 * ENOTAVAIL when there was no cached object.  On success the object's
 * 64-bit cookie is returned in a1 (high 32 bits) and a2 (low 32 bits).
 */
static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
{
	uint64_t cookie;

	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
		return;
	}

	if (!cookie) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = cookie >> 32;
	args->a2 = cookie;
}

/* Re-enable the preallocated RPC shared memory cache. */
static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
{
	if (thread_enable_prealloc_rpc_cache())
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
}

/*
 * Release a secondary core for booting.  a1 and a3 are forwarded to
 * boot_core_release(); presumably the core index and the non-secure
 * entry point address — confirm against boot_core_release().  Reports
 * ENOTAVAIL unless CFG_BOOT_SECONDARY_REQUEST is enabled.
 */
static void tee_entry_boot_secondary(struct thread_smc_args *args)
{
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	if (!boot_core_release(args->a1, (paddr_t)(args->a3)))
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
#endif
}

/* Return the configured thread count (CFG_NUM_THREADS) in a1. */
static void tee_entry_get_thread_count(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_NUM_THREADS;
}

#if defined(CFG_VIRTUALIZATION)
/*
 * Notification that the hypervisor created a guest VM (guest ID in
 * a1).  Accepted only when issued by the hypervisor itself, i.e. the
 * client ID in a7 equals HYP_CLNT_ID.
 */
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_created(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

/*
 * Notification that the hypervisor destroyed a guest VM (guest ID in
 * a1).  Same hypervisor-only access check as tee_entry_vm_created().
 */
static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_destroyed(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}
#endif

/* Note: this function is weak to let platforms add special handling */
void __weak tee_entry_fast(struct thread_smc_args *args)
{
	__tee_entry_fast(args);
}

/*
 * If tee_entry_fast() is overridden, it's still supposed to call this
 * function.
 */
void __tee_entry_fast(struct thread_smc_args *args)
{
	/* Dispatch on the SMC function ID in a0 */
	switch (args->a0) {

	/* Generic functions */
	case OPTEE_SMC_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_SMC_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_SMC_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific SMC functions */
#ifdef CFG_CORE_RESERVED_SHM
	case OPTEE_SMC_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
#endif
	case OPTEE_SMC_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_SMC_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_SMC_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_SMC_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_SMC_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;
	case OPTEE_SMC_GET_THREAD_COUNT:
		tee_entry_get_thread_count(args);
		break;

#if defined(CFG_VIRTUALIZATION)
	case OPTEE_SMC_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_SMC_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}

/*
 * Number of SMC calls handled by __tee_entry_fast() above: 12 base
 * calls, plus the two VM lifecycle calls when virtualization is
 * enabled.  Keep in sync with the switch in __tee_entry_fast().
 */
size_t tee_entry_generic_get_api_call_count(void)
{
	/*
	 * All the different calls handled in this file. If the specific
	 * target has additional calls it will call this function and
	 * add the number of calls the target has added.
	 */
	size_t ret = 12;

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		ret += 2;

	return ret;
}

/* Weak so platforms with extra fast calls can report a larger count. */
void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}

/* Report the API (OPTEE_MSG) UID in a0..a3. */
void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_UID_0;
	args->a1 = OPTEE_MSG_UID_1;
	args->a2 = OPTEE_MSG_UID_2;
	args->a3 = OPTEE_MSG_UID_3;
}

/* Report the API (OPTEE_MSG) revision: a0 = major, a1 = minor. */
void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}

/* Report the Trusted OS (OP-TEE) UUID in a0..a3. */
void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
}

/* Report the OP-TEE revision: a0 = major, a1 = minor, a2 = git SHA1. */
void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}