// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <tee/entry_fast.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <kernel/boot.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/virtualization.h>
#include <kernel/misc.h>
#include <mm/core_mmu.h>

#ifdef CFG_CORE_RESERVED_SHM
/*
 * OPTEE_SMC_GET_SHM_CONFIG: report the reserved non-secure shared memory
 * region to normal world.
 * Out: a0 = OPTEE_SMC_RETURN_OK, a1 = physical base address,
 * a2 = size in bytes, a3 = nonzero if secure world maps the region cached.
 */
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}
#endif

/*
 * OPTEE_SMC_L2CC_MUTEX: manage the mutex protecting the L2 cache
 * controller shared between secure and normal world.
 * In: a1 = sub-command; for SET_ADDR, a2/a3 = high/low halves of the
 * mutex physical address.
 * Out: a0 = OPTEE_SMC_RETURN_* status; for GET_ADDR, a2/a3 = high/low
 * halves of the mutex physical address.
 * Only implemented for ARM32; otherwise reported as an unknown function.
 */
static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
{
	TEE_Result ret;
#ifdef ARM32
	paddr_t pa = 0;

	switch (args->a1) {
	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
		/* Return the mutex physical address split over a2/a3 */
		ret = tee_get_l2cc_mutex(&pa);
		reg_pair_from_64(pa, &args->a2, &args->a3);
		break;
	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
		/* Caller supplies the mutex physical address in a2/a3 */
		pa = reg_pair_to_64(args->a2, args->a3);
		ret = tee_set_l2cc_mutex(&pa);
		break;
	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
		ret = tee_enable_l2cc_mutex();
		break;
	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
		ret = tee_disable_l2cc_mutex();
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
		return;
	}
#else
	ret = TEE_ERROR_NOT_SUPPORTED;
#endif
	/* Map the TEE_Result onto the SMC return code convention */
	if (ret == TEE_ERROR_NOT_SUPPORTED)
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
	else if (ret)
		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

/*
 * OPTEE_SMC_EXCHANGE_CAPABILITIES: exchange capability bits with normal
 * world.
 * In: a1 = normal world capabilities (only
 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR is recognized).
 * Out: a0 = OPTEE_SMC_RETURN_OK (or ENOTAVAIL on unknown capability),
 * a1 = secure world capability bits, assembled below from the build
 * configuration.
 */
static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	/* __maybe_unused: only read by DMSG()/the CFG_CORE_DYN_SHM branch */
	bool dyn_shm_en __maybe_unused = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with shared bit cleared for
	 * uniprocessor systems we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = 0;
#ifdef CFG_CORE_RESERVED_SHM
	args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
#endif
#ifdef CFG_VIRTUALIZATION
	args->a1 |= OPTEE_SMC_SEC_CAP_VIRTUALIZATION;
#endif
	args->a1 |= OPTEE_SMC_SEC_CAP_MEMREF_NULL;

#if defined(CFG_CORE_DYN_SHM)
	/* Advertise dynamic SHM only if non-secure DDR has been registered */
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif

	DMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");
}

/*
 * OPTEE_SMC_DISABLE_SHM_CACHE: release the preallocated RPC shared memory
 * object cache, one object at a time.
 * Out: a0 = OPTEE_SMC_RETURN_OK with the 64-bit cookie of the released
 * object split over a1 (high)/a2 (low), EBUSY if the cache cannot be
 * disabled right now, or ENOTAVAIL when no object remains (cookie == 0).
 */
static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
{
	uint64_t cookie;

	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
		return;
	}

	if (!cookie) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = cookie >> 32;
	args->a2 = cookie;
}

/*
 * OPTEE_SMC_ENABLE_SHM_CACHE: re-enable the preallocated RPC shared
 * memory object cache.
 * Out: a0 = OPTEE_SMC_RETURN_OK, or EBUSY if it could not be enabled.
 */
static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
{
	if (thread_enable_prealloc_rpc_cache())
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
}

/*
 * OPTEE_SMC_BOOT_SECONDARY: release a secondary core held by secure
 * world so it starts executing at a normal world entry point.
 * In: a1 = core identifier, a3 = non-secure entry physical address.
 * Out: a0 = OPTEE_SMC_RETURN_OK on success, EBADCMD if the release
 * failed, or ENOTAVAIL when CFG_BOOT_SECONDARY_REQUEST is not built in.
 */
static void tee_entry_boot_secondary(struct thread_smc_args *args)
{
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	if (!boot_core_release(args->a1, (paddr_t)(args->a3)))
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
#endif
}

/*
 * OPTEE_SMC_GET_THREAD_COUNT: report how many standard call threads
 * this build of OP-TEE provides.
 * Out: a0 = OPTEE_SMC_RETURN_OK, a1 = CFG_NUM_THREADS.
 */
static void tee_entry_get_thread_count(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_NUM_THREADS;
}

#if defined(CFG_VIRTUALIZATION)
/*
 * OPTEE_SMC_VM_CREATED: hypervisor notification that a guest VM was
 * created.
 * In: a1 = guest identifier, a7 = client identifier (must be
 * HYP_CLNT_ID; guests must not be able to issue this themselves).
 * Out: a0 = OPTEE_SMC_RETURN_OK, or ENOTAVAIL on bad caller or failure
 * to set up per-guest state.
 */
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_created(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

/*
 * OPTEE_SMC_VM_DESTROYED: hypervisor notification that a guest VM was
 * destroyed; tears down the per-guest state.
 * In/out registers as for tee_entry_vm_created() above.
 */
static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_destroyed(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}
#endif

/* Note: this function is weak to let platforms add special handling */
void __weak tee_entry_fast(struct thread_smc_args *args)
{
	__tee_entry_fast(args);
}

/*
 * If tee_entry_fast() is overridden, it's still supposed to call this
 * function.
193 */ 194 void __tee_entry_fast(struct thread_smc_args *args) 195 { 196 switch (args->a0) { 197 198 /* Generic functions */ 199 case OPTEE_SMC_CALLS_COUNT: 200 tee_entry_get_api_call_count(args); 201 break; 202 case OPTEE_SMC_CALLS_UID: 203 tee_entry_get_api_uuid(args); 204 break; 205 case OPTEE_SMC_CALLS_REVISION: 206 tee_entry_get_api_revision(args); 207 break; 208 case OPTEE_SMC_CALL_GET_OS_UUID: 209 tee_entry_get_os_uuid(args); 210 break; 211 case OPTEE_SMC_CALL_GET_OS_REVISION: 212 tee_entry_get_os_revision(args); 213 break; 214 215 /* OP-TEE specific SMC functions */ 216 #ifdef CFG_CORE_RESERVED_SHM 217 case OPTEE_SMC_GET_SHM_CONFIG: 218 tee_entry_get_shm_config(args); 219 break; 220 #endif 221 case OPTEE_SMC_L2CC_MUTEX: 222 tee_entry_fastcall_l2cc_mutex(args); 223 break; 224 case OPTEE_SMC_EXCHANGE_CAPABILITIES: 225 tee_entry_exchange_capabilities(args); 226 break; 227 case OPTEE_SMC_DISABLE_SHM_CACHE: 228 tee_entry_disable_shm_cache(args); 229 break; 230 case OPTEE_SMC_ENABLE_SHM_CACHE: 231 tee_entry_enable_shm_cache(args); 232 break; 233 case OPTEE_SMC_BOOT_SECONDARY: 234 tee_entry_boot_secondary(args); 235 break; 236 case OPTEE_SMC_GET_THREAD_COUNT: 237 tee_entry_get_thread_count(args); 238 break; 239 240 #if defined(CFG_VIRTUALIZATION) 241 case OPTEE_SMC_VM_CREATED: 242 tee_entry_vm_created(args); 243 break; 244 case OPTEE_SMC_VM_DESTROYED: 245 tee_entry_vm_destroyed(args); 246 break; 247 #endif 248 249 default: 250 args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION; 251 break; 252 } 253 } 254 255 size_t tee_entry_generic_get_api_call_count(void) 256 { 257 /* 258 * All the different calls handled in this file. If the specific 259 * target has additional calls it will call this function and 260 * add the number of calls the target has added. 
261 */ 262 size_t ret = 12; 263 264 #if defined(CFG_VIRTUALIZATION) 265 ret += 2; 266 #endif 267 268 return ret; 269 } 270 271 void __weak tee_entry_get_api_call_count(struct thread_smc_args *args) 272 { 273 args->a0 = tee_entry_generic_get_api_call_count(); 274 } 275 276 void __weak tee_entry_get_api_uuid(struct thread_smc_args *args) 277 { 278 args->a0 = OPTEE_MSG_UID_0; 279 args->a1 = OPTEE_MSG_UID_1; 280 args->a2 = OPTEE_MSG_UID_2; 281 args->a3 = OPTEE_MSG_UID_3; 282 } 283 284 void __weak tee_entry_get_api_revision(struct thread_smc_args *args) 285 { 286 args->a0 = OPTEE_MSG_REVISION_MAJOR; 287 args->a1 = OPTEE_MSG_REVISION_MINOR; 288 } 289 290 void __weak tee_entry_get_os_uuid(struct thread_smc_args *args) 291 { 292 args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0; 293 args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1; 294 args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2; 295 args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3; 296 } 297 298 void __weak tee_entry_get_os_revision(struct thread_smc_args *args) 299 { 300 args->a0 = CFG_OPTEE_REVISION_MAJOR; 301 args->a1 = CFG_OPTEE_REVISION_MINOR; 302 args->a2 = TEE_IMPL_GIT_SHA1; 303 } 304