/*
 * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cpus/errata.h>
#include <lib/cpus/wa_cve_2017_5715.h>
#include <lib/cpus/wa_cve_2018_3639.h>
#include <lib/cpus/wa_cve_2022_23960.h>
#include <lib/smccc.h>
#include <services/arm_arch_svc.h>
#include <smccc_helpers.h>
#include <plat/common/platform.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/context_mgmt.h>

static int32_t smccc_version(void)
{
	return MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
}

static int32_t smccc_arch_features(u_register_t arg1)
{
	switch (arg1) {
	case SMCCC_VERSION:
	case SMCCC_ARCH_FEATURES:
		return SMC_ARCH_CALL_SUCCESS;
	case SMCCC_ARCH_SOC_ID:
		return plat_is_smccc_feature_available(arg1);
#ifdef __aarch64__
	/* Workaround checks are currently only implemented for aarch64 */
#if WORKAROUND_CVE_2017_5715
	case SMCCC_ARCH_WORKAROUND_1:
		if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
			return 1;
		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif

#if WORKAROUND_CVE_2018_3639
	case SMCCC_ARCH_WORKAROUND_2: {
#if DYNAMIC_WORKAROUND_CVE_2018_3639
		unsigned long long ssbs;

		/*
		 * Firmware doesn't have to carry out dynamic workaround if the
		 * PE implements architectural Speculation Store Bypass Safe
		 * (SSBS) feature.
		 */
		ssbs = (read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SSBS_SHIFT) &
			ID_AA64PFR1_EL1_SSBS_MASK;

		/*
		 * If architectural SSBS is available on this PE, no firmware
		 * mitigation via SMCCC_ARCH_WORKAROUND_2 is required.
		 */
		if (ssbs != SSBS_NOT_IMPLEMENTED)
			return 1;

		/*
		 * On a platform where at least one CPU requires
		 * dynamic mitigation but others are either unaffected
		 * or permanently mitigated, report the latter as not
		 * needing dynamic mitigation.
		 */
		if (wa_cve_2018_3639_get_disable_ptr() == NULL)
			return 1;
		/*
		 * If we get here, this CPU requires dynamic mitigation
		 * so report it as such.
		 */
		return 0;
#else
		/* Either the CPUs are unaffected or permanently mitigated */
		return SMC_ARCH_CALL_NOT_REQUIRED;
#endif
	}
#endif

#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
	case SMCCC_ARCH_WORKAROUND_3:
		/*
		 * SMCCC_ARCH_WORKAROUND_3 should also take into account
		 * CVE-2017-5715 since this SMC can be used instead of
		 * SMCCC_ARCH_WORKAROUND_1.
		 */
		if ((check_smccc_arch_wa3_applies() == ERRATA_NOT_APPLIES) &&
		    (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)) {
			return 1;
		}
		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif

#if ARCH_FEATURE_AVAILABILITY
	case SMCCC_ARCH_FEATURE_AVAILABILITY:
		return SMC_ARCH_CALL_SUCCESS;
#endif /* ARCH_FEATURE_AVAILABILITY */

#endif /* __aarch64__ */

	/* Fallthrough */

	default:
		return SMC_UNK;
	}
}

/*
 * Return the SoC revision or SoC version on success; otherwise
 * return an invalid parameter error.
 */
static int32_t smccc_arch_id(u_register_t arg1)
{
	if (arg1 == SMCCC_GET_SOC_REVISION) {
		return plat_get_soc_revision();
	}
	if (arg1 == SMCCC_GET_SOC_VERSION) {
		return plat_get_soc_version();
	}
	return SMC_ARCH_CALL_INVAL_PARAM;
}

/*
 * Reads a system register, sanitises its value, and returns a bitmask
 * representing which features in that sysreg have been enabled by firmware.
 * The bitmask is a 1:1 mapping to the register's fields.
 */
#if ARCH_FEATURE_AVAILABILITY
static uintptr_t smccc_arch_feature_availability(u_register_t reg,
						 void *handle,
						 u_register_t flags)
{
	cpu_context_t *caller_context;
	per_world_context_t *caller_per_world_context;
	el3_state_t *state;
	u_register_t bitmask, check;

	/* Check the caller's security state */
	if (is_caller_secure(flags)) {
		caller_context = cm_get_context(SECURE);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_SECURE];
	} else if (is_caller_non_secure(flags)) {
		caller_context = cm_get_context(NON_SECURE);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_NS];
	} else {
#if ENABLE_RME
		caller_context = cm_get_context(REALM);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_REALM];
#else /* !ENABLE_RME */
		assert(0); /* shouldn't be possible */
#endif /* ENABLE_RME */
	}

	state = get_el3state_ctx(caller_context);

	switch (reg) {
	case SCR_EL3_OPCODE:
		bitmask = read_ctx_reg(state, CTX_SCR_EL3);
		bitmask &= ~SCR_EL3_IGNORED;
		check = bitmask & ~SCR_EL3_FEATS;
		bitmask &= SCR_EL3_FEATS;
		bitmask ^= SCR_EL3_FLIPPED;
		/* Will only report 0 if neither is implemented */
		if (is_feat_rng_trap_supported() || is_feat_rng_present())
			bitmask |= SCR_TRNDR_BIT;
		break;
	case CPTR_EL3_OPCODE:
		bitmask = caller_per_world_context->ctx_cptr_el3;
		check = bitmask & ~CPTR_EL3_FEATS;
		bitmask &= CPTR_EL3_FEATS;
		bitmask ^= CPTR_EL3_FLIPPED;
		break;
	case MDCR_EL3_OPCODE:
		bitmask = read_ctx_reg(state, CTX_MDCR_EL3);
		bitmask &= ~MDCR_EL3_IGNORED;
		check = bitmask & ~MDCR_EL3_FEATS;
		bitmask &= MDCR_EL3_FEATS;
		bitmask ^= MDCR_EL3_FLIPPED;
		break;
#if ENABLE_FEAT_MPAM
	case MPAM3_EL3_OPCODE:
		bitmask = caller_per_world_context->ctx_mpam3_el3;
		bitmask &= ~MPAM3_EL3_IGNORED;
		check = bitmask & ~MPAM3_EL3_FEATS;
		bitmask &= MPAM3_EL3_FEATS;
		bitmask ^= MPAM3_EL3_FLIPPED;
		break;
#endif /* ENABLE_FEAT_MPAM */
	default:
		SMC_RET2(handle, SMC_INVALID_PARAM, ULL(0));
	}

	/*
	 * Failing this check means that the requested register has a bit set
	 * that hasn't been declared as a known feature bit or an ignore bit.
	 * This is likely to happen when support for a new feature is added
	 * but the bitmask macros are not updated.
	 */
	if (ENABLE_ASSERTIONS && check != 0) {
		ERROR("Unexpected bits 0x%lx were set in register %lx!\n", check, reg);
		assert(0);
	}

	SMC_RET2(handle, SMC_ARCH_CALL_SUCCESS, bitmask);
}
#endif /* ARCH_FEATURE_AVAILABILITY */

/*
 * Top-level Arm Architectural Service SMC handler.
 */
static uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid,
					  u_register_t x1,
					  u_register_t x2,
					  u_register_t x3,
					  u_register_t x4,
					  void *cookie,
					  void *handle,
					  u_register_t flags)
{
	switch (smc_fid) {
	case SMCCC_VERSION:
		SMC_RET1(handle, smccc_version());
	case SMCCC_ARCH_FEATURES:
		SMC_RET1(handle, smccc_arch_features(x1));
	case SMCCC_ARCH_SOC_ID:
		SMC_RET1(handle, smccc_arch_id(x1));
#ifdef __aarch64__
#if WORKAROUND_CVE_2017_5715
	case SMCCC_ARCH_WORKAROUND_1:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if WORKAROUND_CVE_2018_3639
	case SMCCC_ARCH_WORKAROUND_2:
		/*
		 * The workaround has already been applied on affected PEs
		 * requiring dynamic mitigation during entry to EL3.
		 * On unaffected or statically mitigated PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
	case SMCCC_ARCH_WORKAROUND_3:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#endif /* __aarch64__ */
#if ARCH_FEATURE_AVAILABILITY
	/* The return value is 64-bit, so only reply to SMC64 requests */
	case SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT):
		return smccc_arch_feature_availability(x1, handle, flags);
#endif /* ARCH_FEATURE_AVAILABILITY */
	default:
		WARN("Unimplemented Arm Architecture Service Call: 0x%x\n",
		     smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/* Register Arm Architectural Service Calls as a runtime service */
DECLARE_RT_SVC(
	arm_arch_svc,
	OEN_ARM_START,
	OEN_ARM_END,
	SMC_TYPE_FAST,
	NULL,
	arm_arch_svc_smc_handler
);