/*
 * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cpus/errata.h>
#include <lib/smccc.h>
#include <services/arm_arch_svc.h>
#include <smccc_helpers.h>
#include <plat/common/platform.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/context_mgmt.h>

static int32_t smccc_version(void)
{
	return MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
}

static int32_t smccc_arch_features(u_register_t arg1)
{
	switch (arg1) {
	case SMCCC_VERSION:
	case SMCCC_ARCH_FEATURES:
		return SMC_ARCH_CALL_SUCCESS;
	case SMCCC_ARCH_SOC_ID:
		return plat_is_smccc_feature_available(arg1);
#ifdef __aarch64__
	/* Workaround checks are currently only implemented for aarch64 */
#if WORKAROUND_CVE_2017_5715
	case SMCCC_ARCH_WORKAROUND_1:
		if (check_erratum_applies(CVE(2017, 5715))
		    == ERRATA_NOT_APPLIES) {
			return 1;
		}

		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif

#if WORKAROUND_CVE_2018_3639
	case SMCCC_ARCH_WORKAROUND_2: {
#if DYNAMIC_WORKAROUND_CVE_2018_3639
		unsigned long long ssbs;

		/*
		 * Firmware doesn't have to carry out dynamic workaround if the
		 * PE implements architectural Speculation Store Bypass Safe
		 * (SSBS) feature.
		 */
		ssbs = (read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SSBS_SHIFT) &
			ID_AA64PFR1_EL1_SSBS_MASK;

		/*
		 * If architectural SSBS is available on this PE, no firmware
		 * mitigation via SMCCC_ARCH_WORKAROUND_2 is required.
		 */
		if (ssbs != SSBS_NOT_IMPLEMENTED)
			return 1;

		/*
		 * On a platform where at least one CPU requires
		 * dynamic mitigation but others are either unaffected
		 * or permanently mitigated, report the latter as not
		 * needing dynamic mitigation.
		 */
		if (check_erratum_applies(ERRATUM(ARCH_WORKAROUND_2))
		    == ERRATA_NOT_APPLIES)
			return 1;

		/*
		 * If we get here, this CPU requires dynamic mitigation
		 * so report it as such.
		 */
		return 0;
#else
		/* Either the CPUs are unaffected or permanently mitigated */
		return SMC_ARCH_CALL_NOT_REQUIRED;
#endif
	}
#endif

#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
	case SMCCC_ARCH_WORKAROUND_3:
		/*
		 * SMCCC_ARCH_WORKAROUND_3 should also take into account
		 * CVE-2017-5715 since this SMC can be used instead of
		 * SMCCC_ARCH_WORKAROUND_1.
		 */
		if ((check_erratum_applies(ERRATUM(ARCH_WORKAROUND_3))
		     == ERRATA_NOT_APPLIES) &&
		    (check_erratum_applies(CVE(2017, 5715))
		     == ERRATA_NOT_APPLIES)) {
			return 1;
		}

		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif

#if ARCH_FEATURE_AVAILABILITY
	case SMCCC_ARCH_FEATURE_AVAILABILITY:
		return SMC_ARCH_CALL_SUCCESS;
#endif /* ARCH_FEATURE_AVAILABILITY */

#if WORKAROUND_CVE_2024_7881
	case SMCCC_ARCH_WORKAROUND_4:
		if (check_erratum_applies(CVE(2024, 7881)) != ERRATA_APPLIES) {
			return SMC_ARCH_CALL_NOT_SUPPORTED;
		}
		return 0;
#endif /* WORKAROUND_CVE_2024_7881 */

#endif /* __aarch64__ */

	/* Fallthrough */

	default:
		return SMC_UNK;
	}
}
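
/*
 * Illustrative sketch only, excluded from the build: how a hypothetical
 * lower-EL caller might interpret the SMCCC_ARCH_FEATURES result computed
 * above for SMCCC_ARCH_WORKAROUND_2. The invoke_smc() conduit helper is an
 * assumption for illustration and is not a TF-A API.
 */
#if 0
static bool pe_needs_dynamic_ssb_mitigation(void)
{
	int32_t ret = invoke_smc(SMCCC_ARCH_FEATURES,
				 SMCCC_ARCH_WORKAROUND_2, 0, 0);

	/*
	 *  0 - the PE relies on the caller invoking SMCCC_ARCH_WORKAROUND_2.
	 *  1 - the PE is unaffected or mitigated without a firmware call.
	 * <0 - the call is not supported or the workaround is not required.
	 */
	return (ret == 0);
}
#endif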

/*
 * Return the SoC revision or SoC version on success, otherwise return
 * SMC_ARCH_CALL_INVAL_PARAM.
 */
static int32_t smccc_arch_id(u_register_t arg1)
{
	if (arg1 == SMCCC_GET_SOC_REVISION) {
		return plat_get_soc_revision();
	}
	if (arg1 == SMCCC_GET_SOC_VERSION) {
		return plat_get_soc_version();
	}
	return SMC_ARCH_CALL_INVAL_PARAM;
}

/*
 * Reads a system register, sanitises its value, and returns a bitmask
 * representing which feature in that sysreg has been enabled by firmware. The
 * bitmask is a 1:1 mapping to the register's fields.
 */
#if ARCH_FEATURE_AVAILABILITY
static uintptr_t smccc_arch_feature_availability(u_register_t reg,
						 void *handle,
						 u_register_t flags)
{
	cpu_context_t *caller_context;
	per_world_context_t *caller_per_world_context;
	el3_state_t *state;
	u_register_t bitmask, check;

	/* Check the caller security state */
	if (is_caller_secure(flags)) {
		caller_context = cm_get_context(SECURE);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_SECURE];
	} else if (is_caller_non_secure(flags)) {
		caller_context = cm_get_context(NON_SECURE);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_NS];
	} else {
#if ENABLE_RME
		caller_context = cm_get_context(REALM);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_REALM];
#else /* !ENABLE_RME */
		assert(0); /* shouldn't be possible */
#endif /* ENABLE_RME */
	}

	state = get_el3state_ctx(caller_context);

	switch (reg) {
	case SCR_EL3_OPCODE:
		bitmask = read_ctx_reg(state, CTX_SCR_EL3);
		bitmask &= ~SCR_EL3_IGNORED;
		check = bitmask & ~SCR_EL3_FEATS;
		bitmask &= SCR_EL3_FEATS;
		bitmask ^= SCR_EL3_FLIPPED;
		/* will only report 0 if neither is implemented */
		if (is_feat_rng_trap_supported() || is_feat_rng_present())
			bitmask |= SCR_TRNDR_BIT;
		break;
	case CPTR_EL3_OPCODE:
		bitmask = caller_per_world_context->ctx_cptr_el3;
		check = bitmask & ~CPTR_EL3_FEATS;
		bitmask &= CPTR_EL3_FEATS;
		bitmask ^= CPTR_EL3_FLIPPED;
		break;
	case MDCR_EL3_OPCODE:
		bitmask = read_ctx_reg(state, CTX_MDCR_EL3);
		bitmask &= ~MDCR_EL3_IGNORED;
		check = bitmask & ~MDCR_EL3_FEATS;
		bitmask &= MDCR_EL3_FEATS;
		bitmask ^= MDCR_EL3_FLIPPED;
		break;
#if ENABLE_FEAT_MPAM
	case MPAM3_EL3_OPCODE:
		bitmask = caller_per_world_context->ctx_mpam3_el3;
		bitmask &= ~MPAM3_EL3_IGNORED;
		check = bitmask & ~MPAM3_EL3_FEATS;
		bitmask &= MPAM3_EL3_FEATS;
		bitmask ^= MPAM3_EL3_FLIPPED;
		break;
#endif /* ENABLE_FEAT_MPAM */
	default:
		SMC_RET2(handle, SMC_INVALID_PARAM, ULL(0));
	}

	/*
	 * Failing this means that the requested register has a bit set that
	 * hasn't been declared as a known feature bit or an ignore bit. This is
	 * likely to happen when support for a new feature is added but the
	 * bitmask macros are not updated.
	 */
	if (ENABLE_ASSERTIONS && check != 0) {
		ERROR("Unexpected bits 0x%lx were set in register %lx!\n", check, reg);
		assert(0);
	}

	SMC_RET2(handle, SMC_ARCH_CALL_SUCCESS, bitmask);
}
#endif /* ARCH_FEATURE_AVAILABILITY */
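
/*
 * Worked illustration only, excluded from the build: every per-register case
 * above follows the same reduction - drop IGNORED bits, check that nothing
 * outside the declared FEATS mask remains, keep only the FEATS bits, then XOR
 * with FLIPPED so that "feature enabled" always reads as 1 regardless of the
 * bit's native polarity. The helper name and its parameters are hypothetical.
 */
#if 0
static u_register_t reduce_to_feature_bitmask(u_register_t raw,
					      u_register_t ignored,
					      u_register_t feats,
					      u_register_t flipped)
{
	u_register_t bitmask = raw & ~ignored;

	/* Bits outside 'feats' would indicate the masks need updating. */
	assert((bitmask & ~feats) == 0U);

	bitmask &= feats;

	/* Normalise active-low enable bits to "1 = enabled". */
	return bitmask ^ flipped;
}
#endif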

/*
 * Top-level Arm Architectural Service SMC handler.
 */
static uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid,
					  u_register_t x1,
					  u_register_t x2,
					  u_register_t x3,
					  u_register_t x4,
					  void *cookie,
					  void *handle,
					  u_register_t flags)
{
	switch (smc_fid) {
	case SMCCC_VERSION:
		SMC_RET1(handle, smccc_version());
	case SMCCC_ARCH_FEATURES:
		SMC_RET1(handle, smccc_arch_features(x1));
	case SMCCC_ARCH_SOC_ID:
		SMC_RET1(handle, smccc_arch_id(x1));
#ifdef __aarch64__
#if WORKAROUND_CVE_2017_5715
	case SMCCC_ARCH_WORKAROUND_1:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if WORKAROUND_CVE_2018_3639
	case SMCCC_ARCH_WORKAROUND_2:
		/*
		 * The workaround has already been applied on affected PEs
		 * requiring dynamic mitigation during entry to EL3.
		 * On unaffected or statically mitigated PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
	case SMCCC_ARCH_WORKAROUND_3:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if WORKAROUND_CVE_2024_7881
	case SMCCC_ARCH_WORKAROUND_4:
		/*
		 * The workaround has already been applied on affected PEs
		 * during cold boot. This function has no effect whether the
		 * PE is affected or not.
		 */
		SMC_RET0(handle);
#endif /* WORKAROUND_CVE_2024_7881 */
#endif /* __aarch64__ */
#if ARCH_FEATURE_AVAILABILITY
	/* The return is 64-bit, so only reply to SMC64 requests */
	case SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT):
		return smccc_arch_feature_availability(x1, handle, flags);
#endif /* ARCH_FEATURE_AVAILABILITY */
	default:
		WARN("Unimplemented Arm Architecture Service Call: 0x%x\n",
		     smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/* Register Arm Architectural Service Calls as runtime service */
DECLARE_RT_SVC(
		arm_arch_svc,
		OEN_ARM_START,
		OEN_ARM_END,
		SMC_TYPE_FAST,
		NULL,
		arm_arch_svc_smc_handler
);
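
/*
 * Illustrative note only, excluded from the build: SMCCC_ARCH_FEATURE_AVAILABILITY
 * returns a 64-bit bitmask, so the handler above only matches the SMC64 form of
 * the function ID. A hypothetical caller would set the 64-bit calling-convention
 * bit when constructing the FID, along these lines:
 */
#if 0
	uint32_t fid = SMCCC_ARCH_FEATURE_AVAILABILITY |
		       (SMC_64 << FUNCID_CC_SHIFT);
	/* On return, x0 holds the status and x1 the sanitised feature bitmask. */
#endif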