/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>

 /* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	/*
	 * Assert if an invalid cpu_ops pointer was obtained. If it is not
	 * valid, the proper CPU file may not have been included in the build.
	 */
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* Core and cluster power down handling is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * the power domain level to be powered down as its parameter. After the
	 * cpu_ops pointer is retrieved from cpu_data, the handler for the
	 * requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS - 1, call the
	 * power down handler for the last (highest) implemented power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */
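
	/*
	 * Illustrative usage sketch (not part of the build). From EL3 code
	 * that already has tpidr_el3 pointing at this core's cpu_data, the
	 * BL31-only helpers above are expected to be used roughly as follows;
	 * the power level value is an example only:
	 *
	 *	bl	init_cpu_ops		// cache cpu_ops_ptr in cpu_data
	 *	mov	x0, #1			// e.g. cluster power level
	 *	bl	prepare_cpu_pwr_dwn	// branches to the cpu_ops handler
	 */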

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the cpu_ops list. Only the implementation and part number
	 * are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return :
	 *	x0 - The matching cpu_ops pointer on success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using the mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr_l	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr_l	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return an error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0], retaining the other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
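
/*
 * Worked example for cpu_get_rev_var (illustrative): on an r2p1 part the
 * MIDR_EL1 Variant field (bits [23:20]) is 0x2 and the Revision field
 * (bits [3:0]) is 0x1, so the function returns x0 = 0x21. Errata checks can
 * then compare this single packed value against a constant such as
 * CPU_REV(2, 1) instead of comparing the two MIDR fields separately.
 */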

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * int check_wa_cve_2024_7881(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2024_7881
func check_wa_cve_2024_7881
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA4_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2024-7881 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA4_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2024_7881

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies
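
/*
 * Illustrative caller sketch (not part of the build): the CVE check helpers
 * above all share the same calling convention, so, for example, code deciding
 * whether to advertise SMCCC_ARCH_WORKAROUND_1 (the CVE-2017-5715 mitigation)
 * could do:
 *
 *	bl	check_wa_cve_2017_5715
 *	cmp	x0, #ERRATA_APPLIES
 *	b.eq	report_wa1_supported	// hypothetical label in the caller
 */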