/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

	/* The reset handler is needed in any BL image that executes at the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* Core and cluster power down is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU power down function for all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The CPU-specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif
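
	/*
	 * Illustrative sketch only, excluded from the build: the kind of
	 * per-CPU reset handler and cpu_ops declaration that the common
	 * reset_handler above dispatches to via CPU_RESET_FUNC. The CPU
	 * name "cpu_example" and EXAMPLE_MIDR are hypothetical; real CPU
	 * libraries provide these through the declare_cpu_ops macros in
	 * cpu_macros.S, which place the entry between __CPU_OPS_START__
	 * and __CPU_OPS_END__ for get_cpu_ops_ptr below to find.
	 */
#if 0
func cpu_example_reset_func
	/* Per the contract above, may clobber only x0 - x19, x30 */
	ret
endfunc cpu_example_reset_func

declare_cpu_ops cpu_example, EXAMPLE_MIDR, cpu_example_reset_func
#endif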

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementer and
	 * part number are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementer value 0x0 is reserved for software use, so no
	 * clashes should occur with that default value.)
	 *
	 * Return :
	 *	x0 - The matching cpu_ops pointer on success
	 *	x0 - 0 on failure
	 * Clobbers: x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the MIDR from the cpu_ops entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the MIDR matches the MIDR of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return an error if we have already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract the CPU revision and variant, and combine them into a single value
 * for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining the other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or the same as
 * the given value, this indicates that the errata applies; otherwise it does
 * not.
 *
 * Shall clobber: x0 - x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
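
/*
 * Illustrative sketch only, excluded from the build: a typical errata check
 * built on cpu_get_rev_var and cpu_rev_var_ls above. The function name and
 * erratum are hypothetical. With the packed revision-variant value from
 * cpu_get_rev_var in x0, the erratum below is reported as applying to
 * revisions r1p0 and earlier (variant 1, revision 0 packs to 0x10).
 */
#if 0
func check_errata_example
	/* Applies to r1p0 and below */
	mov	x1, #0x10
	b	cpu_rev_var_ls
endfunc check_errata_example
#endif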

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or the same as
 * the given value, this indicates that the errata applies; otherwise it does
 * not.
 *
 * Shall clobber: x0 - x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range (inclusive), this indicates that the errata applies; otherwise it does
 * not.
 *
 * Shall clobber: x0 - x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print the errata status for CPUs of its class. Must be called
 * only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve the pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it is non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to the errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif
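
/*
 * Illustrative sketch only, excluded from the build: the shape of a per-CPU
 * errata reporting function of the kind print_errata_status above jumps to
 * through CPU_ERRATA_FUNC. The CPU name, build flag and erratum number are
 * hypothetical; the report_errata macro is provided by cpu_macros.S and
 * expects the revision-variant value in x8.
 */
#if 0
func cpu_example_errata_report
	stp	x8, x30, [sp, #-16]!

	/* Cache the revision-variant value used by the report_errata checks */
	bl	cpu_get_rev_var
	mov	x8, x0

	report_errata ERRATA_EXAMPLE_123456, cpu_example, 123456

	ldp	x8, x30, [sp], #16
	ret
endfunc cpu_example_errata_report
#endif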

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when the firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when the firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when the firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715, so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable the mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ dynamic
 * mitigation. If the core uses static mitigation or is unaffected by
 * CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960, so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies
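
/*
 * Illustrative sketch only, excluded from the build: the kind of check
 * function a CPU library installs as the CPU_EXTRA1_FUNC entry, which
 * check_wa_cve_2017_5715 above branches to. The function name is
 * hypothetical; WORKAROUND_CVE_2017_5715 is the build flag controlling
 * whether the firmware mitigation is compiled in.
 */
#if 0
func check_errata_example_cve_2017_5715
#if WORKAROUND_CVE_2017_5715
	/* The firmware mitigation is present for this CPU */
	mov	x0, #ERRATA_APPLIES
#else
	/* The mitigation would be needed but is not compiled in */
	mov	x0, #ERRATA_MISSING
#endif
	ret
endfunc check_errata_example_cve_2017_5715
#endif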