/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>

	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write exactly _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	    /* Write the current expression */
	    .ifb \_this
	      .error "Nothing to fill"
	    .endif
	    .quad \_this

	    /* Invoke recursively for remaining expressions */
	    .ifnb \_rest
	      fill_constants \_count-1, \_rest
	    .else
	      fill_constants \_count-1, \_this
	    .endif
	  .endif
	.endm
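
	/*
	 * For illustration, with hypothetical expressions foo and bar, the
	 * invocation
	 *
	 *	fill_constants 4, foo, bar
	 *
	 * pads with the last expression and is equivalent to writing
	 *
	 *	.quad foo
	 *	.quad bar
	 *	.quad bar
	 *	.quad bar
	 */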

	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to be read from the CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU.
	 * _extra1:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2017-5715 needs to be applied or not.
	 * _extra2:
	 *	This is a placeholder for future per CPU operations. Currently
	 *	some CPUs use this entry to set a function to disable the
	 *	workaround for CVE-2018-3639.
	 * _extra3:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2022-23960 needs to be applied or not.
	 * _extra4:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2024-7881 needs to be applied or not.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_extra1:req, _extra2:req, _extra3:req, _extra4:req, \
		_e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_extra1
	.quad \_extra2
	.quad \_extra3
	.quad \_extra4
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
		\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
		\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	  /*
	   * Place errata reported flag, and the spinlock to arbitrate access
	   * to it in the data section.
	   */
	  .pushsection .data
	  define_asm_spinlock \_name\()_errata_lock
	  \_name\()_errata_reported:
	  .word	0
	  .popsection

	  /* Place CPU string in rodata */
	  .pushsection .rodata
	  \_name\()_cpu_str:
	  .asciz "\_name"
	  .popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm

	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, 0, \
			\_power_down_ops
	.endm

	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			0, 0, 0, 0, \_e_handler, \_power_down_ops
	.endm

	.macro declare_cpu_ops_wa _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, 0, 0, \_power_down_ops
	.endm

	.macro declare_cpu_ops_wa_4 _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _extra4:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
	.endm
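
	/*
	 * A minimal sketch of the intended use from a cpu file. The cpu name,
	 * MIDR define and helper functions are illustrative, not prescriptive:
	 *
	 *	declare_cpu_ops cortex_a710, CORTEX_A710_MIDR, \
	 *		cortex_a710_reset_func, \
	 *		cortex_a710_core_pwr_dwn
	 */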

	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 */
	.macro	cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm

	/*
	 * Helper macro that reads the part number of the current
	 * CPU and jumps to the given label if it matches the CPU
	 * MIDR provided.
	 *
	 * Clobbers x0.
	 */
	.macro jump_if_cpu_midr _cpu_midr, _label
	mrs	x0, midr_el1
	ubfx	x0, x0, MIDR_PN_SHIFT, #12
	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
	b.eq	\_label
	.endm


/*
 * Add an entry for this erratum to the errata framework
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req
#if REPORT_ERRATA || ERRATA_ABI_SUPPORT
	.pushsection .rodata.errata_entries
		.align	3
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 characters in the ID field */
		.word	\_id
		.hword	\_cve
		.byte	\_chosen
		.byte	0x0 /* alignment */
	.popsection
#endif
.endm

/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen

	.if \_chosen
		/* put errata directly into the reset function */
		.pushsection .text.asm.\_cpu\()_reset_func, "ax"
	.else
		/* or something else that will get garbage collected by the
		 * linker */
		.pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax"
	.endif
	/* revision is stored in x14, get it */
	mov	x0, x14
	bl	check_erratum_\_cpu\()_\_id
	/* save rev_var for workarounds that might need it */
	mov	x7, x14
	cbz	x0, erratum_\_cpu\()_\_id\()_skip_reset
.endm

/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen

	func erratum_\_cpu\()_\_id\()_wa
		mov	x8, x30
		/*
		 * Let errata specify if they need MIDR checking. Sadly,
		 * storing the MIDR in an .equ to retrieve it automatically
		 * blows up, as it stores some brackets in the symbol
		 */
		.ifnb \_midr
			jump_if_cpu_midr \_midr, 1f
			b	erratum_\_cpu\()_\_id\()_skip_runtime

		1:
		.endif
		/* save rev_var for workarounds that might need it but don't
		 * restore to x0 because few will care */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		cbz	x0, erratum_\_cpu\()_\_id\()_skip_runtime
.endm

/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as for that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	erratum_\_cpu\()_\_id\()_skip_reset:
	.popsection
.endm

/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as for that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to issue the isb for
	 * them, and a missing isb could be very problematic. Missing it is
	 * also likely, as runtime errata tend to be scattered in generic code.
	 */
	.ifb \_no_isb
		isb
	.endif
	erratum_\_cpu\()_\_id\()_skip_runtime:
	ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm
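
/*
 * Sketch of a complete reset-time erratum built from these wrappers. The
 * erratum number, build flag, register and bit name are hypothetical;
 * ERRATUM() and CPU_REV() come from errata.h, and sysreg_bit_set and
 * check_erratum_ls are defined further down. The check makes the erratum
 * apply to revisions r2p0 and earlier:
 *
 *	workaround_reset_start cortex_a710, ERRATUM(1234567), ERRATA_A710_1234567
 *		sysreg_bit_set CORTEX_A710_CPUACTLR_EL1, CORTEX_A710_CPUACTLR_EL1_BIT_0
 *	workaround_reset_end cortex_a710, ERRATUM(1234567)
 *
 *	check_erratum_ls cortex_a710, ERRATUM(1234567), CPU_REV(2, 0)
 */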

/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Toggle a bit in a system register. Can toggle multiple bits but is limited
 * by the way the EOR instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	eor	x1, x1, #\_bit
	msr	\_reg, x1
.endm

.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm

/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI{cond} Rd, Rn, #lsb, #width
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * _res:
 *	register where the result will be placed
 * _tmp:
 *	register to clobber for temporaries
 */
.macro get_rev_var _res:req, _tmp:req
	mrs	\_tmp, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack
	 * them as variant[7:4] and revision[3:0] of \_res.
	 *
	 * First extract \_tmp[23:16] to \_res[7:0] and zero fill the rest.
	 * Then extract \_tmp[3:0] into \_res[3:0] retaining other bits.
	 */
	ubfx	\_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	\_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
.endm

/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU
 *	revision fetching procedure. Stores the result of this in the
 *	temporary register x10 to allow for chaining
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
		mov	x9, x30
		bl	cpu_get_rev_var
		mov	x10, x0
	.elseif (\_chosen)
		mov	x9, x30
		mov	x0, x10
	.endif

	.if \_chosen
		bl	erratum_\_cpu\()_\_id\()_wa
		mov	x30, x9
	.endif
.endm
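
/*
 * Illustrative use from a runtime path such as a power-down function (erratum
 * numbers and build flags are hypothetical). The first invocation fetches the
 * revision into x10; passing 0 for _get_rev lets a second invocation reuse it:
 *
 *	apply_erratum cortex_a710, ERRATUM(1234567), ERRATA_A710_1234567
 *	apply_erratum cortex_a710, ERRATUM(7654321), ERRATA_A710_7654321, 0
 */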

/*
 * Helpers to report if an erratum applies. Compares the given revision variant
 * to the given value. Return ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly.
 *
 * _rev_num: the given revision variant. Or
 * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
 *
 * in body:
 *	clobber: x0
 *	argument: x0 - cpu_rev_var
 */
.macro cpu_rev_var_ls _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, ls
.endm

.macro cpu_rev_var_hs _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, hs
.endm

.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
	cmp	x0, #\_rev_num_lo
	mov	x1, #\_rev_num_hi
	ccmp	x0, x1, #2, hs
	cset	x0, ls
.endm

/*
 * Helpers to select which revisions errata apply to.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_ls \_rev_num
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_hs \_rev_num
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

/*
 * Provide a shorthand for the name format for annoying errata
 * body: clobber x0 to x4
 */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc check_erratum_\_cpu\()_\_id
.endm


/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Helper to register a cpu with the errata framework. Begins the definition of
 * the reset function.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 */
.macro cpu_reset_prologue _cpu:req
	func \_cpu\()_reset_func
	mov	x15, x30
	get_rev_var x14, x0
.endm

/*
 * Wrapper of the reset function to automatically apply all reset-time errata.
 * Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	/* the func/endfunc macros will change sections. So change the section
	 * back to the reset function's */
	.section .text.asm.\_cpu\()_reset_func, "ax"
.endm

.macro cpu_reset_func_end _cpu:req
	isb
	ret	x15
	endfunc \_cpu\()_reset_func
.endm
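
/*
 * Putting the wrappers together, the reset path of a cpu file is expected to
 * follow this shape (cpu name and erratum are hypothetical; when the erratum
 * is chosen, the workaround body is placed in the reset function
 * automatically by workaround_reset_start):
 *
 *	cpu_reset_prologue cortex_a710
 *
 *	workaround_reset_start cortex_a710, ERRATUM(1234567), ERRATA_A710_1234567
 *		... workaround body ...
 *	workaround_reset_end cortex_a710, ERRATUM(1234567)
 *
 *	cpu_reset_func_start cortex_a710
 *		... cpu-specific reset-time setup ...
 *	cpu_reset_func_end cortex_a710
 */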

/*
 * Helper macro that enables Maximum Power Mitigation Mechanism (MPMM) on
 * compatible Arm cores.
 *
 * Clobbers x0 and x1.
 */
.macro enable_mpmm
#if ENABLE_MPMM
	mrs	x0, CPUPPMCR_EL3
	/* if CPUPPMCR_EL3.MPMMPINCTL != 0, skip enabling MPMM */
	ands	x0, x0, CPUPPMCR_EL3_MPMMPINCTL_BIT
	b.ne	1f
	sysreg_bit_set CPUMPMMCR_EL3, CPUMPMMCR_EL3_MPMM_EN_BIT
1:
#endif
.endm

#endif /* CPU_MACROS_S */