/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>

	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	.ifgt \_count
	/* Write the current expression */
	.ifb \_this
	.error "Nothing to fill"
	.endif
	.quad \_this

	/* Invoke recursively for remaining expressions */
	.ifnb \_rest
	fill_constants \_count-1, \_rest
	.else
	/* Out of expressions: repeat the last one until _count is consumed */
	fill_constants \_count-1, \_this
	.endif
	.endif
	.endm

	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to read from CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU.
	 * _extra1:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2017-5715 needs to be applied or not.
	 * _extra2:
	 *	This is a placeholder for future per CPU operations. Currently
	 *	some CPUs use this entry to set a function to disable the
	 *	workaround for CVE-2018-3639.
	 * _extra3:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2022-23960 needs to be applied or not.
	 * _extra4:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2024-7881 needs to be applied or not.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_extra1:req, _extra2:req, _extra3:req, _extra4:req, \
		_e_handler:req, _power_down_ops:vararg
	/* Emit one cpu_ops structure into the dedicated .cpu_ops section */
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_extra1
	.quad \_extra2
	.quad \_extra3
	.quad \_extra4
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
	\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
	\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	/*
	 * Place errata reported flag, and the spinlock to arbitrate access to
	 * it in the data section.
	 */
	.pushsection .data
	define_asm_spinlock \_name\()_errata_lock
	\_name\()_errata_reported:
	.word 0
	.popsection

	/* Place CPU string in rodata */
	.pushsection .rodata
	\_name\()_cpu_str:
	.asciz "\_name"
	.popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm

	/* Convenience wrapper: no CVE test functions, no exception handler */
	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, 0, \
			\_power_down_ops
	.endm

	/* Convenience wrapper: supplies an exception handler only */
	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			0, 0, 0, 0, \_e_handler, \_power_down_ops
	.endm

	/* Convenience wrapper: supplies the first three CVE workaround slots */
	.macro declare_cpu_ops_wa _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, 0, 0, \_power_down_ops
	.endm

	/* Convenience wrapper: supplies all four CVE workaround slots */
	.macro declare_cpu_ops_wa_4 _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _extra4:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
	.endm

	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715. Reads ID_AA64PFR0_EL1.CSV2 into \_reg (clobbering
	 * it) and branches to \_label when mitigations are NOT needed.
	 */
	.macro cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm

	/*
	 * Helper macro that reads the part number of the current
	 * CPU and jumps to the given label if it matches the CPU
	 * MIDR provided.
	 *
	 * Clobbers x0.
	 */
	.macro jump_if_cpu_midr _cpu_midr, _label
	mrs	x0, midr_el1
	ubfx	x0, x0, MIDR_PN_SHIFT, #12
	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
	b.eq	\_label
	.endm


/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _apply_at_reset:
 *	Whether the erratum should be automatically applied at reset
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	.pushsection .rodata.errata_entries
		.align	3
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		/* check if unused and compile out if no references */
		.if \_apply_at_reset && \_chosen
			.quad	erratum_\_cpu\()_\_id\()_wa
		.else
			.quad	0
		.endif
		/* TODO(errata ABI): this prevents all checker functions from
		 * being optimised away. Can be done away with unless the ABI
		 * needs them */
		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 characters in the ID field */
		.word	\_id
		.hword	\_cve
		.byte	\_chosen
		/* TODO(errata ABI): mitigated field for known but unmitigated
		 * errata */
		.byte	0x1
	.popsection
.endm

/*
 * Internal helper: registers the erratum entry and opens the workaround
 * function, branching past the body when the checker reports "not applies".
 * Saves the link register in x8 for _workaround_end's `ret x8`.
 */
.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset

	func erratum_\_cpu\()_\_id\()_wa
		mov	x8, x30

		/* save rev_var for workarounds that might need it but don't
		 * restore to x0 because few will care */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		cbz	x0, erratum_\_cpu\()_\_id\()_skip
.endm

/* Internal helper: closes the workaround opened by _workaround_start */
.macro _workaround_end _cpu:req, _id:req
	erratum_\_cpu\()_\_id\()_skip:
		ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm

/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 1
.endm

/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve automatically blows up as it stores some
	 * brackets in the symbol
	 */
	.ifnb \_midr
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip

	1:
	.endif
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 0
.endm

/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	_workaround_end \_cpu, \_id
.endm

/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to call the isb for them
	 * and missing the isb could be very problematic. It is also likely as
	 * they tend to be scattered in generic code.
	 */
	.ifb \_no_isb
		isb
	.endif
	_workaround_end \_cpu, \_id
.endm

/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *	NOTE(review): the parameter is accepted but not acted upon in this
 *	macro body — no read-back/assert is emitted here; confirm intent.
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Toggle a bit in a system register. Can toggle multiple bits but is limited by
 * the way the EOR instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	eor	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/* Point VBAR_EL3 at the given vector table. Clobbers x1. */
.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm

/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI{cond} Rd, Rn, #lsb, #width
 *
 * Writes the immediate _src into bits [_lsb, _lsb + _width) of _reg.
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

/*
 * As sysreg_bitfield_insert, but the source value comes from a general
 * purpose register rather than an immediate. clobbers: x0, x1
 */
.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * _res:
 *	register where the result will be placed
 * _tmp:
 *	register to clobber for temporaries
 */
.macro get_rev_var _res:req, _tmp:req
	mrs	\_tmp, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack
	 * them as variant[7:4] and revision[3:0] of the result.
	 *
	 * First extract \_tmp[23:16] to \_res[7:0] and zero fill the rest.
	 * Then extract \_tmp[3:0] into \_res[3:0] retaining other bits.
	 */
	ubfx	\_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	\_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
.endm

/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU
 *	revision fetching procedure. Stores the result of this in the
 *	temporary register x10 to allow for chaining
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
		mov	x9, x30
		bl	cpu_get_rev_var
		mov	x10, x0
	.elseif (\_chosen)
		/* rev_var was fetched by a previous apply_erratum; reuse x10 */
		mov	x9, x30
		mov	x0, x10
	.endif

	.if \_chosen
		bl	erratum_\_cpu\()_\_id\()_wa
		mov	x30, x9
	.endif
.endm

/*
 * Helpers to report if an erratum applies. Compares the given revision variant
 * to the given value. Return ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly.
 *
 * _rev_num: the given revision variant. Or
 * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
 *
 * in body:
 *	clobber: x0
 *	argument: x0 - cpu_rev_var
 */
.macro cpu_rev_var_ls _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, ls
.endm

.macro cpu_rev_var_hs _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, hs
.endm

.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
	cmp	x0, #\_rev_num_lo
	mov	x1, #\_rev_num_hi
	ccmp	x0, x1, #2, hs
	cset	x0, ls
.endm

/*
 * Helpers to select which revisions errata apply to.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_ls \_rev_num
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_hs \_rev_num
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

/* Checker whose verdict is fixed at build time by the _chosen flag */
.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

/*
 * provide a shorthand for the name format for annoying errata
 * body: clobber x0 to x4
 */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc check_erratum_\_cpu\()_\_id
.endm


/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Wrapper to automatically apply all reset-time errata. Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	func \_cpu\()_reset_func
		mov	x15, x30
		get_rev_var x14, x0

		/* short circuit the location to avoid searching the list */
		adrp	x12, \_cpu\()_errata_list_start
		add	x12, x12, :lo12:\_cpu\()_errata_list_start
		adrp	x13, \_cpu\()_errata_list_end
		add	x13, x13, :lo12:\_cpu\()_errata_list_end

	errata_begin:
		/* if head catches up with end of list, exit */
		cmp	x12, x13
		b.eq	errata_end

		ldr	x10, [x12, #ERRATUM_WA_FUNC]
		/* TODO(errata ABI): check mitigated and checker function fields
		 * for 0 */
		ldrb	w11, [x12, #ERRATUM_CHOSEN]

		/* skip if not chosen */
		cbz	x11, 1f
		/* skip if runtime erratum (reset-time entries have a non-zero
		 * workaround function pointer) */
		cbz	x10, 1f

		/* put cpu revision in x0 and call workaround */
		mov	x0, x14
		blr	x10
	1:
		add	x12, x12, #ERRATUM_ENTRY_SIZE
		b	errata_begin
	errata_end:
.endm

/* Closes the reset function opened by cpu_reset_func_start */
.macro cpu_reset_func_end _cpu:req
		isb
		ret	x15
	endfunc \_cpu\()_reset_func
.endm

#endif /* CPU_MACROS_S */