/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>

	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 *
	 * Expands recursively: each expansion emits one .quad and re-invokes
	 * itself with _count-1 until _count reaches zero.
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	.ifgt \_count
	/* Write the current expression */
	.ifb \_this
	.error "Nothing to fill"
	.endif
	.quad \_this

	/* Invoke recursively for remaining expressions */
	.ifnb \_rest
	fill_constants \_count-1, \_rest
	.else
	/* List exhausted: repeat the last expression to pad to _count */
	fill_constants \_count-1, \_this
	.endif
	.endif
	.endm

	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to read from CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU. If there's no CPU reset function,
	 *	specify CPU_NO_RESET_FUNC
	 * _extra1:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2017-5715 needs to be applied or not.
	 * _extra2:
	 *	This is a placeholder for future per CPU operations. Currently
	 *	some CPUs use this entry to set a function to disable the
	 *	workaround for CVE-2018-3639.
	 * _extra3:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2022-23960 needs to be applied or not.
	 * _extra4:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2024-7881 needs to be applied or not.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_extra1:req, _extra2:req, _extra3:req, _extra4:req, \
		_e_handler:req, _power_down_ops:vararg
	/* Emit one cpu_ops descriptor into the .cpu_ops section */
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_extra1
	.quad \_extra2
	.quad \_extra3
	.quad \_extra4
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
	\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
	\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	/*
	 * Place errata reported flag, and the spinlock to arbitrate access to
	 * it in the data section.
	 */
	.pushsection .data
	define_asm_spinlock \_name\()_errata_lock
	\_name\()_errata_reported:
	.word	0
	.popsection

	/* Place CPU string in rodata */
	.pushsection .rodata
	\_name\()_cpu_str:
	.asciz "\_name"
	.popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm

	/* Convenience wrapper: no extras, no exception handler */
	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, 0, \
			\_power_down_ops
	.endm

	/* Convenience wrapper: exception handler only */
	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			0, 0, 0, 0, \_e_handler, \_power_down_ops
	.endm

	/* Convenience wrapper: CVE workaround hooks _extra1.._extra3 */
	.macro declare_cpu_ops_wa _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, 0, 0, \_power_down_ops
	.endm

	/* Convenience wrapper: CVE workaround hooks _extra1.._extra4 */
	.macro declare_cpu_ops_wa_4 _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _extra4:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
	.endm

	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 *
	 * Reads ID_AA64PFR0_EL1.CSV2 into _reg and branches to _label unless
	 * the field is 0 (i.e. unless mitigation is required). Clobbers _reg
	 * and the condition flags.
	 */
	.macro cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm

	/*
	 * Helper macro that reads the part number of the current
	 * CPU and jumps to the given label if it matches the CPU
	 * MIDR provided.
	 *
	 * Clobbers x0.
	 */
	.macro jump_if_cpu_midr _cpu_midr, _label
	mrs	x0, midr_el1
	/*
	 * NOTE(review): the lsb operand below has no '#' prefix unlike the
	 * width operand; GAS accepts both spellings for AArch64 immediates.
	 */
	ubfx	x0, x0, MIDR_PN_SHIFT, #12
	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
	b.eq	\_label
	.endm


/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _apply_at_reset:
 *	Whether the erratum should be automatically applied at reset
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	.pushsection .rodata.errata_entries
	.align	3
	/* Define the list-start label only once per cpu */
	.ifndef \_cpu\()_errata_list_start
	\_cpu\()_errata_list_start:
	.endif

	/* check if unused and compile out if no references */
	.if \_apply_at_reset && \_chosen
	.quad	erratum_\_cpu\()_\_id\()_wa
	.else
	.quad	0
	.endif
	/* TODO(errata ABI): this prevents all checker functions from
	 * being optimised away. Can be done away with unless the ABI
	 * needs them */
	.quad	check_erratum_\_cpu\()_\_id
	/* Will fit CVEs with up to 10 character in the ID field */
	.word	\_id
	.hword	\_cve
	.byte	\_chosen
	/* TODO(errata ABI): mitigated field for known but unmitigated
	 * errata */
	.byte	0x1
	.popsection
.endm

/*
 * Common front half of the workaround wrappers: register the erratum entry,
 * open the _wa function, and skip the body if the checker reports the
 * erratum does not apply. Closed by _workaround_end.
 */
.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset

	func erratum_\_cpu\()_\_id\()_wa
		mov	x8, x30

		/* save rev_var for workarounds that might need it but don't
		 * restore to x0 because few will care */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		cbz	x0, erratum_\_cpu\()_\_id\()_skip
.endm

/* Common back half: skip label, return via x8, close the _wa function */
.macro _workaround_end _cpu:req, _id:req
	erratum_\_cpu\()_\_id\()_skip:
		ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm

/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 1
.endm

/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve automatically blows up as it stores some
	 * brackets in the symbol
	 */
	.ifnb \_midr
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip

	1:
	.endif
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 0
.endm

/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	_workaround_end \_cpu, \_id
.endm

/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to call the isb for them
	 * and missing the isb could be very problematic. It is also likely as
	 * they tend to be scattered in generic code.
	 */
	.ifb \_no_isb
		isb
	.endif
	_workaround_end \_cpu, \_id
.endm

/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *
 * clobbers: x1
 *
 * NOTE(review): _assert is accepted but not referenced by the body below —
 * no read-back check is emitted regardless of its value. Confirm whether
 * the assert path was intended to be implemented.
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Toggle a bit in a system register. Can toggle multiple bits but is limited by
 * the way the EOR instruction encodes them.
 *
 * see sysreg_bit_set for usage
 *
 * NOTE(review): as with sysreg_bit_set, _assert is accepted but unused.
 */
.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	eor	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Point VBAR_EL3 at the given vector table. Uses adr, so the table must be
 * within adr range of the expansion site. Clobbers x1.
 */
.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm

/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI{cond} Rd, Rn, #lsb, #width
 *
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

/* As sysreg_bitfield_insert, but the source value comes from a GPR */
.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * _res:
 *	register where the result will be placed
 * _tmp:
 *	register to clobber for temporaries
 */
.macro get_rev_var _res:req, _tmp:req
	mrs	\_tmp, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of the result.
	 *
	 * First extract _tmp[23:16] to _res[7:0] and zero fill the rest. Then
	 * extract _tmp[3:0] into _res[3:0] retaining other bits.
	 */
	ubfx	\_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	\_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
.endm

/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU revision fetching
 *	procedure. Stores the result of this in the temporary register x10 to allow for chaining
 *	(pass a falsy value to reuse the x10 left by a previous apply_erratum)
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
	mov	x9, x30
	bl	cpu_get_rev_var
	mov	x10, x0
	.elseif (\_chosen)
	/* Chaining: x10 must already hold cpu_rev_var from an earlier call */
	mov	x9, x30
	mov	x0, x10
	.endif

	.if \_chosen
	bl	erratum_\_cpu\()_\_id\()_wa
	mov	x30, x9
	.endif
.endm

/*
 * Helpers to report if an erratum applies. Compares the given revision variant
 * to the given value. Return ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly.
 *
 * _rev_num: the given revision variant. Or
 * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
 *
 * in body:
 *	clobber: x0
 *	argument: x0 - cpu_rev_var
 */
.macro cpu_rev_var_ls _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, ls
.endm

.macro cpu_rev_var_hs _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, hs
.endm

/* x0 = (lo <= cpu_rev_var <= hi); clobbers x1 in addition to x0 */
.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
	cmp	x0, #\_rev_num_lo
	mov	x1, #\_rev_num_hi
	ccmp	x0, x1, #2, hs
	cset	x0, ls
.endm

/*
 * Helpers to select which revisions errata apply to.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_ls \_rev_num
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_hs \_rev_num
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

/* Checker whose verdict is fixed at build time by the _chosen flag */
.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

/*
 * provide a shorthand for the name format for annoying errata
 * body: clobber x0 to x4
 */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc check_erratum_\_cpu\()_\_id
.endm


/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Wrapper to automatically apply all reset-time errata. Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	func \_cpu\()_reset_func
		mov	x15, x30
		get_rev_var x14, x0

		/* short circuit the location to avoid searching the list */
		adrp	x12, \_cpu\()_errata_list_start
		add	x12, x12, :lo12:\_cpu\()_errata_list_start
		adrp	x13, \_cpu\()_errata_list_end
		add	x13, x13, :lo12:\_cpu\()_errata_list_end

	errata_begin:
		/* if head catches up with end of list, exit */
		cmp	x12, x13
		b.eq	errata_end

		ldr	x10, [x12, #ERRATUM_WA_FUNC]
		/* TODO(errata ABI): check mitigated and checker function fields
		 * for 0 */
		ldrb	w11, [x12, #ERRATUM_CHOSEN]

		/* skip if not chosen */
		cbz	x11, 1f
		/* skip if runtime erratum (reset-time entries have a non-zero
		 * workaround function pointer) */
		cbz	x10, 1f

		/* put cpu revision in x0 and call workaround */
		mov	x0, x14
		blr	x10
	1:
		add	x12, x12, #ERRATUM_ENTRY_SIZE
		b	errata_begin
	errata_end:
.endm

/* Close the reset function opened by cpu_reset_func_start; return via x15 */
.macro cpu_reset_func_end _cpu:req
	isb
	ret	x15
	endfunc \_cpu\()_reset_func
.endm

#endif /* CPU_MACROS_S */