/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>

 /* The reset function is needed in BL images entered at the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif
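
/*
 * Illustrative sketch (not part of this file): a minimal
 * plat_reset_handler that a platform port might provide. It must honour
 * the contract noted above: it typically runs before any stack is set
 * up and may only clobber x0 - x18 and x30. A platform with no
 * CPU-specific reset work can simply return:
 *
 * func plat_reset_handler
 *	ret
 * endfunc plat_reset_handler
 */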

#ifdef IMAGE_BL31 /* Core and cluster power down is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, call the
	 * power down handler for the last power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
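
/*
 * Illustrative declaration (Cortex-A53 used as an assumed example): the
 * per-level handlers indexed above are registered by each CPU library
 * file through its cpu_ops declaration, one handler per power level,
 * core level first, then cluster level:
 *
 * declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
 *	cortex_a53_reset_func, \
 *	cortex_a53_core_pwr_dwn, \
 *	cortex_a53_cluster_pwr_dwn
 */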

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The CPU-specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The function below returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementation
	 * and part number are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value.)
	 *
	 * Return :
	 *	x0 - The matching cpu_ops pointer on success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using the mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return an error if we have already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr
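
/*
 * Worked example of the match above, using an illustrative MIDR_EL1
 * value for a Cortex-A53 r0p4 (0x410fd034):
 *
 *	CPU_IMPL_PN_MASK        = 0xff00fff0  (implementer[31:24] and
 *					       part number[15:4])
 *	0x410fd034 & 0xff00fff0 = 0x4100d030
 *
 * The same mask is applied to the CPU_MIDR field of each cpu_ops entry,
 * so the variant and revision fields never affect the match.
 */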

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is lower than or the same as
 * the given value, the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
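
/*
 * Illustrative check (hypothetical erratum number): CPU library files
 * typically build their errata checks on the two helpers above. With
 * the packing done by cpu_get_rev_var, r1p0 becomes 0x10, so the
 * following reports ERRATA_APPLIES for revisions up to and including
 * r1p0:
 *
 * func check_errata_xxxxxx
 *	mov	x1, #0x10
 *	b	cpu_rev_var_ls
 * endfunc check_errata_xxxxxx
 */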

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or the same as
 * the given value, the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range (inclusive of both bounds), the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range
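
/*
 * Illustrative check (hypothetical bounds): a range-based check mirrors
 * the cpu_rev_var_ls pattern above but supplies both ends of the
 * affected window, here r1p0 (0x10) through r2p1 (0x21) inclusive:
 *
 * func check_errata_yyyyyy
 *	mov	x1, #0x10
 *	mov	x2, #0x21
 *	b	cpu_rev_var_range
 * endfunc check_errata_yyyyyy
 */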

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print the errata status for CPUs of this class. Must be called
 * only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve the pointer to cpu_ops from per-CPU data and, from that,
	 * the errata printing function. If it's non-NULL, jump to it.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and the printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to the errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when the firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when the firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when the firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715, so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable the mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
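
/*
 * Sketch of an assumed call site (not part of this file): a caller that
 * wants to turn the dynamic CVE-2018-3639 mitigation off checks the
 * returned pointer for NULL before branching to it:
 *
 *	bl	wa_cve_2018_3639_get_disable_ptr
 *	cbz	x0, 1f		// static mitigation or unaffected core
 *	blr	x0		// invoke the per-CPU disable routine
 * 1:
 */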

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies
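
/*
 * Illustrative checker (names follow the usual convention for CPU
 * library files and are shown as an assumed example): the
 * CPU_EXTRA3_FUNC slot consulted above typically points at a function
 * of this shape on affected cores:
 *
 * func check_errata_cve_2022_23960
 * #if WORKAROUND_CVE_2022_23960
 *	mov	x0, #ERRATA_APPLIES
 * #else
 *	mov	x0, #ERRATA_MISSING
 * #endif
 *	ret
 * endfunc check_errata_cve_2022_23960
 */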