/*
 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <denver.h>
#include <cpu_macros.S>
#include <plat_macros.S>

/* Emit the framework-required per-CPU reset prologue for Denver */
cpu_reset_prologue denver

	/* -------------------------------------------------
	 * CVE-2017-5715 mitigation
	 *
	 * Flush the indirect branch predictor and RSB on
	 * entry to EL3 by issuing a newly added instruction
	 * for Denver CPUs.
	 *
	 * To achieve this without performing any branch
	 * instruction, a per-cpu vbar is installed which
	 * executes the workaround and then branches off to
	 * the corresponding vector entry in the main vector
	 * table.
	 * -------------------------------------------------
	 */
vector_base workaround_bpflush_runtime_exceptions

	/*
	 * Perform the predictor/RSB flush on EL3 entry.
	 *
	 * x0/x1 are the only registers used, and they are
	 * preserved by spilling them to the GP-reg slots of
	 * the EL3 CPU context (SP_EL3 points at the context
	 * on exception entry), so the macro is transparent
	 * to the vector code it front-ends. Each expansion
	 * must stay small enough to fit, together with the
	 * following branch, in one 0x80-byte vector slot.
	 */
	.macro apply_workaround
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]

	/* Disable cycle counter when event counting is prohibited */
	mrs	x1, pmcr_el0
	orr	x0, x1, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x0
	isb

	/* -------------------------------------------------
	 * A new write-only system register where a write of
	 * 1 to bit 0 will cause the indirect branch predictor
	 * and RSB to be flushed.
	 *
	 * A write of 0 to bit 0 will be ignored. A write of
	 * 1 to any other bit will cause an MCA.
	 * -------------------------------------------------
	 */
	mov	x0, #1
	msr	s3_0_c15_c0_6, x0
	isb

	/* Restore the scratch registers saved above */
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	.endm

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 *
	 * Exceptions taken from EL3 itself do not apply the workaround; they
	 * simply forward to the main vector table.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry workaround_bpflush_sync_exception_sp_el0

vector_entry workaround_bpflush_irq_sp_el0
	b	irq_sp_el0
end_vector_entry workaround_bpflush_irq_sp_el0

vector_entry workaround_bpflush_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry workaround_bpflush_fiq_sp_el0

vector_entry workaround_bpflush_serror_sp_el0
	b	serror_sp_el0
end_vector_entry workaround_bpflush_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry workaround_bpflush_sync_exception_sp_elx

vector_entry workaround_bpflush_irq_sp_elx
	b	irq_sp_elx
end_vector_entry workaround_bpflush_irq_sp_elx

vector_entry workaround_bpflush_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry workaround_bpflush_fiq_sp_elx

vector_entry workaround_bpflush_serror_sp_elx
	b	serror_sp_elx
end_vector_entry workaround_bpflush_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 *
	 * Entries from a lower EL flush the branch predictor state before
	 * branching to the corresponding main vector entry.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch64
	apply_workaround
	b	sync_exception_aarch64
end_vector_entry workaround_bpflush_sync_exception_aarch64

vector_entry workaround_bpflush_irq_aarch64
	apply_workaround
	b	irq_aarch64
end_vector_entry workaround_bpflush_irq_aarch64

vector_entry workaround_bpflush_fiq_aarch64
	apply_workaround
	b	fiq_aarch64
end_vector_entry workaround_bpflush_fiq_aarch64

vector_entry workaround_bpflush_serror_aarch64
	apply_workaround
	b	serror_aarch64
end_vector_entry workaround_bpflush_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch32
	apply_workaround
	b	sync_exception_aarch32
end_vector_entry workaround_bpflush_sync_exception_aarch32

vector_entry workaround_bpflush_irq_aarch32
	apply_workaround
	b	irq_aarch32
end_vector_entry workaround_bpflush_irq_aarch32

vector_entry workaround_bpflush_fiq_aarch32
	apply_workaround
	b	fiq_aarch32
end_vector_entry workaround_bpflush_fiq_aarch32

vector_entry workaround_bpflush_serror_aarch32
	apply_workaround
	b	serror_aarch32
end_vector_entry workaround_bpflush_serror_aarch32

	/* Exported for use by platform power-management code */
	.global	denver_disable_dco

	/* ---------------------------------------------
	 * Disable debug interfaces
	 *
	 * Sets OSDLR_EL1.DLK (bit 0) to lock the OS
	 * double-lock, quiescing external debug before
	 * power down. Clobbers x0.
	 * ---------------------------------------------
	 */
func denver_disable_ext_debug
	mov	x0, #1
	msr	osdlr_el1, x0
	isb
	dsb	sy
	ret
endfunc denver_disable_ext_debug

	/* ----------------------------------------------------
	 * Enable dynamic code optimizer (DCO)
	 *
	 * Writes bit <core_pos> of the implementation-defined
	 * DCO control register (s3_0_c15_c0_2) to enable DCO
	 * for the calling core. Clobbers x0-x2, x18.
	 * NOTE(review): x18 is used to stash the link register
	 * across the plat_my_core_pos() call; this follows the
	 * file's existing convention, but x18 is the platform
	 * register in AAPCS64 - callers must not rely on it.
	 * ----------------------------------------------------
	 */
func denver_enable_dco
	/* DCO is not supported on PN5 and later */
	mrs	x1, midr_el1
	mov_imm	x2, DENVER_MIDR_PN4
	cmp	x1, x2
	b.hi	1f

	mov	x18, x30
	bl	plat_my_core_pos
	/* x0 = core position; set this core's enable bit */
	mov	x1, #1
	lsl	x1, x1, x0
	msr	s3_0_c15_c0_2, x1
	mov	x30, x18
1:	ret
endfunc denver_enable_dco
	/* ----------------------------------------------------
	 * Disable dynamic code optimizer (DCO)
	 *
	 * Requests DCO background work off for the calling
	 * core and spins until the hardware reports it has
	 * stopped. Clobbers x0-x2, x18 (link-register stash,
	 * per this file's convention).
	 * ----------------------------------------------------
	 */
func denver_disable_dco
	/* DCO is not supported on PN5 and later */
	mrs	x1, midr_el1
	mov_imm	x2, DENVER_MIDR_PN4
	cmp	x1, x2
	b.hi	2f

	/* turn off background work */
	mov	x18, x30
	bl	plat_my_core_pos
	/* x1 = this core's bit; request-off bits live 16 higher */
	mov	x1, #1
	lsl	x1, x1, x0
	lsl	x2, x1, #16
	msr	s3_0_c15_c0_2, x2
	isb

	/*
	 * wait till the background work turns off: bits 47:32 of
	 * s3_0_c15_c0_2 report per-core busy status (implementation
	 * defined) - poll until this core's status bit clears.
	 */
1:	mrs	x2, s3_0_c15_c0_2
	lsr	x2, x2, #32
	and	w2, w2, 0xFFFF
	and	x2, x2, x1
	cbnz	x2, 1b

	mov	x30, x18
2:	ret
endfunc denver_disable_dco

	/*
	 * CVE-2017-5715 reset workaround: point VBAR_EL3 at the
	 * per-CPU vector table that flushes the branch predictor
	 * on entry from a lower EL. Only relevant in BL31, which
	 * owns the EL3 runtime vectors.
	 */
workaround_reset_start denver, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
#if IMAGE_BL31
	adr	x1, workaround_bpflush_runtime_exceptions
	msr	vbar_el3, x1
#endif
workaround_reset_end denver, CVE(2017, 5715)

	/*
	 * Returns ERRATA_APPLIES in x0 when the CPU implements the
	 * predictor-flush instruction, ERRATA_MISSING otherwise.
	 */
check_erratum_custom_start denver, CVE(2017, 5715)
	mov	x0, #ERRATA_MISSING
#if WORKAROUND_CVE_2017_5715
	/*
	 * Check if the CPU supports the special instruction
	 * required to flush the indirect branch predictor and
	 * RSB. Support for this operation can be determined by
	 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
	 * (The code tests bit 16 only, which is sufficient to
	 * distinguish 0b0001 from 0b0000 here.)
	 */
	mrs	x1, id_afr0_el1
	mov	x2, #0x10000
	and	x1, x1, x2
	cbz	x1, 1f
	mov	x0, #ERRATA_APPLIES
1:
#endif
	ret
check_erratum_custom_end denver, CVE(2017, 5715)

workaround_reset_start denver, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
	/*
	 * Denver CPUs with DENVER_MIDR_PN3 or earlier, use different
	 * bits in the ACTLR_EL3 register to disable speculative
	 * store buffer and memory disambiguation.
	 */
	mrs	x0, midr_el1
	mov_imm	x1, DENVER_MIDR_PN4
	cmp	x0, x1
	/* csel below keys off the MIDR comparison: ne = not PN4 */
	mrs	x0, actlr_el3
	mov	x1, #(DENVER_CPU_DIS_MD_EL3 | DENVER_CPU_DIS_SSB_EL3)
	mov	x2, #(DENVER_PN4_CPU_DIS_MD_EL3 | DENVER_PN4_CPU_DIS_SSB_EL3)
	csel	x3, x1, x2, ne
	orr	x0, x0, x3
	msr	actlr_el3, x0
	isb
	dsb	sy
workaround_reset_end denver, CVE(2018, 3639)

check_erratum_chosen denver, CVE(2018, 3639), WORKAROUND_CVE_2018_3639

	/* Reset handler: applied on every core power-on */
cpu_reset_func_start denver
	/* ----------------------------------------------------
	 * Reset ACTLR.PMSTATE to C1 state
	 * ----------------------------------------------------
	 */
	mrs	x0, actlr_el1
	bic	x0, x0, #DENVER_CPU_PMSTATE_MASK
	orr	x0, x0, #DENVER_CPU_PMSTATE_C1
	msr	actlr_el1, x0

	/* ----------------------------------------------------
	 * Enable dynamic code optimizer (DCO)
	 * ----------------------------------------------------
	 */
	bl	denver_enable_dco
cpu_reset_func_end denver

	/* ----------------------------------------------------
	 * The CPU Ops core power down function for Denver.
	 * ----------------------------------------------------
	 */
func denver_core_pwr_dwn

	/* x19 is callee-saved; preserve the link register in it */
	mov	x19, x30

	/* ---------------------------------------------
	 * Force the debug interfaces to be quiescent
	 * ---------------------------------------------
	 */
	bl	denver_disable_ext_debug

	ret	x19
endfunc denver_core_pwr_dwn

	/* -------------------------------------------------------
	 * The CPU Ops cluster power down function for Denver.
	 * No cluster-level action is required.
	 * -------------------------------------------------------
	 */
func denver_cluster_pwr_dwn
	ret
endfunc denver_cluster_pwr_dwn

	/* ---------------------------------------------
	 * This function provides Denver specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.denver_regs, "aS"
denver_regs: /* The ascii list of register names to be reported */
	/* Empty string terminates the name list */
	.asciz	"actlr_el1", ""

func denver_cpu_reg_dump
	adr	x6, denver_regs
	mrs	x8, ACTLR_EL1
	ret
endfunc denver_cpu_reg_dump

/* macro to declare cpu_ops for Denver SKUs */
.macro denver_cpu_ops_wa midr
	declare_cpu_ops_wa denver, \midr, \
		denver_reset_func, \
		check_erratum_denver_5715, \
		CPU_NO_EXTRA2_FUNC, \
		CPU_NO_EXTRA3_FUNC, \
		denver_core_pwr_dwn, \
		denver_cluster_pwr_dwn
.endm

/* One cpu_ops instance per Denver MIDR variant (PN0..PN9) */
denver_cpu_ops_wa DENVER_MIDR_PN0
denver_cpu_ops_wa DENVER_MIDR_PN1
denver_cpu_ops_wa DENVER_MIDR_PN2
denver_cpu_ops_wa DENVER_MIDR_PN3
denver_cpu_ops_wa DENVER_MIDR_PN4
denver_cpu_ops_wa DENVER_MIDR_PN5
denver_cpu_ops_wa DENVER_MIDR_PN6
denver_cpu_ops_wa DENVER_MIDR_PN7
denver_cpu_ops_wa DENVER_MIDR_PN8
denver_cpu_ops_wa DENVER_MIDR_PN9