/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <denver.h>
#include <cpu_macros.S>
#include <plat_macros.S>

	/* -------------------------------------------------
	 * CVE-2017-5715 mitigation
	 *
	 * Flush the indirect branch predictor and RSB on
	 * entry to EL3 by issuing a newly added instruction
	 * for Denver CPUs.
	 *
	 * To achieve this without performing any branch
	 * instruction, a per-cpu vbar is installed which
	 * executes the workaround and then branches off to
	 * the corresponding vector entry in the main vector
	 * table.
	 * -------------------------------------------------
	 */
	.globl	workaround_bpflush_runtime_exceptions

vector_base workaround_bpflush_runtime_exceptions

	.macro	apply_workaround
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]

	/* Disable cycle counter when event counting is prohibited */
	mrs	x1, pmcr_el0
	orr	x0, x1, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x0
	isb

	/* -------------------------------------------------
	 * A new write-only system register where a write of
	 * 1 to bit 0 will cause the indirect branch predictor
	 * and RSB to be flushed.
	 *
	 * A write of 0 to bit 0 will be ignored. A write of
	 * 1 to any other bit will cause an MCA.
	 * -------------------------------------------------
	 */
	mov	x0, #1
	msr	s3_0_c15_c0_6, x0
	isb

	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	.endm

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry workaround_bpflush_sync_exception_sp_el0

vector_entry workaround_bpflush_irq_sp_el0
	b	irq_sp_el0
end_vector_entry workaround_bpflush_irq_sp_el0

vector_entry workaround_bpflush_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry workaround_bpflush_fiq_sp_el0

vector_entry workaround_bpflush_serror_sp_el0
	b	serror_sp_el0
end_vector_entry workaround_bpflush_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry workaround_bpflush_sync_exception_sp_elx

vector_entry workaround_bpflush_irq_sp_elx
	b	irq_sp_elx
end_vector_entry workaround_bpflush_irq_sp_elx

vector_entry workaround_bpflush_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry workaround_bpflush_fiq_sp_elx

vector_entry workaround_bpflush_serror_sp_elx
	b	serror_sp_elx
end_vector_entry workaround_bpflush_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch64
	apply_workaround
	b	sync_exception_aarch64
end_vector_entry workaround_bpflush_sync_exception_aarch64

vector_entry workaround_bpflush_irq_aarch64
	apply_workaround
	b	irq_aarch64
end_vector_entry workaround_bpflush_irq_aarch64

vector_entry workaround_bpflush_fiq_aarch64
	apply_workaround
	b	fiq_aarch64
end_vector_entry workaround_bpflush_fiq_aarch64

vector_entry workaround_bpflush_serror_aarch64
	apply_workaround
	b	serror_aarch64
end_vector_entry workaround_bpflush_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch32
	apply_workaround
	b	sync_exception_aarch32
end_vector_entry workaround_bpflush_sync_exception_aarch32

vector_entry workaround_bpflush_irq_aarch32
	apply_workaround
	b	irq_aarch32
end_vector_entry workaround_bpflush_irq_aarch32

vector_entry workaround_bpflush_fiq_aarch32
	apply_workaround
	b	fiq_aarch32
end_vector_entry workaround_bpflush_fiq_aarch32

vector_entry workaround_bpflush_serror_aarch32
	apply_workaround
	b	serror_aarch32
end_vector_entry workaround_bpflush_serror_aarch32

	.global	denver_disable_dco

	/* ---------------------------------------------
	 * Disable debug interfaces
	 * ---------------------------------------------
	 */
func denver_disable_ext_debug
	mov	x0, #1
	msr	osdlr_el1, x0
	isb
	dsb	sy
	ret
endfunc denver_disable_ext_debug
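
	/* ----------------------------------------------------
	 * The dynamic code optimizer (DCO) is controlled via
	 * the implementation-defined register s3_0_c15_c0_2.
	 * As used below, bits [15:0] request enable and bits
	 * [31:16] request disable of background work, one bit
	 * per core, while bits [47:32] report, per core,
	 * whether background work is still in flight. This
	 * layout is inferred from the accesses in this file,
	 * not from published documentation.
	 * ----------------------------------------------------
	 */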

	/* ----------------------------------------------------
	 * Enable dynamic code optimizer (DCO)
	 * ----------------------------------------------------
	 */
func denver_enable_dco
	mov	x18, x30
	bl	plat_my_core_pos
	mov	x1, #1
	lsl	x1, x1, x0
	msr	s3_0_c15_c0_2, x1
	mov	x30, x18
	ret
endfunc denver_enable_dco

	/* ----------------------------------------------------
	 * Disable dynamic code optimizer (DCO)
	 * ----------------------------------------------------
	 */
func denver_disable_dco

	mov	x18, x30

	/* turn off background work */
	bl	plat_my_core_pos
	mov	x1, #1
	lsl	x1, x1, x0
	lsl	x2, x1, #16
	msr	s3_0_c15_c0_2, x2
	isb

	/* wait until the background work turns off */
1:	mrs	x2, s3_0_c15_c0_2
	lsr	x2, x2, #32
	and	w2, w2, 0xFFFF
	and	x2, x2, x1
	cbnz	x2, 1b

	mov	x30, x18
	ret
endfunc denver_disable_dco
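
	/* ----------------------------------------------------
	 * Standard errata checker functions. Each returns
	 * ERRATA_APPLIES or ERRATA_MISSING in x0. Note that
	 * the CVE-2017-5715 check also probes ID_AFR0_EL1 at
	 * runtime, whereas the CVE-2018-3639 check depends
	 * only on the build-time option.
	 * ----------------------------------------------------
	 */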

func check_errata_cve_2017_5715
	mov	x0, #ERRATA_MISSING
#if WORKAROUND_CVE_2017_5715
	/*
	 * Check if the CPU supports the special instruction
	 * required to flush the indirect branch predictor and
	 * RSB. Support for this operation can be determined by
	 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
	 */
	mrs	x1, id_afr0_el1
	mov	x2, #0x10000
	and	x1, x1, x2
	cbz	x1, 1f
	mov	x0, #ERRATA_APPLIES
1:
#endif
	ret
endfunc check_errata_cve_2017_5715

func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif
	ret
endfunc check_errata_cve_2018_3639

	/* -------------------------------------------------
	 * The CPU Ops reset function for Denver.
	 * -------------------------------------------------
	 */
func denver_reset_func

	mov	x19, x30

#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
	/*
	 * Check if the CPU supports the special instruction
	 * required to flush the indirect branch predictor and
	 * RSB. Support for this operation can be determined by
	 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
	 */
	mrs	x0, id_afr0_el1
	mov	x1, #0x10000
	and	x0, x0, x1
	cmp	x0, #0
	adr	x1, workaround_bpflush_runtime_exceptions
	mrs	x2, vbar_el3
	csel	x0, x1, x2, ne
	msr	vbar_el3, x0
#endif

#if WORKAROUND_CVE_2018_3639
	/*
	 * Denver CPUs with DENVER_MIDR_PN3 or earlier use different
	 * bits in the ACTLR_EL3 register to disable speculative
	 * store buffer and memory disambiguation.
	 */
	mrs	x0, midr_el1
	mov_imm	x1, DENVER_MIDR_PN4
	cmp	x0, x1
	mrs	x0, actlr_el3
	mov	x1, #(DENVER_CPU_DIS_MD_EL3 | DENVER_CPU_DIS_SSB_EL3)
	mov	x2, #(DENVER_PN4_CPU_DIS_MD_EL3 | DENVER_PN4_CPU_DIS_SSB_EL3)
	csel	x3, x1, x2, ne
	orr	x0, x0, x3
	msr	actlr_el3, x0
	isb
	dsb	sy
#endif

	/* ----------------------------------------------------
	 * Reset ACTLR.PMSTATE to C1 state
	 * ----------------------------------------------------
	 */
	mrs	x0, actlr_el1
	bic	x0, x0, #DENVER_CPU_PMSTATE_MASK
	orr	x0, x0, #DENVER_CPU_PMSTATE_C1
	msr	actlr_el1, x0

	/* ----------------------------------------------------
	 * Enable dynamic code optimizer (DCO)
	 * ----------------------------------------------------
	 */
	bl	denver_enable_dco

	ret	x19
endfunc denver_reset_func

	/* ----------------------------------------------------
	 * The CPU Ops core power down function for Denver.
	 * ----------------------------------------------------
	 */
func denver_core_pwr_dwn

	mov	x19, x30

	/* ---------------------------------------------
	 * Force the debug interfaces to be quiescent
	 * ---------------------------------------------
	 */
	bl	denver_disable_ext_debug

	ret	x19
endfunc denver_core_pwr_dwn

	/* -------------------------------------------------------
	 * The CPU Ops cluster power down function for Denver.
	 * -------------------------------------------------------
	 */
func denver_cluster_pwr_dwn
	ret
endfunc denver_cluster_pwr_dwn

#if REPORT_ERRATA
	/*
	 * Errata printing function for Denver. Must follow AAPCS.
	 */
func denver_errata_report
	stp	x8, x30, [sp, #-16]!

	bl	cpu_get_rev_var
	mov	x8, x0

	/*
	 * Report all errata. The revision-variant information is passed
	 * to the checking functions of each erratum.
	 */
	report_errata WORKAROUND_CVE_2017_5715, denver, cve_2017_5715
	report_errata WORKAROUND_CVE_2018_3639, denver, cve_2018_3639

	ldp	x8, x30, [sp], #16
	ret
endfunc denver_errata_report
#endif

	/* ---------------------------------------------
	 * This function provides Denver specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.denver_regs, "aS"
denver_regs:  /* The ascii list of register names to be reported */
	.asciz	"actlr_el1", ""

func denver_cpu_reg_dump
	adr	x6, denver_regs
	mrs	x8, ACTLR_EL1
	ret
endfunc denver_cpu_reg_dump
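
	/* ----------------------------------------------------
	 * Register the CPU ops for each supported Denver MIDR.
	 * declare_cpu_ops_wa additionally wires up
	 * check_errata_cve_2017_5715 as the extra1 workaround
	 * discovery hook; no extra2 handler is needed here,
	 * hence CPU_NO_EXTRA2_FUNC.
	 * ----------------------------------------------------
	 */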

declare_cpu_ops_wa denver, DENVER_MIDR_PN0, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN1, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN2, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN3, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN4, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn