/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_data.h>
#include <cpu_macros.S>
#include <debug.h>
#include <errata_report.h>

	/* The reset handler is needed in any BL image that runs at the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* The core and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU for power down, on all platforms. The function takes
	 * the power domain level to be powered down as its parameter. After
	 * the cpu_ops pointer is retrieved from cpu_data, the handler for the
	 * requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if it is not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The CPU-specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif
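
/*
 * For reference: the cpu_ops entries that get_cpu_ops_ptr (below) searches
 * are emitted into the __CPU_OPS_START__ .. __CPU_OPS_END__ region by the
 * declare_cpu_ops macro from cpu_macros.S, invoked once in each CPU library
 * file. A minimal sketch of such a registration, modelled on
 * lib/cpus/aarch64/cortex_a53.S (treat the exact handler names and argument
 * list as illustrative):
 *
 *	declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
 *		cortex_a53_reset_func, \
 *		cortex_a53_core_pwr_dwn, \
 *		cortex_a53_cluster_pwr_dwn
 */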

/*
 * The below function returns a pointer to the cpu_ops structure matching the
 * MIDR of the core. It reads MIDR_EL1 and finds the matching entry in the
 * cpu_ops list. Only the implementer and part number fields are used to
 * match the entries.
 * Return :
 *	x0 - The matching cpu_ops pointer on success
 *	x0 - 0 on failure.
 * Clobbers : x0 - x5
 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3
1:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the midr from the cpu_ops entry and advance to the next one */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract the CPU revision and variant, and combine them into a single
 * numeric value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] fields from MIDR, and
	 * pack them as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining the other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. Returns ERRATA_APPLIES in x0 if the revision-variant
 * is less than or equal to the given value, and ERRATA_NOT_APPLIES otherwise.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. Returns ERRATA_APPLIES in x0 if the revision-variant
 * is higher than or equal to the given value, and ERRATA_NOT_APPLIES
 * otherwise.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs
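
/*
 * A typical CPU errata check pairs the two comparators above with
 * cpu_get_rev_var: the caller obtains the packed revision-variant in x0 and
 * then tail-calls a comparator with the erratum's cut-off in x1. A minimal
 * sketch, assuming a hypothetical erratum that applies up to and including
 * r1p2 (the function name and cut-off are illustrative):
 *
 *	func check_errata_xyz
 *		// x0 holds the value returned by cpu_get_rev_var
 *		mov	x1, #0x12	// variant 1, revision 2
 *		b	cpu_rev_var_ls
 *	endfunc check_errata_xyz
 */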

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print the errata status for CPUs of the executing core's
 * class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly. The xzr slot only keeps the stack 16-byte aligned while
	 * x30 is preserved across the call.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve the pointer to cpu_ops from per-CPU data, and from it the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * the errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to the errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_workaround_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when the firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when the firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when the firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_workaround_cve_2017_5715
func check_workaround_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715, so bail out.
	 */
	cmp	x0, #0
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_workaround_cve_2017_5715
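
/*
 * check_workaround_cve_2017_5715 dispatches to whatever function a CPU
 * library file installed in the CPU_EXTRA1_FUNC slot. A minimal sketch of
 * such a provider, modelled on the affected-CPU files (e.g.
 * lib/cpus/aarch64/cortex_a73.S); it reports ERRATA_MISSING when the
 * mitigation was not compiled in:
 *
 *	func check_errata_cve_2017_5715
 *	#if WORKAROUND_CVE_2017_5715
 *		mov	x0, #ERRATA_APPLIES
 *	#else
 *		mov	x0, #ERRATA_MISSING
 *	#endif
 *		ret
 *	endfunc check_errata_cve_2017_5715
 *
 * The pointer is registered via the declare_cpu_ops_wa variant of
 * declare_cpu_ops, which additionally accepts the extra1/extra2 workaround
 * function slots.
 */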