/*
 * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#if IMAGE_BL31
#include <cpu_data.h>
#endif
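
	/*
	 * For reference, the cpu_ops entries walked by the helpers in this
	 * file can be pictured as the C structure sketched below. This is
	 * only an illustrative mirror of the offsets used here (CPU_MIDR,
	 * CPU_RESET_FUNC, CPU_PWR_DWN_CORE, CPU_PWR_DWN_CLUSTER and
	 * CPU_REG_DUMP); the type and field names are hypothetical, the
	 * conditional fields mirror the build guards used in this file, and
	 * the authoritative layout is the one defined in cpu_macros.S.
	 *
	 *	typedef struct cpu_ops {
	 *		unsigned long midr;			// CPU_MIDR
	 *	#if IMAGE_BL1 || IMAGE_BL31
	 *		void (*reset_func)(void);		// CPU_RESET_FUNC
	 *	#endif
	 *	#if IMAGE_BL31
	 *		void (*core_pwr_dwn)(void);		// CPU_PWR_DWN_CORE
	 *		void (*cluster_pwr_dwn)(void);		// CPU_PWR_DWN_CLUSTER
	 *	#endif
	 *	#if IMAGE_BL31 && CRASH_REPORTING
	 *		void (*cpu_reg_dump)(void);		// CPU_REG_DUMP
	 *	#endif
	 *	} cpu_ops_t;
	 */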

	/* The reset handler is needed in BL images entered at the reset vector */
#if IMAGE_BL1 || IMAGE_BL31
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif /* IMAGE_BL1 || IMAGE_BL31 */

#if IMAGE_BL31 /* The core and cluster power down functions are needed only in BL31 */
	/*
	 * The prepare core power down function for all platforms. After
	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
	 * pwr_dwn_core in the cpu_ops is invoked.
	 */
	.globl	prepare_core_pwr_dwn
func prepare_core_pwr_dwn
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops core_pwr_dwn handler */
	ldr	x1, [x0, #CPU_PWR_DWN_CORE]
	br	x1
endfunc prepare_core_pwr_dwn

	/*
	 * The prepare cluster power down function for all platforms. After
	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
	 * pwr_dwn_cluster in the cpu_ops is invoked.
	 */
	.globl	prepare_cluster_pwr_dwn
func prepare_cluster_pwr_dwn
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops cluster_pwr_dwn handler */
	ldr	x1, [x0, #CPU_PWR_DWN_CLUSTER]
	br	x1
endfunc prepare_cluster_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if IMAGE_BL31 && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementation and
	 * part number are used to match the entries.
	 * Return :
	 *	x0 - The matching cpu_ops pointer on success
	 *	x0 - 0 on failure
	 * Clobbers: x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using the mask */
	and	w2, w2, w3
1:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the midr from the cpu_ops entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr

#if DEBUG
	/*
	 * This function prints a warning message to the crash console
	 * if the CPU revision/part number does not match the errata
	 * workaround enabled in the build.
	 * Clobbers: x30, x0 - x5
	 */
.section .rodata.rev_warn_str, "aS"
rev_warn_str:
	.asciz "Warning: Skipping errata workaround for non-matching CPU revision number.\n"

	.globl	print_revision_warning
func print_revision_warning
	mov	x5, x30
	/* Ensure the console is initialized */
	bl	plat_crash_console_init
	/* Check if the console is initialized */
	cbz	x0, 1f
	/* The console is initialized */
	adr	x4, rev_warn_str
	bl	asm_print_str
1:
	ret	x5
endfunc print_revision_warning
#endif
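
	/*
	 * For reference, the linear MIDR search implemented by
	 * get_cpu_ops_ptr above is logically equivalent to the C sketch
	 * below. The sketch assumes the illustrative cpu_ops_t layout
	 * pictured near the top of this file and a hypothetical read_midr()
	 * accessor for MIDR_EL1; it is illustrative only and not part of
	 * the build.
	 *
	 *	extern cpu_ops_t __CPU_OPS_START__[], __CPU_OPS_END__[];
	 *
	 *	cpu_ops_t *get_cpu_ops_ptr(void)
	 *	{
	 *		unsigned int midr = read_midr() & CPU_IMPL_PN_MASK;
	 *		cpu_ops_t *ops;
	 *
	 *		for (ops = __CPU_OPS_START__; ops < __CPU_OPS_END__; ops++)
	 *			if ((ops->midr & CPU_IMPL_PN_MASK) == midr)
	 *				return ops;
	 *
	 *		return NULL;	// no match: callers see x0 == 0
	 *	}
	 *
	 * The entries are expected to be collected by the linker into the
	 * region bounded by __CPU_OPS_START__ and __CPU_OPS_END__, so adding
	 * support for a new CPU adds an entry without changing this loop.
	 */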