/*
 * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#if IMAGE_BL31
#include <cpu_data.h>
#endif
#include <debug.h>

	/* Reset fn is needed in BL at reset vector */
#if IMAGE_BL1 || IMAGE_BL31
	/*
	 * The reset handler common to all platforms.  After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.  The CPU specific handler is entered
	 * via a tail call (br), so it returns directly to our caller.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	/* Preserve our return address across the calls below */
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	/* Restore the original return address before dispatching */
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif /* IMAGE_BL1 || IMAGE_BL31 */

#if IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
	/*
	 * The prepare core power down function for all platforms.  After
	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
	 * pwr_dwn_core in the cpu_ops is invoked.  Tail calls the handler,
	 * so it returns directly to our caller.
	 */
	.globl	prepare_core_pwr_dwn
func prepare_core_pwr_dwn
	/* cpu_data for this core is reachable via tpidr_el3 */
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops core_pwr_dwn handler */
	ldr	x1, [x0, #CPU_PWR_DWN_CORE]
	br	x1
endfunc prepare_core_pwr_dwn

	/*
	 * The prepare cluster power down function for all platforms.  After
	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
	 * pwr_dwn_cluster in the cpu_ops is invoked.  Tail calls the handler,
	 * so it returns directly to our caller.
	 */
	.globl	prepare_cluster_pwr_dwn
func prepare_cluster_pwr_dwn
	/* cpu_data for this core is reachable via tpidr_el3 */
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops cluster_pwr_dwn handler */
	ldr	x1, [x0, #CPU_PWR_DWN_CLUSTER]
	br	x1
endfunc prepare_cluster_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data.  This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	/* Already initialized? Then nothing to do */
	cbnz	x0, 1f
	/* get_cpu_ops_ptr clobbers x30; stash it in x10 (no stack needed) */
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	/*
	 * Cache the pointer in cpu_data.  Note: a plain store, no
	 * pre-index writeback — the base register must not be updated.
	 */
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if IMAGE_BL31 && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via cpu_ops cpu_reg_dump function.  After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	/* Preserve the return address (x16 survives the calls below) */
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core.  It reads the MIDR_EL1 and finds the matching
	 * entry in cpu_ops entries.  Only the implementation and part number
	 * are used to match the entries.
	 * Return :
	 *	x0 - The matching cpu_ops pointer on Success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/*
	 * Get the cpu_ops start and end locations, pre-offset by
	 * CPU_MIDR so each iteration points straight at the midr field.
	 */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3
1:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	error_exit

	/* load the midr from the cpu_ops, advancing x4 to the next entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
.section .rodata.rev_verbose_str, "aS"
rev_verbose_str:
	.asciz "VERBOSE: Skipping CPU specific reset operation for non-matching CPU revision number.\n"

	/*
	 * This function prints the above warning message to the crash console.
	 * It should be called when a CPU specific operation is enabled in the
	 * build but doesn't apply to this CPU revision/part number.
	 *
	 * Clobber: x30, x0 - x5
	 */
	.globl	print_revision_warning
func print_revision_warning
	/* Preserve the return address (x5 survives the calls below) */
	mov	x5, x30
	/* Ensure the console is initialized */
	bl	plat_crash_console_init
	/* Check if the console is initialized */
	cbz	x0, 1f
	/* The console is initialized; asm_print_str takes its string in x4 */
	adr	x4, rev_verbose_str
	bl	asm_print_str
1:
	ret	x5
endfunc print_revision_warning
#endif