/*
 * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#if IMAGE_BL31
#include <cpu_data.h>
#endif

	/* Reset fn is needed in BL at reset vector */
#if IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 *
	 * This runs from the reset vector, i.e. without a runtime stack:
	 * the return address is kept in x10 across the two 'bl' calls below.
	 * NOTE(review): this requires plat_reset_handler and get_cpu_ops_ptr
	 * to preserve x10 — confirm against their clobber contracts.
	 */
	.globl	reset_handler
func reset_handler
	/* Save the link register; no stack is available at reset */
	mov	x10, x30

	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	/* A zero entry means this CPU has no reset handler; skip the call */
	cbz	x2, 1f
	blr	x2
1:
	/* Return to the caller saved in x10 */
	ret	x10

#endif /* IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31) */

#if IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
	/*
	 * The prepare core power down function for all platforms. After
	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
	 * pwr_dwn_core in the cpu_ops is invoked.
	 *
	 * Tail-calls the handler ('br', not 'blr'), so the handler returns
	 * directly to this function's caller in x30.
	 */
	.globl	prepare_core_pwr_dwn
func prepare_core_pwr_dwn
	/* tpidr_el3 holds this core's per-cpu data pointer in BL31 */
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops core_pwr_dwn handler */
	ldr	x1, [x0, #CPU_PWR_DWN_CORE]
	br	x1

	/*
	 * The prepare cluster power down function for all platforms. After
	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
	 * pwr_dwn_cluster in the cpu_ops is invoked.
	 *
	 * Tail-calls the handler ('br', not 'blr'), so the handler returns
	 * directly to this function's caller in x30.
	 */
	.globl	prepare_cluster_pwr_dwn
func prepare_cluster_pwr_dwn
	/* tpidr_el3 holds this core's per-cpu data pointer in BL31 */
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops cluster_pwr_dwn handler */
	ldr	x1, [x0, #CPU_PWR_DWN_CLUSTER]
	br	x1


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	/* Already initialized: nothing to do */
	cbnz	x0, 1f
	/* No stack: preserve the link register in x10 across the call */
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	/* Cache the cpu_ops pointer in per-cpu data for later lookups */
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	mov	x30, x10
1:
	ret
#endif /* IMAGE_BL31 */

#if IMAGE_BL31 && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 *
	 * Crash path: no assertion on a missing cpu_ops entry; a lookup
	 * failure or a zero cpu_reg_dump entry silently skips the dump.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	/* Preserve the link register across the lookup call */
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in cpu_ops entries. Only the implementation and part number
	 * are used to match the entries.
	 * Return :
	 *	x0 - The matching cpu_ops pointer on Success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/*
	 * Get the cpu_ops list bounds (linker-defined symbols), biased by
	 * CPU_MIDR so each iteration addresses the midr field directly.
	 */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3
1:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	error_exit

	/*
	 * Load the midr of this cpu_ops entry; post-index by CPU_OPS_SIZE
	 * advances x4 to the midr field of the next entry.
	 */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret