/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot; allow them to set that in their boot0.h file and then
 * use it here.
 */
#include <asm/arch/boot0.h>
#else
	b	reset
#endif

#if !CONFIG_IS_ENABLED(TINY_FRAMEWORK)
	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
#if defined(CONFIG_SPL_BUILD)
	.quad	CONFIG_SPL_TEXT_BASE
#else
	.quad	CONFIG_SYS_TEXT_BASE
#endif

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:

#if CONFIG_POSITION_INDEPENDENT
	/*
	 * Fix .rela.dyn relocations. This allows U-Boot to be loaded to and
	 * executed at a different address than it was linked at.
	 */
pie_fixup:
	adr	x0, _start		/* x0 <- Runtime value of _start */
	ldr	x1, _TEXT_BASE		/* x1 <- Linked value of _start */
	sub	x9, x0, x1		/* x9 <- Run-vs-link offset */
	adr	x2, __rel_dyn_start	/* x2 <- Runtime &__rel_dyn_start */
	adr	x3, __rel_dyn_end	/* x3 <- Runtime &__rel_dyn_end */
pie_fix_loop:
	ldp	x0, x1, [x2], #16	/* (x0, x1) <- (Link location, fixup) */
	ldr	x4, [x2], #8		/* x4 <- addend */
	cmp	w1, #1027		/* relative fixup? */
	bne	pie_skip_reloc
	/* relative fix: store addend plus offset at dest location */
	add	x0, x0, x9
	add	x4, x4, x9
	str	x4, [x0]
pie_skip_reloc:
	cmp	x2, x3
	b.lo	pie_fix_loop
pie_fixup_done:
#endif
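	/*
	 * Note on the loop above: each .rela.dyn entry is an Elf64_Rela
	 * record, i.e. a 64-bit link-time offset, a 64-bit info word whose
	 * low 32 bits hold the relocation type, and a 64-bit addend. Only
	 * R_AARCH64_RELATIVE (type 1027) entries are patched: the
	 * run-vs-link offset is added to both the target address and the
	 * addend, and the adjusted addend is stored at the adjusted address.
	 */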
#ifdef CONFIG_SYS_RESET_SCTRL
	bl	reset_sctrl
#endif
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf		/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr		/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0		/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0		/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0		/* Enable FP/SIMD */
0:

	/*
	 * Enable instruction cache (if required), stack pointer and data
	 * access alignment checks, and SError.
	 */
#ifndef CONFIG_SYS_ICACHE_OFF
	mov	x1, #CR_I
#else
	mov	x1, #0
#endif
	switch_el x2, 3f, 2f, 1f
3:	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
#ifndef CONFIG_SUPPORT_USBPLUG
	msr	daifclr, #4		/* Enable SError; SCR_EL3.EA=1 was
					   already set above */
#endif
	b	0f
2:	mrs	x0, sctlr_el2
	orr	x0, x0, x1
	msr	sctlr_el2, x0

	mrs	x0, hcr_el2
	orr	x0, x0, #HCR_EL2_TGE
	orr	x0, x0, #HCR_EL2_AMO
#if CONFIG_IS_ENABLED(IRQ)
	orr	x0, x0, #HCR_EL2_IMO
#endif
	msr	hcr_el2, x0
	msr	daifclr, #4		/* Enable SError */
	b	0f
1:	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	msr	daifclr, #4		/* Enable SError */
0:
	isb

	/*
	 * Enable SMPEN bit for coherency.
	 * This register is not architectural, but at the moment
	 * this bit should be set for A53/A57/A72.
	 */
#ifdef CONFIG_ARMV8_SET_SMPEN
	switch_el x1, 3f, 1f, 1f
3:
	mrs	x0, S3_1_c15_c2_1	/* cpuectlr_el1 */
	orr	x0, x0, #0x40		/* Set SMPEN (bit 6) */
	msr	S3_1_c15_c2_1, x0
1:
#endif

	/* Apply ARM core specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before being enabled in icache_enable()
	 * tlb is invalidated before the mmu is enabled in dcache_enable()
	 * d-cache is invalidated before being enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
	branch_if_master x0, x1, master_cpu
	b	spin_table_secondary_jump
	/* never return */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */

#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_ARM_SMP)
	mrs	x0, mpidr_el1
	and	x0, x0, #0xfff
	cmp	x0, #0
	beq	master_cpu

#ifdef SMP_CPU1
	cmp	x0, #(SMP_CPU1)
	ldr	x1, =(SMP_CPU1_STACK)
	beq	slave_cpu
#endif

#ifdef SMP_CPU2
	cmp	x0, #(SMP_CPU2)
	ldr	x1, =(SMP_CPU2_STACK)
	beq	slave_cpu
#endif

#ifdef SMP_CPU3
	cmp	x0, #(SMP_CPU3)
	ldr	x1, =(SMP_CPU3_STACK)
	beq	slave_cpu
#endif
	dsb	sy
	isb

loop:
	wfe
	b	loop

slave_cpu:
	bic	sp, x1, #0xf		/* 16-byte align the stack pointer */
	bl	smp_entry
	b	loop
#endif

master_cpu:
	bl	_main
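	/*
	 * Note: _main (arch/arm/lib/crt0_64.S) sets up the initial stack and
	 * C runtime environment and then calls board_init_f(); it does not
	 * return here.
	 */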
#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
	b	__asm_invalidate_tlb_all
	ret
#endif

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Disabling the Indirect Predictor bit prevents this erratum
	 * from occurring.
	 * Note that in some cases this could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Clear the "Enable invalidates of BTB" bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if CONFIG_IS_ENABLED(IRQ)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to clear the spin table.
	 * This sync prevents slaves from observing a stale spin-table
	 * value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	adr	x4, lowlevel_in_el2
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, lowlevel_in_el1
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
	/* Kick secondary CPUs up with an SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
	/* Relocate VBAR */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)

WEAK(save_boot_params)
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)
#endif
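/*
 * Note: save_boot_params above is only a weak stub. A board that needs
 * the arguments handed over by the previous boot stage (conventionally
 * passed in x0..x3) can provide its own save_boot_params, stash those
 * registers away, and then branch back to save_boot_params_ret.
 */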