/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 * Entry state: any of EL3/EL2/EL1, little-endian, MMU and caches off.
 * Sets up the exception vector base and FP/SIMD access for the current
 * EL, runs board/SoC lowlevel init, parks slave CPUs on the spin table
 * (multientry), then falls into the generic _main.
 *
 *************************************************************************/

.globl	_start
_start:
	b	reset			/* reset vector: jump over the data words below */

	.align 3			/* 8-byte align the .quad data that follows */

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE	/* link-time base address of this image */

/*
 * These offsets are derived from symbols defined in the linker script.
 * Stored as offsets from _start so they stay valid after relocation.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start		/* offset of image end */

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start	/* offset of BSS start */

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start	/* offset of BSS end */

reset:
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 *
	 * switch_el (from asm/macro.h) branches to the numeric local
	 * label matching the current exception level; each arm installs
	 * the exception vector table and un-traps FP/SIMD for that EL.
	 */
	adr	x0, vectors		/* x0 = PC-relative address of vector table */
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0		/* EL3: install vector base */
	mrs	x0, scr_el3
	orr	x0, x0, #0xf		/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0		/* lower ELs non-secure; route IRQ/FIQ/EA to EL3 */
	msr	cptr_el3, xzr		/* Enable FP/SIMD (clear all EL3 trap bits) */
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0		/* Initialize CNTFRQ (only writable at EL3/reset) */
	b	0f
2:	msr	vbar_el2, x0		/* EL2: install vector base */
	mov	x0, #0x33ff		/* CPTR_EL2 value with the FP/SIMD trap (TFP)
					 * clear, RES1 bits set — per ARMv8 CPTR_EL2
					 * layout; confirm against the ARM ARM */
	msr	cptr_el2, x0		/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0		/* EL1: install vector base */
	mov	x0, #3 << 20		/* CPACR_EL1.FPEN = 0b11: no FP/SIMD traps
					 * at EL1/EL0 */
	msr	cpacr_el1, x0		/* Enable FP/SIMD */
0:

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before enabled in icache_enable()
	 * tlb is invalidated before mmu is enabled in dcache_enable()
	 * d-cache is invalidated before enabled in dcache_enable()
	 * (i.e. nothing to do here; invalidation is deferred to the
	 * enable paths in C code)
	 */

	/* Processor specific initialization (weak default below) */
	bl	lowlevel_init

#ifdef CONFIG_ARMV8_MULTIENTRY
	/* All CPUs enter here; only the master continues into U-Boot. */
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs: spin until the master publishes a jump address in
	 * the spin table, then branch to it. wfe pairs with the sev the
	 * master is expected to issue after writing CPU_RELEASE_ADDR.
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu		/* spurious wakeup / not released yet */
	br	x0			/* branch to the given address */
master_cpu:
	/* On the master CPU */
#endif /* CONFIG_ARMV8_MULTIENTRY */

	bl	_main			/* generic U-Boot entry; does not return */

/*-----------------------------------------------------------------------*/

/*
 * lowlevel_init — weak default processor init (board code may override).
 * Performs secure GIC setup and, for multientry, synchronizes and
 * drops slave CPUs to EL2/EL1.
 * Clobbers: x0, x1, x29 (x29 used to preserve LR across the bl calls).
 */
WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#ifndef CONFIG_ARMV8_MULTIENTRY
	/*
	 * For single-entry systems the lowlevel init is very simple.
	 */
	ldr	x0, =GICD_BASE
	bl	gic_init_secure

#else /* CONFIG_ARMV8_MULTIENTRY is set */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	/* Distributor init must run exactly once — master only */
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE		/* per-CPU redistributor init (GICv3) */
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE		/* per-CPU banked distributor + CPU i/f (GICv2) */
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

	branch_if_master x0, x1, 2f

	/*
	 * Slave should wait for master clearing spin table.
	 * This sync prevents slaves observing an incorrect
	 * value of spin table and jumping to wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE		/* GICv2 helper needs the CPU interface base */
#endif
	bl	gic_wait_for_interrupt	/* block until master kicks us (SGI) */
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 * (Slaves never reach label 2: — they continue spinning in the
	 * lower-EL entry path; presumably the switch routines return
	 * into the slave spin loop — confirm against armv8_switch_to_el2.)
	 */
	bl	armv8_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	armv8_switch_to_el1
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:	/* master (or single-entry CPU) returns to caller */
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

/*
 * smp_kick_all_cpus — weak default: wake secondaries via SGI 0.
 * Clobbers: x0, x29.
 */
WEAK(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
	mov	x29, lr			/* Save LR */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	bl	gic_kick_secondary_cpus
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

/*
 * c_runtime_cpu_setup — called after relocation: re-install the vector
 * base register for the current EL so it points at the relocated
 * vector table. Clobbers: x0, x1.
 */
ENTRY(c_runtime_cpu_setup)
	/* Relocate vBAR */
	adr	x0, vectors		/* PC-relative, so this is the relocated copy */
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)