/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 * Entry conditions: may be running at EL3, EL2 or EL1; little-endian;
 * MMU, I-cache and D-cache disabled.  The switch_el / branch_if_*
 * macros come from <asm/macro.h>.
 *
 *************************************************************************/

.globl	_start
_start:
	b	reset

	.align 3			/* keep the .quad words 8-byte aligned */

/* Link-time base address of the image (used for relocation math). */
.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 * All three are byte offsets relative to _start, so they stay valid
 * after the image has been relocated.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

/*
 * reset - main entry point from the reset vector.
 *
 * Installs the exception vector table for the current EL, enables
 * FP/SIMD access, applies core errata, runs lowlevel_init, parks
 * slave CPUs (multi-entry builds) and finally calls _main.
 * Clobbers: x0, x1 (plus whatever the called routines clobber).
 */
reset:
#ifdef CONFIG_SYS_RESET_SCTRL
	bl reset_sctrl
#endif
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 */
	adr	x0, vectors		/* x0 = runtime address of vector table */
	switch_el x1, 3f, 2f, 1f	/* dispatch on current exception level */
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20			/* CPACR_EL1.FPEN = 0b11 */
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
0:

	/* Apply ARM core specific erratas */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before enabled in icache_enable()
	 * tlb is invalidated before mmu is enabled in dcache_enable()
	 * d-cache is invalidated before enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs: spin in WFE until the master publishes a jump
	 * address in the spin table at CPU_RELEASE_ADDR.
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu		/* zero means "not released yet" */
	br	x0			/* branch to the given address */
master_cpu:
	/* On the master CPU */
#endif /* CONFIG_ARMV8_MULTIENTRY */

	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
/*
 * reset_sctrl - sanitize SCTLR at the current EL.
 *
 * Reads SCTLR_ELx, clears bits via mask 0xfdfffffa (this visibly
 * clears SCTLR.C (bit 2), SCTLR.A (bit 0) and bit 25 (EE); bit 1
 * of the mask is set, so SCTLR bit 1 is preserved), writes it back,
 * then invalidates all TLBs.  Clobbers: x0, x1.
 */
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
	/*
	 * Tail-call: __asm_invalidate_tlb_all's own ret returns to
	 * reset_sctrl's caller (lr is still the original link register).
	 */
	b	__asm_invalidate_tlb_all
	ret	/* NOTE(review): unreachable after the unconditional b above */
#endif

/*-----------------------------------------------------------------------*/

/*
 * apply_core_errata - apply CPU-specific errata workarounds (weak).
 *
 * Currently only Cortex-A57 errata are handled; on other cores this
 * returns immediately.  The workarounds program the IMPLEMENTATION
 * DEFINED CPUACTLR_EL1 (S3_1_c15_c2_0) register.
 * Clobbers: x0, x29 (used to preserve lr across the helper).
 */
WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	   could impact performance. */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable Indirect Predictor bit will prevent this erratum
	   from occurring
	 * Note that in some cases where a flush is unnecessary this
	   could impact performance. */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable Enable Invalidates of BTB bit */
	/*
	 * NOTE(review): this AND keeps only bits [3:1] and zeroes every
	 * other CPUACTLR_EL1 bit, which is much broader than clearing a
	 * single "Enable Invalidates of BTB" bit as the comment says —
	 * verify the intended mask against the Cortex-A57 TRM.
	 */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b			/* back to the common return path */
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

/*
 * lowlevel_init - weak default low-level init.
 *
 * Master CPU: initialize the GIC distributor (secure) and per-CPU
 * interface.  Slave CPUs (multi-entry builds): wait for an interrupt
 * from the master, then drop to EL2 (and optionally EL1) before
 * returning.  Boards may override this weak symbol entirely.
 * Clobbers: x0, x1, x29 (lr save).
 */
WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f		/* only the master inits the distributor */
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE		/* redistributor base for this CPU */
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE		/* CPU interface base */
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slave should wait for master clearing spin table.
	 * This sync prevents slaves observing incorrect
	 * value of spin table and jumping to wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	bl	armv8_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	armv8_switch_to_el1
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

/*
 * smp_kick_all_cpus - weak default secondary-CPU wakeup.
 *
 * Sends SGI 0 to the secondary CPUs through the GIC distributor; a
 * no-op when no GIC is configured.  Clobbers: x0, x29 (lr save).
 */
WEAK(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
	mov	x29, lr			/* Save LR */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	bl	gic_kick_secondary_cpus
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

/*
 * c_runtime_cpu_setup - re-point VBAR after relocation.
 *
 * After U-Boot relocates itself, the vector table has moved; reload
 * VBAR_ELx for the current EL with the new address of 'vectors'.
 * Clobbers: x0, x1.
 */
ENTRY(c_runtime_cpu_setup)
	/* Relocate vBAR */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)