/*
 * Copyright 2018-2020 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

.section .text, "ax"

#include <asm_macros.S>

#include <lib/psci/psci.h>
#include <nxp_timer.h>
#include <plat_gic.h>
#include <pmu.h>

#include <bl31_data.h>
#include <plat_psci.h>
#include <platform_def.h>

.global soc_init_start
.global soc_init_percpu
.global soc_init_finish
.global _set_platform_security
.global _soc_set_start_addr

.global _soc_core_release
.global _soc_ck_disabled
.global _soc_core_restart
.global _soc_core_prep_off
.global _soc_core_entr_off
.global _soc_core_exit_off
.global _soc_sys_reset
.global _soc_sys_off
.global _soc_core_prep_stdby
.global _soc_core_entr_stdby
.global _soc_core_exit_stdby
.global _soc_core_prep_pwrdn
.global _soc_core_entr_pwrdn
.global _soc_core_exit_pwrdn
.global _soc_clstr_prep_stdby
.global _soc_clstr_exit_stdby
.global _soc_clstr_prep_pwrdn
.global _soc_clstr_exit_pwrdn
.global _soc_sys_prep_stdby
.global _soc_sys_exit_stdby
.global _soc_sys_prep_pwrdn
.global _soc_sys_pwrdn_wfi
.global _soc_sys_exit_pwrdn

/* TZPC (TrustZone Protection Controller) register addresses */
.equ TZPC_BASE,			0x02200000
.equ TZPCDECPROT_0_SET_BASE,	0x02200804
.equ TZPCDECPROT_1_SET_BASE,	0x02200810
.equ TZPCDECPROT_2_SET_BASE,	0x0220081C

#define CLUSTER_3_CORES_MASK	0xC0
#define CLUSTER_3_IN_RESET	1
#define CLUSTER_3_NORMAL	0

/* cluster 3 handling no longer based on frequency, but rather on RCW[850],
 * which is bit 18 of RCWSR27
 */
#define CLUSTER_3_RCW_BIT	0x40000

/* retry count for clock-stop acks */
.equ CLOCK_RETRY_CNT, 800

/* disable prefetching in the A72 core */
#define CPUACTLR_DIS_LS_HW_PRE	0x100000000000000
#define CPUACTLR_DIS_L2_TLB_PRE	0x200000

/* Function starts the initialization tasks of the soc,
 * using secondary cores if they are available
 *
 * Called from C, saving the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 *
 * in:  none
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11
 */
func soc_init_start
	stp	x4,  x5,  [sp, #-16]!
	stp	x6,  x7,  [sp, #-16]!
	stp	x8,  x9,  [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x18, x30, [sp, #-16]!

	/* make sure the personality has been
	 * established by releasing cores that
	 * are marked "to-be-disabled" from reset
	 */
	bl	release_disabled		/* 0-9 */

	/* init the task flags */
	bl	_init_task_flags		/* 0-1 */

	/* set SCRATCHRW7 to 0x0 */
	ldr	x0, =DCFG_SCRATCHRW7_OFFSET
	mov	x1, xzr
	bl	_write_reg_dcfg

1:
	/* restore the aarch32/64 non-volatile registers */
	ldp	x18, x30, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8,  x9,  [sp], #16
	ldp	x6,  x7,  [sp], #16
	ldp	x4,  x5,  [sp], #16
	ret
endfunc soc_init_start


/* Function performs any soc-specific initialization that is needed on
 * a per-core basis.
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
func soc_init_percpu
	stp	x4, x30, [sp, #-16]!

	bl	plat_my_core_mask
	mov	x2, x0			/* x2 = core mask */

	/* Check if this core is marked for prefetch disable
	 */
	mov	x0, #PREFETCH_DIS_OFFSET
	bl	_get_global_data	/* 0-1 */
	tst	x0, x2
	b.eq	1f
	bl	_disable_ldstr_pfetch_A72	/* 0 */
1:
	mov	x0, #NXP_PMU_ADDR
	bl	enable_timer_base_to_cluster
	ldp	x4, x30, [sp], #16
	ret
endfunc soc_init_percpu


/* Function completes the initialization tasks of the soc
 * (currently a no-op placeholder)
 * in:  none
 * out: none
 * uses x0, x1, x2, x3, x4
 */
func soc_init_finish
	stp	x4, x30, [sp, #-16]!

	ldp	x4, x30, [sp], #16
	ret
endfunc soc_init_finish


/* Function sets the security mechanisms in the SoC to implement the
 * Platform Security Policy
 * uses x0, x8
 */
func _set_platform_security
	mov	x8, x30

#if (!SUPPRESS_TZC)
	/* initialize the tzpc */
	bl	init_tzpc
#endif

#if (!SUPPRESS_SEC)
	/* initialize secmon */
#ifdef NXP_SNVS_ENABLED
	mov	x0, #NXP_SNVS_ADDR
	bl	init_sec_mon
#endif
#endif

	mov	x30, x8
	ret
endfunc _set_platform_security


/* Function writes a 64-bit address to bootlocptrh/l
 * in:  x0, 64-bit address to write to BOOTLOCPTRL/H
 * uses x0, x1, x2
 */
func _soc_set_start_addr
	/* Get the 64-bit base address of the dcfg block */
	ldr	x2, =NXP_DCFG_ADDR

	/* write the 32-bit BOOTLOCPTRL register (low half of x0) */
	mov	x1, x0
	str	w1, [x2, #DCFG_BOOTLOCPTRL_OFFSET]

	/* write the 32-bit BOOTLOCPTRH register (high half of x0) */
	lsr	x1, x0, #32
	str	w1, [x2, #DCFG_BOOTLOCPTRH_OFFSET]
	ret
endfunc _soc_set_start_addr


/* Function releases a secondary core from reset
 * in:   x0 = core_mask_lsb
 * out:  none
 * uses: x0, x1, x2, x3
 */
func _soc_core_release
	mov	x3, x30

	ldr	x1, =NXP_SEC_REGFILE_ADDR
	/* write to CORE_HOLD to tell
	 * the bootrom that this core is
	 * expected to run.
	 */
	str	w0, [x1, #CORE_HOLD_OFFSET]

	/* read-modify-write BRRL to release core */
	mov	x1, #NXP_RESET_ADDR
	ldr	w2, [x1, #BRR_OFFSET]

	/* x0 = core mask */
	orr	w2, w2, w0
	str	w2, [x1, #BRR_OFFSET]
	dsb	sy
	isb

	/* send event to wake the core out of any pending wfe */
	sev
	isb

	mov	x30, x3
	ret
endfunc _soc_core_release


/* Function determines if a core is disabled via COREDISABLEDSR
 * in:  w0  = core_mask_lsb
 * out: w0  = 0, core not disabled
 *      w0 != 0, core disabled
 * uses x0, x1
 */
func _soc_ck_disabled

	/* get base addr of dcfg block */
	ldr	x1, =NXP_DCFG_ADDR

	/* read COREDISABLEDSR */
	ldr	w1, [x1, #DCFG_COREDISABLEDSR_OFFSET]

	/* test core bit */
	and	w0, w1, w0

	ret
endfunc _soc_ck_disabled


/* Part of CPU_ON
 * Function restarts a core shutdown via _soc_core_entr_off
 * (wakes the target with SGI 15, which the off-ed core polls for)
 * in:  x0 = core mask lsb (of the target cpu)
 * out: x0 == 0, on success
 *      x0 != 0, on failure
 * uses x0, x1, x2, x3, x4, x5, x6
 */
func _soc_core_restart
	mov	x6, x30
	mov	x4, x0

	/* pgm GICD_CTLR - enable secure grp0 */
	mov	x5, #NXP_GICD_ADDR
	ldr	w2, [x5, #GICD_CTLR_OFFSET]
	orr	w2, w2, #GICD_CTLR_EN_GRP_0
	str	w2, [x5, #GICD_CTLR_OFFSET]
	dsb	sy
	isb

	/* poll on RWP til write completes */
4:
	ldr	w2, [x5, #GICD_CTLR_OFFSET]
	tst	w2, #GICD_CTLR_RWP
	b.ne	4b

	/* x4 = core mask lsb
	 * x5 = gicd base addr
	 */
	mov	x0, x4
	bl	get_mpidr_value

	/* x0 = mpidr of target core
	 * x4 = core mask lsb of target core
	 * x5 = gicd base addr
	 */

	/* generate target list bit */
	and	x1, x0, #MPIDR_AFFINITY0_MASK
	mov	x2, #1
	lsl	x2, x2, x1

	/* get the affinity1 field */
	and	x1, x0, #MPIDR_AFFINITY1_MASK
	lsl	x1, x1, #8
	orr	x2, x2, x1

	/* insert the INTID for SGI15 */
	orr	x2, x2, #ICC_SGI0R_EL1_INTID

	/* fire the SGI */
	msr	ICC_SGI0R_EL1, x2
	dsb	sy
	isb

	/* load '0' on success */
	mov	x0, xzr

	mov	x30, x6
	ret
endfunc _soc_core_restart


/* Part of CPU_OFF
 * Function programs SoC & GIC registers in preparation for shutting down
 * the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8
 */
func _soc_core_prep_off
	mov	x8, x30
	mov	x7, x0			/* x7 = core mask lsb */

	mrs	x1, CORTEX_A72_ECTLR_EL1

	/* set smp and disable L2 snoops in cpuectlr */
	orr	x1, x1, #CPUECTLR_SMPEN_EN
	orr	x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
	bic	x1, x1, #CPUECTLR_INS_PREFETCH_MASK
	bic	x1, x1, #CPUECTLR_DAT_PREFETCH_MASK

	/* set retention control in cpuectlr */
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	orr	x1, x1, #CPUECTLR_TIMER_8TICKS
	msr	CORTEX_A72_ECTLR_EL1, x1

	/* get redistributor rd base addr for this core */
	mov	x0, x7
	bl	get_gic_rd_base
	mov	x6, x0

	/* get redistributor sgi base addr for this core */
	mov	x0, x7
	bl	get_gic_sgi_base
	mov	x5, x0

	/* x5 = gicr sgi base addr
	 * x6 = gicr rd base addr
	 * x7 = core mask lsb
	 */

	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov	w3, #GICR_ICENABLER0_SGI15
	str	w3, [x5, #GICR_ICENABLER0_OFFSET]
2:
	/* poll on rwp bit in GICR_CTLR */
	ldr	w4, [x6, #GICR_CTLR_OFFSET]
	tst	w4, #GICR_CTLR_RWP
	b.ne	2b

	/* disable GRP1 interrupts at cpu interface */
	msr	ICC_IGRPEN1_EL3, xzr

	/* disable GRP0 ints at cpu interface */
	msr	ICC_IGRPEN0_EL1, xzr

	/* program the redistributor - poll on GICR_CTLR.RWP as needed */

	/* define SGI 15 as Grp0 - GICR_IGROUPR0 */
	ldr	w4, [x5, #GICR_IGROUPR0_OFFSET]
	bic	w4, w4, #GICR_IGROUPR0_SGI15
	str	w4, [x5, #GICR_IGROUPR0_OFFSET]

	/* define SGI 15 as Grp0 - GICR_IGRPMODR0 */
	ldr	w3, [x5, #GICR_IGRPMODR0_OFFSET]
	bic	w3, w3, #GICR_IGRPMODR0_SGI15
	str	w3, [x5, #GICR_IGRPMODR0_OFFSET]

	/* set priority of SGI 15 to highest (0x0) - GICR_IPRIORITYR3 */
	ldr	w4, [x5, #GICR_IPRIORITYR3_OFFSET]
	bic	w4, w4, #GICR_IPRIORITYR3_SGI15_MASK
	str	w4, [x5, #GICR_IPRIORITYR3_OFFSET]

	/* enable SGI 15 at redistributor - GICR_ISENABLER0 */
	mov	w3, #GICR_ISENABLER0_SGI15
	str	w3, [x5, #GICR_ISENABLER0_OFFSET]
	dsb	sy
	isb
3:
	/* poll on rwp bit in GICR_CTLR */
	ldr	w4, [x6, #GICR_CTLR_OFFSET]
	tst	w4, #GICR_CTLR_RWP
	b.ne	3b

	/* quiesce the debug interfaces */
	mrs	x3, osdlr_el1
	orr	x3, x3, #OSDLR_EL1_DLK_LOCK
	msr	osdlr_el1, x3
	isb

	/* enable grp0 ints */
	mov	x3, #ICC_IGRPEN0_EL1_EN
	msr	ICC_IGRPEN0_EL1, x3

	/* x5 = gicr sgi base addr
	 * x6 = gicr rd base addr
	 * x7 = core mask lsb
	 */

	/* clear any pending interrupts */
	mvn	w1, wzr
	str	w1, [x5, #GICR_ICPENDR0_OFFSET]

	/* make sure system counter is enabled */
	ldr	x3, =NXP_TIMER_ADDR
	ldr	w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
	tst	w0, #SYS_COUNTER_CNTCR_EN
	b.ne	4f
	orr	w0, w0, #SYS_COUNTER_CNTCR_EN
	str	w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
4:
	/* enable the core timer and mask timer interrupt */
	mov	x1, #CNTP_CTL_EL0_EN
	orr	x1, x1, #CNTP_CTL_EL0_IMASK
	msr	cntp_ctl_el0, x1

	isb
	mov	x30, x8
	ret
endfunc _soc_core_prep_off


/* Part of CPU_OFF:
 * Function performs the final steps to shutdown the core
 * (wfi loop, exited only when SGI 15 marks the core for wakeup)
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5
 */
func _soc_core_entr_off
	mov	x5, x30
	mov	x4, x0

1:
	/* enter low-power state by executing wfi */
	wfi

	/* see if SGI15 woke us up */
	mrs	x2, ICC_IAR0_EL1
	mov	x3, #ICC_IAR0_EL1_SGI15
	cmp	x2, x3
	b.ne	2f

	/* deactivate the interrupt */
	msr	ICC_EOIR0_EL1, x2

2:
	/* check if core is turned ON */
	mov	x0, x4
	/* Fetched the core state in x0 */
	bl	_getCoreState

	cmp	x0, #CORE_WAKEUP
	b.ne	1b

	/* Reached here, exited the wfi */

	mov	x30, x5
	ret
endfunc _soc_core_entr_off


/* Part of CPU_OFF:
 * Function starts the process of starting a core back up
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6
 */
func _soc_core_exit_off
	mov	x6, x30
	mov	x5, x0

	/* disable forwarding of GRP0 ints at cpu interface */
	msr	ICC_IGRPEN0_EL1, xzr

	/* get redistributor sgi base addr for this core */
	mov	x0, x5
	bl	get_gic_sgi_base
	mov	x4, x0

	/* x4 = gicr sgi base addr
	 * x5 = core mask
	 */

	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov	w1, #GICR_ICENABLER0_SGI15
	str	w1, [x4, #GICR_ICENABLER0_OFFSET]

	/* get redistributor rd base addr for this core */
	mov	x0, x5
	bl	get_gic_rd_base
	mov	x4, x0

2:
	/* poll on rwp bit in GICR_CTLR */
	ldr	w2, [x4, #GICR_CTLR_OFFSET]
	tst	w2, #GICR_CTLR_RWP
	b.ne	2b

	/* unlock the debug interfaces */
	mrs	x3, osdlr_el1
	bic	x3, x3, #OSDLR_EL1_DLK_LOCK
	msr	osdlr_el1, x3
	isb

	dsb	sy
	isb
	mov	x30, x6
	ret
endfunc _soc_core_exit_off


/* Function requests a reset of the entire SOC
 * in:   none
 * out:  none
 * uses: x0, x1, x2, x3, x4, x5, x6
 */
func _soc_sys_reset
	mov	x6, x30

	ldr	x2, =NXP_RST_ADDR
	/* clear the RST_REQ_MSK and SW_RST_REQ */
	mov	w0, #0x00000000
	str	w0, [x2, #RSTCNTL_OFFSET]

	/* initiate the sw reset request */
	mov	w0, #SW_RST_REQ_INIT
	str	w0, [x2, #RSTCNTL_OFFSET]

	/* In case this address range is mapped as cacheable,
	 * flush the write out of the dcaches.
	 */
	add	x2, x2, #RSTCNTL_OFFSET
	dc	cvac, x2
	dsb	st
	isb

	/* Function does not return */
	b	.
endfunc _soc_sys_reset


/* Part of SYSTEM_OFF:
 * Function turns off the SoC clocks
 * Note: Function is not intended to return, and the only allowable
 *       recovery is POR
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
func _soc_sys_off

	/* disable sec, QBman, spi and qspi */
	ldr	x2, =NXP_DCFG_ADDR
	ldr	x0, =DCFG_DEVDISR1_OFFSET
	ldr	w1, =DCFG_DEVDISR1_SEC
	str	w1, [x2, x0]
	ldr	x0, =DCFG_DEVDISR3_OFFSET
	ldr	w1, =DCFG_DEVDISR3_QBMAIN
	str	w1, [x2, x0]
	ldr	x0, =DCFG_DEVDISR4_OFFSET
	ldr	w1, =DCFG_DEVDISR4_SPI_QSPI
	str	w1, [x2, x0]

	/* set TPMWAKEMR0 */
	ldr	x0, =TPMWAKEMR0_ADDR
	mov	w1, #0x1
	str	w1, [x0]

	/* disable icache, dcache, mmu @ EL1 */
	mov	x1, #SCTLR_I_C_M_MASK
	mrs	x0, sctlr_el1
	bic	x0, x0, x1
	msr	sctlr_el1, x0

	/* disable L2 prefetches, set retention control,
	 * and set the SMPEN bit in cpuectlr
	 */
	mrs	x0, CORTEX_A72_ECTLR_EL1
	/* NOTE(fix): was 'bic x1, x1, #CPUECTLR_TIMER_MASK' — a dead
	 * operation on the stale SCTLR mask; the retention-timer field
	 * must be cleared in x0 (the ECTLR value) before setting 8TICKS,
	 * matching the sequence used in _soc_core_prep_off.
	 */
	bic	x0, x0, #CPUECTLR_TIMER_MASK
	orr	x0, x0, #CPUECTLR_SMPEN_EN
	orr	x0, x0, #CPUECTLR_TIMER_8TICKS
	msr	CORTEX_A72_ECTLR_EL1, x0
	isb

	/* disable CCN snoop domain */
	mov	x1, #NXP_CCN_HN_F_0_ADDR
	ldr	x0, =CCN_HN_F_SNP_DMN_CTL_MASK
	str	x0, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
3:
	ldr	w2, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
	cmp	w2, #0x2
	b.ne	3b

	mov	x3, #NXP_PMU_ADDR

4:
	/* wait til all cores are idle (PCPW20SR) */
	ldr	w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp	w1, #PMU_IDLE_CORE_MASK
	b.ne	4b

	mov	w1, #PMU_IDLE_CLUSTER_MASK
	str	w1, [x3, #PMU_CLAINACTSETR_OFFSET]

1:
	ldr	w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp	w1, #PMU_IDLE_CORE_MASK
	b.ne	1b

	/* request L2 flush on all clusters, then wait for completion */
	mov	w1, #PMU_FLUSH_CLUSTER_MASK
	str	w1, [x3, #PMU_CLL2FLUSHSETR_OFFSET]

2:
	ldr	w1, [x3, #PMU_CLL2FLUSHSR_OFFSET]
	cmp	w1, #PMU_FLUSH_CLUSTER_MASK
	b.ne	2b

	mov	w1, #PMU_FLUSH_CLUSTER_MASK
	str	w1, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET]

	mov	w1, #PMU_FLUSH_CLUSTER_MASK
	str	w1, [x3, #PMU_CLSINACTSETR_OFFSET]

	/* mask interrupts in the saved program state for EL1/EL2 */
	mov	x2, #DAIF_SET_MASK
	mrs	x1, spsr_el1
	orr	x1, x1, x2
	msr	spsr_el1, x1

	mrs	x1, spsr_el2
	orr	x1, x1, x2
	msr	spsr_el2, x1

	/* force the debug interface to be quiescent */
	mrs	x0, osdlr_el1
	orr	x0, x0, #0x1
	msr	osdlr_el1, x0

	/* invalidate all TLB entries at all 3 exception levels */
	tlbi	alle1
	tlbi	alle2
	tlbi	alle3

	/* x3 = pmu base addr */

	/* request lpm20 */
	ldr	x0, =PMU_POWMGTCSR_OFFSET
	ldr	w1, =PMU_POWMGTCSR_VAL
	str	w1, [x3, x0]

	/* function does not return - spin here until POR */
5:
	wfe
	b.eq	5b
endfunc _soc_sys_off


/* Part of CPU_SUSPEND
 * Function puts the calling core into standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
func _soc_core_entr_stdby

	dsb	sy
	isb
	wfi

	ret
endfunc _soc_core_entr_stdby
/* Part of CPU_SUSPEND
 * Performs SoC-specific programming prior to core standby:
 * disables dynamic retention by clearing the CPU retention timer.
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_core_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] (retention control = disabled) */
	mrs	x1, CORTEX_A72_ECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CORTEX_A72_ECTLR_EL1, x1

	ret
endfunc _soc_core_prep_stdby


/* Part of CPU_SUSPEND
 * Performs any SoC-specific cleanup after the standby state.
 * No action is required on this SoC.
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_stdby

	ret
endfunc _soc_core_exit_stdby


/* Part of CPU_SUSPEND
 * Performs SoC-specific programming prior to core power-down:
 * ensures the system counter is running, then enables dynamic
 * retention and hardware coherency (SMPEN).
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_core_prep_pwrdn

	/* make sure system counter is enabled */
	ldr	x2, =NXP_TIMER_ADDR
	ldr	w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst	w0, #SYS_COUNTER_CNTCR_EN
	b.ne	1f			/* already enabled - skip the write */
	orr	w0, w0, #SYS_COUNTER_CNTCR_EN
	str	w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:

	/* enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs	x1, CORTEX_A72_ECTLR_EL1
	bic	x1, x1, #CPUECTLR_RET_MASK
	orr	x1, x1, #CPUECTLR_TIMER_8TICKS
	orr	x1, x1, #CPUECTLR_SMPEN_EN
	msr	CORTEX_A72_ECTLR_EL1, x1

	isb
	ret
endfunc _soc_core_prep_pwrdn


/* Part of CPU_SUSPEND
 * Puts the calling core into a power-down state: drain outstanding
 * accesses, then wait-for-interrupt.
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
func _soc_core_entr_pwrdn

	/* X0 = core mask lsb */

	dsb	sy
	isb
	wfi

	ret
endfunc _soc_core_entr_pwrdn


/* Part of CPU_SUSPEND
 * Performs any SoC-specific cleanup after the power-down state.
 * No action is required on this SoC.
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_pwrdn

	ret
endfunc _soc_core_exit_pwrdn

/* Part of CPU_SUSPEND
 * Performs SoC-specific programming prior to cluster standby:
 * disables dynamic retention by clearing the CPU retention timer.
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_clstr_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] (retention control = disabled) */
	mrs	x1, CORTEX_A72_ECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CORTEX_A72_ECTLR_EL1, x1

	ret
endfunc _soc_clstr_prep_stdby


/* Part of CPU_SUSPEND
 * Performs any SoC-specific cleanup after cluster standby.
 * No action is required on this SoC.
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_stdby

	ret
endfunc _soc_clstr_exit_stdby


/* Part of CPU_SUSPEND
 * Performs SoC-specific programming prior to cluster power-down:
 * ensures the system counter is running, then enables dynamic
 * retention and hardware coherency (SMPEN).
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_clstr_prep_pwrdn

	/* make sure system counter is enabled */
	ldr	x2, =NXP_TIMER_ADDR
	ldr	w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst	w0, #SYS_COUNTER_CNTCR_EN
	b.ne	1f			/* already enabled - skip the write */
	orr	w0, w0, #SYS_COUNTER_CNTCR_EN
	str	w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:

	/* enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs	x1, CORTEX_A72_ECTLR_EL1
	bic	x1, x1, #CPUECTLR_RET_MASK
	orr	x1, x1, #CPUECTLR_TIMER_8TICKS
	orr	x1, x1, #CPUECTLR_SMPEN_EN
	msr	CORTEX_A72_ECTLR_EL1, x1

	isb
	ret
endfunc _soc_clstr_prep_pwrdn


/* Part of CPU_SUSPEND
 * Performs any SoC-specific cleanup after cluster power-down.
 * No action is required on this SoC.
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_pwrdn

	ret
endfunc _soc_clstr_exit_pwrdn


/* Part of CPU_SUSPEND
 * Performs SoC-specific programming prior to system standby:
 * disables dynamic retention by clearing the CPU retention timer.
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] (retention control = disabled) */
	mrs	x1, CORTEX_A72_ECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CORTEX_A72_ECTLR_EL1, x1
	ret
endfunc _soc_sys_prep_stdby

/* Part of CPU_SUSPEND
 * Performs any SoC-specific cleanup after system standby.
 * No action is required on this SoC.
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_sys_exit_stdby

	ret
endfunc _soc_sys_exit_stdby


/* Part of CPU_SUSPEND
 * Performs SoC-specific programming prior to
 * suspend-to-power-down: keeps SMP coherency enabled, programs
 * the retention control, and disables tablewalk prefetch.
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_pwrdn

	mrs	x1, CORTEX_A72_ECTLR_EL1
	/* make sure the smp bit is set */
	orr	x1, x1, #CPUECTLR_SMPEN_MASK
	/* set the retention control */
	orr	x1, x1, #CPUECTLR_RET_8CLK
	/* disable tablewalk prefetch */
	orr	x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
	msr	CORTEX_A72_ECTLR_EL1, x1
	isb

	ret
endfunc _soc_sys_prep_pwrdn

/* Part of CPU_SUSPEND
 * Puts the calling core, and potentially the soc, into a
 * low-power state.
 * in:  x0 = core mask lsb
 * out: x0 = 0, success
 *      x0 < 0, failure
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14,
 *      x15, x16, x17, x18, x19, x20, x21, x28
 */
func _soc_sys_pwrdn_wfi
	mov	x28, x30		/* preserve LR across the bl below */

	/* disable cluster snooping in the CCN-508 - walk every HN-F
	 * node and clear its snoop-domain control
	 */
	ldr	x1, =NXP_CCN_HN_F_0_ADDR
	ldr	x7, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
	mov	x6, #CCN_HNF_NODE_COUNT
1:
	str	x7, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
	sub	x6, x6, #1
	add	x1, x1, #CCN_HNF_OFFSET
	cbnz	x6, 1b

	/* x0 = core mask
	 * x7 = hnf sdcr
	 */

	ldr	x1, =NXP_PMU_CCSR_ADDR
	ldr	x2, =NXP_PMU_DCSR_ADDR

	/* enable the stop-request-override */
	mov	x3, #PMU_POWMGTDCR0_OFFSET
	mov	x4, #POWMGTDCR_STP_OV_EN
	str	w4, [x2, x3]

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x7 = hnf sdcr
	 */

	/* disable prefetching in the A72 core, unless it is
	 * already disabled
	 */
	mrs	x8, CORTEX_A72_CPUACTLR_EL1
	tst	x8, #CPUACTLR_DIS_LS_HW_PRE
	b.ne	2f
	dsb	sy
	isb
	/* disable data prefetch */
	orr	x16, x8, #CPUACTLR_DIS_LS_HW_PRE
	/* disable tlb prefetch */
	orr	x16, x16, #CPUACTLR_DIS_L2_TLB_PRE
	msr	CORTEX_A72_CPUACTLR_EL1, x16
	isb

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x7 = hnf sdcr
	 * x8 = cpuactlr
	 */

2:
	/* save hnf-sdcr and cpuactlr to stack */
	stp	x7, x8, [sp, #-16]!

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 */

	/* save the IPSTPCRn registers to stack */
	mov	x15, #PMU_IPSTPCR0_OFFSET
	ldr	w9, [x1, x15]
	mov	x16, #PMU_IPSTPCR1_OFFSET
	ldr	w10, [x1, x16]
	mov	x17, #PMU_IPSTPCR2_OFFSET
	ldr	w11, [x1, x17]
	mov	x18, #PMU_IPSTPCR3_OFFSET
	ldr	w12, [x1, x18]
	mov	x19, #PMU_IPSTPCR4_OFFSET
	ldr	w13, [x1, x19]
	mov	x20, #PMU_IPSTPCR5_OFFSET
	ldr	w14, [x1, x20]

	stp	x9, x10, [sp, #-16]!
	stp	x11, x12, [sp, #-16]!
	stp	x13, x14, [sp, #-16]!

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x15 = PMU_IPSTPCR0_OFFSET
	 * x16 = PMU_IPSTPCR1_OFFSET
	 * x17 = PMU_IPSTPCR2_OFFSET
	 * x18 = PMU_IPSTPCR3_OFFSET
	 * x19 = PMU_IPSTPCR4_OFFSET
	 * x20 = PMU_IPSTPCR5_OFFSET
	 */

	/* load the full clock mask for IPSTPCR0 */
	ldr	x3, =DEVDISR1_MASK
	/* get the exclusions */
	mov	x21, #PMU_IPPDEXPCR0_OFFSET
	ldr	w4, [x1, x21]
	/* apply the exclusions to the mask */
	bic	w7, w3, w4
	/* stop the clocks in IPSTPCR0 */
	str	w7, [x1, x15]

	/* use same procedure for IPSTPCR1-IPSTPCR5 */

	/* stop the clocks in IPSTPCR1 */
	ldr	x5, =DEVDISR2_MASK
	mov	x21, #PMU_IPPDEXPCR1_OFFSET
	ldr	w6, [x1, x21]
	bic	w8, w5, w6
	str	w8, [x1, x16]

	/* stop the clocks in IPSTPCR2 */
	ldr	x3, =DEVDISR3_MASK
	mov	x21, #PMU_IPPDEXPCR2_OFFSET
	ldr	w4, [x1, x21]
	bic	w9, w3, w4
	str	w9, [x1, x17]

	/* stop the clocks in IPSTPCR3 */
	ldr	x5, =DEVDISR4_MASK
	mov	x21, #PMU_IPPDEXPCR3_OFFSET
	ldr	w6, [x1, x21]
	bic	w10, w5, w6
	str	w10, [x1, x18]

	/* stop the clocks in IPSTPCR4
	 * - exclude the ddr clocks as we are currently executing
	 *   out of *some* memory, might be ddr
	 * - exclude the OCRAM clk so that we retain any code/data in
	 *   OCRAM
	 * - may need to exclude the debug clock if we are testing
	 */
	ldr	x3, =DEVDISR5_MASK
	mov	w6, #DEVDISR5_MASK_ALL_MEM
	bic	w3, w3, w6

	mov	w5, #POLICY_DEBUG_ENABLE
	cbz	w5, 3f
	mov	w6, #DEVDISR5_MASK_DBG
	bic	w3, w3, w6
3:
	mov	x21, #PMU_IPPDEXPCR4_OFFSET
	ldr	w4, [x1, x21]
	bic	w11, w3, w4
	str	w11, [x1, x19]

	/* stop the clocks in IPSTPCR5 */
	ldr	x5, =DEVDISR6_MASK
	mov	x21, #PMU_IPPDEXPCR5_OFFSET
	ldr	w6, [x1, x21]
	bic	w12, w5, w6
	str	w12, [x1, x20]

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x7 = IPSTPCR0
	 * x8 = IPSTPCR1
	 * x9 = IPSTPCR2
	 * x10 = IPSTPCR3
	 * x11 = IPSTPCR4
	 * x12 = IPSTPCR5
	 */

	/* poll until the clocks are stopped in IPSTPACKSR0 -
	 * each poll loop is bounded by CLOCK_RETRY_CNT attempts
	 */
	mov	w4, #CLOCK_RETRY_CNT
	mov	x21, #PMU_IPSTPACKSR0_OFFSET
4:
	ldr	w5, [x1, x21]
	cmp	w5, w7
	b.eq	5f
	sub	w4, w4, #1
	cbnz	w4, 4b

	/* poll until the clocks are stopped in IPSTPACKSR1 */
5:
	mov	w4, #CLOCK_RETRY_CNT
	mov	x21, #PMU_IPSTPACKSR1_OFFSET
6:
	ldr	w5, [x1, x21]
	cmp	w5, w8
	b.eq	7f
	sub	w4, w4, #1
	cbnz	w4, 6b

	/* poll until the clocks are stopped in IPSTPACKSR2 */
7:
	mov	w4, #CLOCK_RETRY_CNT
	mov	x21, #PMU_IPSTPACKSR2_OFFSET
8:
	ldr	w5, [x1, x21]
	cmp	w5, w9
	b.eq	9f
	sub	w4, w4, #1
	cbnz	w4, 8b

	/* poll until the clocks are stopped in IPSTPACKSR3 */
9:
	mov	w4, #CLOCK_RETRY_CNT
	mov	x21, #PMU_IPSTPACKSR3_OFFSET
10:
	ldr	w5, [x1, x21]
	cmp	w5, w10
	b.eq	11f
	sub	w4, w4, #1
	cbnz	w4, 10b

	/* poll until the clocks are stopped in IPSTPACKSR4 */
11:
	mov	w4, #CLOCK_RETRY_CNT
	mov	x21, #PMU_IPSTPACKSR4_OFFSET
12:
	ldr	w5, [x1, x21]
	cmp	w5, w11
	b.eq	13f
	sub	w4, w4, #1
	cbnz	w4, 12b

	/* poll until the clocks are stopped in IPSTPACKSR5 */
13:
	mov	w4, #CLOCK_RETRY_CNT
	mov	x21, #PMU_IPSTPACKSR5_OFFSET
14:
	ldr	w5, [x1, x21]
	cmp	w5, w12
	b.eq	15f
	sub	w4, w4, #1
	cbnz	w4, 14b

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x7 = IPSTPCR0
	 * x8 = IPSTPCR1
	 * x9 = IPSTPCR2
	 * x10 = IPSTPCR3
	 * x11 = IPSTPCR4
	 * x12 = IPSTPCR5
	 */

15:
	mov	x3, #NXP_DCFG_ADDR

	/* save the devdisr registers to stack */
	ldr	w13, [x3, #DCFG_DEVDISR1_OFFSET]
	ldr	w14, [x3, #DCFG_DEVDISR2_OFFSET]
	ldr	w15, [x3, #DCFG_DEVDISR3_OFFSET]
	ldr	w16, [x3, #DCFG_DEVDISR4_OFFSET]
	ldr	w17, [x3, #DCFG_DEVDISR5_OFFSET]
	ldr	w18, [x3, #DCFG_DEVDISR6_OFFSET]

	stp	x13, x14, [sp, #-16]!
	stp	x15, x16, [sp, #-16]!
	stp	x17, x18, [sp, #-16]!

	/* power down the IP in DEVDISR1 - corresponds to IPSTPCR0 */
	str	w7, [x3, #DCFG_DEVDISR1_OFFSET]

	/* power down the IP in DEVDISR2 - corresponds to IPSTPCR1 */
	str	w8, [x3, #DCFG_DEVDISR2_OFFSET]

	/* power down the IP in DEVDISR3 - corresponds to IPSTPCR2 */
	str	w9, [x3, #DCFG_DEVDISR3_OFFSET]

	/* power down the IP in DEVDISR4 - corresponds to IPSTPCR3 */
	str	w10, [x3, #DCFG_DEVDISR4_OFFSET]

	/* power down the IP in DEVDISR5 - corresponds to IPSTPCR4 */
	str	w11, [x3, #DCFG_DEVDISR5_OFFSET]

	/* power down the IP in DEVDISR6 - corresponds to IPSTPCR5 */
	str	w12, [x3, #DCFG_DEVDISR6_OFFSET]

	/* setup register values for the cache-only sequence */
	mov	x4, #NXP_DDR_ADDR
	mov	x5, #NXP_DDR2_ADDR
	mov	x6, x11
	mov	x7, x17
	ldr	x12, =PMU_CLAINACTSETR_OFFSET
	ldr	x13, =PMU_CLSINACTSETR_OFFSET
	ldr	x14, =PMU_CLAINACTCLRR_OFFSET
	ldr	x15, =PMU_CLSINACTCLRR_OFFSET

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x3 = NXP_DCFG_ADDR
	 * x4 = NXP_DDR_ADDR
	 * x5 = NXP_DDR2_ADDR
	 * w6 = IPSTPCR4
	 * w7 = DEVDISR5
	 * x12 = PMU_CLAINACTSETR_OFFSET
	 * x13 = PMU_CLSINACTSETR_OFFSET
	 * x14 = PMU_CLAINACTCLRR_OFFSET
	 * x15 = PMU_CLSINACTCLRR_OFFSET
	 */

	mov	x8, #POLICY_DEBUG_ENABLE
	cbnz	x8, 29f
	/* force the debug interface to be quiescent */
	mrs	x9, OSDLR_EL1
	orr	x9, x9, #0x1
	msr	OSDLR_EL1, x9

	/* enter the cache-only sequence */
29:
	bl	final_pwrdown

	/* when we are here, the core has come out of wfi and the
	 * ddr is back up
	 */

	mov	x8, #POLICY_DEBUG_ENABLE
	cbnz	x8, 30f
	/* restart the debug interface */
	mrs	x9, OSDLR_EL1
	mov	x10, #1
	bic	x9, x9, x10
	msr	OSDLR_EL1, x9

	/* get saved DEVDISR regs off stack */
30:
	ldp	x17, x18, [sp], #16
	ldp	x15, x16, [sp], #16
	ldp	x13, x14, [sp], #16
	/* restore DEVDISR regs */
	str	w18, [x3, #DCFG_DEVDISR6_OFFSET]
	str	w17, [x3, #DCFG_DEVDISR5_OFFSET]
	str	w16, [x3, #DCFG_DEVDISR4_OFFSET]
	str	w15, [x3, #DCFG_DEVDISR3_OFFSET]
	str	w14, [x3, #DCFG_DEVDISR2_OFFSET]
	str	w13, [x3, #DCFG_DEVDISR1_OFFSET]
	isb

	/* get saved IPSTPCRn regs off stack */
	ldp	x13, x14, [sp], #16
	ldp	x11, x12, [sp], #16
	ldp	x9, x10, [sp], #16

	/* restore IPSTPCRn regs */
	mov	x15, #PMU_IPSTPCR5_OFFSET
	str	w14, [x1, x15]
	mov	x16, #PMU_IPSTPCR4_OFFSET
	str	w13, [x1, x16]
	mov	x17, #PMU_IPSTPCR3_OFFSET
	str	w12, [x1, x17]
	mov	x18, #PMU_IPSTPCR2_OFFSET
	str	w11, [x1, x18]
	mov	x19, #PMU_IPSTPCR1_OFFSET
	str	w10, [x1, x19]
	mov	x20, #PMU_IPSTPCR0_OFFSET
	str	w9, [x1, x20]
	isb

	/* poll on IPSTPACKCRn regs til IP clocks are restarted */
	mov	w4, #CLOCK_RETRY_CNT
	mov	x15, #PMU_IPSTPACKSR5_OFFSET
16:
	ldr	w5, [x1, x15]
	and	w5, w5, w14
	cbz	w5, 17f
	sub	w4, w4, #1
	cbnz	w4, 16b

17:
	mov	w4, #CLOCK_RETRY_CNT
	mov	x15, #PMU_IPSTPACKSR4_OFFSET
18:
	ldr	w5, [x1, x15]
	and	w5, w5, w13
	cbz	w5, 19f
	sub	w4, w4, #1
	cbnz	w4, 18b

19:
	mov	w4, #CLOCK_RETRY_CNT
	mov	x15, #PMU_IPSTPACKSR3_OFFSET
20:
	ldr	w5, [x1, x15]
	and	w5, w5, w12
	cbz	w5, 21f
	sub	w4, w4, #1
	cbnz	w4, 20b

21:
	mov	w4, #CLOCK_RETRY_CNT
	mov	x15, #PMU_IPSTPACKSR2_OFFSET
22:
	ldr	w5, [x1, x15]
	and	w5, w5, w11
	cbz	w5, 23f
	sub	w4, w4, #1
	cbnz	w4, 22b

23:
	mov	w4, #CLOCK_RETRY_CNT
	mov	x15, #PMU_IPSTPACKSR1_OFFSET
24:
	ldr	w5, [x1, x15]
	and	w5, w5, w10
	cbz	w5, 25f
	sub	w4, w4, #1
	cbnz	w4, 24b

25:
	mov	w4, #CLOCK_RETRY_CNT
	mov	x15, #PMU_IPSTPACKSR0_OFFSET
26:
	ldr	w5, [x1, x15]
	and	w5, w5, w9
	cbz	w5, 27f
	sub	w4, w4, #1
	cbnz	w4, 26b

27:
	/* disable the stop-request-override
	 * NOTE(review): this stores POWMGTDCR_STP_OV_EN - the same
	 * value used above to enable the override; presumably this
	 * write clears it via the register's write semantics -
	 * confirm against the PMU POWMGTDCR0 reference
	 */
	mov	x8, #PMU_POWMGTDCR0_OFFSET
	mov	w9, #POWMGTDCR_STP_OV_EN
	str	w9, [x2, x8]
	isb

	/* get hnf-sdcr and cpuactlr off stack */
	ldp	x7, x8, [sp], #16

	/* restore cpuactlr */
	msr	CORTEX_A72_CPUACTLR_EL1, x8
	isb

	/* restore snooping in the hnf nodes */
	ldr	x9, =NXP_CCN_HN_F_0_ADDR
	mov	x6, #CCN_HNF_NODE_COUNT
28:
	str	x7, [x9, #CCN_HN_F_SNP_DMN_CTL_SET_OFFSET]
	sub	x6, x6, #1
	add	x9, x9, #CCN_HNF_OFFSET
	cbnz	x6, 28b
	isb

	mov	x30, x28
	ret
endfunc _soc_sys_pwrdn_wfi


/* Part of CPU_SUSPEND
 * Performs any SoC-specific cleanup after power-down: restores
 * SMP coherency, clears retention control, re-enables tablewalk
 * prefetch.
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_sys_exit_pwrdn

	mrs	x1, CORTEX_A72_ECTLR_EL1
	/* make sure the smp bit is set */
	orr	x1, x1, #CPUECTLR_SMPEN_MASK
	/* clr the retention control */
	mov	x2, #CPUECTLR_RET_8CLK
	bic	x1, x1, x2
	/* enable tablewalk prefetch */
	mov	x2, #CPUECTLR_DISABLE_TWALK_PREFETCH
	bic	x1, x1, x2
	msr	CORTEX_A72_ECTLR_EL1, x1
	isb

	ret
endfunc _soc_sys_exit_pwrdn


/* Function will pwrdown ddr and the final core - it will do this
 * by loading itself into the icache and then executing from there
 * The code is split into "start_line_n"/"touch_line_n" sections:
 * the first pass (x0 == 0) falls through the touch_line labels to
 * pull every section into the icache; the second pass (x0 == 1)
 * executes the sections for real, so no instruction fetch from
 * ddr is needed while ddr is down. Do not alter instruction
 * placement here - code layout is load-bearing.
 * in:
 *    x0  = core mask
 *    x1  = NXP_PMU_CCSR_ADDR
 *    x2  = NXP_PMU_DCSR_ADDR
 *    x3  = NXP_DCFG_ADDR
 *    x4  = NXP_DDR_ADDR
 *    x5  = NXP_DDR2_ADDR
 *    w6  = IPSTPCR4
 *    w7  = DEVDISR5
 *    x12 = PMU_CLAINACTSETR_OFFSET
 *    x13 = PMU_CLSINACTSETR_OFFSET
 *    x14 = PMU_CLAINACTCLRR_OFFSET
 *    x15 = PMU_CLSINACTCLRR_OFFSET
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x13, x14, x15, x16,
 *      x17, x18
 */

/* 4Kb aligned */
.align 12
func final_pwrdown

	mov	x0, xzr
	b	touch_line_0
start_line_0:
	mov	x0, #1
	/* put ddr controller 1 into self-refresh */
	ldr	w8, [x4, #DDR_CFG_2_OFFSET]
	orr	w8, w8, #CFG_2_FORCE_REFRESH
	str	w8, [x4, #DDR_CFG_2_OFFSET]

	/* put ddr controller 2 into self-refresh */
	ldr	w8, [x5, #DDR_CFG_2_OFFSET]
	orr	w8, w8, #CFG_2_FORCE_REFRESH
	str	w8, [x5, #DDR_CFG_2_OFFSET]

	/* stop the clocks in both ddr controllers */
	mov	w10, #DEVDISR5_MASK_DDR
	mov	x16, #PMU_IPSTPCR4_OFFSET
	orr	w9, w6, w10
	str	w9, [x1, x16]
	isb

	mov	x17, #PMU_IPSTPACKSR4_OFFSET
touch_line_0:
	cbz	x0, touch_line_1

start_line_1:
	/* poll IPSTPACKSR4 until
	 * ddr controller clocks are stopped.
	 */
1:
	ldr	w8, [x1, x17]
	and	w8, w8, w10
	cmp	w8, w10
	b.ne	1b

	/* shut down power to the ddr controllers */
	orr	w9, w7, #DEVDISR5_MASK_DDR
	str	w9, [x3, #DCFG_DEVDISR5_OFFSET]

	/* disable cluster acp ports */
	mov	w8, #CLAINACT_DISABLE_ACP
	str	w8, [x1, x12]

	/* disable skyros ports */
	mov	w9, #CLSINACT_DISABLE_SKY
	str	w9, [x1, x13]
	isb

touch_line_1:
	cbz	x0, touch_line_2

start_line_2:
	isb
3:
	wfi

	/* if we are here then we are awake
	 * - bring this device back up
	 */

	/* enable skyros ports */
	mov	w9, #CLSINACT_DISABLE_SKY
	str	w9, [x1, x15]

	/* enable acp ports */
	mov	w8, #CLAINACT_DISABLE_ACP
	str	w8, [x1, x14]
	isb

	/* bring up the ddr controllers */
	str	w7, [x3, #DCFG_DEVDISR5_OFFSET]
	isb
	str	w6, [x1, x16]
	isb

	nop
touch_line_2:
	cbz	x0, touch_line_3

start_line_3:
	/* poll IPSTPACKSR4 until
	 * ddr controller clocks are running
	 */
	mov	w10, #DEVDISR5_MASK_DDR
2:
	ldr	w8, [x1, x17]
	and	w8, w8, w10
	cbnz	w8, 2b

	/* take ddr controller 2 out of self-refresh */
	mov	w8, #CFG_2_FORCE_REFRESH
	ldr	w9, [x5, #DDR_CFG_2_OFFSET]
	bic	w9, w9, w8
	str	w9, [x5, #DDR_CFG_2_OFFSET]

	/* take ddr controller 1 out of self-refresh */
	ldr	w9, [x4, #DDR_CFG_2_OFFSET]
	bic	w9, w9, w8
	str	w9, [x4, #DDR_CFG_2_OFFSET]
	isb

	nop
	nop
	nop
touch_line_3:
	cbz	x0, start_line_0

	/* execute here after ddr is back up */

	ret
endfunc final_pwrdown

/* Function returns CLUSTER_3_NORMAL if the cores of cluster 3 are
 * to be handled normally, and it returns CLUSTER_3_IN_RESET if the cores
 * are to be held in reset
 * in:  none
 * out: x0 = #CLUSTER_3_NORMAL, cluster 3 treated normal
 *      x0 = #CLUSTER_3_IN_RESET, cluster 3 cores held in reset
 * uses x0, x1, x2
 */
func cluster3InReset

	/* default return is treat cores normal */
	mov	x0, #CLUSTER_3_NORMAL

	/* read RCW_SR27 register */
	mov	x1, #NXP_DCFG_ADDR
	ldr	w2, [x1, #RCW_SR27_OFFSET]

	/* test the cluster 3 bit (RCW[850], bit 18 of RCWSR27) */
	tst	w2, #CLUSTER_3_RCW_BIT
	b.eq	1f

	/* if we are here, then the bit was set */
	mov	x0, #CLUSTER_3_IN_RESET
1:
	ret
endfunc cluster3InReset


/* Function checks to see if cores which are to be disabled have been
 * released from reset - if not, it releases them
 * Note: there may be special handling of cluster 3 cores depending upon the
 * sys clk frequency
 * in: none
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */
func release_disabled
	/* save the link register - this function makes bl calls */
	mov	x9, x30

	/* check if we need to keep cluster 3 cores in reset
	 * (clobbers x0-x2)
	 */
	bl	cluster3InReset
	mov	x8, x0

	/* x8 = cluster 3 handling (CLUSTER_3_NORMAL/CLUSTER_3_IN_RESET) */

	/* read COREDISABLESR */
	mov	x0, #NXP_DCFG_ADDR
	ldr	w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	cmp	x8, #CLUSTER_3_IN_RESET
	b.ne	4f

	/* the cluster 3 cores are to be held in reset, so remove
	 * them from the disable mask
	 */
	bic	x4, x4, #CLUSTER_3_CORES_MASK
4:
	/* get the number of cpus on this device */
	mov	x6, #PLATFORM_CORE_COUNT

	mov	x0, #NXP_RESET_ADDR
	ldr	w5, [x0, #BRR_OFFSET]

	/* load the core mask for the first core */
	mov	x7, #1

	/* x4 = COREDISABLESR
	 * x5 = BRR (boot release register - set bit means released)
	 * x6 = loop count
	 * x7 = core mask bit
	 */
2:
	/* check if the core is to be disabled */
	tst	x4, x7
	b.eq	1f

	/* see if disabled cores have already been released from reset */
	tst	x5, x7
	b.ne	5f

	/* if core has not been released, then release it
	 * (clobbers x0-x3)
	 */
	mov	x0, x7
	bl	_soc_core_release

	/* record the core state in the data area (clobbers x0-x3) */
	mov	x0, x7
	mov	x1, #CORE_STATE_DATA
	mov	x2, #CORE_DISABLED
	bl	_setCoreData

1:
	/* see if this is a cluster 3 core */
	mov	x3, #CLUSTER_3_CORES_MASK
	tst	x3, x7
	b.eq	5f

	/* this is a cluster 3 core - see if it needs to be held in reset */
	cmp	x8, #CLUSTER_3_IN_RESET
	b.ne	5f

	/* record the core state as disabled in the data area
	 * (clobbers x0-x3)
	 */
	mov	x0, x7
	mov	x1, #CORE_STATE_DATA
	mov	x2, #CORE_DISABLED
	bl	_setCoreData

5:
	/* decrement the counter */
	subs	x6, x6, #1
	b.le	3f

	/* shift the core mask to the next core */
	lsl	x7, x7, #1
	/* continue */
	b	2b
3:
	cmp	x8, #CLUSTER_3_IN_RESET
	b.ne	6f

	/* we need to hold the cluster 3 cores in reset,
	 * so mark them in the COREDISR and COREDISABLEDSR registers as
	 * "disabled", and the rest of the sw stack will leave them alone
	 * thinking that they have been disabled
	 */
	mov	x0, #NXP_DCFG_ADDR
	ldr	w1, [x0, #DCFG_COREDISR_OFFSET]
	orr	w1, w1, #CLUSTER_3_CORES_MASK
	str	w1, [x0, #DCFG_COREDISR_OFFSET]

	ldr	w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	orr	w2, w2, #CLUSTER_3_CORES_MASK
	str	w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	dsb	sy
	isb

#if (PSCI_TEST)
	/* x0 = NXP_DCFG_ADDR : read COREDISABLESR */
	ldr	w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	/* read COREDISR */
	ldr	w3, [x0, #DCFG_COREDISR_OFFSET]
#endif

6:
	mov	x30, x9
	ret

endfunc release_disabled


/* Function sets up the TrustZone Protection Controller (TZPC)
 * (note: TZPC, not the TZASC - this programs the TZPCDECPROT
 * registers defined at the top of this file)
 * in: none
 * out: none
 * uses x0, x1
 */
func init_tzpc

	/* set Non Secure access for all devices protected via TZPC */

	/* decode Protection-0 Set Reg */
	ldr	x1, =TZPCDECPROT_0_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/* decode Protection-1 Set Reg */
	ldr	x1, =TZPCDECPROT_1_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/* decode Protection-2 Set Reg */
	ldr	x1, =TZPCDECPROT_2_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/* entire SRAM as NS */
	/* secure RAM region size Reg */
	ldr	x1, =TZPC_BASE
	/* 0x00000000 = no secure region */
	mov	w0, #0x00000000
	str	w0, [x1]

	ret
endfunc init_tzpc

/* write a register in the DCFG block
 * in: x0 = offset
 * in: w1 = value to write
 * uses x0, x1, x2
 */
func _write_reg_dcfg
	ldr	x2, =NXP_DCFG_ADDR
	str	w1, [x2, x0]
	ret
endfunc _write_reg_dcfg


/* read a register in the DCFG block
 * in: x0 = offset
 * out: w0 = value read
 * uses x0, x1, x2
 */
func _read_reg_dcfg
	ldr	x2, =NXP_DCFG_ADDR
	ldr	w1, [x2, x0]
	mov	w0, w1
	ret
endfunc _read_reg_dcfg


/* Function returns an mpidr value for a core, given a core_mask_lsb
 * (assumes two cores per cluster: bit 0 of the SoC core number is
 * affinity0, the remaining bits form the cluster number in affinity1)
 * in: x0 = core mask lsb
 * out: x0 = affinity2:affinity1:affinity0, where affinity is 8-bits
 * uses x0, x1
 */
func get_mpidr_value

	/* convert a core mask to an SoC core number
	 * (bit index of the set bit: 31 - clz)
	 */
	clz	w0, w0
	mov	w1, #31
	sub	w0, w1, w0

	/* get the mpidr core number from the SoC core number */
	mov	w1, wzr
	tst	x0, #1
	b.eq	1f
	orr	w1, w1, #1

1:
	/* extract the cluster number into affinity1 */
	lsr	w0, w0, #1
	orr	w0, w1, w0, lsl #8

	ret
endfunc get_mpidr_value


/* Function returns the redistributor base address for the core specified
 * by the core mask in x0
 * in: x0 - core mask lsb of specified core
 * out: x0 = redistributor rd base address for specified core
 * uses x0, x1, x2
 */
func get_gic_rd_base
	/* x2 = SoC core number = 32 - clz(mask) - 1 */
	clz	w1, w0
	mov	w2, #0x20
	sub	w2, w2, w1
	sub	w2, w2, #1

	ldr	x0, =NXP_GICR_ADDR
	mov	x1, #GIC_RD_OFFSET

	/* x2 = core number
	 * loop counter
	 * step the base address forward by GIC_RD_OFFSET per core
	 */
2:
	cbz	x2, 1f
	add	x0, x0, x1
	sub	x2, x2, #1
	b	2b
1:
	ret
endfunc get_gic_rd_base


/* Function returns the redistributor sgi base address for the core
 * specified by the core mask in x0
 * in: x0 - core mask lsb of specified core
 * out: x0 = redistributor sgi base address for specified core
 * uses x0, x1, x2
 */
func get_gic_sgi_base
	/* x2 = SoC core number = 32 - clz(mask) - 1 */
	clz	w1, w0
	mov	w2, #0x20
	sub	w2, w2, w1
	sub	w2, w2, #1

	ldr	x0, =NXP_GICR_SGI_ADDR
	mov	x1, #GIC_SGI_OFFSET

	/* loop counter: step the base forward by GIC_SGI_OFFSET per core */
2:
	cbz	x2, 1f			/* x2 = core number */
	add	x0, x0, x1
	sub	x2, x2, #1
	b	2b
1:
	ret
endfunc get_gic_sgi_base

/* Function writes a register in the RESET block
 * in: x0 = offset
 * in: w1 = value to write
 * uses x0, x1, x2
 */
func _write_reg_reset
	ldr	x2, =NXP_RESET_ADDR
	str	w1, [x2, x0]
	ret
endfunc _write_reg_reset


/* Function reads a register in the RESET block
 * in: x0 = offset
 * out: w0 = value read
 * uses x0, x1
 */
func _read_reg_reset
	ldr	x1, =NXP_RESET_ADDR
	ldr	w0, [x1, x0]
	ret
endfunc _read_reg_reset