1*87056d31SPankaj Gupta/* 2*87056d31SPankaj Gupta * Copyright 2018-2020 NXP 3*87056d31SPankaj Gupta * 4*87056d31SPankaj Gupta * SPDX-License-Identifier: BSD-3-Clause 5*87056d31SPankaj Gupta * 6*87056d31SPankaj Gupta */ 7*87056d31SPankaj Gupta 8*87056d31SPankaj Gupta.section .text, "ax" 9*87056d31SPankaj Gupta 10*87056d31SPankaj Gupta#include <asm_macros.S> 11*87056d31SPankaj Gupta 12*87056d31SPankaj Gupta#include <lib/psci/psci.h> 13*87056d31SPankaj Gupta#include <nxp_timer.h> 14*87056d31SPankaj Gupta#include <plat_gic.h> 15*87056d31SPankaj Gupta#include <pmu.h> 16*87056d31SPankaj Gupta 17*87056d31SPankaj Gupta#include <bl31_data.h> 18*87056d31SPankaj Gupta#include <plat_psci.h> 19*87056d31SPankaj Gupta#include <platform_def.h> 20*87056d31SPankaj Gupta 21*87056d31SPankaj Gupta.global soc_init_start 22*87056d31SPankaj Gupta.global soc_init_percpu 23*87056d31SPankaj Gupta.global soc_init_finish 24*87056d31SPankaj Gupta.global _set_platform_security 25*87056d31SPankaj Gupta.global _soc_set_start_addr 26*87056d31SPankaj Gupta 27*87056d31SPankaj Gupta.global _soc_core_release 28*87056d31SPankaj Gupta.global _soc_ck_disabled 29*87056d31SPankaj Gupta.global _soc_core_restart 30*87056d31SPankaj Gupta.global _soc_core_prep_off 31*87056d31SPankaj Gupta.global _soc_core_entr_off 32*87056d31SPankaj Gupta.global _soc_core_exit_off 33*87056d31SPankaj Gupta.global _soc_sys_reset 34*87056d31SPankaj Gupta.global _soc_sys_off 35*87056d31SPankaj Gupta.global _soc_core_prep_stdby 36*87056d31SPankaj Gupta.global _soc_core_entr_stdby 37*87056d31SPankaj Gupta.global _soc_core_exit_stdby 38*87056d31SPankaj Gupta.global _soc_core_prep_pwrdn 39*87056d31SPankaj Gupta.global _soc_core_entr_pwrdn 40*87056d31SPankaj Gupta.global _soc_core_exit_pwrdn 41*87056d31SPankaj Gupta.global _soc_clstr_prep_stdby 42*87056d31SPankaj Gupta.global _soc_clstr_exit_stdby 43*87056d31SPankaj Gupta.global _soc_clstr_prep_pwrdn 44*87056d31SPankaj Gupta.global _soc_clstr_exit_pwrdn 45*87056d31SPankaj Gupta.global 
_soc_sys_prep_stdby 46*87056d31SPankaj Gupta.global _soc_sys_exit_stdby 47*87056d31SPankaj Gupta.global _soc_sys_prep_pwrdn 48*87056d31SPankaj Gupta.global _soc_sys_pwrdn_wfi 49*87056d31SPankaj Gupta.global _soc_sys_exit_pwrdn 50*87056d31SPankaj Gupta 51*87056d31SPankaj Gupta.equ TZPC_BASE, 0x02200000 52*87056d31SPankaj Gupta.equ TZPCDECPROT_0_SET_BASE, 0x02200804 53*87056d31SPankaj Gupta.equ TZPCDECPROT_1_SET_BASE, 0x02200810 54*87056d31SPankaj Gupta.equ TZPCDECPROT_2_SET_BASE, 0x0220081C 55*87056d31SPankaj Gupta 56*87056d31SPankaj Gupta#define CLUSTER_3_CORES_MASK 0xC0 57*87056d31SPankaj Gupta#define CLUSTER_3_IN_RESET 1 58*87056d31SPankaj Gupta#define CLUSTER_3_NORMAL 0 59*87056d31SPankaj Gupta 60*87056d31SPankaj Gupta/* cluster 3 handling no longer based on frequency, but rather on RCW[850], 61*87056d31SPankaj Gupta * which is bit 18 of RCWSR27 62*87056d31SPankaj Gupta */ 63*87056d31SPankaj Gupta#define CLUSTER_3_RCW_BIT 0x40000 64*87056d31SPankaj Gupta 65*87056d31SPankaj Gupta/* retry count for clock-stop acks */ 66*87056d31SPankaj Gupta.equ CLOCK_RETRY_CNT, 800 67*87056d31SPankaj Gupta 68*87056d31SPankaj Gupta/* disable prefetching in the A72 core */ 69*87056d31SPankaj Gupta#define CPUACTLR_DIS_LS_HW_PRE 0x100000000000000 70*87056d31SPankaj Gupta#define CPUACTLR_DIS_L2_TLB_PRE 0x200000 71*87056d31SPankaj Gupta 72*87056d31SPankaj Gupta/* Function starts the initialization tasks of the soc, 73*87056d31SPankaj Gupta * using secondary cores if they are available 74*87056d31SPankaj Gupta * 75*87056d31SPankaj Gupta * Called from C, saving the non-volatile regs 76*87056d31SPankaj Gupta * save these as pairs of registers to maintain the 77*87056d31SPankaj Gupta * required 16-byte alignment on the stack 78*87056d31SPankaj Gupta * 79*87056d31SPankaj Gupta * in: 80*87056d31SPankaj Gupta * out: 81*87056d31SPankaj Gupta * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11 82*87056d31SPankaj Gupta */ 83*87056d31SPankaj Guptafunc soc_init_start 84*87056d31SPankaj Gupta 
stp x4, x5, [sp, #-16]! 85*87056d31SPankaj Gupta stp x6, x7, [sp, #-16]! 86*87056d31SPankaj Gupta stp x8, x9, [sp, #-16]! 87*87056d31SPankaj Gupta stp x10, x11, [sp, #-16]! 88*87056d31SPankaj Gupta stp x12, x13, [sp, #-16]! 89*87056d31SPankaj Gupta stp x18, x30, [sp, #-16]! 90*87056d31SPankaj Gupta 91*87056d31SPankaj Gupta /* make sure the personality has been 92*87056d31SPankaj Gupta * established by releasing cores that 93*87056d31SPankaj Gupta * are marked "to-be-disabled" from reset 94*87056d31SPankaj Gupta */ 95*87056d31SPankaj Gupta bl release_disabled /* 0-9 */ 96*87056d31SPankaj Gupta 97*87056d31SPankaj Gupta /* init the task flags */ 98*87056d31SPankaj Gupta bl _init_task_flags /* 0-1 */ 99*87056d31SPankaj Gupta 100*87056d31SPankaj Gupta /* set SCRATCHRW7 to 0x0 */ 101*87056d31SPankaj Gupta ldr x0, =DCFG_SCRATCHRW7_OFFSET 102*87056d31SPankaj Gupta mov x1, xzr 103*87056d31SPankaj Gupta bl _write_reg_dcfg 104*87056d31SPankaj Gupta 105*87056d31SPankaj Gupta1: 106*87056d31SPankaj Gupta /* restore the aarch32/64 non-volatile registers */ 107*87056d31SPankaj Gupta ldp x18, x30, [sp], #16 108*87056d31SPankaj Gupta ldp x12, x13, [sp], #16 109*87056d31SPankaj Gupta ldp x10, x11, [sp], #16 110*87056d31SPankaj Gupta ldp x8, x9, [sp], #16 111*87056d31SPankaj Gupta ldp x6, x7, [sp], #16 112*87056d31SPankaj Gupta ldp x4, x5, [sp], #16 113*87056d31SPankaj Gupta ret 114*87056d31SPankaj Guptaendfunc soc_init_start 115*87056d31SPankaj Gupta 116*87056d31SPankaj Gupta 117*87056d31SPankaj Gupta/* Function performs any soc-specific initialization that is needed on 118*87056d31SPankaj Gupta * a per-core basis. 119*87056d31SPankaj Gupta * in: none 120*87056d31SPankaj Gupta * out: none 121*87056d31SPankaj Gupta * uses x0, x1, x2, x3 122*87056d31SPankaj Gupta */ 123*87056d31SPankaj Guptafunc soc_init_percpu 124*87056d31SPankaj Gupta stp x4, x30, [sp, #-16]! 
125*87056d31SPankaj Gupta 126*87056d31SPankaj Gupta bl plat_my_core_mask 127*87056d31SPankaj Gupta mov x2, x0 /* x2 = core mask */ 128*87056d31SPankaj Gupta 129*87056d31SPankaj Gupta /* Check if this core is marked for prefetch disable 130*87056d31SPankaj Gupta */ 131*87056d31SPankaj Gupta mov x0, #PREFETCH_DIS_OFFSET 132*87056d31SPankaj Gupta bl _get_global_data /* 0-1 */ 133*87056d31SPankaj Gupta tst x0, x2 134*87056d31SPankaj Gupta b.eq 1f 135*87056d31SPankaj Gupta bl _disable_ldstr_pfetch_A72 /* 0 */ 136*87056d31SPankaj Gupta1: 137*87056d31SPankaj Gupta mov x0, #NXP_PMU_ADDR 138*87056d31SPankaj Gupta bl enable_timer_base_to_cluster 139*87056d31SPankaj Gupta ldp x4, x30, [sp], #16 140*87056d31SPankaj Gupta ret 141*87056d31SPankaj Guptaendfunc soc_init_percpu 142*87056d31SPankaj Gupta 143*87056d31SPankaj Gupta 144*87056d31SPankaj Gupta/* Function completes the initialization tasks of the soc 145*87056d31SPankaj Gupta * in: 146*87056d31SPankaj Gupta * out: 147*87056d31SPankaj Gupta * uses x0, x1, x2, x3, x4 148*87056d31SPankaj Gupta */ 149*87056d31SPankaj Guptafunc soc_init_finish 150*87056d31SPankaj Gupta stp x4, x30, [sp, #-16]! 
151*87056d31SPankaj Gupta 152*87056d31SPankaj Gupta ldp x4, x30, [sp], #16 153*87056d31SPankaj Gupta ret 154*87056d31SPankaj Guptaendfunc soc_init_finish 155*87056d31SPankaj Gupta 156*87056d31SPankaj Gupta 157*87056d31SPankaj Gupta/* Function sets the security mechanisms in the SoC to implement the 158*87056d31SPankaj Gupta * Platform Security Policy 159*87056d31SPankaj Gupta */ 160*87056d31SPankaj Guptafunc _set_platform_security 161*87056d31SPankaj Gupta mov x8, x30 162*87056d31SPankaj Gupta 163*87056d31SPankaj Gupta#if (!SUPPRESS_TZC) 164*87056d31SPankaj Gupta /* initialize the tzpc */ 165*87056d31SPankaj Gupta bl init_tzpc 166*87056d31SPankaj Gupta#endif 167*87056d31SPankaj Gupta 168*87056d31SPankaj Gupta#if (!SUPPRESS_SEC) 169*87056d31SPankaj Gupta /* initialize secmon */ 170*87056d31SPankaj Gupta#ifdef NXP_SNVS_ENABLED 171*87056d31SPankaj Gupta mov x0, #NXP_SNVS_ADDR 172*87056d31SPankaj Gupta bl init_sec_mon 173*87056d31SPankaj Gupta#endif 174*87056d31SPankaj Gupta#endif 175*87056d31SPankaj Gupta 176*87056d31SPankaj Gupta mov x30, x8 177*87056d31SPankaj Gupta ret 178*87056d31SPankaj Guptaendfunc _set_platform_security 179*87056d31SPankaj Gupta 180*87056d31SPankaj Gupta 181*87056d31SPankaj Gupta/* Function writes a 64-bit address to bootlocptrh/l 182*87056d31SPankaj Gupta * in: x0, 64-bit address to write to BOOTLOCPTRL/H 183*87056d31SPankaj Gupta * uses x0, x1, x2 184*87056d31SPankaj Gupta */ 185*87056d31SPankaj Guptafunc _soc_set_start_addr 186*87056d31SPankaj Gupta /* Get the 64-bit base address of the dcfg block */ 187*87056d31SPankaj Gupta ldr x2, =NXP_DCFG_ADDR 188*87056d31SPankaj Gupta 189*87056d31SPankaj Gupta /* write the 32-bit BOOTLOCPTRL register */ 190*87056d31SPankaj Gupta mov x1, x0 191*87056d31SPankaj Gupta str w1, [x2, #DCFG_BOOTLOCPTRL_OFFSET] 192*87056d31SPankaj Gupta 193*87056d31SPankaj Gupta /* write the 32-bit BOOTLOCPTRH register */ 194*87056d31SPankaj Gupta lsr x1, x0, #32 195*87056d31SPankaj Gupta str w1, [x2, #DCFG_BOOTLOCPTRH_OFFSET] 
196*87056d31SPankaj Gupta ret 197*87056d31SPankaj Guptaendfunc _soc_set_start_addr 198*87056d31SPankaj Gupta 199*87056d31SPankaj Gupta/* Function releases a secondary core from reset 200*87056d31SPankaj Gupta * in: x0 = core_mask_lsb 201*87056d31SPankaj Gupta * out: none 202*87056d31SPankaj Gupta * uses: x0, x1, x2, x3 203*87056d31SPankaj Gupta */ 204*87056d31SPankaj Guptafunc _soc_core_release 205*87056d31SPankaj Gupta mov x3, x30 206*87056d31SPankaj Gupta 207*87056d31SPankaj Gupta ldr x1, =NXP_SEC_REGFILE_ADDR 208*87056d31SPankaj Gupta /* write to CORE_HOLD to tell 209*87056d31SPankaj Gupta * the bootrom that this core is 210*87056d31SPankaj Gupta * expected to run. 211*87056d31SPankaj Gupta */ 212*87056d31SPankaj Gupta str w0, [x1, #CORE_HOLD_OFFSET] 213*87056d31SPankaj Gupta 214*87056d31SPankaj Gupta /* read-modify-write BRRL to release core */ 215*87056d31SPankaj Gupta mov x1, #NXP_RESET_ADDR 216*87056d31SPankaj Gupta ldr w2, [x1, #BRR_OFFSET] 217*87056d31SPankaj Gupta 218*87056d31SPankaj Gupta /* x0 = core mask */ 219*87056d31SPankaj Gupta orr w2, w2, w0 220*87056d31SPankaj Gupta str w2, [x1, #BRR_OFFSET] 221*87056d31SPankaj Gupta dsb sy 222*87056d31SPankaj Gupta isb 223*87056d31SPankaj Gupta 224*87056d31SPankaj Gupta /* send event */ 225*87056d31SPankaj Gupta sev 226*87056d31SPankaj Gupta isb 227*87056d31SPankaj Gupta 228*87056d31SPankaj Gupta mov x30, x3 229*87056d31SPankaj Gupta ret 230*87056d31SPankaj Guptaendfunc _soc_core_release 231*87056d31SPankaj Gupta 232*87056d31SPankaj Gupta 233*87056d31SPankaj Gupta/* Function determines if a core is disabled via COREDISABLEDSR 234*87056d31SPankaj Gupta * in: w0 = core_mask_lsb 235*87056d31SPankaj Gupta * out: w0 = 0, core not disabled 236*87056d31SPankaj Gupta * w0 != 0, core disabled 237*87056d31SPankaj Gupta * uses x0, x1 238*87056d31SPankaj Gupta */ 239*87056d31SPankaj Guptafunc _soc_ck_disabled 240*87056d31SPankaj Gupta 241*87056d31SPankaj Gupta /* get base addr of dcfg block */ 242*87056d31SPankaj Gupta ldr 
x1, =NXP_DCFG_ADDR 243*87056d31SPankaj Gupta 244*87056d31SPankaj Gupta /* read COREDISABLEDSR */ 245*87056d31SPankaj Gupta ldr w1, [x1, #DCFG_COREDISABLEDSR_OFFSET] 246*87056d31SPankaj Gupta 247*87056d31SPankaj Gupta /* test core bit */ 248*87056d31SPankaj Gupta and w0, w1, w0 249*87056d31SPankaj Gupta 250*87056d31SPankaj Gupta ret 251*87056d31SPankaj Guptaendfunc _soc_ck_disabled 252*87056d31SPankaj Gupta 253*87056d31SPankaj Gupta 254*87056d31SPankaj Gupta/* Part of CPU_ON 255*87056d31SPankaj Gupta * Function restarts a core shutdown via _soc_core_entr_off 256*87056d31SPankaj Gupta * in: x0 = core mask lsb (of the target cpu) 257*87056d31SPankaj Gupta * out: x0 == 0, on success 258*87056d31SPankaj Gupta * x0 != 0, on failure 259*87056d31SPankaj Gupta * uses x0, x1, x2, x3, x4, x5, x6 260*87056d31SPankaj Gupta */ 261*87056d31SPankaj Guptafunc _soc_core_restart 262*87056d31SPankaj Gupta mov x6, x30 263*87056d31SPankaj Gupta mov x4, x0 264*87056d31SPankaj Gupta 265*87056d31SPankaj Gupta /* pgm GICD_CTLR - enable secure grp0 */ 266*87056d31SPankaj Gupta mov x5, #NXP_GICD_ADDR 267*87056d31SPankaj Gupta ldr w2, [x5, #GICD_CTLR_OFFSET] 268*87056d31SPankaj Gupta orr w2, w2, #GICD_CTLR_EN_GRP_0 269*87056d31SPankaj Gupta str w2, [x5, #GICD_CTLR_OFFSET] 270*87056d31SPankaj Gupta dsb sy 271*87056d31SPankaj Gupta isb 272*87056d31SPankaj Gupta 273*87056d31SPankaj Gupta /* poll on RWP til write completes */ 274*87056d31SPankaj Gupta4: 275*87056d31SPankaj Gupta ldr w2, [x5, #GICD_CTLR_OFFSET] 276*87056d31SPankaj Gupta tst w2, #GICD_CTLR_RWP 277*87056d31SPankaj Gupta b.ne 4b 278*87056d31SPankaj Gupta 279*87056d31SPankaj Gupta /* x4 = core mask lsb 280*87056d31SPankaj Gupta * x5 = gicd base addr 281*87056d31SPankaj Gupta */ 282*87056d31SPankaj Gupta mov x0, x4 283*87056d31SPankaj Gupta bl get_mpidr_value 284*87056d31SPankaj Gupta 285*87056d31SPankaj Gupta /* x0 = mpidr of target core 286*87056d31SPankaj Gupta * x4 = core mask lsb of target core 287*87056d31SPankaj Gupta * x5 = gicd 
base addr 288*87056d31SPankaj Gupta */ 289*87056d31SPankaj Gupta 290*87056d31SPankaj Gupta /* generate target list bit */ 291*87056d31SPankaj Gupta and x1, x0, #MPIDR_AFFINITY0_MASK 292*87056d31SPankaj Gupta mov x2, #1 293*87056d31SPankaj Gupta lsl x2, x2, x1 294*87056d31SPankaj Gupta 295*87056d31SPankaj Gupta /* get the affinity1 field */ 296*87056d31SPankaj Gupta and x1, x0, #MPIDR_AFFINITY1_MASK 297*87056d31SPankaj Gupta lsl x1, x1, #8 298*87056d31SPankaj Gupta orr x2, x2, x1 299*87056d31SPankaj Gupta 300*87056d31SPankaj Gupta /* insert the INTID for SGI15 */ 301*87056d31SPankaj Gupta orr x2, x2, #ICC_SGI0R_EL1_INTID 302*87056d31SPankaj Gupta 303*87056d31SPankaj Gupta /* fire the SGI */ 304*87056d31SPankaj Gupta msr ICC_SGI0R_EL1, x2 305*87056d31SPankaj Gupta dsb sy 306*87056d31SPankaj Gupta isb 307*87056d31SPankaj Gupta 308*87056d31SPankaj Gupta /* load '0' on success */ 309*87056d31SPankaj Gupta mov x0, xzr 310*87056d31SPankaj Gupta 311*87056d31SPankaj Gupta mov x30, x6 312*87056d31SPankaj Gupta ret 313*87056d31SPankaj Guptaendfunc _soc_core_restart 314*87056d31SPankaj Gupta 315*87056d31SPankaj Gupta 316*87056d31SPankaj Gupta/* Part of CPU_OFF 317*87056d31SPankaj Gupta * Function programs SoC & GIC registers in preparation for shutting down 318*87056d31SPankaj Gupta * the core 319*87056d31SPankaj Gupta * in: x0 = core mask lsb 320*87056d31SPankaj Gupta * out: none 321*87056d31SPankaj Gupta * uses x0, x1, x2, x3, x4, x5, x6, x7 322*87056d31SPankaj Gupta */ 323*87056d31SPankaj Guptafunc _soc_core_prep_off 324*87056d31SPankaj Gupta mov x8, x30 325*87056d31SPankaj Gupta mov x7, x0 /* x7 = core mask lsb */ 326*87056d31SPankaj Gupta 327*87056d31SPankaj Gupta mrs x1, CORTEX_A72_ECTLR_EL1 328*87056d31SPankaj Gupta 329*87056d31SPankaj Gupta /* set smp and disable L2 snoops in cpuectlr */ 330*87056d31SPankaj Gupta orr x1, x1, #CPUECTLR_SMPEN_EN 331*87056d31SPankaj Gupta orr x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH 332*87056d31SPankaj Gupta bic x1, x1, 
#CPUECTLR_INS_PREFETCH_MASK 333*87056d31SPankaj Gupta bic x1, x1, #CPUECTLR_DAT_PREFETCH_MASK 334*87056d31SPankaj Gupta 335*87056d31SPankaj Gupta /* set retention control in cpuectlr */ 336*87056d31SPankaj Gupta bic x1, x1, #CPUECTLR_TIMER_MASK 337*87056d31SPankaj Gupta orr x1, x1, #CPUECTLR_TIMER_8TICKS 338*87056d31SPankaj Gupta msr CORTEX_A72_ECTLR_EL1, x1 339*87056d31SPankaj Gupta 340*87056d31SPankaj Gupta /* get redistributor rd base addr for this core */ 341*87056d31SPankaj Gupta mov x0, x7 342*87056d31SPankaj Gupta bl get_gic_rd_base 343*87056d31SPankaj Gupta mov x6, x0 344*87056d31SPankaj Gupta 345*87056d31SPankaj Gupta /* get redistributor sgi base addr for this core */ 346*87056d31SPankaj Gupta mov x0, x7 347*87056d31SPankaj Gupta bl get_gic_sgi_base 348*87056d31SPankaj Gupta mov x5, x0 349*87056d31SPankaj Gupta 350*87056d31SPankaj Gupta /* x5 = gicr sgi base addr 351*87056d31SPankaj Gupta * x6 = gicr rd base addr 352*87056d31SPankaj Gupta * x7 = core mask lsb 353*87056d31SPankaj Gupta */ 354*87056d31SPankaj Gupta 355*87056d31SPankaj Gupta /* disable SGI 15 at redistributor - GICR_ICENABLER0 */ 356*87056d31SPankaj Gupta mov w3, #GICR_ICENABLER0_SGI15 357*87056d31SPankaj Gupta str w3, [x5, #GICR_ICENABLER0_OFFSET] 358*87056d31SPankaj Gupta2: 359*87056d31SPankaj Gupta /* poll on rwp bit in GICR_CTLR */ 360*87056d31SPankaj Gupta ldr w4, [x6, #GICR_CTLR_OFFSET] 361*87056d31SPankaj Gupta tst w4, #GICR_CTLR_RWP 362*87056d31SPankaj Gupta b.ne 2b 363*87056d31SPankaj Gupta 364*87056d31SPankaj Gupta /* disable GRP1 interrupts at cpu interface */ 365*87056d31SPankaj Gupta msr ICC_IGRPEN1_EL3, xzr 366*87056d31SPankaj Gupta 367*87056d31SPankaj Gupta /* disable GRP0 ints at cpu interface */ 368*87056d31SPankaj Gupta msr ICC_IGRPEN0_EL1, xzr 369*87056d31SPankaj Gupta 370*87056d31SPankaj Gupta /* program the redistributor - poll on GICR_CTLR.RWP as needed */ 371*87056d31SPankaj Gupta 372*87056d31SPankaj Gupta /* define SGI 15 as Grp0 - GICR_IGROUPR0 */ 373*87056d31SPankaj 
Gupta ldr w4, [x5, #GICR_IGROUPR0_OFFSET] 374*87056d31SPankaj Gupta bic w4, w4, #GICR_IGROUPR0_SGI15 375*87056d31SPankaj Gupta str w4, [x5, #GICR_IGROUPR0_OFFSET] 376*87056d31SPankaj Gupta 377*87056d31SPankaj Gupta /* define SGI 15 as Grp0 - GICR_IGRPMODR0 */ 378*87056d31SPankaj Gupta ldr w3, [x5, #GICR_IGRPMODR0_OFFSET] 379*87056d31SPankaj Gupta bic w3, w3, #GICR_IGRPMODR0_SGI15 380*87056d31SPankaj Gupta str w3, [x5, #GICR_IGRPMODR0_OFFSET] 381*87056d31SPankaj Gupta 382*87056d31SPankaj Gupta /* set priority of SGI 15 to highest (0x0) - GICR_IPRIORITYR3 */ 383*87056d31SPankaj Gupta ldr w4, [x5, #GICR_IPRIORITYR3_OFFSET] 384*87056d31SPankaj Gupta bic w4, w4, #GICR_IPRIORITYR3_SGI15_MASK 385*87056d31SPankaj Gupta str w4, [x5, #GICR_IPRIORITYR3_OFFSET] 386*87056d31SPankaj Gupta 387*87056d31SPankaj Gupta /* enable SGI 15 at redistributor - GICR_ISENABLER0 */ 388*87056d31SPankaj Gupta mov w3, #GICR_ISENABLER0_SGI15 389*87056d31SPankaj Gupta str w3, [x5, #GICR_ISENABLER0_OFFSET] 390*87056d31SPankaj Gupta dsb sy 391*87056d31SPankaj Gupta isb 392*87056d31SPankaj Gupta3: 393*87056d31SPankaj Gupta /* poll on rwp bit in GICR_CTLR */ 394*87056d31SPankaj Gupta ldr w4, [x6, #GICR_CTLR_OFFSET] 395*87056d31SPankaj Gupta tst w4, #GICR_CTLR_RWP 396*87056d31SPankaj Gupta b.ne 3b 397*87056d31SPankaj Gupta 398*87056d31SPankaj Gupta /* quiesce the debug interfaces */ 399*87056d31SPankaj Gupta mrs x3, osdlr_el1 400*87056d31SPankaj Gupta orr x3, x3, #OSDLR_EL1_DLK_LOCK 401*87056d31SPankaj Gupta msr osdlr_el1, x3 402*87056d31SPankaj Gupta isb 403*87056d31SPankaj Gupta 404*87056d31SPankaj Gupta /* enable grp0 ints */ 405*87056d31SPankaj Gupta mov x3, #ICC_IGRPEN0_EL1_EN 406*87056d31SPankaj Gupta msr ICC_IGRPEN0_EL1, x3 407*87056d31SPankaj Gupta 408*87056d31SPankaj Gupta /* x5 = gicr sgi base addr 409*87056d31SPankaj Gupta * x6 = gicr rd base addr 410*87056d31SPankaj Gupta * x7 = core mask lsb 411*87056d31SPankaj Gupta */ 412*87056d31SPankaj Gupta 413*87056d31SPankaj Gupta /* clear any 
pending interrupts */ 414*87056d31SPankaj Gupta mvn w1, wzr 415*87056d31SPankaj Gupta str w1, [x5, #GICR_ICPENDR0_OFFSET] 416*87056d31SPankaj Gupta 417*87056d31SPankaj Gupta /* make sure system counter is enabled */ 418*87056d31SPankaj Gupta ldr x3, =NXP_TIMER_ADDR 419*87056d31SPankaj Gupta ldr w0, [x3, #SYS_COUNTER_CNTCR_OFFSET] 420*87056d31SPankaj Gupta tst w0, #SYS_COUNTER_CNTCR_EN 421*87056d31SPankaj Gupta b.ne 4f 422*87056d31SPankaj Gupta orr w0, w0, #SYS_COUNTER_CNTCR_EN 423*87056d31SPankaj Gupta str w0, [x3, #SYS_COUNTER_CNTCR_OFFSET] 424*87056d31SPankaj Gupta4: 425*87056d31SPankaj Gupta /* enable the core timer and mask timer interrupt */ 426*87056d31SPankaj Gupta mov x1, #CNTP_CTL_EL0_EN 427*87056d31SPankaj Gupta orr x1, x1, #CNTP_CTL_EL0_IMASK 428*87056d31SPankaj Gupta msr cntp_ctl_el0, x1 429*87056d31SPankaj Gupta 430*87056d31SPankaj Gupta isb 431*87056d31SPankaj Gupta mov x30, x8 432*87056d31SPankaj Gupta ret 433*87056d31SPankaj Guptaendfunc _soc_core_prep_off 434*87056d31SPankaj Gupta 435*87056d31SPankaj Gupta 436*87056d31SPankaj Gupta/* Part of CPU_OFF: 437*87056d31SPankaj Gupta * Function performs the final steps to shutdown the core 438*87056d31SPankaj Gupta * in: x0 = core mask lsb 439*87056d31SPankaj Gupta * out: none 440*87056d31SPankaj Gupta * uses x0, x1, x2, x3, x4, x5 441*87056d31SPankaj Gupta */ 442*87056d31SPankaj Guptafunc _soc_core_entr_off 443*87056d31SPankaj Gupta mov x5, x30 444*87056d31SPankaj Gupta mov x4, x0 445*87056d31SPankaj Gupta 446*87056d31SPankaj Gupta1: 447*87056d31SPankaj Gupta /* enter low-power state by executing wfi */ 448*87056d31SPankaj Gupta wfi 449*87056d31SPankaj Gupta 450*87056d31SPankaj Gupta /* see if SGI15 woke us up */ 451*87056d31SPankaj Gupta mrs x2, ICC_IAR0_EL1 452*87056d31SPankaj Gupta mov x3, #ICC_IAR0_EL1_SGI15 453*87056d31SPankaj Gupta cmp x2, x3 454*87056d31SPankaj Gupta b.ne 2f 455*87056d31SPankaj Gupta 456*87056d31SPankaj Gupta /* deactivate the intrrupts. 
*/ 457*87056d31SPankaj Gupta msr ICC_EOIR0_EL1, x2 458*87056d31SPankaj Gupta 459*87056d31SPankaj Gupta2: 460*87056d31SPankaj Gupta /* check if core is turned ON */ 461*87056d31SPankaj Gupta mov x0, x4 462*87056d31SPankaj Gupta /* Fetched the core state in x0 */ 463*87056d31SPankaj Gupta bl _getCoreState 464*87056d31SPankaj Gupta 465*87056d31SPankaj Gupta cmp x0, #CORE_WAKEUP 466*87056d31SPankaj Gupta b.ne 1b 467*87056d31SPankaj Gupta 468*87056d31SPankaj Gupta /* Reached here, exited the wfi */ 469*87056d31SPankaj Gupta 470*87056d31SPankaj Gupta mov x30, x5 471*87056d31SPankaj Gupta ret 472*87056d31SPankaj Guptaendfunc _soc_core_entr_off 473*87056d31SPankaj Gupta 474*87056d31SPankaj Gupta 475*87056d31SPankaj Gupta/* Part of CPU_OFF: 476*87056d31SPankaj Gupta * Function starts the process of starting a core back up 477*87056d31SPankaj Gupta * in: x0 = core mask lsb 478*87056d31SPankaj Gupta * out: none 479*87056d31SPankaj Gupta * uses x0, x1, x2, x3, x4, x5, x6 480*87056d31SPankaj Gupta */ 481*87056d31SPankaj Guptafunc _soc_core_exit_off 482*87056d31SPankaj Gupta mov x6, x30 483*87056d31SPankaj Gupta mov x5, x0 484*87056d31SPankaj Gupta 485*87056d31SPankaj Gupta /* disable forwarding of GRP0 ints at cpu interface */ 486*87056d31SPankaj Gupta msr ICC_IGRPEN0_EL1, xzr 487*87056d31SPankaj Gupta 488*87056d31SPankaj Gupta /* get redistributor sgi base addr for this core */ 489*87056d31SPankaj Gupta mov x0, x5 490*87056d31SPankaj Gupta bl get_gic_sgi_base 491*87056d31SPankaj Gupta mov x4, x0 492*87056d31SPankaj Gupta 493*87056d31SPankaj Gupta /* x4 = gicr sgi base addr 494*87056d31SPankaj Gupta * x5 = core mask 495*87056d31SPankaj Gupta */ 496*87056d31SPankaj Gupta 497*87056d31SPankaj Gupta /* disable SGI 15 at redistributor - GICR_ICENABLER0 */ 498*87056d31SPankaj Gupta mov w1, #GICR_ICENABLER0_SGI15 499*87056d31SPankaj Gupta str w1, [x4, #GICR_ICENABLER0_OFFSET] 500*87056d31SPankaj Gupta 501*87056d31SPankaj Gupta /* get redistributor rd base addr for this core */ 
502*87056d31SPankaj Gupta mov x0, x5 503*87056d31SPankaj Gupta bl get_gic_rd_base 504*87056d31SPankaj Gupta mov x4, x0 505*87056d31SPankaj Gupta 506*87056d31SPankaj Gupta2: 507*87056d31SPankaj Gupta /* poll on rwp bit in GICR_CTLR */ 508*87056d31SPankaj Gupta ldr w2, [x4, #GICR_CTLR_OFFSET] 509*87056d31SPankaj Gupta tst w2, #GICR_CTLR_RWP 510*87056d31SPankaj Gupta b.ne 2b 511*87056d31SPankaj Gupta 512*87056d31SPankaj Gupta /* unlock the debug interfaces */ 513*87056d31SPankaj Gupta mrs x3, osdlr_el1 514*87056d31SPankaj Gupta bic x3, x3, #OSDLR_EL1_DLK_LOCK 515*87056d31SPankaj Gupta msr osdlr_el1, x3 516*87056d31SPankaj Gupta isb 517*87056d31SPankaj Gupta 518*87056d31SPankaj Gupta dsb sy 519*87056d31SPankaj Gupta isb 520*87056d31SPankaj Gupta mov x30, x6 521*87056d31SPankaj Gupta ret 522*87056d31SPankaj Guptaendfunc _soc_core_exit_off 523*87056d31SPankaj Gupta 524*87056d31SPankaj Gupta 525*87056d31SPankaj Gupta/* Function requests a reset of the entire SOC 526*87056d31SPankaj Gupta * in: none 527*87056d31SPankaj Gupta * out: none 528*87056d31SPankaj Gupta * uses: x0, x1, x2, x3, x4, x5, x6 529*87056d31SPankaj Gupta */ 530*87056d31SPankaj Guptafunc _soc_sys_reset 531*87056d31SPankaj Gupta mov x6, x30 532*87056d31SPankaj Gupta 533*87056d31SPankaj Gupta ldr x2, =NXP_RST_ADDR 534*87056d31SPankaj Gupta /* clear the RST_REQ_MSK and SW_RST_REQ */ 535*87056d31SPankaj Gupta 536*87056d31SPankaj Gupta mov w0, #0x00000000 537*87056d31SPankaj Gupta str w0, [x2, #RSTCNTL_OFFSET] 538*87056d31SPankaj Gupta 539*87056d31SPankaj Gupta /* initiate the sw reset request */ 540*87056d31SPankaj Gupta mov w0, #SW_RST_REQ_INIT 541*87056d31SPankaj Gupta str w0, [x2, #RSTCNTL_OFFSET] 542*87056d31SPankaj Gupta 543*87056d31SPankaj Gupta /* In case this address range is mapped as cacheable, 544*87056d31SPankaj Gupta * flush the write out of the dcaches. 
545*87056d31SPankaj Gupta */ 546*87056d31SPankaj Gupta add x2, x2, #RSTCNTL_OFFSET 547*87056d31SPankaj Gupta dc cvac, x2 548*87056d31SPankaj Gupta dsb st 549*87056d31SPankaj Gupta isb 550*87056d31SPankaj Gupta 551*87056d31SPankaj Gupta /* Function does not return */ 552*87056d31SPankaj Gupta b . 553*87056d31SPankaj Guptaendfunc _soc_sys_reset 554*87056d31SPankaj Gupta 555*87056d31SPankaj Gupta 556*87056d31SPankaj Gupta/* Part of SYSTEM_OFF: 557*87056d31SPankaj Gupta * Function turns off the SoC clocks 558*87056d31SPankaj Gupta * Note: Function is not intended to return, and the only allowable 559*87056d31SPankaj Gupta * recovery is POR 560*87056d31SPankaj Gupta * in: none 561*87056d31SPankaj Gupta * out: none 562*87056d31SPankaj Gupta * uses x0, x1, x2, x3 563*87056d31SPankaj Gupta */ 564*87056d31SPankaj Guptafunc _soc_sys_off 565*87056d31SPankaj Gupta 566*87056d31SPankaj Gupta /* A-009810: LPM20 entry sequence might cause 567*87056d31SPankaj Gupta * spurious timeout reset request 568*87056d31SPankaj Gupta * workaround: MASK RESET REQ RPTOE 569*87056d31SPankaj Gupta */ 570*87056d31SPankaj Gupta ldr x0, =NXP_RESET_ADDR 571*87056d31SPankaj Gupta ldr w1, =RSTRQMR_RPTOE_MASK 572*87056d31SPankaj Gupta str w1, [x0, #RST_RSTRQMR1_OFFSET] 573*87056d31SPankaj Gupta 574*87056d31SPankaj Gupta /* disable sec, QBman, spi and qspi */ 575*87056d31SPankaj Gupta ldr x2, =NXP_DCFG_ADDR 576*87056d31SPankaj Gupta ldr x0, =DCFG_DEVDISR1_OFFSET 577*87056d31SPankaj Gupta ldr w1, =DCFG_DEVDISR1_SEC 578*87056d31SPankaj Gupta str w1, [x2, x0] 579*87056d31SPankaj Gupta ldr x0, =DCFG_DEVDISR3_OFFSET 580*87056d31SPankaj Gupta ldr w1, =DCFG_DEVDISR3_QBMAIN 581*87056d31SPankaj Gupta str w1, [x2, x0] 582*87056d31SPankaj Gupta ldr x0, =DCFG_DEVDISR4_OFFSET 583*87056d31SPankaj Gupta ldr w1, =DCFG_DEVDISR4_SPI_QSPI 584*87056d31SPankaj Gupta str w1, [x2, x0] 585*87056d31SPankaj Gupta 586*87056d31SPankaj Gupta /* set TPMWAKEMR0 */ 587*87056d31SPankaj Gupta ldr x0, =TPMWAKEMR0_ADDR 588*87056d31SPankaj 
Gupta mov w1, #0x1 589*87056d31SPankaj Gupta str w1, [x0] 590*87056d31SPankaj Gupta 591*87056d31SPankaj Gupta /* disable icache, dcache, mmu @ EL1 */ 592*87056d31SPankaj Gupta mov x1, #SCTLR_I_C_M_MASK 593*87056d31SPankaj Gupta mrs x0, sctlr_el1 594*87056d31SPankaj Gupta bic x0, x0, x1 595*87056d31SPankaj Gupta msr sctlr_el1, x0 596*87056d31SPankaj Gupta 597*87056d31SPankaj Gupta /* disable L2 prefetches */ 598*87056d31SPankaj Gupta mrs x0, CORTEX_A72_ECTLR_EL1 599*87056d31SPankaj Gupta bic x1, x1, #CPUECTLR_TIMER_MASK 600*87056d31SPankaj Gupta orr x0, x0, #CPUECTLR_SMPEN_EN 601*87056d31SPankaj Gupta orr x0, x0, #CPUECTLR_TIMER_8TICKS 602*87056d31SPankaj Gupta msr CORTEX_A72_ECTLR_EL1, x0 603*87056d31SPankaj Gupta isb 604*87056d31SPankaj Gupta 605*87056d31SPankaj Gupta /* disable CCN snoop domain */ 606*87056d31SPankaj Gupta mov x1, #NXP_CCN_HN_F_0_ADDR 607*87056d31SPankaj Gupta ldr x0, =CCN_HN_F_SNP_DMN_CTL_MASK 608*87056d31SPankaj Gupta str x0, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET] 609*87056d31SPankaj Gupta3: 610*87056d31SPankaj Gupta ldr w2, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET] 611*87056d31SPankaj Gupta cmp w2, #0x2 612*87056d31SPankaj Gupta b.ne 3b 613*87056d31SPankaj Gupta 614*87056d31SPankaj Gupta mov x3, #NXP_PMU_ADDR 615*87056d31SPankaj Gupta 616*87056d31SPankaj Gupta4: 617*87056d31SPankaj Gupta ldr w1, [x3, #PMU_PCPW20SR_OFFSET] 618*87056d31SPankaj Gupta cmp w1, #PMU_IDLE_CORE_MASK 619*87056d31SPankaj Gupta b.ne 4b 620*87056d31SPankaj Gupta 621*87056d31SPankaj Gupta mov w1, #PMU_IDLE_CLUSTER_MASK 622*87056d31SPankaj Gupta str w1, [x3, #PMU_CLAINACTSETR_OFFSET] 623*87056d31SPankaj Gupta 624*87056d31SPankaj Gupta1: 625*87056d31SPankaj Gupta ldr w1, [x3, #PMU_PCPW20SR_OFFSET] 626*87056d31SPankaj Gupta cmp w1, #PMU_IDLE_CORE_MASK 627*87056d31SPankaj Gupta b.ne 1b 628*87056d31SPankaj Gupta 629*87056d31SPankaj Gupta mov w1, #PMU_FLUSH_CLUSTER_MASK 630*87056d31SPankaj Gupta str w1, [x3, #PMU_CLL2FLUSHSETR_OFFSET] 631*87056d31SPankaj Gupta 632*87056d31SPankaj 
Gupta2: 633*87056d31SPankaj Gupta ldr w1, [x3, #PMU_CLL2FLUSHSR_OFFSET] 634*87056d31SPankaj Gupta cmp w1, #PMU_FLUSH_CLUSTER_MASK 635*87056d31SPankaj Gupta b.ne 2b 636*87056d31SPankaj Gupta 637*87056d31SPankaj Gupta mov w1, #PMU_FLUSH_CLUSTER_MASK 638*87056d31SPankaj Gupta str w1, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET] 639*87056d31SPankaj Gupta 640*87056d31SPankaj Gupta mov w1, #PMU_FLUSH_CLUSTER_MASK 641*87056d31SPankaj Gupta str w1, [x3, #PMU_CLSINACTSETR_OFFSET] 642*87056d31SPankaj Gupta 643*87056d31SPankaj Gupta mov x2, #DAIF_SET_MASK 644*87056d31SPankaj Gupta mrs x1, spsr_el1 645*87056d31SPankaj Gupta orr x1, x1, x2 646*87056d31SPankaj Gupta msr spsr_el1, x1 647*87056d31SPankaj Gupta 648*87056d31SPankaj Gupta mrs x1, spsr_el2 649*87056d31SPankaj Gupta orr x1, x1, x2 650*87056d31SPankaj Gupta msr spsr_el2, x1 651*87056d31SPankaj Gupta 652*87056d31SPankaj Gupta /* force the debug interface to be quiescent */ 653*87056d31SPankaj Gupta mrs x0, osdlr_el1 654*87056d31SPankaj Gupta orr x0, x0, #0x1 655*87056d31SPankaj Gupta msr osdlr_el1, x0 656*87056d31SPankaj Gupta 657*87056d31SPankaj Gupta /* invalidate all TLB entries at all 3 exception levels */ 658*87056d31SPankaj Gupta tlbi alle1 659*87056d31SPankaj Gupta tlbi alle2 660*87056d31SPankaj Gupta tlbi alle3 661*87056d31SPankaj Gupta 662*87056d31SPankaj Gupta /* x3 = pmu base addr */ 663*87056d31SPankaj Gupta 664*87056d31SPankaj Gupta /* request lpm20 */ 665*87056d31SPankaj Gupta ldr x0, =PMU_POWMGTCSR_OFFSET 666*87056d31SPankaj Gupta ldr w1, =PMU_POWMGTCSR_VAL 667*87056d31SPankaj Gupta str w1, [x3, x0] 668*87056d31SPankaj Gupta 669*87056d31SPankaj Gupta5: 670*87056d31SPankaj Gupta wfe 671*87056d31SPankaj Gupta b.eq 5b 672*87056d31SPankaj Guptaendfunc _soc_sys_off 673*87056d31SPankaj Gupta 674*87056d31SPankaj Gupta 675*87056d31SPankaj Gupta/* Part of CPU_SUSPEND 676*87056d31SPankaj Gupta * Function puts the calling core into standby state 677*87056d31SPankaj Gupta * in: x0 = core mask lsb 678*87056d31SPankaj Gupta * out: 
 * none
 * uses x0
 */
func _soc_core_entr_stdby

	/* Drain all outstanding memory accesses, sync the pipeline,
	 * then enter standby via wfi; execution resumes at the next
	 * instruction when a wakeup event arrives.
	 */
	dsb  sy
	isb
	wfi

	ret
endfunc _soc_core_entr_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_core_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] - turns off the wfi/wfe
	 * retention timer for this core
	 */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1

	ret
endfunc _soc_core_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_stdby

	/* no SoC-specific cleanup is required after core standby */
	ret
endfunc _soc_core_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_core_prep_pwrdn

	/* make sure the system counter is enabled - the generic timer
	 * must keep running so the core can be woken
	 */
	ldr  x2, =NXP_TIMER_ADDR
	ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst  w0, #SYS_COUNTER_CNTCR_EN
	b.ne 1f			/* EN bit already set - skip the write */
	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
	str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:
	/* enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_RET_MASK
	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
	orr  x1, x1, #CPUECTLR_SMPEN_EN
	msr  CORTEX_A72_ECTLR_EL1, x1

	isb
	ret
endfunc _soc_core_prep_pwrdn


/* Part of CPU_SUSPEND
 * Function puts the calling core into a power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
func _soc_core_entr_pwrdn

	/* X0 = core mask lsb */

	/* drain memory accesses and enter the low-power state; with the
	 * retention control programmed in _soc_core_prep_pwrdn, wfi may
	 * take the core into retention/power-down rather than plain idle
	 */
	dsb  sy
	isb
	wfi

	ret
endfunc _soc_core_entr_pwrdn


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_pwrdn

	/* no SoC-specific cleanup is required after core power-down */
	ret
endfunc _soc_core_exit_pwrdn


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_clstr_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] - turns off the wfi/wfe
	 * retention timer for this core
	 */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1

	ret
endfunc _soc_clstr_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_stdby

	/* no SoC-specific cleanup is required after cluster standby */
	ret
endfunc _soc_clstr_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_clstr_prep_pwrdn

	/* make sure the system counter is enabled - the generic timer
	 * must keep running so the cluster can be woken
	 */
	ldr  x2, =NXP_TIMER_ADDR
	ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst  w0, #SYS_COUNTER_CNTCR_EN
	b.ne 1f			/* EN bit already set - skip the write */
	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
	str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:
	/* enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_RET_MASK
	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
	orr  x1, x1, #CPUECTLR_SMPEN_EN
	msr  CORTEX_A72_ECTLR_EL1, x1

	isb
	ret
endfunc _soc_clstr_prep_pwrdn


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_pwrdn

	/* no SoC-specific cleanup is required after cluster power-down */
	ret
endfunc _soc_clstr_exit_pwrdn


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] - turns off the wfi/wfe
	 * retention timer for this core
	 */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1
	ret
endfunc _soc_sys_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_sys_exit_stdby

	/* no SoC-specific cleanup is required after system standby */
	ret
endfunc _soc_sys_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to
 * suspend-to-power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_pwrdn

	mrs  x1, CORTEX_A72_ECTLR_EL1
	/* make sure the smp bit is set */
	orr  x1, x1, #CPUECTLR_SMPEN_MASK
	/* set the retention control */
	orr  x1, x1, #CPUECTLR_RET_8CLK
	/* disable tablewalk prefetch */
	orr  x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
	msr  CORTEX_A72_ECTLR_EL1, x1
	isb

	ret
endfunc _soc_sys_prep_pwrdn


/* Part of CPU_SUSPEND
 * Function puts the calling core, and potentially the soc, into a
 * low-power state
 * in:  x0 = core mask lsb
 * out: x0 = 0, success
 *      x0 < 0, failure
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14,
 *      x15, x16, x17, x18, x19, x20, x21, x28
 */
func _soc_sys_pwrdn_wfi
	/* save the return address in x28 - lr (x30) is clobbered by the
	 * bl final_pwrdown below
	 */
	mov  x28, x30

	/* disable cluster snooping in the CCN-508 */
	ldr  x1, =NXP_CCN_HN_F_0_ADDR
	ldr  x7, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
	mov  x6, #CCN_HNF_NODE_COUNT
1:
	/* write the saved snoop-domain mask to the CLR register of each
	 * HN-F node in turn
	 */
	str  x7, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
	sub  x6, x6, #1
	add  x1, x1, #CCN_HNF_OFFSET
	cbnz x6, 1b

	/* x0 = core mask
	 * x7 = hnf sdcr (original snoop-domain control, restored at exit)
	 */

	ldr  x1, =NXP_PMU_CCSR_ADDR
	ldr  x2, =NXP_PMU_DCSR_ADDR

	/* enable the stop-request-override */
	mov  x3, #PMU_POWMGTDCR0_OFFSET
	mov  x4, #POWMGTDCR_STP_OV_EN
	str  w4, [x2, x3]

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x7 = hnf sdcr
	 */

	/* disable prefetching in the A72 core */
	mrs  x8, CORTEX_A72_CPUACTLR_EL1
	tst  x8, #CPUACTLR_DIS_LS_HW_PRE
	b.ne 2f			/* prefetch already disabled - skip */
	dsb  sy
	isb
	/* disable data prefetch */
	orr  x16, x8, #CPUACTLR_DIS_LS_HW_PRE
	/* disable tlb prefetch */
	orr  x16, x16, #CPUACTLR_DIS_L2_TLB_PRE
	msr  CORTEX_A72_CPUACTLR_EL1, x16
	isb

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x7 = hnf sdcr
	 * x8 = cpuactlr (original value, restored at exit)
	 */

2:
	/* save hnf-sdcr and cpuactlr to stack */
	stp  x7, x8, [sp, #-16]!

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 */

	/* save the IPSTPCRn registers to stack */
	mov  x15, #PMU_IPSTPCR0_OFFSET
	ldr  w9, [x1, x15]
	mov  x16, #PMU_IPSTPCR1_OFFSET
	ldr  w10, [x1, x16]
	mov  x17, #PMU_IPSTPCR2_OFFSET
	ldr  w11, [x1, x17]
	mov  x18, #PMU_IPSTPCR3_OFFSET
	ldr  w12, [x1, x18]
	mov  x19, #PMU_IPSTPCR4_OFFSET
	ldr  w13, [x1, x19]
	mov  x20, #PMU_IPSTPCR5_OFFSET
	ldr  w14, [x1, x20]

	stp  x9, x10, [sp, #-16]!
	stp  x11, x12, [sp, #-16]!
	stp  x13, x14, [sp, #-16]!

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x15 = PMU_IPSTPCR0_OFFSET
	 * x16 = PMU_IPSTPCR1_OFFSET
	 * x17 = PMU_IPSTPCR2_OFFSET
	 * x18 = PMU_IPSTPCR3_OFFSET
	 * x19 = PMU_IPSTPCR4_OFFSET
	 * x20 = PMU_IPSTPCR5_OFFSET
	 */

	/* load the full clock mask for IPSTPCR0 */
	ldr  x3, =DEVDISR1_MASK
	/* get the exclusions */
	mov  x21, #PMU_IPPDEXPCR0_OFFSET
	ldr  w4, [x1, x21]
	/* apply the exclusions to the mask */
	bic  w7, w3, w4
	/* stop the clocks in IPSTPCR0 */
	str  w7, [x1, x15]

	/* use same procedure for IPSTPCR1-IPSTPCR5 */

	/* stop the clocks in IPSTPCR1 */
	ldr  x5, =DEVDISR2_MASK
	mov  x21, #PMU_IPPDEXPCR1_OFFSET
	ldr  w6, [x1, x21]
	bic  w8, w5, w6
	str  w8, [x1, x16]

	/* stop the clocks in IPSTPCR2 */
	ldr  x3, =DEVDISR3_MASK
	mov  x21, #PMU_IPPDEXPCR2_OFFSET
	ldr  w4, [x1, x21]
	bic  w9, w3, w4
	str  w9, [x1, x17]

	/* stop the clocks in IPSTPCR3 */
	ldr  x5, =DEVDISR4_MASK
	mov  x21, #PMU_IPPDEXPCR3_OFFSET
	ldr  w6, [x1, x21]
	bic  w10, w5, w6
	str  w10, [x1, x18]

	/* stop the clocks in IPSTPCR4
	 * - exclude the ddr clocks as we are currently executing
	 *   out of *some* memory, might be ddr
	 * - exclude the OCRAM clk so that we retain any code/data in
	 *   OCRAM
	 * - may need to exclude the debug clock if we are testing
	 */
	ldr  x3, =DEVDISR5_MASK
	mov  w6, #DEVDISR5_MASK_ALL_MEM
	bic  w3, w3, w6

	mov  w5, #POLICY_DEBUG_ENABLE
	cbz  w5, 3f		/* debug policy off - keep debug clk masked */
	mov  w6, #DEVDISR5_MASK_DBG
	bic  w3, w3, w6
3:
	mov  x21, #PMU_IPPDEXPCR4_OFFSET
	ldr  w4, [x1, x21]
	bic  w11, w3, w4
	str  w11, [x1, x19]

	/* stop the clocks in IPSTPCR5 */
	ldr  x5, =DEVDISR6_MASK
	mov  x21, #PMU_IPPDEXPCR5_OFFSET
	ldr  w6, [x1, x21]
	bic  w12, w5, w6
	str  w12, [x1, x20]

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x7 = IPSTPCR0
	 * x8 = IPSTPCR1
	 * x9 = IPSTPCR2
	 * x10 = IPSTPCR3
	 * x11 = IPSTPCR4
	 * x12 = IPSTPCR5
	 */

	/* poll until the clocks are stopped in IPSTPACKSR0
	 * (bounded by CLOCK_RETRY_CNT - falls through on timeout)
	 */
	mov  w4, #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR0_OFFSET
4:
	ldr  w5, [x1, x21]
	cmp  w5, w7
	b.eq 5f
	sub  w4, w4, #1
	cbnz w4, 4b

	/* poll until the clocks are stopped in IPSTPACKSR1 */
5:
	mov  w4, #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR1_OFFSET
6:
	ldr  w5, [x1, x21]
	cmp  w5, w8
	b.eq 7f
	sub  w4, w4, #1
	cbnz w4, 6b

	/* poll until the clocks are stopped in IPSTPACKSR2 */
7:
	mov  w4, #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR2_OFFSET
8:
	ldr  w5, [x1, x21]
	cmp  w5, w9
	b.eq 9f
	sub  w4, w4, #1
	cbnz w4, 8b

	/* poll until the clocks are stopped in IPSTPACKSR3 */
9:
	mov  w4, #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR3_OFFSET
10:
	ldr  w5, [x1, x21]
	cmp  w5, w10
	b.eq 11f
	sub  w4, w4, #1
	cbnz w4, 10b

	/* poll until the clocks are stopped in IPSTPACKSR4 */
11:
	mov  w4, #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR4_OFFSET
12:
	ldr  w5, [x1, x21]
	cmp  w5, w11
	b.eq 13f
	sub  w4, w4, #1
	cbnz w4, 12b

	/* poll until the clocks are stopped in IPSTPACKSR5 */
13:
	mov  w4, #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR5_OFFSET
14:
	ldr  w5, [x1, x21]
	cmp  w5, w12
	b.eq 15f
	sub  w4, w4, #1
	cbnz w4, 14b

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x7 = IPSTPCR0
	 * x8 = IPSTPCR1
	 * x9 = IPSTPCR2
	 * x10 = IPSTPCR3
	 * x11 = IPSTPCR4
	 * x12 = IPSTPCR5
	 */

15:
	mov  x3, #NXP_DCFG_ADDR

	/* save the devdisr registers to stack */
	ldr  w13, [x3, #DCFG_DEVDISR1_OFFSET]
	ldr  w14, [x3, #DCFG_DEVDISR2_OFFSET]
	ldr  w15, [x3, #DCFG_DEVDISR3_OFFSET]
	ldr  w16, [x3, #DCFG_DEVDISR4_OFFSET]
	ldr  w17, [x3, #DCFG_DEVDISR5_OFFSET]
	ldr  w18, [x3, #DCFG_DEVDISR6_OFFSET]

	stp  x13, x14, [sp, #-16]!
	stp  x15, x16, [sp, #-16]!
	stp  x17, x18, [sp, #-16]!

	/* power down the IP in DEVDISR1 - corresponds to IPSTPCR0 */
	str  w7, [x3, #DCFG_DEVDISR1_OFFSET]

	/* power down the IP in DEVDISR2 - corresponds to IPSTPCR1 */
	str  w8, [x3, #DCFG_DEVDISR2_OFFSET]

	/* power down the IP in DEVDISR3 - corresponds to IPSTPCR2 */
	str  w9, [x3, #DCFG_DEVDISR3_OFFSET]

	/* power down the IP in DEVDISR4 - corresponds to IPSTPCR3 */
	str  w10, [x3, #DCFG_DEVDISR4_OFFSET]

	/* power down the IP in DEVDISR5 - corresponds to IPSTPCR4 */
	str  w11, [x3, #DCFG_DEVDISR5_OFFSET]

	/* power down the IP in DEVDISR6 - corresponds to IPSTPCR5 */
	str  w12, [x3, #DCFG_DEVDISR6_OFFSET]

	/* setup register values for the cache-only sequence */
	mov  x4, #NXP_DDR_ADDR
	mov  x5, #NXP_DDR2_ADDR
	mov  x6, x11
	mov  x7, x17
	ldr  x12, =PMU_CLAINACTSETR_OFFSET
	ldr  x13, =PMU_CLSINACTSETR_OFFSET
	ldr  x14, =PMU_CLAINACTCLRR_OFFSET
	ldr  x15, =PMU_CLSINACTCLRR_OFFSET

	/* x0 = core mask
	 * x1 = NXP_PMU_CCSR_ADDR
	 * x2 = NXP_PMU_DCSR_ADDR
	 * x3 = NXP_DCFG_ADDR
	 * x4 = NXP_DDR_ADDR
	 * x5 = NXP_DDR2_ADDR
	 * w6 = IPSTPCR4
	 * w7 = DEVDISR5
	 * x12 = PMU_CLAINACTSETR_OFFSET
	 * x13 = PMU_CLSINACTSETR_OFFSET
	 * x14 = PMU_CLAINACTCLRR_OFFSET
	 * x15 = PMU_CLSINACTCLRR_OFFSET
	 */

	mov  x8, #POLICY_DEBUG_ENABLE
	cbnz x8, 29f		/* debug enabled - leave debug i/f running */
	/* force the debug interface to be quiescent */
	mrs  x9, OSDLR_EL1
	orr  x9, x9, #0x1
	msr  OSDLR_EL1, x9

	/* enter the cache-only sequence */
29:
	bl   final_pwrdown

	/* when we are here, the core has come out of wfi and the
	 * ddr is back up
	 */

	mov  x8, #POLICY_DEBUG_ENABLE
	cbnz x8, 30f		/* debug was never locked - skip unlock */
	/* restart the debug interface */
	mrs  x9, OSDLR_EL1
	mov  x10, #1
	bic  x9, x9, x10
	msr  OSDLR_EL1, x9

	/* get saved DEVDISR regs off stack */
30:
	ldp  x17, x18, [sp], #16
	ldp  x15, x16, [sp], #16
	ldp  x13, x14, [sp], #16
	/* restore DEVDISR regs */
	str  w18, [x3, #DCFG_DEVDISR6_OFFSET]
	str  w17, [x3, #DCFG_DEVDISR5_OFFSET]
	str  w16, [x3, #DCFG_DEVDISR4_OFFSET]
	str  w15, [x3, #DCFG_DEVDISR3_OFFSET]
	str  w14, [x3, #DCFG_DEVDISR2_OFFSET]
	str  w13, [x3, #DCFG_DEVDISR1_OFFSET]
	isb

	/* get saved IPSTPCRn regs off stack */
	ldp  x13, x14, [sp], #16
	ldp  x11, x12, [sp], #16
	ldp  x9, x10, [sp], #16

	/* restore IPSTPCRn regs */
	mov  x15, #PMU_IPSTPCR5_OFFSET
	str  w14, [x1, x15]
	mov  x16, #PMU_IPSTPCR4_OFFSET
	str  w13, [x1, x16]
	mov  x17, #PMU_IPSTPCR3_OFFSET
	str  w12, [x1, x17]
	mov  x18, #PMU_IPSTPCR2_OFFSET
	str  w11, [x1, x18]
	mov  x19, #PMU_IPSTPCR1_OFFSET
	str  w10, [x1, x19]
	mov  x20, #PMU_IPSTPCR0_OFFSET
	str  w9, [x1, x20]
	isb

	/* poll on IPSTPACKCRn regs til IP clocks are restarted
	 * (each poll is bounded by CLOCK_RETRY_CNT)
	 */
	mov  w4, #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR5_OFFSET
16:
	ldr  w5, [x1, x15]
	and  w5, w5, w14
	cbz  w5, 17f
	sub  w4, w4, #1
	cbnz w4, 16b

17:
	mov  w4, #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR4_OFFSET
18:
	ldr  w5, [x1, x15]
	and  w5, w5, w13
	cbz  w5, 19f
	sub  w4, w4, #1
	cbnz w4, 18b

19:
	mov  w4, #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR3_OFFSET
20:
	ldr  w5, [x1, x15]
	and  w5, w5, w12
	cbz  w5, 21f
	sub  w4, w4, #1
	cbnz w4, 20b

21:
	mov  w4, #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR2_OFFSET
22:
	ldr  w5, [x1, x15]
	and  w5, w5, w11
	cbz  w5, 23f
	sub  w4, w4, #1
	cbnz w4, 22b

23:
	mov  w4, #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR1_OFFSET
24:
	ldr  w5, [x1, x15]
	and  w5, w5, w10
	cbz  w5, 25f
	sub  w4, w4, #1
	cbnz w4, 24b

25:
	mov  w4, #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR0_OFFSET
26:
	ldr  w5, [x1, x15]
	and  w5, w5, w9
	cbz  w5, 27f
	sub  w4, w4, #1
	cbnz w4, 26b

27:
	/* disable the stop-request-override
	 * NOTE(review): this writes POWMGTDCR_STP_OV_EN again -
	 * presumably the field is write-one-to-clear; confirm against
	 * the PMU register reference
	 */
	mov  x8, #PMU_POWMGTDCR0_OFFSET
	mov  w9, #POWMGTDCR_STP_OV_EN
	str  w9, [x2, x8]
	isb

	/* get hnf-sdcr and cpuactlr off stack */
	ldp  x7, x8, [sp], #16

	/* restore cpuactlr */
	msr  CORTEX_A72_CPUACTLR_EL1, x8
	isb

	/* restore snooping in the hnf nodes */
	ldr  x9, =NXP_CCN_HN_F_0_ADDR
	mov  x6, #CCN_HNF_NODE_COUNT
28:
	str  x7, [x9, #CCN_HN_F_SNP_DMN_CTL_SET_OFFSET]
	sub  x6, x6, #1
	add  x9, x9, #CCN_HNF_OFFSET
	cbnz x6, 28b
	isb

	/* restore the saved return address and return to caller */
	mov  x30, x28
	ret
endfunc _soc_sys_pwrdn_wfi


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_sys_exit_pwrdn

	mrs  x1, CORTEX_A72_ECTLR_EL1
	/* make sure the smp bit is set */
	orr  x1, x1, #CPUECTLR_SMPEN_MASK
	/* clr the retention control */
	mov  x2, #CPUECTLR_RET_8CLK
	bic  x1, x1, x2
	/* enable tablewalk prefetch */
	mov  x2, #CPUECTLR_DISABLE_TWALK_PREFETCH
	bic  x1, x1, x2
	msr  CORTEX_A72_ECTLR_EL1, x1
	isb
1372*87056d31SPankaj Gupta 1373*87056d31SPankaj Gupta ret 1374*87056d31SPankaj Guptaendfunc _soc_sys_exit_pwrdn 1375*87056d31SPankaj Gupta 1376*87056d31SPankaj Gupta 1377*87056d31SPankaj Gupta/* Function will pwrdown ddr and the final core - it will do this 1378*87056d31SPankaj Gupta * by loading itself into the icache and then executing from there 1379*87056d31SPankaj Gupta * in: 1380*87056d31SPankaj Gupta * x0 = core mask 1381*87056d31SPankaj Gupta * x1 = NXP_PMU_CCSR_ADDR 1382*87056d31SPankaj Gupta * x2 = NXP_PMU_DCSR_ADDR 1383*87056d31SPankaj Gupta * x3 = NXP_DCFG_ADDR 1384*87056d31SPankaj Gupta * x4 = NXP_DDR_ADDR 1385*87056d31SPankaj Gupta * x5 = NXP_DDR2_ADDR 1386*87056d31SPankaj Gupta * w6 = IPSTPCR4 1387*87056d31SPankaj Gupta * w7 = DEVDISR5 1388*87056d31SPankaj Gupta * x12 = PMU_CLAINACTSETR_OFFSET 1389*87056d31SPankaj Gupta * x13 = PMU_CLSINACTSETR_OFFSET 1390*87056d31SPankaj Gupta * x14 = PMU_CLAINACTCLRR_OFFSET 1391*87056d31SPankaj Gupta * x15 = PMU_CLSINACTCLRR_OFFSET 1392*87056d31SPankaj Gupta * out: none 1393*87056d31SPankaj Gupta * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x13, x14, x15, x16, 1394*87056d31SPankaj Gupta * x17, x18 1395*87056d31SPankaj Gupta */ 1396*87056d31SPankaj Gupta 1397*87056d31SPankaj Gupta/* 4Kb aligned */ 1398*87056d31SPankaj Gupta.align 12 1399*87056d31SPankaj Guptafunc final_pwrdown 1400*87056d31SPankaj Gupta 1401*87056d31SPankaj Gupta mov x0, xzr 1402*87056d31SPankaj Gupta b touch_line_0 1403*87056d31SPankaj Guptastart_line_0: 1404*87056d31SPankaj Gupta mov x0, #1 1405*87056d31SPankaj Gupta /* put ddr controller 1 into self-refresh */ 1406*87056d31SPankaj Gupta ldr w8, [x4, #DDR_CFG_2_OFFSET] 1407*87056d31SPankaj Gupta orr w8, w8, #CFG_2_FORCE_REFRESH 1408*87056d31SPankaj Gupta str w8, [x4, #DDR_CFG_2_OFFSET] 1409*87056d31SPankaj Gupta 1410*87056d31SPankaj Gupta /* put ddr controller 2 into self-refresh */ 1411*87056d31SPankaj Gupta ldr w8, [x5, #DDR_CFG_2_OFFSET] 1412*87056d31SPankaj Gupta orr w8, w8, 
#CFG_2_FORCE_REFRESH 1413*87056d31SPankaj Gupta str w8, [x5, #DDR_CFG_2_OFFSET] 1414*87056d31SPankaj Gupta 1415*87056d31SPankaj Gupta /* stop the clocks in both ddr controllers */ 1416*87056d31SPankaj Gupta mov w10, #DEVDISR5_MASK_DDR 1417*87056d31SPankaj Gupta mov x16, #PMU_IPSTPCR4_OFFSET 1418*87056d31SPankaj Gupta orr w9, w6, w10 1419*87056d31SPankaj Gupta str w9, [x1, x16] 1420*87056d31SPankaj Gupta isb 1421*87056d31SPankaj Gupta 1422*87056d31SPankaj Gupta mov x17, #PMU_IPSTPACKSR4_OFFSET 1423*87056d31SPankaj Guptatouch_line_0: 1424*87056d31SPankaj Gupta cbz x0, touch_line_1 1425*87056d31SPankaj Gupta 1426*87056d31SPankaj Guptastart_line_1: 1427*87056d31SPankaj Gupta /* poll IPSTPACKSR4 until 1428*87056d31SPankaj Gupta * ddr controller clocks are stopped. 1429*87056d31SPankaj Gupta */ 1430*87056d31SPankaj Gupta1: 1431*87056d31SPankaj Gupta ldr w8, [x1, x17] 1432*87056d31SPankaj Gupta and w8, w8, w10 1433*87056d31SPankaj Gupta cmp w8, w10 1434*87056d31SPankaj Gupta b.ne 1b 1435*87056d31SPankaj Gupta 1436*87056d31SPankaj Gupta /* shut down power to the ddr controllers */ 1437*87056d31SPankaj Gupta orr w9, w7, #DEVDISR5_MASK_DDR 1438*87056d31SPankaj Gupta str w9, [x3, #DCFG_DEVDISR5_OFFSET] 1439*87056d31SPankaj Gupta 1440*87056d31SPankaj Gupta /* disable cluster acp ports */ 1441*87056d31SPankaj Gupta mov w8, #CLAINACT_DISABLE_ACP 1442*87056d31SPankaj Gupta str w8, [x1, x12] 1443*87056d31SPankaj Gupta 1444*87056d31SPankaj Gupta /* disable skyros ports */ 1445*87056d31SPankaj Gupta mov w9, #CLSINACT_DISABLE_SKY 1446*87056d31SPankaj Gupta str w9, [x1, x13] 1447*87056d31SPankaj Gupta isb 1448*87056d31SPankaj Gupta 1449*87056d31SPankaj Guptatouch_line_1: 1450*87056d31SPankaj Gupta cbz x0, touch_line_2 1451*87056d31SPankaj Gupta 1452*87056d31SPankaj Guptastart_line_2: 1453*87056d31SPankaj Gupta isb 1454*87056d31SPankaj Gupta3: 1455*87056d31SPankaj Gupta wfi 1456*87056d31SPankaj Gupta 1457*87056d31SPankaj Gupta /* if we are here then we are awake 1458*87056d31SPankaj 
Gupta * - bring this device back up 1459*87056d31SPankaj Gupta */ 1460*87056d31SPankaj Gupta 1461*87056d31SPankaj Gupta /* enable skyros ports */ 1462*87056d31SPankaj Gupta mov w9, #CLSINACT_DISABLE_SKY 1463*87056d31SPankaj Gupta str w9, [x1, x15] 1464*87056d31SPankaj Gupta 1465*87056d31SPankaj Gupta /* enable acp ports */ 1466*87056d31SPankaj Gupta mov w8, #CLAINACT_DISABLE_ACP 1467*87056d31SPankaj Gupta str w8, [x1, x14] 1468*87056d31SPankaj Gupta isb 1469*87056d31SPankaj Gupta 1470*87056d31SPankaj Gupta /* bring up the ddr controllers */ 1471*87056d31SPankaj Gupta str w7, [x3, #DCFG_DEVDISR5_OFFSET] 1472*87056d31SPankaj Gupta isb 1473*87056d31SPankaj Gupta str w6, [x1, x16] 1474*87056d31SPankaj Gupta isb 1475*87056d31SPankaj Gupta 1476*87056d31SPankaj Gupta nop 1477*87056d31SPankaj Guptatouch_line_2: 1478*87056d31SPankaj Gupta cbz x0, touch_line_3 1479*87056d31SPankaj Gupta 1480*87056d31SPankaj Guptastart_line_3: 1481*87056d31SPankaj Gupta /* poll IPSTPACKSR4 until 1482*87056d31SPankaj Gupta * ddr controller clocks are running 1483*87056d31SPankaj Gupta */ 1484*87056d31SPankaj Gupta mov w10, #DEVDISR5_MASK_DDR 1485*87056d31SPankaj Gupta2: 1486*87056d31SPankaj Gupta ldr w8, [x1, x17] 1487*87056d31SPankaj Gupta and w8, w8, w10 1488*87056d31SPankaj Gupta cbnz w8, 2b 1489*87056d31SPankaj Gupta 1490*87056d31SPankaj Gupta /* take ddr controller 2 out of self-refresh */ 1491*87056d31SPankaj Gupta mov w8, #CFG_2_FORCE_REFRESH 1492*87056d31SPankaj Gupta ldr w9, [x5, #DDR_CFG_2_OFFSET] 1493*87056d31SPankaj Gupta bic w9, w9, w8 1494*87056d31SPankaj Gupta str w9, [x5, #DDR_CFG_2_OFFSET] 1495*87056d31SPankaj Gupta 1496*87056d31SPankaj Gupta /* take ddr controller 1 out of self-refresh */ 1497*87056d31SPankaj Gupta ldr w9, [x4, #DDR_CFG_2_OFFSET] 1498*87056d31SPankaj Gupta bic w9, w9, w8 1499*87056d31SPankaj Gupta str w9, [x4, #DDR_CFG_2_OFFSET] 1500*87056d31SPankaj Gupta isb 1501*87056d31SPankaj Gupta 1502*87056d31SPankaj Gupta nop 1503*87056d31SPankaj Gupta nop 
1504*87056d31SPankaj Gupta nop 1505*87056d31SPankaj Guptatouch_line_3: 1506*87056d31SPankaj Gupta cbz x0, start_line_0 1507*87056d31SPankaj Gupta 1508*87056d31SPankaj Gupta /* execute here after ddr is back up */ 1509*87056d31SPankaj Gupta 1510*87056d31SPankaj Gupta ret 1511*87056d31SPankaj Guptaendfunc final_pwrdown 1512*87056d31SPankaj Gupta 1513*87056d31SPankaj Gupta/* Function returns CLUSTER_3_NORMAL if the cores of cluster 3 are 1514*87056d31SPankaj Gupta * to be handled normally, and it returns CLUSTER_3_IN_RESET if the cores 1515*87056d31SPankaj Gupta * are to be held in reset 1516*87056d31SPankaj Gupta * in: none 1517*87056d31SPankaj Gupta * out: x0 = #CLUSTER_3_NORMAL, cluster 3 treated normal 1518*87056d31SPankaj Gupta * x0 = #CLUSTER_3_IN_RESET, cluster 3 cores held in reset 1519*87056d31SPankaj Gupta * uses x0, x1, x2 1520*87056d31SPankaj Gupta */ 1521*87056d31SPankaj Guptafunc cluster3InReset 1522*87056d31SPankaj Gupta 1523*87056d31SPankaj Gupta /* default return is treat cores normal */ 1524*87056d31SPankaj Gupta mov x0, #CLUSTER_3_NORMAL 1525*87056d31SPankaj Gupta 1526*87056d31SPankaj Gupta /* read RCW_SR27 register */ 1527*87056d31SPankaj Gupta mov x1, #NXP_DCFG_ADDR 1528*87056d31SPankaj Gupta ldr w2, [x1, #RCW_SR27_OFFSET] 1529*87056d31SPankaj Gupta 1530*87056d31SPankaj Gupta /* test the cluster 3 bit */ 1531*87056d31SPankaj Gupta tst w2, #CLUSTER_3_RCW_BIT 1532*87056d31SPankaj Gupta b.eq 1f 1533*87056d31SPankaj Gupta 1534*87056d31SPankaj Gupta /* if we are here, then the bit was set */ 1535*87056d31SPankaj Gupta mov x0, #CLUSTER_3_IN_RESET 1536*87056d31SPankaj Gupta1: 1537*87056d31SPankaj Gupta ret 1538*87056d31SPankaj Guptaendfunc cluster3InReset 1539*87056d31SPankaj Gupta 1540*87056d31SPankaj Gupta 1541*87056d31SPankaj Gupta/* Function checks to see if cores which are to be disabled have been 1542*87056d31SPankaj Gupta * released from reset - if not, it releases them 1543*87056d31SPankaj Gupta * Note: there may be special handling of cluster 3 
cores depending upon the 1544*87056d31SPankaj Gupta * sys clk frequency 1545*87056d31SPankaj Gupta * in: none 1546*87056d31SPankaj Gupta * out: none 1547*87056d31SPankaj Gupta * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9 1548*87056d31SPankaj Gupta */ 1549*87056d31SPankaj Guptafunc release_disabled 1550*87056d31SPankaj Gupta mov x9, x30 1551*87056d31SPankaj Gupta 1552*87056d31SPankaj Gupta /* check if we need to keep cluster 3 cores in reset */ 1553*87056d31SPankaj Gupta bl cluster3InReset /* 0-2 */ 1554*87056d31SPankaj Gupta mov x8, x0 1555*87056d31SPankaj Gupta 1556*87056d31SPankaj Gupta /* x8 = cluster 3 handling */ 1557*87056d31SPankaj Gupta 1558*87056d31SPankaj Gupta /* read COREDISABLESR */ 1559*87056d31SPankaj Gupta mov x0, #NXP_DCFG_ADDR 1560*87056d31SPankaj Gupta ldr w4, [x0, #DCFG_COREDISABLEDSR_OFFSET] 1561*87056d31SPankaj Gupta cmp x8, #CLUSTER_3_IN_RESET 1562*87056d31SPankaj Gupta b.ne 4f 1563*87056d31SPankaj Gupta 1564*87056d31SPankaj Gupta /* the cluster 3 cores are to be held in reset, so remove 1565*87056d31SPankaj Gupta * them from the disable mask 1566*87056d31SPankaj Gupta */ 1567*87056d31SPankaj Gupta bic x4, x4, #CLUSTER_3_CORES_MASK 1568*87056d31SPankaj Gupta4: 1569*87056d31SPankaj Gupta /* get the number of cpus on this device */ 1570*87056d31SPankaj Gupta mov x6, #PLATFORM_CORE_COUNT 1571*87056d31SPankaj Gupta 1572*87056d31SPankaj Gupta mov x0, #NXP_RESET_ADDR 1573*87056d31SPankaj Gupta ldr w5, [x0, #BRR_OFFSET] 1574*87056d31SPankaj Gupta 1575*87056d31SPankaj Gupta /* load the core mask for the first core */ 1576*87056d31SPankaj Gupta mov x7, #1 1577*87056d31SPankaj Gupta 1578*87056d31SPankaj Gupta /* x4 = COREDISABLESR 1579*87056d31SPankaj Gupta * x5 = BRR 1580*87056d31SPankaj Gupta * x6 = loop count 1581*87056d31SPankaj Gupta * x7 = core mask bit 1582*87056d31SPankaj Gupta */ 1583*87056d31SPankaj Gupta2: 1584*87056d31SPankaj Gupta /* check if the core is to be disabled */ 1585*87056d31SPankaj Gupta tst x4, x7 1586*87056d31SPankaj Gupta 
b.eq 1f 1587*87056d31SPankaj Gupta 1588*87056d31SPankaj Gupta /* see if disabled cores have already been released from reset */ 1589*87056d31SPankaj Gupta tst x5, x7 1590*87056d31SPankaj Gupta b.ne 5f 1591*87056d31SPankaj Gupta 1592*87056d31SPankaj Gupta /* if core has not been released, then release it (0-3) */ 1593*87056d31SPankaj Gupta mov x0, x7 1594*87056d31SPankaj Gupta bl _soc_core_release 1595*87056d31SPankaj Gupta 1596*87056d31SPankaj Gupta /* record the core state in the data area (0-3) */ 1597*87056d31SPankaj Gupta mov x0, x7 1598*87056d31SPankaj Gupta mov x1, #CORE_STATE_DATA 1599*87056d31SPankaj Gupta mov x2, #CORE_DISABLED 1600*87056d31SPankaj Gupta bl _setCoreData 1601*87056d31SPankaj Gupta 1602*87056d31SPankaj Gupta1: 1603*87056d31SPankaj Gupta /* see if this is a cluster 3 core */ 1604*87056d31SPankaj Gupta mov x3, #CLUSTER_3_CORES_MASK 1605*87056d31SPankaj Gupta tst x3, x7 1606*87056d31SPankaj Gupta b.eq 5f 1607*87056d31SPankaj Gupta 1608*87056d31SPankaj Gupta /* this is a cluster 3 core - see if it needs to be held in reset */ 1609*87056d31SPankaj Gupta cmp x8, #CLUSTER_3_IN_RESET 1610*87056d31SPankaj Gupta b.ne 5f 1611*87056d31SPankaj Gupta 1612*87056d31SPankaj Gupta /* record the core state as disabled in the data area (0-3) */ 1613*87056d31SPankaj Gupta mov x0, x7 1614*87056d31SPankaj Gupta mov x1, #CORE_STATE_DATA 1615*87056d31SPankaj Gupta mov x2, #CORE_DISABLED 1616*87056d31SPankaj Gupta bl _setCoreData 1617*87056d31SPankaj Gupta 1618*87056d31SPankaj Gupta5: 1619*87056d31SPankaj Gupta /* decrement the counter */ 1620*87056d31SPankaj Gupta subs x6, x6, #1 1621*87056d31SPankaj Gupta b.le 3f 1622*87056d31SPankaj Gupta 1623*87056d31SPankaj Gupta /* shift the core mask to the next core */ 1624*87056d31SPankaj Gupta lsl x7, x7, #1 1625*87056d31SPankaj Gupta /* continue */ 1626*87056d31SPankaj Gupta b 2b 1627*87056d31SPankaj Gupta3: 1628*87056d31SPankaj Gupta cmp x8, #CLUSTER_3_IN_RESET 1629*87056d31SPankaj Gupta b.ne 6f 1630*87056d31SPankaj Gupta 
1631*87056d31SPankaj Gupta /* we need to hold the cluster 3 cores in reset, 1632*87056d31SPankaj Gupta * so mark them in the COREDISR and COREDISABLEDSR registers as 1633*87056d31SPankaj Gupta * "disabled", and the rest of the sw stack will leave them alone 1634*87056d31SPankaj Gupta * thinking that they have been disabled 1635*87056d31SPankaj Gupta */ 1636*87056d31SPankaj Gupta mov x0, #NXP_DCFG_ADDR 1637*87056d31SPankaj Gupta ldr w1, [x0, #DCFG_COREDISR_OFFSET] 1638*87056d31SPankaj Gupta orr w1, w1, #CLUSTER_3_CORES_MASK 1639*87056d31SPankaj Gupta str w1, [x0, #DCFG_COREDISR_OFFSET] 1640*87056d31SPankaj Gupta 1641*87056d31SPankaj Gupta ldr w2, [x0, #DCFG_COREDISABLEDSR_OFFSET] 1642*87056d31SPankaj Gupta orr w2, w2, #CLUSTER_3_CORES_MASK 1643*87056d31SPankaj Gupta str w2, [x0, #DCFG_COREDISABLEDSR_OFFSET] 1644*87056d31SPankaj Gupta dsb sy 1645*87056d31SPankaj Gupta isb 1646*87056d31SPankaj Gupta 1647*87056d31SPankaj Gupta#if (PSCI_TEST) 1648*87056d31SPankaj Gupta /* x0 = NXP_DCFG_ADDR : read COREDISABLESR */ 1649*87056d31SPankaj Gupta ldr w4, [x0, #DCFG_COREDISABLEDSR_OFFSET] 1650*87056d31SPankaj Gupta /* read COREDISR */ 1651*87056d31SPankaj Gupta ldr w3, [x0, #DCFG_COREDISR_OFFSET] 1652*87056d31SPankaj Gupta#endif 1653*87056d31SPankaj Gupta 1654*87056d31SPankaj Gupta6: 1655*87056d31SPankaj Gupta mov x30, x9 1656*87056d31SPankaj Gupta ret 1657*87056d31SPankaj Gupta 1658*87056d31SPankaj Guptaendfunc release_disabled 1659*87056d31SPankaj Gupta 1660*87056d31SPankaj Gupta 1661*87056d31SPankaj Gupta/* Function setc up the TrustZone Address Space Controller (TZASC) 1662*87056d31SPankaj Gupta * in: none 1663*87056d31SPankaj Gupta * out: none 1664*87056d31SPankaj Gupta * uses x0, x1 1665*87056d31SPankaj Gupta */ 1666*87056d31SPankaj Guptafunc init_tzpc 1667*87056d31SPankaj Gupta 1668*87056d31SPankaj Gupta /* set Non Secure access for all devices protected via TZPC */ 1669*87056d31SPankaj Gupta 1670*87056d31SPankaj Gupta /* decode Protection-0 Set Reg */ 
1671*87056d31SPankaj Gupta ldr x1, =TZPCDECPROT_0_SET_BASE 1672*87056d31SPankaj Gupta /* set decode region to NS, Bits[7:0] */ 1673*87056d31SPankaj Gupta mov w0, #0xFF 1674*87056d31SPankaj Gupta str w0, [x1] 1675*87056d31SPankaj Gupta 1676*87056d31SPankaj Gupta /* decode Protection-1 Set Reg */ 1677*87056d31SPankaj Gupta ldr x1, =TZPCDECPROT_1_SET_BASE 1678*87056d31SPankaj Gupta /* set decode region to NS, Bits[7:0] */ 1679*87056d31SPankaj Gupta mov w0, #0xFF 1680*87056d31SPankaj Gupta str w0, [x1] 1681*87056d31SPankaj Gupta 1682*87056d31SPankaj Gupta /* decode Protection-2 Set Reg */ 1683*87056d31SPankaj Gupta ldr x1, =TZPCDECPROT_2_SET_BASE 1684*87056d31SPankaj Gupta /* set decode region to NS, Bits[7:0] */ 1685*87056d31SPankaj Gupta mov w0, #0xFF 1686*87056d31SPankaj Gupta str w0, [x1] 1687*87056d31SPankaj Gupta 1688*87056d31SPankaj Gupta /* entire SRAM as NS */ 1689*87056d31SPankaj Gupta /* secure RAM region size Reg */ 1690*87056d31SPankaj Gupta ldr x1, =TZPC_BASE 1691*87056d31SPankaj Gupta /* 0x00000000 = no secure region */ 1692*87056d31SPankaj Gupta mov w0, #0x00000000 1693*87056d31SPankaj Gupta str w0, [x1] 1694*87056d31SPankaj Gupta 1695*87056d31SPankaj Gupta ret 1696*87056d31SPankaj Guptaendfunc init_tzpc 1697*87056d31SPankaj Gupta 1698*87056d31SPankaj Gupta/* write a register in the DCFG block 1699*87056d31SPankaj Gupta * in: x0 = offset 1700*87056d31SPankaj Gupta * in: w1 = value to write 1701*87056d31SPankaj Gupta * uses x0, x1, x2 1702*87056d31SPankaj Gupta */ 1703*87056d31SPankaj Guptafunc _write_reg_dcfg 1704*87056d31SPankaj Gupta ldr x2, =NXP_DCFG_ADDR 1705*87056d31SPankaj Gupta str w1, [x2, x0] 1706*87056d31SPankaj Gupta ret 1707*87056d31SPankaj Guptaendfunc _write_reg_dcfg 1708*87056d31SPankaj Gupta 1709*87056d31SPankaj Gupta 1710*87056d31SPankaj Gupta/* read a register in the DCFG block 1711*87056d31SPankaj Gupta * in: x0 = offset 1712*87056d31SPankaj Gupta * out: w0 = value read 1713*87056d31SPankaj Gupta * uses x0, x1, x2 1714*87056d31SPankaj 
 */
func _read_reg_dcfg
	ldr	x2, =NXP_DCFG_ADDR
	ldr	w1, [x2, x0]
	mov	w0, w1
	ret
endfunc _read_reg_dcfg


/* Function returns an mpidr value for a core, given a core_mask_lsb
 * in:  x0 = core mask lsb (single bit set)
 * out: x0 = affinity2:affinity1:affinity0, where affinity is 8-bits
 * uses x0, x1
 */
func get_mpidr_value

	/* convert a core mask to an SoC core number (bit index) */
	clz	w0, w0
	mov	w1, #31
	sub	w0, w1, w0

	/* get the mpidr core number from the SoC core number:
	 * Aff0 = core number within cluster (low bit)
	 */
	mov	w1, wzr
	tst	x0, #1
	b.eq	1f
	orr	w1, w1, #1

1:
	/* extract the cluster number into Aff1 (bits [15:8]) */
	lsr	w0, w0, #1
	orr	w0, w1, w0, lsl #8

	ret
endfunc get_mpidr_value


/* Function returns the redistributor base address for the core specified
 * by the core mask in x0
 * in:  x0 - core mask lsb of specified core
 * out: x0 = redistributor rd base address for specified core
 * uses x0, x1, x2
 */
func get_gic_rd_base
	/* core number = 31 - clz(mask) = bit index of the mask bit */
	clz	w1, w0
	mov	w2, #0x20
	sub	w2, w2, w1
	sub	w2, w2, #1

	ldr	x0, =NXP_GICR_ADDR
	mov	x1, #GIC_RD_OFFSET

	/* x2 = core number
	 * loop counter
	 */
2:
	cbz	x2, 1f
	add	x0, x0, x1		/* step one RD frame per core */
	sub	x2, x2, #1
	b	2b
1:
	ret
endfunc get_gic_rd_base


/* Function returns the redistributor sgi base address for the core
 * specified by the core mask in x0
 * in:  x0 - core mask lsb of specified core
 * out: x0 = redistributor sgi base address for specified core
 * uses x0, x1, x2
 */
func get_gic_sgi_base
	/* core number = 31 - clz(mask) = bit index of the mask bit */
	clz	w1, w0
	mov	w2, #0x20
	sub	w2, w2, w1
	sub	w2, w2, #1

	ldr	x0, =NXP_GICR_SGI_ADDR
	mov	x1, #GIC_SGI_OFFSET

	/* loop counter */
2:
	cbz	x2, 1f			/* x2 = core number */
	add	x0, x0, x1		/* step one SGI frame per core */
	sub	x2, x2, #1
	b	2b
1:
	ret
endfunc get_gic_sgi_base

/* Function writes a register in the RESET block
 * in:  x0 = offset
 * in:  w1 = value to write
 * uses x0, x1, x2
 */
func _write_reg_reset
	ldr	x2, =NXP_RESET_ADDR
	str	w1, [x2, x0]
	ret
endfunc _write_reg_reset


/* Function reads a register in the RESET block
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1
 */
func _read_reg_reset
	ldr	x1, =NXP_RESET_ADDR
	ldr	w0, [x1, x0]
	ret
endfunc _read_reg_reset