/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * (C) Copyright 2007
 * Texas Instruments
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * (C) Copyright 2004
 * Texas Instruments, <www.ti.com>
 * Richard Woodruff <r-woodruff2@ti.com>
 */
#include <linux/linkage.h>

#include <asm/assembler.h>

#include "omap34xx.h"
#include "iomap.h"
#include "cm3xxx.h"
#include "prm3xxx.h"
#include "sdrc.h"
#include "sram.h"
#include "control.h"

/*
 * Register access definitions
 */
#define SDRC_SCRATCHPAD_SEM_OFFS	0xc
#define SDRC_SCRATCHPAD_SEM_V	OMAP343X_SCRATCHPAD_REGADDR\
					(SDRC_SCRATCHPAD_SEM_OFFS)
#define PM_PREPWSTST_CORE_P	OMAP3430_PRM_BASE + CORE_MOD +\
					OMAP3430_PM_PREPWSTST
#define PM_PWSTCTRL_MPU_P	OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
#define CM_IDLEST1_CORE_V	OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
#define CM_IDLEST_CKGEN_V	OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
#define SRAM_BASE_P		OMAP3_SRAM_PA
#define CONTROL_STAT		OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
#define CONTROL_MEM_RTA_CTRL	(OMAP343X_CTRL_BASE +\
					OMAP36XX_CONTROL_MEM_RTA_CTRL)

/* Move this once a correct place is available */
#define SCRATCHPAD_MEM_OFFS	0x310
#define SCRATCHPAD_BASE_P	(OMAP343X_CTRL_BASE +\
					OMAP343X_CONTROL_MEM_WKUP +\
					SCRATCHPAD_MEM_OFFS)
#define SDRC_POWER_V		OMAP34XX_SDRC_REGADDR(SDRC_POWER)
#define SDRC_SYSCONFIG_P	(OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
#define SDRC_MR_0_P		(OMAP343X_SDRC_BASE + SDRC_MR_0)
#define SDRC_EMR2_0_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_0)
#define SDRC_MANUAL_0_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_0)
#define SDRC_MR_1_P		(OMAP343X_SDRC_BASE + SDRC_MR_1)
#define SDRC_EMR2_1_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_1)
#define SDRC_MANUAL_1_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_1)
#define SDRC_DLLA_STATUS_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)

/*
 * This file needs to be built unconditionally as ARM to interoperate correctly
 * with non-Thumb-2-capable firmware.
 */
	.arm

/*
 * API functions
 */

	.text
/*
 * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
 * This function sets up a flag that will allow for this toggling to take
 * place on 3630. Hopefully some version in the future may not need this.
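 * The flag itself (l2dis_3630, in .data) is reached through the PC-relative
 * l2dis_3630_offset literal, the same way the restore code below reads it
 * back while running from physical addresses with the MMU off.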
 */
ENTRY(enable_omap3630_toggle_l2_on_restore)
	stmfd	sp!, {lr}		@ save registers on stack
	/* Setup so that we will disable and enable l2 */
	mov	r1, #0x1
	adr	r3, l2dis_3630_offset
	ldr	r2, [r3]		@ value for offset
	str	r1, [r2, r3]		@ write to l2dis_3630
	ldmfd	sp!, {pc}		@ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)

/*
 * Function to call ROM code to save secure ram context.
 *
 * r0 = physical address of the parameters
 */
	.arch armv7-a
	.arch_extension sec
ENTRY(save_secure_ram_context)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
	mov	r3, r0			@ physical address of parameters
	mov	r0, #25			@ set service ID for PPA
	mov	r12, r0			@ copy secure service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	nop
	nop
	nop
	nop
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(save_secure_ram_context)

/*
 * ======================
 * == Idle entry point ==
 * ======================
 */

/*
 * Forces OMAP into idle state
 *
 * omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
 * and executes the WFI instruction. Calling WFI effectively changes the
 * power domain states to the desired target power states.
 *
 * Notes:
 *  - only the minimum set of functions gets copied to internal SRAM at boot
 *    and after wake-up from OFF mode, cf. omap_push_sram_idle. The function
 *    pointers in SDRAM or SRAM are called depending on the desired low power
 *    target state.
 *  - when the OMAP wakes up it continues at different execution points
 *    depending on the low power mode (non-OFF vs OFF modes),
 *    cf. 'Resume path for xxx mode' comments.
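 *  - for the OFF path the L1 data cache is cleaned and SCTLR.C is cleared
 *    before WFI (cf. save_context_wfi below); the C bit is restored on the
 *    non-OFF resume path in exit_nonoff_modes.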
 */
	.align	3
ENTRY(omap34xx_cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	/*
	 * r0 contains information about saving context:
	 *   0 - No context lost
	 *   1 - Only L1 and logic lost
	 *   2 - Only L2 lost (even though L1 is retained, we clean it along with L2)
	 *   3 - Both L1 and L2 lost and logic lost
	 */

	/*
	 * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi)
	 * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram)
	 */
	ldr	r4, omap3_do_wfi_sram_addr
	ldr	r5, [r4]
	cmp	r0, #0x0		@ If no context save required,
	bxeq	r5			@  jump to the WFI code in SRAM

	/* Otherwise fall through to the save context code */
save_context_wfi:
	/*
	 * jump out to kernel flush routine
	 *  - reusing that code is better
	 *  - it executes in a cached space so is faster than refetch per-block
	 *  - should be faster and will change with kernel
	 *  - 'might' have to copy address, load and jump to it
	 * Flush all data from the L1 data cache before disabling
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	mov	lr, pc
	bx	r1

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate L1 data cache. Even though only an invalidate is
	 * necessary, the exported flush API is used here. Doing a clean
	 * on an already clean cache is almost a NOP.
	 */
	ldr	r1, kernel_flush
	blx	r1
	b	omap3_do_wfi
ENDPROC(omap34xx_cpu_suspend)
omap3_do_wfi_sram_addr:
	.word	omap3_do_wfi_sram
kernel_flush:
	.word	v7_flush_dcache_all

/* ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */

/*
 * Do WFI instruction
 * Includes the resume path for non-OFF modes
 *
 * This code gets copied to internal SRAM and is accessible
 * from both SDRAM and SRAM:
 * - executed from SRAM for non-off modes (omap3_do_wfi_sram),
 * - executed from SDRAM for OFF mode (omap3_do_wfi).
 */
	.align	3
ENTRY(omap3_do_wfi)
	ldr	r4, sdrc_power		@ read the SDRC_POWER register
	ldr	r5, [r4]		@ read the contents of SDRC_POWER
	orr	r5, r5, #0x40		@ enable self refresh on idle req
	str	r5, [r4]		@ write back to SDRC_POWER register

	/* Data memory barrier and Data sync barrier */
	dsb
	dmb

/*
 * ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */
	wfi				@ wait for interrupt

/*
 * ===================================
 * == Resume path for non-OFF modes ==
 * ===================================
 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

/*
 * This function implements the erratum ID i581 WA:
 *  SDRC state restore before accessing the SDRAM
 *
 * Only used at return from non-OFF mode. For OFF
 * mode the ROM code configures the SDRC and
 * the DPLL before calling the restore code directly
 * from DDR.
 */

/* Make sure SDRC accesses are ok */
wait_sdrc_ok:

/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
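	/* Poll CM_IDLEST_CKGEN until bit 0 reports DPLL3 locked */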
	ldr	r4, cm_idlest_ckgen
wait_dpll3_lock:
	ldr	r5, [r4]
	tst	r5, #1
	beq	wait_dpll3_lock

	ldr	r4, cm_idlest1_core
wait_sdrc_ready:
	ldr	r5, [r4]
	tst	r5, #0x2
	bne	wait_sdrc_ready
	/* allow DLL powerdown upon hw idle req */
	ldr	r4, sdrc_power
	ldr	r5, [r4]
	bic	r5, r5, #0x40
	str	r5, [r4]

is_dll_in_lock_mode:
	/* Is dll in lock mode? */
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	tst	r5, #0x4
	bne	exit_nonoff_modes	@ Return if locked
	/* wait till dll locks */
wait_dll_lock_timed:
	ldr	r4, sdrc_dlla_status
	/* Wait 20uS for lock */
	mov	r6, #8
wait_dll_lock:
	subs	r6, r6, #0x1
	beq	kick_dll
	ldr	r5, [r4]
	and	r5, r5, #0x4
	cmp	r5, #0x4
	bne	wait_dll_lock
	b	exit_nonoff_modes	@ Return when locked

	/* disable/reenable DLL if not locked */
kick_dll:
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	mov	r6, r5
	bic	r6, #(1<<3)		@ disable dll
	str	r6, [r4]
	dsb
	orr	r6, r6, #(1<<3)		@ enable dll
	str	r6, [r4]
	dsb
	b	wait_dll_lock_timed

exit_nonoff_modes:
	/* Re-enable C-bit if needed */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)		@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
	mcreq	p15, 0, r0, c1, c0, 0
	isb

/*
 * ===================================
 * == Exit point from non-OFF modes ==
 * ===================================
 */
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(omap3_do_wfi)
sdrc_power:
	.word	SDRC_POWER_V
cm_idlest1_core:
	.word	CM_IDLEST1_CORE_V
cm_idlest_ckgen:
	.word	CM_IDLEST_CKGEN_V
sdrc_dlla_status:
	.word	SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
	.word	SDRC_DLLA_CTRL_V
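/*
 * Size of the omap3_do_wfi block above, used by the PM code when copying it
 * into internal SRAM, cf. omap_push_sram_idle.
 */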
ENTRY(omap3_do_wfi_sz)
	.word	. - omap3_do_wfi

/*
 * ==============================
 * == Resume path for OFF mode ==
 * ==============================
 */

/*
 * The restore_* functions are called by the ROM code
 * when back from WFI in OFF mode.
 * Cf. the get_*restore_pointer functions.
 *
 * restore_es3: applies to 34xx >= ES3.0
 * restore_3630: applies to 36xx
 * restore: common code for 3xxx
 *
 * Note: when back from CORE and MPU OFF mode we are running
 * from SDRAM, without the MMU, caches or branch prediction.
 * Also the SRAM content has been cleared.
 */
ENTRY(omap3_restore_es3)
	ldr	r5, pm_prepwstst_core_p
	ldr	r4, [r5]
	and	r4, r4, #0x3
	cmp	r4, #0x0	@ Check if previous power state of CORE is OFF
	bne	omap3_restore	@ Fall through to OMAP3 common code
	adr	r0, es3_sdrc_fix
	ldr	r1, sram_base
	ldr	r2, es3_sdrc_fix_sz
	mov	r2, r2, ror #2
copy_to_sram:
	ldmia	r0!, {r3}	@ val = *src
	stmia	r1!, {r3}	@ *dst = val
	subs	r2, r2, #0x1	@ num_words--
	bne	copy_to_sram
	ldr	r1, sram_base
	blx	r1
	b	omap3_restore	@ Fall through to OMAP3 common code
ENDPROC(omap3_restore_es3)

ENTRY(omap3_restore_3630)
	ldr	r1, pm_prepwstst_core_p
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if previous power state of CORE is OFF
	bne	omap3_restore	@ Fall through to OMAP3 common code
	/* Disable RTA before giving control */
	ldr	r1, control_mem_rta
	mov	r2, #OMAP36XX_RTA_DISABLE
	str	r2, [r1]
ENDPROC(omap3_restore_3630)

	/* Fall through to common code for the remaining logic */

ENTRY(omap3_restore)
	/*
	 * Read the pwstctrl register to check the reason for mpu reset.
	 * This tells us what was lost.
	 */
	ldr	r1, pm_pwstctrl_mpu
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if target power state was OFF or RET
	bne	logic_l1_restore

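	/*
	 * Full OFF: check the flag set by enable_omap3630_toggle_l2_on_restore.
	 * When it is set (3630), L2 is disabled here and re-enabled again at
	 * logic_l1_restore below.
	 */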
	adr	r1, l2dis_3630_offset	@ address for offset
	ldr	r0, [r1]		@ value for offset
	ldr	r0, [r1, r0]		@ value at l2dis_3630
	cmp	r0, #0x1		@ should we disable L2 on 3630?
	bne	skipl2dis
	mrc	p15, 0, r0, c1, c0, 1
	bic	r0, r0, #2		@ disable L2 cache
	mcr	p15, 0, r0, c1, c0, 1
skipl2dis:
	ldr	r0, control_stat
	ldr	r1, [r0]
	and	r1, #0x700
	cmp	r1, #0x300
	beq	l2_inv_gp
	adr	r0, l2_inv_api_params_offset
	ldr	r3, [r0]
	add	r3, r3, r0		@ r3 points to dummy parameters
	mov	r0, #40			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	/* Write to Aux control register to set some bits */
	mov	r0, #42			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]		@ r3 points to parameters
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)

#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
	/* Restore L2 aux control register */
					@ set service ID for PPA
	mov	r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
	mov	r12, r0			@ copy service ID in r12
	mov	r1, #0			@ set task ID for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	adds	r3, r3, #8		@ r3 points to parameters
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
#endif
	b	logic_l1_restore

	.align
l2_inv_api_params_offset:
	.long	l2_inv_api_params - .
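/*
 * GP devices (device type 0x300 in CONTROL_STATUS, tested above) skip the
 * PPA services and use the plain SMI calls below for the L2 invalidate and
 * the AUX control writes.
 */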
l2_inv_gp:
	/* Execute smi to invalidate L2 cache */
	mov	r12, #0x1		@ set up to invalidate L2
	smc	#0			@ Call SMI monitor (smieq)
	/* Write to Aux control register to set some bits */
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	ldr	r0, [r3, #4]
	mov	r12, #0x3
	smc	#0			@ Call SMI monitor (smieq)
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	ldr	r0, [r3, #12]
	mov	r12, #0x2
	smc	#0			@ Call SMI monitor (smieq)
logic_l1_restore:
	adr	r0, l2dis_3630_offset	@ address for offset
	ldr	r1, [r0]		@ value for offset
	ldr	r1, [r0, r1]		@ value at l2dis_3630
	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
	bne	skipl2reen
	mrc	p15, 0, r1, c1, c0, 1
	orr	r1, r1, #2		@ re-enable L2 cache
	mcr	p15, 0, r1, c1, c0, 1
skipl2reen:

	/* Now branch to the common CPU resume function */
	b	cpu_resume
ENDPROC(omap3_restore)

	.ltorg

/*
 * Local variables
 */
pm_prepwstst_core_p:
	.word	PM_PREPWSTST_CORE_P
pm_pwstctrl_mpu:
	.word	PM_PWSTCTRL_MPU_P
scratchpad_base:
	.word	SCRATCHPAD_BASE_P
sram_base:
	.word	SRAM_BASE_P + 0x8000
control_stat:
	.word	CONTROL_STAT
control_mem_rta:
	.word	CONTROL_MEM_RTA_CTRL
l2dis_3630_offset:
	.long	l2dis_3630 - .

	.data
	.align	2
l2dis_3630:
	.word	0

	.data
	.align	2
l2_inv_api_params:
	.word	0x1, 0x00

/*
 * Internal functions
 */

/*
 * This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0
 * Copied to and run from SRAM in order to reconfigure the SDRC parameters.
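 * The SDRC registers are read back and rewritten unchanged and a manual
 * autorefresh command is issued on both chip-selects.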
 */
	.text
	.align	3
ENTRY(es3_sdrc_fix)
	ldr	r4, sdrc_syscfg		@ get config addr
	ldr	r5, [r4]		@ get value
	tst	r5, #0x100		@ is part access blocked
	it	eq
	biceq	r5, r5, #0x100		@ clear bit if set
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_mr_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_0	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	ldr	r4, sdrc_mr_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_1	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	bx	lr

/*
 * Local variables
 */
	.align
sdrc_syscfg:
	.word	SDRC_SYSCONFIG_P
sdrc_mr_0:
	.word	SDRC_MR_0_P
sdrc_emr2_0:
	.word	SDRC_EMR2_0_P
sdrc_manual_0:
	.word	SDRC_MANUAL_0_P
sdrc_mr_1:
	.word	SDRC_MR_1_P
sdrc_emr2_1:
	.word	SDRC_EMR2_1_P
sdrc_manual_1:
	.word	SDRC_MANUAL_1_P
ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz)
	.word	. - es3_sdrc_fix