xref: /OK3568_Linux_fs/kernel/arch/arm/mach-omap2/omap-mpuss-lowpower.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * OMAP MPUSS low power code
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2011 Texas Instruments, Inc.
6*4882a593Smuzhiyun  *	Santosh Shilimkar <santosh.shilimkar@ti.com>
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
9*4882a593Smuzhiyun  * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
10*4882a593Smuzhiyun  * CPU0 and CPU1 LPRM modules.
 * CPU0, CPU1 and MPUSS each have their own power domain and
12*4882a593Smuzhiyun  * hence multiple low power combinations of MPUSS are possible.
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * The CPU0 and CPU1 can't support Closed switch Retention (CSWR)
15*4882a593Smuzhiyun  * because the mode is not supported by hw constraints of dormant
16*4882a593Smuzhiyun  * mode. While waking up from the dormant mode, a reset  signal
17*4882a593Smuzhiyun  * to the Cortex-A9 processor must be asserted by the external
18*4882a593Smuzhiyun  * power controller.
19*4882a593Smuzhiyun  *
20*4882a593Smuzhiyun  * With architectural inputs and hardware recommendations, only
21*4882a593Smuzhiyun  * below modes are supported from power gain vs latency point of view.
22*4882a593Smuzhiyun  *
23*4882a593Smuzhiyun  *	CPU0		CPU1		MPUSS
24*4882a593Smuzhiyun  *	----------------------------------------------
25*4882a593Smuzhiyun  *	ON		ON		ON
26*4882a593Smuzhiyun  *	ON(Inactive)	OFF		ON(Inactive)
27*4882a593Smuzhiyun  *	OFF		OFF		CSWR
28*4882a593Smuzhiyun  *	OFF		OFF		OSWR
29*4882a593Smuzhiyun  *	OFF		OFF		OFF(Device OFF *TBD)
30*4882a593Smuzhiyun  *	----------------------------------------------
31*4882a593Smuzhiyun  *
32*4882a593Smuzhiyun  * Note: CPU0 is the master core and it is the last CPU to go down
 * and first to wake-up when MPUSS low power states are exercised
34*4882a593Smuzhiyun  */
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #include <linux/kernel.h>
37*4882a593Smuzhiyun #include <linux/io.h>
38*4882a593Smuzhiyun #include <linux/errno.h>
39*4882a593Smuzhiyun #include <linux/linkage.h>
40*4882a593Smuzhiyun #include <linux/smp.h>
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun #include <asm/cacheflush.h>
43*4882a593Smuzhiyun #include <asm/tlbflush.h>
44*4882a593Smuzhiyun #include <asm/smp_scu.h>
45*4882a593Smuzhiyun #include <asm/suspend.h>
46*4882a593Smuzhiyun #include <asm/virt.h>
47*4882a593Smuzhiyun #include <asm/hardware/cache-l2x0.h>
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun #include "soc.h"
50*4882a593Smuzhiyun #include "common.h"
51*4882a593Smuzhiyun #include "omap44xx.h"
52*4882a593Smuzhiyun #include "omap4-sar-layout.h"
53*4882a593Smuzhiyun #include "pm.h"
54*4882a593Smuzhiyun #include "prcm_mpu44xx.h"
55*4882a593Smuzhiyun #include "prcm_mpu54xx.h"
56*4882a593Smuzhiyun #include "prminst44xx.h"
57*4882a593Smuzhiyun #include "prcm44xx.h"
58*4882a593Smuzhiyun #include "prm44xx.h"
59*4882a593Smuzhiyun #include "prm-regbits-44xx.h"
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun static void __iomem *sar_base;
62*4882a593Smuzhiyun static u32 old_cpu1_ns_pa_addr;
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun #if defined(CONFIG_PM) && defined(CONFIG_SMP)
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun struct omap4_cpu_pm_info {
67*4882a593Smuzhiyun 	struct powerdomain *pwrdm;
68*4882a593Smuzhiyun 	void __iomem *scu_sar_addr;
69*4882a593Smuzhiyun 	void __iomem *wkup_sar_addr;
70*4882a593Smuzhiyun 	void __iomem *l2x0_sar_addr;
71*4882a593Smuzhiyun };
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /**
74*4882a593Smuzhiyun  * struct cpu_pm_ops - CPU pm operations
75*4882a593Smuzhiyun  * @finish_suspend:	CPU suspend finisher function pointer
76*4882a593Smuzhiyun  * @resume:		CPU resume function pointer
77*4882a593Smuzhiyun  * @scu_prepare:	CPU Snoop Control program function pointer
78*4882a593Smuzhiyun  * @hotplug_restart:	CPU restart function pointer
79*4882a593Smuzhiyun  *
80*4882a593Smuzhiyun  * Structure holds functions pointer for CPU low power operations like
81*4882a593Smuzhiyun  * suspend, resume and scu programming.
82*4882a593Smuzhiyun  */
83*4882a593Smuzhiyun struct cpu_pm_ops {
84*4882a593Smuzhiyun 	int (*finish_suspend)(unsigned long cpu_state);
85*4882a593Smuzhiyun 	void (*resume)(void);
86*4882a593Smuzhiyun 	void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
87*4882a593Smuzhiyun 	void (*hotplug_restart)(void);
88*4882a593Smuzhiyun };
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
91*4882a593Smuzhiyun static struct powerdomain *mpuss_pd;
92*4882a593Smuzhiyun static u32 cpu_context_offset;
93*4882a593Smuzhiyun 
default_finish_suspend(unsigned long cpu_state)94*4882a593Smuzhiyun static int default_finish_suspend(unsigned long cpu_state)
95*4882a593Smuzhiyun {
96*4882a593Smuzhiyun 	omap_do_wfi();
97*4882a593Smuzhiyun 	return 0;
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun 
dummy_cpu_resume(void)100*4882a593Smuzhiyun static void dummy_cpu_resume(void)
101*4882a593Smuzhiyun {}
102*4882a593Smuzhiyun 
dummy_scu_prepare(unsigned int cpu_id,unsigned int cpu_state)103*4882a593Smuzhiyun static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
104*4882a593Smuzhiyun {}
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun static struct cpu_pm_ops omap_pm_ops = {
107*4882a593Smuzhiyun 	.finish_suspend		= default_finish_suspend,
108*4882a593Smuzhiyun 	.resume			= dummy_cpu_resume,
109*4882a593Smuzhiyun 	.scu_prepare		= dummy_scu_prepare,
110*4882a593Smuzhiyun 	.hotplug_restart	= dummy_cpu_resume,
111*4882a593Smuzhiyun };
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun /*
114*4882a593Smuzhiyun  * Program the wakeup routine address for the CPU0 and CPU1
115*4882a593Smuzhiyun  * used for OFF or DORMANT wakeup.
116*4882a593Smuzhiyun  */
set_cpu_wakeup_addr(unsigned int cpu_id,u32 addr)117*4882a593Smuzhiyun static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun 	if (pm_info->wkup_sar_addr)
122*4882a593Smuzhiyun 		writel_relaxed(addr, pm_info->wkup_sar_addr);
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun /*
126*4882a593Smuzhiyun  * Store the SCU power status value to scratchpad memory
127*4882a593Smuzhiyun  */
scu_pwrst_prepare(unsigned int cpu_id,unsigned int cpu_state)128*4882a593Smuzhiyun static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
131*4882a593Smuzhiyun 	u32 scu_pwr_st;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	switch (cpu_state) {
134*4882a593Smuzhiyun 	case PWRDM_POWER_RET:
135*4882a593Smuzhiyun 		scu_pwr_st = SCU_PM_DORMANT;
136*4882a593Smuzhiyun 		break;
137*4882a593Smuzhiyun 	case PWRDM_POWER_OFF:
138*4882a593Smuzhiyun 		scu_pwr_st = SCU_PM_POWEROFF;
139*4882a593Smuzhiyun 		break;
140*4882a593Smuzhiyun 	case PWRDM_POWER_ON:
141*4882a593Smuzhiyun 	case PWRDM_POWER_INACTIVE:
142*4882a593Smuzhiyun 	default:
143*4882a593Smuzhiyun 		scu_pwr_st = SCU_PM_NORMAL;
144*4882a593Smuzhiyun 		break;
145*4882a593Smuzhiyun 	}
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	if (pm_info->scu_sar_addr)
148*4882a593Smuzhiyun 		writel_relaxed(scu_pwr_st, pm_info->scu_sar_addr);
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun /* Helper functions for MPUSS OSWR */
mpuss_clear_prev_logic_pwrst(void)152*4882a593Smuzhiyun static inline void mpuss_clear_prev_logic_pwrst(void)
153*4882a593Smuzhiyun {
154*4882a593Smuzhiyun 	u32 reg;
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun 	reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
157*4882a593Smuzhiyun 		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
158*4882a593Smuzhiyun 	omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
159*4882a593Smuzhiyun 		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun 
cpu_clear_prev_logic_pwrst(unsigned int cpu_id)162*4882a593Smuzhiyun static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
163*4882a593Smuzhiyun {
164*4882a593Smuzhiyun 	u32 reg;
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun 	if (cpu_id) {
167*4882a593Smuzhiyun 		reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
168*4882a593Smuzhiyun 					cpu_context_offset);
169*4882a593Smuzhiyun 		omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
170*4882a593Smuzhiyun 					cpu_context_offset);
171*4882a593Smuzhiyun 	} else {
172*4882a593Smuzhiyun 		reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
173*4882a593Smuzhiyun 					cpu_context_offset);
174*4882a593Smuzhiyun 		omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
175*4882a593Smuzhiyun 					cpu_context_offset);
176*4882a593Smuzhiyun 	}
177*4882a593Smuzhiyun }
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun /*
180*4882a593Smuzhiyun  * Store the CPU cluster state for L2X0 low power operations.
181*4882a593Smuzhiyun  */
l2x0_pwrst_prepare(unsigned int cpu_id,unsigned int save_state)182*4882a593Smuzhiyun static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
183*4882a593Smuzhiyun {
184*4882a593Smuzhiyun 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	if (pm_info->l2x0_sar_addr)
187*4882a593Smuzhiyun 		writel_relaxed(save_state, pm_info->l2x0_sar_addr);
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun /*
191*4882a593Smuzhiyun  * Save the L2X0 AUXCTRL and POR value to SAR memory. Its used to
192*4882a593Smuzhiyun  * in every restore MPUSS OFF path.
193*4882a593Smuzhiyun  */
194*4882a593Smuzhiyun #ifdef CONFIG_CACHE_L2X0
save_l2x0_context(void)195*4882a593Smuzhiyun static void __init save_l2x0_context(void)
196*4882a593Smuzhiyun {
197*4882a593Smuzhiyun 	void __iomem *l2x0_base = omap4_get_l2cache_base();
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 	if (l2x0_base && sar_base) {
200*4882a593Smuzhiyun 		writel_relaxed(l2x0_saved_regs.aux_ctrl,
201*4882a593Smuzhiyun 			       sar_base + L2X0_AUXCTRL_OFFSET);
202*4882a593Smuzhiyun 		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
203*4882a593Smuzhiyun 			       sar_base + L2X0_PREFETCH_CTRL_OFFSET);
204*4882a593Smuzhiyun 	}
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun #else
save_l2x0_context(void)207*4882a593Smuzhiyun static void __init save_l2x0_context(void)
208*4882a593Smuzhiyun {}
209*4882a593Smuzhiyun #endif
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun /**
212*4882a593Smuzhiyun  * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
213*4882a593Smuzhiyun  * The purpose of this function is to manage low power programming
214*4882a593Smuzhiyun  * of OMAP4 MPUSS subsystem
215*4882a593Smuzhiyun  * @cpu : CPU ID
216*4882a593Smuzhiyun  * @power_state: Low power state.
217*4882a593Smuzhiyun  *
218*4882a593Smuzhiyun  * MPUSS states for the context save:
219*4882a593Smuzhiyun  * save_state =
220*4882a593Smuzhiyun  *	0 - Nothing lost and no need to save: MPUSS INACTIVE
221*4882a593Smuzhiyun  *	1 - CPUx L1 and logic lost: MPUSS CSWR
222*4882a593Smuzhiyun  *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
223*4882a593Smuzhiyun  *	3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
224*4882a593Smuzhiyun  */
omap4_enter_lowpower(unsigned int cpu,unsigned int power_state)225*4882a593Smuzhiyun int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
226*4882a593Smuzhiyun {
227*4882a593Smuzhiyun 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
228*4882a593Smuzhiyun 	unsigned int save_state = 0, cpu_logic_state = PWRDM_POWER_RET;
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun 	if (omap_rev() == OMAP4430_REV_ES1_0)
231*4882a593Smuzhiyun 		return -ENXIO;
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	switch (power_state) {
234*4882a593Smuzhiyun 	case PWRDM_POWER_ON:
235*4882a593Smuzhiyun 	case PWRDM_POWER_INACTIVE:
236*4882a593Smuzhiyun 		save_state = 0;
237*4882a593Smuzhiyun 		break;
238*4882a593Smuzhiyun 	case PWRDM_POWER_OFF:
239*4882a593Smuzhiyun 		cpu_logic_state = PWRDM_POWER_OFF;
240*4882a593Smuzhiyun 		save_state = 1;
241*4882a593Smuzhiyun 		break;
242*4882a593Smuzhiyun 	case PWRDM_POWER_RET:
243*4882a593Smuzhiyun 		if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
244*4882a593Smuzhiyun 			save_state = 0;
245*4882a593Smuzhiyun 		break;
246*4882a593Smuzhiyun 	default:
247*4882a593Smuzhiyun 		/*
248*4882a593Smuzhiyun 		 * CPUx CSWR is invalid hardware state. Also CPUx OSWR
249*4882a593Smuzhiyun 		 * doesn't make much scense, since logic is lost and $L1
250*4882a593Smuzhiyun 		 * needs to be cleaned because of coherency. This makes
251*4882a593Smuzhiyun 		 * CPUx OSWR equivalent to CPUX OFF and hence not supported
252*4882a593Smuzhiyun 		 */
253*4882a593Smuzhiyun 		WARN_ON(1);
254*4882a593Smuzhiyun 		return -ENXIO;
255*4882a593Smuzhiyun 	}
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	pwrdm_pre_transition(NULL);
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 	/*
260*4882a593Smuzhiyun 	 * Check MPUSS next state and save interrupt controller if needed.
261*4882a593Smuzhiyun 	 * In MPUSS OSWR or device OFF, interrupt controller  contest is lost.
262*4882a593Smuzhiyun 	 */
263*4882a593Smuzhiyun 	mpuss_clear_prev_logic_pwrst();
264*4882a593Smuzhiyun 	if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
265*4882a593Smuzhiyun 		(pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
266*4882a593Smuzhiyun 		save_state = 2;
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 	cpu_clear_prev_logic_pwrst(cpu);
269*4882a593Smuzhiyun 	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
270*4882a593Smuzhiyun 	pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state);
271*4882a593Smuzhiyun 	set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
272*4882a593Smuzhiyun 	omap_pm_ops.scu_prepare(cpu, power_state);
273*4882a593Smuzhiyun 	l2x0_pwrst_prepare(cpu, save_state);
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	/*
276*4882a593Smuzhiyun 	 * Call low level function  with targeted low power state.
277*4882a593Smuzhiyun 	 */
278*4882a593Smuzhiyun 	if (save_state)
279*4882a593Smuzhiyun 		cpu_suspend(save_state, omap_pm_ops.finish_suspend);
280*4882a593Smuzhiyun 	else
281*4882a593Smuzhiyun 		omap_pm_ops.finish_suspend(save_state);
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun 	if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
284*4882a593Smuzhiyun 		gic_dist_enable();
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	/*
287*4882a593Smuzhiyun 	 * Restore the CPUx power state to ON otherwise CPUx
288*4882a593Smuzhiyun 	 * power domain can transitions to programmed low power
289*4882a593Smuzhiyun 	 * state while doing WFI outside the low powe code. On
290*4882a593Smuzhiyun 	 * secure devices, CPUx does WFI which can result in
291*4882a593Smuzhiyun 	 * domain transition
292*4882a593Smuzhiyun 	 */
293*4882a593Smuzhiyun 	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	pwrdm_post_transition(NULL);
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	return 0;
298*4882a593Smuzhiyun }
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun /**
301*4882a593Smuzhiyun  * omap4_hotplug_cpu: OMAP4 CPU hotplug entry
302*4882a593Smuzhiyun  * @cpu : CPU ID
303*4882a593Smuzhiyun  * @power_state: CPU low power state.
304*4882a593Smuzhiyun  */
omap4_hotplug_cpu(unsigned int cpu,unsigned int power_state)305*4882a593Smuzhiyun int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
306*4882a593Smuzhiyun {
307*4882a593Smuzhiyun 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
308*4882a593Smuzhiyun 	unsigned int cpu_state = 0;
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	if (omap_rev() == OMAP4430_REV_ES1_0)
311*4882a593Smuzhiyun 		return -ENXIO;
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun 	/* Use the achievable power state for the domain */
314*4882a593Smuzhiyun 	power_state = pwrdm_get_valid_lp_state(pm_info->pwrdm,
315*4882a593Smuzhiyun 					       false, power_state);
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun 	if (power_state == PWRDM_POWER_OFF)
318*4882a593Smuzhiyun 		cpu_state = 1;
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
321*4882a593Smuzhiyun 	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
322*4882a593Smuzhiyun 	set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart));
323*4882a593Smuzhiyun 	omap_pm_ops.scu_prepare(cpu, power_state);
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun 	/*
326*4882a593Smuzhiyun 	 * CPU never retuns back if targeted power state is OFF mode.
327*4882a593Smuzhiyun 	 * CPU ONLINE follows normal CPU ONLINE ptah via
328*4882a593Smuzhiyun 	 * omap4_secondary_startup().
329*4882a593Smuzhiyun 	 */
330*4882a593Smuzhiyun 	omap_pm_ops.finish_suspend(cpu_state);
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
333*4882a593Smuzhiyun 	return 0;
334*4882a593Smuzhiyun }
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun /*
338*4882a593Smuzhiyun  * Enable Mercury Fast HG retention mode by default.
339*4882a593Smuzhiyun  */
enable_mercury_retention_mode(void)340*4882a593Smuzhiyun static void enable_mercury_retention_mode(void)
341*4882a593Smuzhiyun {
342*4882a593Smuzhiyun 	u32 reg;
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 	reg = omap4_prcm_mpu_read_inst_reg(OMAP54XX_PRCM_MPU_DEVICE_INST,
345*4882a593Smuzhiyun 				  OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
346*4882a593Smuzhiyun 	/* Enable HG_EN, HG_RAMPUP = fast mode */
347*4882a593Smuzhiyun 	reg |= BIT(24) | BIT(25);
348*4882a593Smuzhiyun 	omap4_prcm_mpu_write_inst_reg(reg, OMAP54XX_PRCM_MPU_DEVICE_INST,
349*4882a593Smuzhiyun 				      OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
350*4882a593Smuzhiyun }
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun /*
353*4882a593Smuzhiyun  * Initialise OMAP4 MPUSS
354*4882a593Smuzhiyun  */
omap4_mpuss_init(void)355*4882a593Smuzhiyun int __init omap4_mpuss_init(void)
356*4882a593Smuzhiyun {
357*4882a593Smuzhiyun 	struct omap4_cpu_pm_info *pm_info;
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	if (omap_rev() == OMAP4430_REV_ES1_0) {
360*4882a593Smuzhiyun 		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
361*4882a593Smuzhiyun 		return -ENODEV;
362*4882a593Smuzhiyun 	}
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 	/* Initilaise per CPU PM information */
365*4882a593Smuzhiyun 	pm_info = &per_cpu(omap4_pm_info, 0x0);
366*4882a593Smuzhiyun 	if (sar_base) {
367*4882a593Smuzhiyun 		pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
368*4882a593Smuzhiyun 		if (cpu_is_omap44xx())
369*4882a593Smuzhiyun 			pm_info->wkup_sar_addr = sar_base +
370*4882a593Smuzhiyun 				CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
371*4882a593Smuzhiyun 		else
372*4882a593Smuzhiyun 			pm_info->wkup_sar_addr = sar_base +
373*4882a593Smuzhiyun 				OMAP5_CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
374*4882a593Smuzhiyun 		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
375*4882a593Smuzhiyun 	}
376*4882a593Smuzhiyun 	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
377*4882a593Smuzhiyun 	if (!pm_info->pwrdm) {
378*4882a593Smuzhiyun 		pr_err("Lookup failed for CPU0 pwrdm\n");
379*4882a593Smuzhiyun 		return -ENODEV;
380*4882a593Smuzhiyun 	}
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 	/* Clear CPU previous power domain state */
383*4882a593Smuzhiyun 	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
384*4882a593Smuzhiyun 	cpu_clear_prev_logic_pwrst(0);
385*4882a593Smuzhiyun 
386*4882a593Smuzhiyun 	/* Initialise CPU0 power domain state to ON */
387*4882a593Smuzhiyun 	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 	pm_info = &per_cpu(omap4_pm_info, 0x1);
390*4882a593Smuzhiyun 	if (sar_base) {
391*4882a593Smuzhiyun 		pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
392*4882a593Smuzhiyun 		if (cpu_is_omap44xx())
393*4882a593Smuzhiyun 			pm_info->wkup_sar_addr = sar_base +
394*4882a593Smuzhiyun 				CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
395*4882a593Smuzhiyun 		else
396*4882a593Smuzhiyun 			pm_info->wkup_sar_addr = sar_base +
397*4882a593Smuzhiyun 				OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
398*4882a593Smuzhiyun 		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
399*4882a593Smuzhiyun 	}
400*4882a593Smuzhiyun 
401*4882a593Smuzhiyun 	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
402*4882a593Smuzhiyun 	if (!pm_info->pwrdm) {
403*4882a593Smuzhiyun 		pr_err("Lookup failed for CPU1 pwrdm\n");
404*4882a593Smuzhiyun 		return -ENODEV;
405*4882a593Smuzhiyun 	}
406*4882a593Smuzhiyun 
407*4882a593Smuzhiyun 	/* Clear CPU previous power domain state */
408*4882a593Smuzhiyun 	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
409*4882a593Smuzhiyun 	cpu_clear_prev_logic_pwrst(1);
410*4882a593Smuzhiyun 
411*4882a593Smuzhiyun 	/* Initialise CPU1 power domain state to ON */
412*4882a593Smuzhiyun 	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun 	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
415*4882a593Smuzhiyun 	if (!mpuss_pd) {
416*4882a593Smuzhiyun 		pr_err("Failed to lookup MPUSS power domain\n");
417*4882a593Smuzhiyun 		return -ENODEV;
418*4882a593Smuzhiyun 	}
419*4882a593Smuzhiyun 	pwrdm_clear_all_prev_pwrst(mpuss_pd);
420*4882a593Smuzhiyun 	mpuss_clear_prev_logic_pwrst();
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	if (sar_base) {
423*4882a593Smuzhiyun 		/* Save device type on scratchpad for low level code to use */
424*4882a593Smuzhiyun 		writel_relaxed((omap_type() != OMAP2_DEVICE_TYPE_GP) ? 1 : 0,
425*4882a593Smuzhiyun 			       sar_base + OMAP_TYPE_OFFSET);
426*4882a593Smuzhiyun 		save_l2x0_context();
427*4882a593Smuzhiyun 	}
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 	if (cpu_is_omap44xx()) {
430*4882a593Smuzhiyun 		omap_pm_ops.finish_suspend = omap4_finish_suspend;
431*4882a593Smuzhiyun 		omap_pm_ops.resume = omap4_cpu_resume;
432*4882a593Smuzhiyun 		omap_pm_ops.scu_prepare = scu_pwrst_prepare;
433*4882a593Smuzhiyun 		omap_pm_ops.hotplug_restart = omap4_secondary_startup;
434*4882a593Smuzhiyun 		cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET;
435*4882a593Smuzhiyun 	} else if (soc_is_omap54xx() || soc_is_dra7xx()) {
436*4882a593Smuzhiyun 		cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET;
437*4882a593Smuzhiyun 		enable_mercury_retention_mode();
438*4882a593Smuzhiyun 	}
439*4882a593Smuzhiyun 
440*4882a593Smuzhiyun 	if (cpu_is_omap446x())
441*4882a593Smuzhiyun 		omap_pm_ops.hotplug_restart = omap4460_secondary_startup;
442*4882a593Smuzhiyun 
443*4882a593Smuzhiyun 	return 0;
444*4882a593Smuzhiyun }
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun #endif
447*4882a593Smuzhiyun 
omap4_get_cpu1_ns_pa_addr(void)448*4882a593Smuzhiyun u32 omap4_get_cpu1_ns_pa_addr(void)
449*4882a593Smuzhiyun {
450*4882a593Smuzhiyun 	return old_cpu1_ns_pa_addr;
451*4882a593Smuzhiyun }
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun /*
454*4882a593Smuzhiyun  * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to
455*4882a593Smuzhiyun  * current kernel's secondary_startup() early before
456*4882a593Smuzhiyun  * clockdomains_init(). Otherwise clockdomain_init() can
457*4882a593Smuzhiyun  * wake CPU1 and cause a hang.
458*4882a593Smuzhiyun  */
omap4_mpuss_early_init(void)459*4882a593Smuzhiyun void __init omap4_mpuss_early_init(void)
460*4882a593Smuzhiyun {
461*4882a593Smuzhiyun 	unsigned long startup_pa;
462*4882a593Smuzhiyun 	void __iomem *ns_pa_addr;
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun 	if (!(soc_is_omap44xx() || soc_is_omap54xx()))
465*4882a593Smuzhiyun 		return;
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun 	sar_base = omap4_get_sar_ram_base();
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun 	/* Save old NS_PA_ADDR for validity checks later on */
470*4882a593Smuzhiyun 	if (soc_is_omap44xx())
471*4882a593Smuzhiyun 		ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
472*4882a593Smuzhiyun 	else
473*4882a593Smuzhiyun 		ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
474*4882a593Smuzhiyun 	old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	if (soc_is_omap443x())
477*4882a593Smuzhiyun 		startup_pa = __pa_symbol(omap4_secondary_startup);
478*4882a593Smuzhiyun 	else if (soc_is_omap446x())
479*4882a593Smuzhiyun 		startup_pa = __pa_symbol(omap4460_secondary_startup);
480*4882a593Smuzhiyun 	else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
481*4882a593Smuzhiyun 		startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
482*4882a593Smuzhiyun 	else
483*4882a593Smuzhiyun 		startup_pa = __pa_symbol(omap5_secondary_startup);
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun 	if (soc_is_omap44xx())
486*4882a593Smuzhiyun 		writel_relaxed(startup_pa, sar_base +
487*4882a593Smuzhiyun 			       CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
488*4882a593Smuzhiyun 	else
489*4882a593Smuzhiyun 		writel_relaxed(startup_pa, sar_base +
490*4882a593Smuzhiyun 			       OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
491*4882a593Smuzhiyun }
492