xref: /OK3568_Linux_fs/kernel/arch/arm/mach-omap2/cpuidle44xx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * OMAP4+ CPU idle Routines
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2011-2013 Texas Instruments, Inc.
6*4882a593Smuzhiyun  * Santosh Shilimkar <santosh.shilimkar@ti.com>
7*4882a593Smuzhiyun  * Rajendra Nayak <rnayak@ti.com>
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/sched.h>
11*4882a593Smuzhiyun #include <linux/cpuidle.h>
12*4882a593Smuzhiyun #include <linux/cpu_pm.h>
13*4882a593Smuzhiyun #include <linux/export.h>
14*4882a593Smuzhiyun #include <linux/tick.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include <asm/cpuidle.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include "common.h"
19*4882a593Smuzhiyun #include "pm.h"
20*4882a593Smuzhiyun #include "prm.h"
21*4882a593Smuzhiyun #include "soc.h"
22*4882a593Smuzhiyun #include "clockdomain.h"
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #define MAX_CPUS	2
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun /* Machine specific information */
/* Per-state machine specific information for one cpuidle state */
struct idle_statedata {
	u32 cpu_state;		/* target power state for the CPUx powerdomain */
	u32 mpu_logic_state;	/* logic state for the MPU powerdomain (retention vs off) */
	u32 mpu_state;		/* target power state for the MPU powerdomain */
	u32 mpu_state_vote;	/* CPUs voting for mpu_state; protected by mpu_lock */
};
33*4882a593Smuzhiyun 
/* OMAP4 idle state table; indexed by the C-state index from the driver below */
static struct idle_statedata omap4_idle_data[] = {
	{
		/* C1: CPUx ON, MPUSS ON (logic retention programmed but unused) */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C2: CPUx OFF, MPUSS CSWR (retention with logic retained) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C3: CPUx OFF, MPUSS OSWR (retention with logic off) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};
51*4882a593Smuzhiyun 
/* OMAP5 idle state table; indexed by the C-state index from the driver below */
static struct idle_statedata omap5_idle_data[] = {
	{
		/* C1: CPUx WFI, MPUSS ON */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_ON,
	},
	{
		/* C2: CPUx CSWR, MPUSS CSWR (retention, logic retained) */
		.cpu_state = PWRDM_POWER_RET,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
};
64*4882a593Smuzhiyun 
/* MPU and per-CPU powerdomains, resolved once in omap4_idle_init() */
static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
/* Per-CPU clockdomains, used by the coupled path to force CPU1 wakeup */
static struct clockdomain *cpu_clkdm[MAX_CPUS];

/* Barrier the coupled idle path uses to resynchronize both CPUs on exit */
static atomic_t abort_barrier;
/* Set once a CPU has completed omap4_enter_lowpower(); lets CPU0 stop waiting */
static bool cpu_done[MAX_CPUS];
/* Points at the active state table (omap4 or omap5), selected at init */
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
/* Serializes MPU power-state voting in omap_enter_idle_smp() */
static DEFINE_RAW_SPINLOCK(mpu_lock);
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /* Private functions */
74*4882a593Smuzhiyun 
/**
 * omap_enter_idle_[simple/smp/coupled] - OMAP4PLUS cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the low power state entered.
 */
/* C1 entry: simply execute WFI; no powerdomain reprogramming needed */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	omap_do_wfi();
	return index;
}
92*4882a593Smuzhiyun 
omap_enter_idle_smp(struct cpuidle_device * dev,struct cpuidle_driver * drv,int index)93*4882a593Smuzhiyun static int omap_enter_idle_smp(struct cpuidle_device *dev,
94*4882a593Smuzhiyun 			       struct cpuidle_driver *drv,
95*4882a593Smuzhiyun 			       int index)
96*4882a593Smuzhiyun {
97*4882a593Smuzhiyun 	struct idle_statedata *cx = state_ptr + index;
98*4882a593Smuzhiyun 	unsigned long flag;
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	raw_spin_lock_irqsave(&mpu_lock, flag);
101*4882a593Smuzhiyun 	cx->mpu_state_vote++;
102*4882a593Smuzhiyun 	if (cx->mpu_state_vote == num_online_cpus()) {
103*4882a593Smuzhiyun 		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
104*4882a593Smuzhiyun 		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
105*4882a593Smuzhiyun 	}
106*4882a593Smuzhiyun 	raw_spin_unlock_irqrestore(&mpu_lock, flag);
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	raw_spin_lock_irqsave(&mpu_lock, flag);
111*4882a593Smuzhiyun 	if (cx->mpu_state_vote == num_online_cpus())
112*4882a593Smuzhiyun 		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
113*4882a593Smuzhiyun 	cx->mpu_state_vote--;
114*4882a593Smuzhiyun 	raw_spin_unlock_irqrestore(&mpu_lock, flag);
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun 	return index;
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun 
/*
 * Coupled idle entry for OMAP4 C2/C3: both CPUs run this together
 * (CPUIDLE_FLAG_COUPLED).  CPU0 coordinates the MPUSS transition and
 * must be the last CPU down and the first CPU back up.  Returns the
 * index of the state actually entered (may be demoted to 0 on cluster
 * PM notifier failure).
 */
static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;
	u32 mpuss_can_lose_context = 0;
	int error;

	/*
	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
	 * This is necessary to honour hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode.  Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;

		}
	}

	/* MPUSS loses context only in OSWR: retention with logic off */
	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
				 (cx->mpu_logic_state == PWRDM_POWER_OFF);

	/* Enter broadcast mode for periodic timers */
	RCU_NONIDLE(tick_broadcast_enable());

	/* Enter broadcast mode for one-shot timers */
	RCU_NONIDLE(tick_broadcast_enter());

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	error = cpu_pm_enter();
	if (error)
		goto cpu_pm_out;

	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if (mpuss_can_lose_context) {
			error = cpu_cluster_pm_enter();
			if (error) {
				/*
				 * Cluster notifiers refused: demote to the
				 * shallowest state (index 0) and reprogram
				 * the MPU powerdomain accordingly.
				 */
				index = 0;
				cx = state_ptr + index;
				pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
				RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
				mpuss_can_lose_context = 0;
			}
		}
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;

	/* Wakeup CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context)
			gic_dist_disable();

		/* Force CPU1's powerdomain ON so it resumes execution */
		RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
		RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
		RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context) {
			/* Wait until CPU1's boot path re-enables the GIC */
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
		}
	}

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (dev->cpu == 0 && mpuss_can_lose_context)
		cpu_cluster_pm_exit();

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

cpu_pm_out:
	RCU_NONIDLE(tick_broadcast_exit());

fail:
	/* Resynchronize both CPUs before returning to the governor */
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	return index;
}
233*4882a593Smuzhiyun 
/*
 * OMAP4 cpuidle driver.  State indices match omap4_idle_data[];
 * latencies/residencies are in microseconds (cpuidle framework units).
 */
static struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};
268*4882a593Smuzhiyun 
/*
 * OMAP5 cpuidle driver.  State indices match omap5_idle_data[];
 * C2 uses the voting-based SMP path instead of coupled idle.
 */
static struct cpuidle_driver omap5_idle_driver = {
	.name				= "omap5_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx WFI, MPUSS ON"
		},
		{
			/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
			.exit_latency = 48 + 60,
			.target_residency = 100,
			.flags = CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_smp,
			.name = "C2",
			.desc = "CPUx CSWR, MPUSS CSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap5_idle_data),
	.safe_state_index = 0,
};
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun /* Public functions */
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun /**
298*4882a593Smuzhiyun  * omap4_idle_init - Init routine for OMAP4+ idle
299*4882a593Smuzhiyun  *
300*4882a593Smuzhiyun  * Registers the OMAP4+ specific cpuidle driver to the cpuidle
301*4882a593Smuzhiyun  * framework with the valid set of states.
302*4882a593Smuzhiyun  */
omap4_idle_init(void)303*4882a593Smuzhiyun int __init omap4_idle_init(void)
304*4882a593Smuzhiyun {
305*4882a593Smuzhiyun 	struct cpuidle_driver *idle_driver;
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun 	if (soc_is_omap54xx()) {
308*4882a593Smuzhiyun 		state_ptr = &omap5_idle_data[0];
309*4882a593Smuzhiyun 		idle_driver = &omap5_idle_driver;
310*4882a593Smuzhiyun 	} else {
311*4882a593Smuzhiyun 		state_ptr = &omap4_idle_data[0];
312*4882a593Smuzhiyun 		idle_driver = &omap4_idle_driver;
313*4882a593Smuzhiyun 	}
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
316*4882a593Smuzhiyun 	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
317*4882a593Smuzhiyun 	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
318*4882a593Smuzhiyun 	if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
319*4882a593Smuzhiyun 		return -ENODEV;
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
322*4882a593Smuzhiyun 	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
323*4882a593Smuzhiyun 	if (!cpu_clkdm[0] || !cpu_clkdm[1])
324*4882a593Smuzhiyun 		return -ENODEV;
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	return cpuidle_register(idle_driver, cpu_online_mask);
327*4882a593Smuzhiyun }
328