1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * linux/arch/arm/mach-omap2/cpuidle34xx.c
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * OMAP3 CPU IDLE Routines
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Copyright (C) 2008 Texas Instruments, Inc.
8*4882a593Smuzhiyun * Rajendra Nayak <rnayak@ti.com>
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * Copyright (C) 2007 Texas Instruments, Inc.
11*4882a593Smuzhiyun * Karthik Dasu <karthik-dp@ti.com>
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * Copyright (C) 2006 Nokia Corporation
14*4882a593Smuzhiyun * Tony Lindgren <tony@atomide.com>
15*4882a593Smuzhiyun *
16*4882a593Smuzhiyun * Copyright (C) 2005 Texas Instruments, Inc.
17*4882a593Smuzhiyun * Richard Woodruff <r-woodruff2@ti.com>
18*4882a593Smuzhiyun *
19*4882a593Smuzhiyun * Based on pm.c for omap2
20*4882a593Smuzhiyun */
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #include <linux/sched.h>
23*4882a593Smuzhiyun #include <linux/cpuidle.h>
24*4882a593Smuzhiyun #include <linux/export.h>
25*4882a593Smuzhiyun #include <linux/cpu_pm.h>
26*4882a593Smuzhiyun #include <asm/cpuidle.h>
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #include "powerdomain.h"
29*4882a593Smuzhiyun #include "clockdomain.h"
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun #include "pm.h"
32*4882a593Smuzhiyun #include "control.h"
33*4882a593Smuzhiyun #include "common.h"
34*4882a593Smuzhiyun #include "soc.h"
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun /* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
	u8 mpu_state;		/* target MPU powerdomain state (PWRDM_POWER_*) */
	u8 core_state;		/* target CORE powerdomain state (PWRDM_POWER_*) */
	u8 per_min_state;	/* shallowest PER powerdomain state allowed in this C-state */
	u8 flags;		/* OMAP_CPUIDLE_CX_* flag bits, see below */
};
43*4882a593Smuzhiyun
/* Powerdomain handles, looked up once in omap3_idle_init() */
static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun /*
47*4882a593Smuzhiyun * Possible flag bits for struct omap3_idle_statedata.flags:
48*4882a593Smuzhiyun *
49*4882a593Smuzhiyun * OMAP_CPUIDLE_CX_NO_CLKDM_IDLE: don't allow the MPU clockdomain to go
50*4882a593Smuzhiyun * inactive. This in turn prevents the MPU DPLL from entering autoidle
51*4882a593Smuzhiyun * mode, so wakeup latency is greatly reduced, at the cost of additional
52*4882a593Smuzhiyun * energy consumption. This also prevents the CORE clockdomain from
53*4882a593Smuzhiyun * entering idle.
54*4882a593Smuzhiyun */
55*4882a593Smuzhiyun #define OMAP_CPUIDLE_CX_NO_CLKDM_IDLE BIT(0)
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun /*
58*4882a593Smuzhiyun * Prevent PER OFF if CORE is not in RETention or OFF as this would
59*4882a593Smuzhiyun * disable PER wakeups completely.
60*4882a593Smuzhiyun */
static struct omap3_idle_statedata omap3_idle_data[] = {
	/* C1: MPU ON + CORE ON */
	{
		.mpu_state = PWRDM_POWER_ON,
		.core_state = PWRDM_POWER_ON,
		/* In C1 do not allow PER state lower than CORE state */
		.per_min_state = PWRDM_POWER_ON,
		.flags = OMAP_CPUIDLE_CX_NO_CLKDM_IDLE,
	},
	/* C2: MPU ON + CORE ON */
	{
		.mpu_state = PWRDM_POWER_ON,
		.core_state = PWRDM_POWER_ON,
		.per_min_state = PWRDM_POWER_RET,
	},
	/* C3: MPU RET + CORE ON */
	{
		.mpu_state = PWRDM_POWER_RET,
		.core_state = PWRDM_POWER_ON,
		.per_min_state = PWRDM_POWER_RET,
	},
	/* C4: MPU OFF + CORE ON */
	{
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_ON,
		.per_min_state = PWRDM_POWER_RET,
	},
	/* C5: MPU RET + CORE RET */
	{
		.mpu_state = PWRDM_POWER_RET,
		.core_state = PWRDM_POWER_RET,
		.per_min_state = PWRDM_POWER_OFF,
	},
	/* C6: MPU OFF + CORE RET */
	{
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_RET,
		.per_min_state = PWRDM_POWER_OFF,
	},
	/* C7: MPU OFF + CORE OFF */
	{
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_OFF,
		.per_min_state = PWRDM_POWER_OFF,
	},
};
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun /**
102*4882a593Smuzhiyun * omap3_enter_idle - Programs OMAP3 to enter the specified state
103*4882a593Smuzhiyun * @dev: cpuidle device
104*4882a593Smuzhiyun * @drv: cpuidle driver
105*4882a593Smuzhiyun * @index: the index of state to be entered
106*4882a593Smuzhiyun */
omap3_enter_idle(struct cpuidle_device * dev,struct cpuidle_driver * drv,int index)107*4882a593Smuzhiyun static int omap3_enter_idle(struct cpuidle_device *dev,
108*4882a593Smuzhiyun struct cpuidle_driver *drv,
109*4882a593Smuzhiyun int index)
110*4882a593Smuzhiyun {
111*4882a593Smuzhiyun struct omap3_idle_statedata *cx = &omap3_idle_data[index];
112*4882a593Smuzhiyun int error;
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun if (omap_irq_pending() || need_resched())
115*4882a593Smuzhiyun goto return_sleep_time;
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun /* Deny idle for C1 */
118*4882a593Smuzhiyun if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE) {
119*4882a593Smuzhiyun clkdm_deny_idle(mpu_pd->pwrdm_clkdms[0]);
120*4882a593Smuzhiyun } else {
121*4882a593Smuzhiyun pwrdm_set_next_pwrst(mpu_pd, cx->mpu_state);
122*4882a593Smuzhiyun pwrdm_set_next_pwrst(core_pd, cx->core_state);
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun /*
126*4882a593Smuzhiyun * Call idle CPU PM enter notifier chain so that
127*4882a593Smuzhiyun * VFP context is saved.
128*4882a593Smuzhiyun */
129*4882a593Smuzhiyun if (cx->mpu_state == PWRDM_POWER_OFF) {
130*4882a593Smuzhiyun error = cpu_pm_enter();
131*4882a593Smuzhiyun if (error)
132*4882a593Smuzhiyun goto out_clkdm_set;
133*4882a593Smuzhiyun }
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun /* Execute ARM wfi */
136*4882a593Smuzhiyun omap_sram_idle();
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun /*
139*4882a593Smuzhiyun * Call idle CPU PM enter notifier chain to restore
140*4882a593Smuzhiyun * VFP context.
141*4882a593Smuzhiyun */
142*4882a593Smuzhiyun if (cx->mpu_state == PWRDM_POWER_OFF &&
143*4882a593Smuzhiyun pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
144*4882a593Smuzhiyun cpu_pm_exit();
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun out_clkdm_set:
147*4882a593Smuzhiyun /* Re-allow idle for C1 */
148*4882a593Smuzhiyun if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
149*4882a593Smuzhiyun clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun return_sleep_time:
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun return index;
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun /**
157*4882a593Smuzhiyun * next_valid_state - Find next valid C-state
158*4882a593Smuzhiyun * @dev: cpuidle device
159*4882a593Smuzhiyun * @drv: cpuidle driver
160*4882a593Smuzhiyun * @index: Index of currently selected c-state
161*4882a593Smuzhiyun *
162*4882a593Smuzhiyun * If the state corresponding to index is valid, index is returned back
163*4882a593Smuzhiyun * to the caller. Else, this function searches for a lower c-state which is
164*4882a593Smuzhiyun * still valid (as defined in omap3_power_states[]) and returns its index.
165*4882a593Smuzhiyun *
166*4882a593Smuzhiyun * A state is valid if the 'valid' field is enabled and
167*4882a593Smuzhiyun * if it satisfies the enable_off_mode condition.
168*4882a593Smuzhiyun */
next_valid_state(struct cpuidle_device * dev,struct cpuidle_driver * drv,int index)169*4882a593Smuzhiyun static int next_valid_state(struct cpuidle_device *dev,
170*4882a593Smuzhiyun struct cpuidle_driver *drv, int index)
171*4882a593Smuzhiyun {
172*4882a593Smuzhiyun struct omap3_idle_statedata *cx = &omap3_idle_data[index];
173*4882a593Smuzhiyun u32 mpu_deepest_state = PWRDM_POWER_RET;
174*4882a593Smuzhiyun u32 core_deepest_state = PWRDM_POWER_RET;
175*4882a593Smuzhiyun int idx;
176*4882a593Smuzhiyun int next_index = 0; /* C1 is the default value */
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun if (enable_off_mode) {
179*4882a593Smuzhiyun mpu_deepest_state = PWRDM_POWER_OFF;
180*4882a593Smuzhiyun /*
181*4882a593Smuzhiyun * Erratum i583: valable for ES rev < Es1.2 on 3630.
182*4882a593Smuzhiyun * CORE OFF mode is not supported in a stable form, restrict
183*4882a593Smuzhiyun * instead the CORE state to RET.
184*4882a593Smuzhiyun */
185*4882a593Smuzhiyun if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
186*4882a593Smuzhiyun core_deepest_state = PWRDM_POWER_OFF;
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun /* Check if current state is valid */
190*4882a593Smuzhiyun if ((cx->mpu_state >= mpu_deepest_state) &&
191*4882a593Smuzhiyun (cx->core_state >= core_deepest_state))
192*4882a593Smuzhiyun return index;
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun /*
195*4882a593Smuzhiyun * Drop to next valid state.
196*4882a593Smuzhiyun * Start search from the next (lower) state.
197*4882a593Smuzhiyun */
198*4882a593Smuzhiyun for (idx = index - 1; idx >= 0; idx--) {
199*4882a593Smuzhiyun cx = &omap3_idle_data[idx];
200*4882a593Smuzhiyun if ((cx->mpu_state >= mpu_deepest_state) &&
201*4882a593Smuzhiyun (cx->core_state >= core_deepest_state)) {
202*4882a593Smuzhiyun next_index = idx;
203*4882a593Smuzhiyun break;
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun return next_index;
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun /**
211*4882a593Smuzhiyun * omap3_enter_idle_bm - Checks for any bus activity
212*4882a593Smuzhiyun * @dev: cpuidle device
213*4882a593Smuzhiyun * @drv: cpuidle driver
214*4882a593Smuzhiyun * @index: array index of target state to be programmed
215*4882a593Smuzhiyun *
216*4882a593Smuzhiyun * This function checks for any pending activity and then programs
217*4882a593Smuzhiyun * the device to the specified or a safer state.
218*4882a593Smuzhiyun */
omap3_enter_idle_bm(struct cpuidle_device * dev,struct cpuidle_driver * drv,int index)219*4882a593Smuzhiyun static int omap3_enter_idle_bm(struct cpuidle_device *dev,
220*4882a593Smuzhiyun struct cpuidle_driver *drv,
221*4882a593Smuzhiyun int index)
222*4882a593Smuzhiyun {
223*4882a593Smuzhiyun int new_state_idx, ret;
224*4882a593Smuzhiyun u8 per_next_state, per_saved_state;
225*4882a593Smuzhiyun struct omap3_idle_statedata *cx;
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun /*
228*4882a593Smuzhiyun * Use only C1 if CAM is active.
229*4882a593Smuzhiyun * CAM does not have wakeup capability in OMAP3.
230*4882a593Smuzhiyun */
231*4882a593Smuzhiyun if (pwrdm_read_pwrst(cam_pd) == PWRDM_POWER_ON)
232*4882a593Smuzhiyun new_state_idx = drv->safe_state_index;
233*4882a593Smuzhiyun else
234*4882a593Smuzhiyun new_state_idx = next_valid_state(dev, drv, index);
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun /*
237*4882a593Smuzhiyun * FIXME: we currently manage device-specific idle states
238*4882a593Smuzhiyun * for PER and CORE in combination with CPU-specific
239*4882a593Smuzhiyun * idle states. This is wrong, and device-specific
240*4882a593Smuzhiyun * idle management needs to be separated out into
241*4882a593Smuzhiyun * its own code.
242*4882a593Smuzhiyun */
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun /* Program PER state */
245*4882a593Smuzhiyun cx = &omap3_idle_data[new_state_idx];
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun per_next_state = pwrdm_read_next_pwrst(per_pd);
248*4882a593Smuzhiyun per_saved_state = per_next_state;
249*4882a593Smuzhiyun if (per_next_state < cx->per_min_state) {
250*4882a593Smuzhiyun per_next_state = cx->per_min_state;
251*4882a593Smuzhiyun pwrdm_set_next_pwrst(per_pd, per_next_state);
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun ret = omap3_enter_idle(dev, drv, new_state_idx);
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun /* Restore original PER state if it was modified */
257*4882a593Smuzhiyun if (per_next_state != per_saved_state)
258*4882a593Smuzhiyun pwrdm_set_next_pwrst(per_pd, per_saved_state);
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun return ret;
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun
/*
 * Generic OMAP3 idle driver.  Latencies/residencies are in microseconds
 * (cpuidle convention); each exit_latency is written as wakeup + sleep
 * contribution.  Entries map 1:1 onto omap3_idle_data[] above.
 */
static struct cpuidle_driver omap3_idle_driver = {
	.name =		"omap3_idle",
	.owner =	THIS_MODULE,
	.states = {
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 2 + 2,
			.target_residency = 5,
			.name		  = "C1",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10 + 10,
			.target_residency = 30,
			.name		  = "C2",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 50 + 50,
			.target_residency = 300,
			.name		  = "C3",
			.desc		  = "MPU RET + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 1500 + 1800,
			.target_residency = 4000,
			.name		  = "C4",
			.desc		  = "MPU OFF + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 2500 + 7500,
			.target_residency = 12000,
			.name		  = "C5",
			.desc		  = "MPU RET + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 3000 + 8500,
			.target_residency = 15000,
			.name		  = "C6",
			.desc		  = "MPU OFF + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10000 + 30000,
			.target_residency = 30000,
			.name		  = "C7",
			.desc		  = "MPU OFF + CORE OFF",
		},
	},
	.state_count = ARRAY_SIZE(omap3_idle_data),
	.safe_state_index = 0,
};
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun /*
322*4882a593Smuzhiyun * Numbers based on measurements made in October 2009 for PM optimized kernel
323*4882a593Smuzhiyun * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
324*4882a593Smuzhiyun * and worst case latencies).
325*4882a593Smuzhiyun */
/*
 * OMAP3430-specific idle driver with measured latencies (see the
 * N900 measurement note above).  Same state layout as
 * omap3_idle_driver; only the timing numbers differ.
 */
static struct cpuidle_driver omap3430_idle_driver = {
	.name =		"omap3430_idle",
	.owner =	THIS_MODULE,
	.states = {
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 110 + 162,
			.target_residency = 5,
			.name		  = "C1",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 106 + 180,
			.target_residency = 309,
			.name		  = "C2",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 107 + 410,
			.target_residency = 46057,
			.name		  = "C3",
			.desc		  = "MPU RET + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 121 + 3374,
			.target_residency = 46057,
			.name		  = "C4",
			.desc		  = "MPU OFF + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 855 + 1146,
			.target_residency = 46057,
			.name		  = "C5",
			.desc		  = "MPU RET + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 7580 + 4134,
			.target_residency = 484329,
			.name		  = "C6",
			.desc		  = "MPU OFF + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 7505 + 15274,
			.target_residency = 484329,
			.name		  = "C7",
			.desc		  = "MPU OFF + CORE OFF",
		},
	},
	.state_count = ARRAY_SIZE(omap3_idle_data),
	.safe_state_index = 0,
};
383*4882a593Smuzhiyun
384*4882a593Smuzhiyun /* Public functions */
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun /**
387*4882a593Smuzhiyun * omap3_idle_init - Init routine for OMAP3 idle
388*4882a593Smuzhiyun *
389*4882a593Smuzhiyun * Registers the OMAP3 specific cpuidle driver to the cpuidle
390*4882a593Smuzhiyun * framework with the valid set of states.
391*4882a593Smuzhiyun */
omap3_idle_init(void)392*4882a593Smuzhiyun int __init omap3_idle_init(void)
393*4882a593Smuzhiyun {
394*4882a593Smuzhiyun mpu_pd = pwrdm_lookup("mpu_pwrdm");
395*4882a593Smuzhiyun core_pd = pwrdm_lookup("core_pwrdm");
396*4882a593Smuzhiyun per_pd = pwrdm_lookup("per_pwrdm");
397*4882a593Smuzhiyun cam_pd = pwrdm_lookup("cam_pwrdm");
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
400*4882a593Smuzhiyun return -ENODEV;
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun if (cpu_is_omap3430())
403*4882a593Smuzhiyun return cpuidle_register(&omap3430_idle_driver, NULL);
404*4882a593Smuzhiyun else
405*4882a593Smuzhiyun return cpuidle_register(&omap3_idle_driver, NULL);
406*4882a593Smuzhiyun }
407