// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU idle driver for Tegra CPUs
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 * Copyright (c) 2011 Google, Inc.
 * Author: Colin Cross <ccross@android.com>
 *         Gary King <gking@nvidia.com>
 *
 * Rework for 3.3 by Peter De Schrijver <pdeschrijver@nvidia.com>
 *
 * Tegra20/124 driver unification by Dmitry Osipenko <digetx@gmail.com>
 */

#define pr_fmt(fmt)	"tegra-cpuidle: " fmt

#include <linux/atomic.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include <linux/clk/tegra.h>
#include <linux/firmware/trusted_foundations.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/irq.h>
#include <soc/tegra/pm.h>
#include <soc/tegra/pmc.h>

#include <asm/cpuidle.h>
#include <asm/firmware.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

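/*
 * Idle-state indices used by this driver: C1 is the generic WFI
 * (clock-gating) state, C7 power-gates an individual CPU core and
 * CC6 power-gates the whole CPU cluster.
 */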
enum tegra_state {
	TEGRA_C1,
	TEGRA_C7,
	TEGRA_CC6,
	TEGRA_STATE_COUNT,
};

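/*
 * Synchronization for the coupled CC6 state: all CPUs rendezvous on
 * tegra_idle_barrier, while tegra_abort_flag lets any CPU abort the
 * cluster power-off attempt.
 */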
static atomic_t tegra_idle_barrier;
static atomic_t tegra_abort_flag;

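/*
 * True when a secure firmware (such as Trusted Foundations) provides
 * the prepare_idle/do_idle hooks used for CPU power-gating.
 */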
static inline bool tegra_cpuidle_using_firmware(void)
{
	return firmware_ops->prepare_idle && firmware_ops->do_idle;
}

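/*
 * Dump the online status and Flow Controller CSR of every possible CPU.
 * Used for debugging when the secondaries fail to park in time.
 */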
static void tegra_cpuidle_report_cpus_state(void)
{
	unsigned long cpu, lcpu, csr;

	for_each_cpu(lcpu, cpu_possible_mask) {
		cpu = cpu_logical_map(lcpu);
		csr = flowctrl_read_cpu_csr(cpu);

		pr_err("cpu%lu: online=%d flowctrl_csr=0x%08lx\n",
		       cpu, cpu_online(lcpu), csr);
	}
}

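/*
 * Wait for all secondary cores to reach the point where the CPU rail
 * can be switched off, retrying up to three times (about 500ms of
 * polling per attempt) before giving up.
 */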
static int tegra_cpuidle_wait_for_secondary_cpus_parking(void)
{
	unsigned int retries = 3;

	while (retries--) {
		unsigned int delay_us = 10;
		unsigned int timeout_us = 500 * 1000 / delay_us;

		/*
		 * The primary CPU0 core shall wait for the secondaries to
		 * shut down in order to power off the CPU cluster safely.
		 * The timeout value depends on the current CPU frequency;
		 * it takes about 40-150us on average and over 1000us in
		 * a worst-case scenario.
		 */
		do {
			if (tegra_cpu_rail_off_ready())
				return 0;

			udelay(delay_us);

		} while (timeout_us--);

		pr_err("secondary CPU taking too long to park\n");

		tegra_cpuidle_report_cpus_state();
	}

	pr_err("timed out waiting secondaries to park\n");

	return -ETIMEDOUT;
}

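/*
 * Bring the parked secondary CPUs back to life: re-enable their clocks,
 * release them from reset and clear the Flow Controller halt request.
 */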
static void tegra_cpuidle_unpark_secondary_cpus(void)
{
	unsigned int cpu, lcpu;

	for_each_cpu(lcpu, cpu_online_mask) {
		cpu = cpu_logical_map(lcpu);

		if (cpu > 0) {
			tegra_enable_cpu_clock(cpu);
			tegra_cpu_out_of_reset(cpu);
			flowctrl_write_cpu_halt(cpu, 0);
		}
	}
}

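/*
 * Cluster power-off entry: secondary cores park themselves via
 * cpu_suspend(), while the boot CPU waits for them, powers the cluster
 * down through tegra_pm_enter_lp2() and unparks the secondaries on exit.
 */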
static int tegra_cpuidle_cc6_enter(unsigned int cpu)
{
	int ret;

	if (cpu > 0) {
		ret = cpu_suspend(cpu, tegra_pm_park_secondary_cpu);
	} else {
		ret = tegra_cpuidle_wait_for_secondary_cpus_parking();
		if (!ret)
			ret = tegra_pm_enter_lp2();

		tegra_cpuidle_unpark_secondary_cpus();
	}

	return ret;
}

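/*
 * Core power-off entry: prefer the secure firmware's do_idle hook when
 * it is implemented, otherwise fall back to cpu_suspend() with the
 * Tegra30 secondary-CPU suspend handler.
 */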
static int tegra_cpuidle_c7_enter(void)
{
	int err;

	err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
	if (err && err != -ENOSYS)
		return err;

	err = call_firmware_op(do_idle, 0);
	if (err != -ENOSYS)
		return err;

	return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend);
}

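/*
 * Rendezvous point for the coupled CC6 state; returns -EINTR when the
 * cluster power-off has to be aborted because some CPU has a pending SGI.
 */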
static int tegra_cpuidle_coupled_barrier(struct cpuidle_device *dev)
{
	if (tegra_pending_sgi()) {
		/*
		 * The CPU got a local interrupt that will be lost after the
		 * GIC's shutdown because the GIC driver doesn't save/restore
		 * the pending SGI state across CPU cluster PM.  Abort and
		 * retry next time.
		 */
		atomic_set(&tegra_abort_flag, 1);
	}

	cpuidle_coupled_parallel_barrier(dev, &tegra_idle_barrier);

	if (atomic_read(&tegra_abort_flag)) {
		cpuidle_coupled_parallel_barrier(dev, &tegra_idle_barrier);
		atomic_set(&tegra_abort_flag, 0);
		return -EINTR;
	}

	return 0;
}

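/*
 * Common entry path for the power-gating states: FIQs are masked and
 * the CPU PM notifiers plus LP2 accounting are run around the actual
 * state entry.
 */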
static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
				     int index, unsigned int cpu)
{
	int err;

	/*
	 * CC6 is the "CPU cluster power-off" state.  In order to enter it,
	 * the secondary CPU cores first need to be parked into offline
	 * mode, then the last CPU should clean out the remaining dirty
	 * cache lines into DRAM and trigger the Flow Controller logic that
	 * turns off the cluster's power domain (which includes the CPU
	 * cores, GIC and L2 cache).
	 */
	if (index == TEGRA_CC6) {
		err = tegra_cpuidle_coupled_barrier(dev);
		if (err)
			return err;
	}

	local_fiq_disable();
	RCU_NONIDLE(tegra_pm_set_cpu_in_lp2());
	cpu_pm_enter();

	switch (index) {
	case TEGRA_C7:
		err = tegra_cpuidle_c7_enter();
		break;

	case TEGRA_CC6:
		err = tegra_cpuidle_cc6_enter(cpu);
		break;

	default:
		err = -EINVAL;
		break;
	}

	cpu_pm_exit();
	RCU_NONIDLE(tegra_pm_clear_cpu_in_lp2());
	local_fiq_enable();

	return err ?: index;
}

static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu)
{
	/*
	 * On Tegra30 CPU0 can't be power-gated separately from secondary
	 * cores because it gates the whole CPU cluster.
	 */
	if (cpu > 0 || index != TEGRA_C7 || tegra_get_chip_id() != TEGRA30)
		return index;

	/* put CPU0 into C1 if C7 is requested and secondaries are online */
	if (!IS_ENABLED(CONFIG_PM_SLEEP) || num_online_cpus() > 1)
		index = TEGRA_C1;
	else
		index = TEGRA_CC6;

	return index;
}

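/*
 * cpuidle ->enter callback shared by all states: map the logical CPU to
 * its physical ID, let Tegra30 adjust the requested state and dispatch
 * either to the simple WFI path (C1) or to the power-gating path.
 */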
static int tegra_cpuidle_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv,
			       int index)
{
	unsigned int cpu = cpu_logical_map(dev->cpu);
	int ret;

	index = tegra_cpuidle_adjust_state_index(index, cpu);
	if (dev->states_usage[index].disable)
		return -1;

	if (index == TEGRA_C1)
		ret = arm_cpuidle_simple_enter(dev, drv, index);
	else
		ret = tegra_cpuidle_state_enter(dev, index, cpu);

	if (ret < 0) {
		if (ret != -EINTR || index != TEGRA_CC6)
			pr_err_once("failed to enter state %d err: %d\n",
				    index, ret);
		index = -1;
	} else {
		index = ret;
	}

	return index;
}

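/*
 * Suspend-to-idle variant of the C7 entry on Tegra114/124; the return
 * value of the normal entry path is ignored here.
 */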
static int tegra114_enter_s2idle(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv,
				 int index)
{
	tegra_cpuidle_enter(dev, drv, index);

	return 0;
}

/*
 * Previous versions of the Tegra CPUIDLE driver used a different "legacy"
 * terminology for naming the idle states, while this driver uses the new
 * terminology.
 *
 * Mapping of the old terms to the new ones:
 *
 * Old | New
 * ---------
 * LP3 | C1	(CPU core clock gating)
 * LP2 | C7	(CPU core power gating)
 * LP2 | CC6	(CPU cluster power gating)
 *
 * Note that the older CPUIDLE driver versions didn't explicitly
 * differentiate the LP2 states because these states either used the same
 * code path or CC6 wasn't supported.
 */
static struct cpuidle_driver tegra_idle_driver = {
	.name = "tegra_idle",
	.states = {
		[TEGRA_C1] = ARM_CPUIDLE_WFI_STATE_PWR(600),
		[TEGRA_C7] = {
			.enter			= tegra_cpuidle_enter,
			.exit_latency		= 2000,
			.target_residency	= 2200,
			.power_usage		= 100,
			.flags			= CPUIDLE_FLAG_TIMER_STOP,
			.name			= "C7",
			.desc			= "CPU core powered off",
		},
		[TEGRA_CC6] = {
			.enter			= tegra_cpuidle_enter,
			.exit_latency		= 5000,
			.target_residency	= 10000,
			.power_usage		= 0,
			.flags			= CPUIDLE_FLAG_TIMER_STOP |
						  CPUIDLE_FLAG_COUPLED,
			.name			= "CC6",
			.desc			= "CPU cluster powered off",
		},
	},
	.state_count = TEGRA_STATE_COUNT,
	.safe_state_index = TEGRA_C1,
};

static inline void tegra_cpuidle_disable_state(enum tegra_state state)
{
	cpuidle_driver_state_disabled(&tegra_idle_driver, state, true);
}

/*
 * Tegra20 HW appears to have a bug such that PCIe device interrupts, whether
 * they are legacy IRQs or MSI, are lost when CC6 is enabled.  To work around
 * this, simply disable CC6 if the PCI driver and DT node are both enabled.
 */
void tegra_cpuidle_pcie_irqs_in_use(void)
{
	struct cpuidle_state *state_cc6 = &tegra_idle_driver.states[TEGRA_CC6];

	if ((state_cc6->flags & CPUIDLE_FLAG_UNUSABLE) ||
	    tegra_get_chip_id() != TEGRA20)
		return;

	pr_info("disabling CC6 state, since PCIe IRQs are in use\n");
	tegra_cpuidle_disable_state(TEGRA_CC6);
}

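/*
 * On Tegra114/124 the C7 state is cheaper to enter than on older SoCs
 * and additionally serves as the suspend-to-idle state.
 */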
static void tegra_cpuidle_setup_tegra114_c7_state(void)
{
	struct cpuidle_state *s = &tegra_idle_driver.states[TEGRA_C7];

	s->enter_s2idle = tegra114_enter_s2idle;
	s->target_residency = 1000;
	s->exit_latency = 500;
}

static int tegra_cpuidle_probe(struct platform_device *pdev)
{
	/* LP2 could be disabled in device-tree */
	if (tegra_pmc_get_suspend_mode() < TEGRA_SUSPEND_LP2)
		tegra_cpuidle_disable_state(TEGRA_CC6);

	/*
	 * Required suspend-resume functionality, which is provided by the
	 * Tegra-arch core and PMC driver, is unavailable if PM-sleep option
	 * is disabled.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP)) {
		if (!tegra_cpuidle_using_firmware())
			tegra_cpuidle_disable_state(TEGRA_C7);

		tegra_cpuidle_disable_state(TEGRA_CC6);
	}

	/*
	 * Generic WFI state (also known as C1 or LP3) and the coupled CPU
	 * cluster power-off (CC6 or LP2) states are common for all Tegra SoCs.
	 */
	switch (tegra_get_chip_id()) {
	case TEGRA20:
		/* Tegra20 isn't capable of powering off individual CPU cores */
		tegra_cpuidle_disable_state(TEGRA_C7);
		break;

	case TEGRA30:
		break;

	case TEGRA114:
	case TEGRA124:
		tegra_cpuidle_setup_tegra114_c7_state();

		/* coupled CC6 (LP2) state isn't implemented yet */
		tegra_cpuidle_disable_state(TEGRA_CC6);
		break;

	default:
		return -EINVAL;
	}

	return cpuidle_register(&tegra_idle_driver, cpu_possible_mask);
}


static struct platform_driver tegra_cpuidle_driver = {
	.probe = tegra_cpuidle_probe,
	.driver = {
		.name = "tegra-cpuidle",
	},
};
builtin_platform_driver(tegra_cpuidle_driver);