xref: /OK3568_Linux_fs/kernel/drivers/cpuidle/cpuidle-ux500.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linaro : Daniel Lezcano <daniel.lezcano@linaro.org> (IBM)
 *
 * Based on the work of Rickard Andersson <rickard.andersson@stericsson.com>
 * and Jonas Aaberg <jonas.aberg@stericsson.com>.
 */
9*4882a593Smuzhiyun #include <linux/init.h>
10*4882a593Smuzhiyun #include <linux/cpuidle.h>
11*4882a593Smuzhiyun #include <linux/spinlock.h>
12*4882a593Smuzhiyun #include <linux/atomic.h>
13*4882a593Smuzhiyun #include <linux/smp.h>
14*4882a593Smuzhiyun #include <linux/mfd/dbx500-prcmu.h>
15*4882a593Smuzhiyun #include <linux/platform_data/arm-ux500-pm.h>
16*4882a593Smuzhiyun #include <linux/platform_device.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include <asm/cpuidle.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun static atomic_t master = ATOMIC_INIT(0);
21*4882a593Smuzhiyun static DEFINE_SPINLOCK(master_lock);
22*4882a593Smuzhiyun 
ux500_enter_idle(struct cpuidle_device * dev,struct cpuidle_driver * drv,int index)23*4882a593Smuzhiyun static inline int ux500_enter_idle(struct cpuidle_device *dev,
24*4882a593Smuzhiyun 				   struct cpuidle_driver *drv, int index)
25*4882a593Smuzhiyun {
26*4882a593Smuzhiyun 	int this_cpu = smp_processor_id();
27*4882a593Smuzhiyun 	bool recouple = false;
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun 	if (atomic_inc_return(&master) == num_online_cpus()) {
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun 		/* With this lock, we prevent the other cpu to exit and enter
32*4882a593Smuzhiyun 		 * this function again and become the master */
33*4882a593Smuzhiyun 		if (!spin_trylock(&master_lock))
34*4882a593Smuzhiyun 			goto wfi;
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun 		/* decouple the gic from the A9 cores */
37*4882a593Smuzhiyun 		if (prcmu_gic_decouple()) {
38*4882a593Smuzhiyun 			spin_unlock(&master_lock);
39*4882a593Smuzhiyun 			goto out;
40*4882a593Smuzhiyun 		}
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 		/* If an error occur, we will have to recouple the gic
43*4882a593Smuzhiyun 		 * manually */
44*4882a593Smuzhiyun 		recouple = true;
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun 		/* At this state, as the gic is decoupled, if the other
47*4882a593Smuzhiyun 		 * cpu is in WFI, we have the guarantee it won't be wake
48*4882a593Smuzhiyun 		 * up, so we can safely go to retention */
49*4882a593Smuzhiyun 		if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))
50*4882a593Smuzhiyun 			goto out;
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun 		/* The prcmu will be in charge of watching the interrupts
53*4882a593Smuzhiyun 		 * and wake up the cpus */
54*4882a593Smuzhiyun 		if (prcmu_copy_gic_settings())
55*4882a593Smuzhiyun 			goto out;
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 		/* Check in the meantime an interrupt did
58*4882a593Smuzhiyun 		 * not occur on the gic ... */
59*4882a593Smuzhiyun 		if (prcmu_gic_pending_irq())
60*4882a593Smuzhiyun 			goto out;
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun 		/* ... and the prcmu */
63*4882a593Smuzhiyun 		if (prcmu_pending_irq())
64*4882a593Smuzhiyun 			goto out;
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 		/* Go to the retention state, the prcmu will wait for the
67*4882a593Smuzhiyun 		 * cpu to go WFI and this is what happens after exiting this
68*4882a593Smuzhiyun 		 * 'master' critical section */
69*4882a593Smuzhiyun 		if (prcmu_set_power_state(PRCMU_AP_IDLE, true, true))
70*4882a593Smuzhiyun 			goto out;
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun 		/* When we switch to retention, the prcmu is in charge
73*4882a593Smuzhiyun 		 * of recoupling the gic automatically */
74*4882a593Smuzhiyun 		recouple = false;
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 		spin_unlock(&master_lock);
77*4882a593Smuzhiyun 	}
78*4882a593Smuzhiyun wfi:
79*4882a593Smuzhiyun 	cpu_do_idle();
80*4882a593Smuzhiyun out:
81*4882a593Smuzhiyun 	atomic_dec(&master);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	if (recouple) {
84*4882a593Smuzhiyun 		prcmu_gic_recouple();
85*4882a593Smuzhiyun 		spin_unlock(&master_lock);
86*4882a593Smuzhiyun 	}
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	return index;
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun static struct cpuidle_driver ux500_idle_driver = {
92*4882a593Smuzhiyun 	.name = "ux500_idle",
93*4882a593Smuzhiyun 	.owner = THIS_MODULE,
94*4882a593Smuzhiyun 	.states = {
95*4882a593Smuzhiyun 		ARM_CPUIDLE_WFI_STATE,
96*4882a593Smuzhiyun 		{
97*4882a593Smuzhiyun 			.enter		  = ux500_enter_idle,
98*4882a593Smuzhiyun 			.exit_latency	  = 70,
99*4882a593Smuzhiyun 			.target_residency = 260,
100*4882a593Smuzhiyun 			.flags		  = CPUIDLE_FLAG_TIMER_STOP,
101*4882a593Smuzhiyun 			.name		  = "ApIdle",
102*4882a593Smuzhiyun 			.desc		  = "ARM Retention",
103*4882a593Smuzhiyun 		},
104*4882a593Smuzhiyun 	},
105*4882a593Smuzhiyun 	.safe_state_index = 0,
106*4882a593Smuzhiyun 	.state_count = 2,
107*4882a593Smuzhiyun };
108*4882a593Smuzhiyun 
dbx500_cpuidle_probe(struct platform_device * pdev)109*4882a593Smuzhiyun static int dbx500_cpuidle_probe(struct platform_device *pdev)
110*4882a593Smuzhiyun {
111*4882a593Smuzhiyun 	/* Configure wake up reasons */
112*4882a593Smuzhiyun 	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
113*4882a593Smuzhiyun 			     PRCMU_WAKEUP(ABB));
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun 	return cpuidle_register(&ux500_idle_driver, NULL);
116*4882a593Smuzhiyun }
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun static struct platform_driver dbx500_cpuidle_plat_driver = {
119*4882a593Smuzhiyun 	.driver = {
120*4882a593Smuzhiyun 		.name = "cpuidle-dbx500",
121*4882a593Smuzhiyun 	},
122*4882a593Smuzhiyun 	.probe = dbx500_cpuidle_probe,
123*4882a593Smuzhiyun };
124*4882a593Smuzhiyun builtin_platform_driver(dbx500_cpuidle_plat_driver);
125