// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/cpu_pm.h>
#include <linux/cpuidle.h>
#include <linux/init.h>

#include <asm/idle.h>
#include <asm/pm-cps.h>

/* Enumeration of the various idle states this driver may enter */
enum cps_idle_state {
	STATE_WAIT = 0,		/* MIPS wait instruction, coherent */
	STATE_NC_WAIT,		/* MIPS wait instruction, non-coherent */
	STATE_CLOCK_GATED,	/* Core clock gated */
	STATE_POWER_GATED,	/* Core power gated */
	STATE_COUNT
};

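/*
 * cpuidle ->enter() callback used for all of the non-coherent states.
 * Returns the index of the state actually entered, which may be shallower
 * than the one requested, or a negative error code.
 */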
static int cps_nc_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	enum cps_pm_state pm_state;
	int err;

	/*
	 * At least one core must remain powered up & clocked in order for the
	 * system to have any hope of functioning.
	 *
	 * TODO: don't treat core 0 specially, just prevent the final core
	 * TODO: remap interrupt affinity temporarily
	 */
	if (cpus_are_siblings(0, dev->cpu) && (index > STATE_NC_WAIT))
		index = STATE_NC_WAIT;

	/* Select the appropriate cps_pm_state */
	switch (index) {
	case STATE_NC_WAIT:
		pm_state = CPS_PM_NC_WAIT;
		break;
	case STATE_CLOCK_GATED:
		pm_state = CPS_PM_CLOCK_GATED;
		break;
	case STATE_POWER_GATED:
		pm_state = CPS_PM_POWER_GATED;
		break;
	default:
		BUG();
		return -EINVAL;
	}

	/* Notify listeners the CPU is about to power down */
	if ((pm_state == CPS_PM_POWER_GATED) && cpu_pm_enter())
		return -EINTR;

	/* Enter that state */
	err = cps_pm_enter_state(pm_state);

	/* Notify listeners the CPU is back up */
	if (pm_state == CPS_PM_POWER_GATED)
		cpu_pm_exit();

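	/*
	 * cps_pm_enter_state() returns 0 on success, in which case report the
	 * index of the state that was entered; otherwise propagate the error
	 * back to the cpuidle core.
	 */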
	return err ?: index;
}

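/*
 * Note: exit_latency and target_residency below are expressed in
 * microseconds, as for any struct cpuidle_state.
 */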
static struct cpuidle_driver cps_driver = {
	.name			= "cpc_cpuidle",
	.owner			= THIS_MODULE,
	.states = {
		[STATE_WAIT] = MIPS_CPUIDLE_WAIT_STATE,
		[STATE_NC_WAIT] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 200,
			.target_residency	= 450,
			.name	= "nc-wait",
			.desc	= "non-coherent MIPS wait",
		},
		[STATE_CLOCK_GATED] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 300,
			.target_residency	= 700,
			.flags	= CPUIDLE_FLAG_TIMER_STOP,
			.name	= "clock-gated",
			.desc	= "core clock gated",
		},
		[STATE_POWER_GATED] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 600,
			.target_residency	= 1000,
			.flags	= CPUIDLE_FLAG_TIMER_STOP,
			.name	= "power-gated",
			.desc	= "core power gated",
		},
	},
	.state_count		= STATE_COUNT,
	.safe_state_index	= 0,
};

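/*
 * Undo cps_cpuidle_init(): unregister every per-CPU cpuidle device before
 * unregistering the driver itself.
 */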
static void __init cps_cpuidle_unregister(void)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_possible_cpu(cpu) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(&cps_driver);
}

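/*
 * Probe which idle states the platform supports (via cps_pm_support_state()),
 * trim the state list accordingly, then register the driver followed by a
 * cpuidle device for each possible CPU.
 */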
static int __init cps_cpuidle_init(void)
{
	int err, cpu, i;
	struct cpuidle_device *device;

	/* Detect supported states */
	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		cps_driver.state_count = STATE_CLOCK_GATED + 1;
	if (!cps_pm_support_state(CPS_PM_CLOCK_GATED))
		cps_driver.state_count = STATE_NC_WAIT + 1;
	if (!cps_pm_support_state(CPS_PM_NC_WAIT))
		cps_driver.state_count = STATE_WAIT + 1;

	/* Inform the user if some states are unavailable */
	if (cps_driver.state_count < STATE_COUNT) {
		pr_info("cpuidle-cps: limited to ");
		switch (cps_driver.state_count - 1) {
		case STATE_WAIT:
			pr_cont("coherent wait\n");
			break;
		case STATE_NC_WAIT:
			pr_cont("non-coherent wait\n");
			break;
		case STATE_CLOCK_GATED:
			pr_cont("clock gating\n");
			break;
		}
	}

	/*
	 * Set the coupled flag on the appropriate states if this system
	 * requires it.
	 */
	if (coupled_coherence)
		for (i = STATE_NC_WAIT; i < cps_driver.state_count; i++)
			cps_driver.states[i].flags |= CPUIDLE_FLAG_COUPLED;

	err = cpuidle_register_driver(&cps_driver);
	if (err) {
		pr_err("Failed to register CPS cpuidle driver\n");
		return err;
	}

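	/*
	 * Register a cpuidle device for each possible CPU. When coupled idle
	 * is in use, sibling CPUs within a core share a coupled_cpus mask so
	 * that they enter & exit the coupled states together.
	 */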
	for_each_possible_cpu(cpu) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
#endif

		err = cpuidle_register_device(device);
		if (err) {
			pr_err("Failed to register CPU%d cpuidle device\n",
			       cpu);
			goto err_out;
		}
	}

	return 0;
err_out:
	cps_cpuidle_unregister();
	return err;
}
device_initcall(cps_cpuidle_init);