// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 ARM/Linaro
 *
 * Authors: Daniel Lezcano <daniel.lezcano@linaro.org>
 *          Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *          Nicolas Pitre <nicolas.pitre@linaro.org>
 *
 * Maintainer: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 * Maintainer: Daniel Lezcano <daniel.lezcano@linaro.org>
 */
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpuidle.h>
#include <asm/mcpm.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"

static int bl_enter_powerdown(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int idx);

/*
 * NB: Owing to current menu governor behaviour, the big and LITTLE
 * index 1 states have to define exit_latency and target_residency for
 * the cluster state since, when all CPUs in a cluster hit it, the
 * cluster can be shut down. This means that when a single CPU enters
 * this state the exit_latency and target_residency values are somewhat
 * overkill. There is no notion of cluster states in the menu governor,
 * so CPUs have to define CPU states in which the cluster may be shut
 * down, depending on the state of the other CPUs. Idle state entry and
 * exit happen at random times; however, the cluster state provides
 * target_residency values as if all CPUs in a cluster entered the
 * state at once; this is somewhat optimistic and the behaviour should
 * be fixed either in the governor or in the MCPM back-ends.
 * To make this driver 100% generic, the number of states and the
 * exit_latency/target_residency values must be obtained from device
 * tree bindings.
 *
 * exit_latency: refers to the TC2 vexpress test chip and depends on
 * the current cluster operating point. It is the time it takes to get
 * the CPU up and running when the CPU is powered up on cluster wake-up
 * from shutdown. The current values for the big and LITTLE clusters
 * are given for clusters running at their default operating points.
 *
 * target_residency: the minimum amount of time the cluster has to be
 * down to break even in terms of power consumption. Cluster shutdown
 * has inherent dynamic power costs (L2 writebacks to DRAM being the
 * main factor) that depend on the current operating points. The
 * current values for both clusters assume a CPU half of whose L2 lines
 * are dirty and require cleaning to DRAM, and take into account the
 * static leakage power of the vexpress TC2 test chip.
 */
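
/*
 * For illustration only: an idle-state node matching the generic
 * "arm,idle-state" binding consumed by dt_init_idle_driver() below
 * could look roughly like this (node name, label and timing values
 * are hypothetical; times are in microseconds):
 *
 *	cluster_sleep: cluster-sleep {
 *		compatible = "arm,idle-state";
 *		local-timer-stop;
 *		entry-latency-us = <500>;
 *		exit-latency-us = <700>;
 *		min-residency-us = <2500>;
 *	};
 */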
static struct cpuidle_driver bl_idle_little_driver = {
	.name = "little_idle",
	.owner = THIS_MODULE,
	.states[0] = ARM_CPUIDLE_WFI_STATE,
	.states[1] = {
		.enter			= bl_enter_powerdown,
		.exit_latency		= 700,
		.target_residency	= 2500,
		.flags			= CPUIDLE_FLAG_TIMER_STOP,
		.name			= "C1",
		.desc			= "ARM little-cluster power down",
	},
	.state_count = 2,
};

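/*
 * Note: dt_init_idle_driver() installs the .data pointer of the
 * matching table entry below as the ->enter() callback of each idle
 * state it parses from the device tree.
 */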
static const struct of_device_id bl_idle_state_match[] __initconst = {
	{ .compatible = "arm,idle-state",
	  .data = bl_enter_powerdown },
	{ },
};

static struct cpuidle_driver bl_idle_big_driver = {
	.name = "big_idle",
	.owner = THIS_MODULE,
	.states[0] = ARM_CPUIDLE_WFI_STATE,
	.states[1] = {
		.enter			= bl_enter_powerdown,
		.exit_latency		= 500,
		.target_residency	= 2000,
		.flags			= CPUIDLE_FLAG_TIMER_STOP,
		.name			= "C1",
		.desc			= "ARM big-cluster power down",
	},
	.state_count = 2,
};

/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace bl_powerdown_finisher(unsigned long arg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
	mcpm_cpu_suspend();
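	/*
	 * If the power down succeeds, this function does not return:
	 * execution resumes in cpu_resume() via the MCPM entry vector
	 * set above. Reaching this point means the suspend was denied
	 * or aborted.
	 */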

	/* return value != 0 means failure */
	return 1;
}

/**
 * bl_enter_powerdown - Programs CPU to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @idx: state index
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 *
 * Return: the index of the entered idle state.
 */
static int bl_enter_powerdown(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int idx)
{
	cpu_pm_enter();

	cpu_suspend(0, bl_powerdown_finisher);

	/* signals the MCPM core that CPU is out of low power state */
	mcpm_cpu_powered_up();

	cpu_pm_exit();

	return idx;
}

static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
{
	struct cpumask *cpumask;
	int cpu;

	cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!cpumask)
		return -ENOMEM;

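	/*
	 * smp_cpuid_part() returns the MIDR implementer/part fields for
	 * a given CPU, so each driver instance binds only to CPUs of a
	 * single microarchitecture (Cortex-A7 or Cortex-A15 here).
	 */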
	for_each_possible_cpu(cpu)
		if (smp_cpuid_part(cpu) == part_id)
			cpumask_set_cpu(cpu, cpumask);

	drv->cpumask = cpumask;

	return 0;
}

static const struct of_device_id compatible_machine_match[] = {
	{ .compatible = "arm,vexpress,v2p-ca15_a7" },
	{ .compatible = "samsung,exynos5420" },
	{ .compatible = "samsung,exynos5800" },
	{},
};

static int __init bl_idle_init(void)
{
	int ret;
	struct device_node *root = of_find_node_by_path("/");
	const struct of_device_id *match_id;

	if (!root)
		return -ENODEV;

	/*
	 * Initialize the driver only on a known-compatible set of
	 * machines.
	 */
	match_id = of_match_node(compatible_machine_match, root);

	of_node_put(root);

	if (!match_id)
		return -ENODEV;

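	/*
	 * Cluster power down relies on an MCPM platform back-end having
	 * been registered; without one there is nothing to drive.
	 */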
	if (!mcpm_is_available())
		return -EUNATCH;

	/*
	 * For now the differentiation between little and big cores
	 * is based on the part number. A7 cores are considered little
	 * cores, A15 are considered big cores. This distinction may
	 * evolve in the future with a more generic matching approach.
	 */
	ret = bl_idle_driver_init(&bl_idle_little_driver,
				  ARM_CPU_PART_CORTEX_A7);
	if (ret)
		return ret;

	ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15);
	if (ret)
		goto out_uninit_little;

	/* Start at index 1, index 0 standard WFI */
	ret = dt_init_idle_driver(&bl_idle_big_driver, bl_idle_state_match, 1);
	if (ret < 0)
		goto out_uninit_big;

	/* Start at index 1, index 0 standard WFI */
	ret = dt_init_idle_driver(&bl_idle_little_driver,
				  bl_idle_state_match, 1);
	if (ret < 0)
		goto out_uninit_big;

	ret = cpuidle_register(&bl_idle_little_driver, NULL);
	if (ret)
		goto out_uninit_big;

	ret = cpuidle_register(&bl_idle_big_driver, NULL);
	if (ret)
		goto out_unregister_little;

	return 0;

out_unregister_little:
	cpuidle_unregister(&bl_idle_little_driver);
out_uninit_big:
	kfree(bl_idle_big_driver.cpumask);
out_uninit_little:
	kfree(bl_idle_little_driver.cpumask);

	return ret;
}
device_initcall(bl_idle_init);
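
/*
 * A quick sanity check, assuming a matching machine with an MCPM
 * back-end: after boot each CPU should expose the cluster power-down
 * state as state1, e.g.
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cpuidle/state1/name
 *
 * where the name is taken from the device tree idle-state node when
 * one is present, and falls back to "C1" otherwise.
 */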