// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <linux/uaccess.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS            "processor"

#ifdef CONFIG_CPU_FREQ

/*
 * If a passive cooling situation is detected, cpufreq is used first, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling and
 * thus a roughly cubic (rather than linear) reduction in power consumption.
 * Also, any cpufreq driver is allowed here, not only acpi-cpufreq.
 */
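/*
 * Each thermal reduction step caps the package frequency at an additional
 * 20% below cpuinfo.max_freq (see cpufreq_set_cur_state()):
 *
 *   state 0 -> 100% of max_freq (no limit)
 *   state 1 ->  80%
 *   state 2 ->  60%
 *   state 3 ->  40%
 */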

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);

#define reduction_pctg(cpu) \
	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))

/*
 * Emulate "per package data" using per-CPU data (which should really be
 * provided elsewhere).
 *
 * Note that we can lose a CPU on hot-unplug, in which case its state is
 * forgotten temporarily. Fortunately, that is not a big issue here.
 */
static int phys_package_first_cpu(int cpu)
{
	int i;
	int id = topology_physical_package_id(cpu);

	for_each_online_cpu(i)
		if (topology_physical_package_id(i) == id)
			return i;
	return 0;
}

static int cpu_has_cpufreq(unsigned int cpu)
{
	struct cpufreq_policy policy;

	if (!acpi_processor_cpufreq_init || cpufreq_get_policy(&policy, cpu))
		return 0;
	return 1;
}

static int cpufreq_get_max_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return reduction_pctg(cpu);
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	struct cpufreq_policy *policy;
	struct acpi_processor *pr;
	unsigned long max_freq;
	int i, ret;

	if (!cpu_has_cpufreq(cpu))
		return 0;

	reduction_pctg(cpu) = state;

	/*
	 * Update all the CPUs in the same package because they all
	 * contribute to the temperature and often share the same
	 * frequency.
	 */
	for_each_online_cpu(i) {
		if (topology_physical_package_id(i) !=
		    topology_physical_package_id(cpu))
			continue;

		pr = per_cpu(processors, i);

		if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
			continue;

		policy = cpufreq_cpu_get(i);
		if (!policy)
			return -EINVAL;

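		/* Each reduction step trims another 20% off cpuinfo.max_freq. */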
		max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100;

		cpufreq_cpu_put(policy);

		ret = freq_qos_update_request(&pr->thermal_req, max_freq);
		if (ret < 0) {
			pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
				pr->id, ret);
		}
	}
	return 0;
}

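/*
 * Register a maximum-frequency QoS request for every CPU covered by the
 * policy.  Each request starts at INT_MAX (no limit) and is adjusted later
 * by cpufreq_set_cur_state() when passive cooling is required.
 */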
void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);
		int ret;

		if (!pr)
			continue;

		ret = freq_qos_add_request(&policy->constraints,
					   &pr->thermal_req,
					   FREQ_QOS_MAX, INT_MAX);
		if (ret < 0)
			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
			       cpu, ret);
	}
}

void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			freq_qos_remove_request(&pr->thermal_req);
	}
}
#else				/* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}

#endif

/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
	int max_state = 0;

	/*
	 * There are four cpufreq thermal reduction states (0, 1, 2 and 3),
	 * plus the ACPI throttling states when throttling is supported.
	 */
	max_state += cpufreq_get_max_state(pr->id);
	if (pr->flags.throttling)
		max_state += (pr->throttling.state_count - 1);

	return max_state;
}

static int
processor_get_max_state(struct thermal_cooling_device *cdev,
			unsigned long *state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*state = acpi_processor_max_state(pr);
	return 0;
}

static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
			unsigned long *cur_state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*cur_state = cpufreq_get_cur_state(pr->id);
	if (pr->flags.throttling)
		*cur_state += pr->throttling.state;
	return 0;
}

static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
			unsigned long state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;
	int result = 0;
	int max_pstate;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	max_pstate = cpufreq_get_max_state(pr->id);

	if (state > acpi_processor_max_state(pr))
		return -EINVAL;

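	/*
	 * States up to max_pstate are handled by cpufreq frequency reduction
	 * alone; higher states additionally engage ACPI throttling (T-states)
	 * for the remainder.
	 */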
	if (state <= max_pstate) {
		if (pr->flags.throttling && pr->throttling.state)
			result = acpi_processor_set_throttling(pr, 0, false);
		cpufreq_set_cur_state(pr->id, state);
	} else {
		cpufreq_set_cur_state(pr->id, max_pstate);
		result = acpi_processor_set_throttling(pr,
				state - max_pstate, false);
	}
	return result;
}

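/*
 * Cooling device callbacks used by the generic thermal framework.  The ACPI
 * processor driver registers these for each processor device (for example
 * via thermal_cooling_device_register()).
 */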
const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};