xref: /OK3568_Linux_fs/kernel/drivers/powercap/dtpm_cpu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2020 Linaro Limited
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * The DTPM CPU is based on the energy model. It hooks the CPU in the
8*4882a593Smuzhiyun  * DTPM tree which in turns update the power number by propagating the
9*4882a593Smuzhiyun  * power number from the CPU energy model information to the parents.
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  * The association between the power and the performance state, allows
12*4882a593Smuzhiyun  * to set the power of the CPU at the OPP granularity.
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * The CPU hotplug is supported and the power numbers will be updated
15*4882a593Smuzhiyun  * if a CPU is hot plugged / unplugged.
16*4882a593Smuzhiyun  */
17*4882a593Smuzhiyun #include <linux/cpumask.h>
18*4882a593Smuzhiyun #include <linux/cpufreq.h>
19*4882a593Smuzhiyun #include <linux/cpuhotplug.h>
20*4882a593Smuzhiyun #include <linux/dtpm.h>
21*4882a593Smuzhiyun #include <linux/energy_model.h>
22*4882a593Smuzhiyun #include <linux/pm_qos.h>
23*4882a593Smuzhiyun #include <linux/slab.h>
24*4882a593Smuzhiyun #include <linux/units.h>
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun static struct dtpm *__parent;
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun static DEFINE_PER_CPU(struct dtpm *, dtpm_per_cpu);
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun struct dtpm_cpu {
31*4882a593Smuzhiyun 	struct freq_qos_request qos_req;
32*4882a593Smuzhiyun 	int cpu;
33*4882a593Smuzhiyun };
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun /*
36*4882a593Smuzhiyun  * When a new CPU is inserted at hotplug or boot time, add the power
37*4882a593Smuzhiyun  * contribution and update the dtpm tree.
38*4882a593Smuzhiyun  */
power_add(struct dtpm * dtpm,struct em_perf_domain * em)39*4882a593Smuzhiyun static int power_add(struct dtpm *dtpm, struct em_perf_domain *em)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun 	u64 power_min, power_max;
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun 	power_min = em->table[0].power;
44*4882a593Smuzhiyun 	power_min *= MICROWATT_PER_MILLIWATT;
45*4882a593Smuzhiyun 	power_min += dtpm->power_min;
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun 	power_max = em->table[em->nr_perf_states - 1].power;
48*4882a593Smuzhiyun 	power_max *= MICROWATT_PER_MILLIWATT;
49*4882a593Smuzhiyun 	power_max += dtpm->power_max;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	return dtpm_update_power(dtpm, power_min, power_max);
52*4882a593Smuzhiyun }
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun /*
55*4882a593Smuzhiyun  * When a CPU is unplugged, remove its power contribution from the
56*4882a593Smuzhiyun  * dtpm tree.
57*4882a593Smuzhiyun  */
power_sub(struct dtpm * dtpm,struct em_perf_domain * em)58*4882a593Smuzhiyun static int power_sub(struct dtpm *dtpm, struct em_perf_domain *em)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun 	u64 power_min, power_max;
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun 	power_min = em->table[0].power;
63*4882a593Smuzhiyun 	power_min *= MICROWATT_PER_MILLIWATT;
64*4882a593Smuzhiyun 	power_min = dtpm->power_min - power_min;
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 	power_max = em->table[em->nr_perf_states - 1].power;
67*4882a593Smuzhiyun 	power_max *= MICROWATT_PER_MILLIWATT;
68*4882a593Smuzhiyun 	power_max = dtpm->power_max - power_max;
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	return dtpm_update_power(dtpm, power_min, power_max);
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
/*
 * Pick the highest OPP whose aggregate power (over the online CPUs of
 * the perf domain) fits within @power_limit, cap the frequency there
 * through the freq QoS request, and return the power actually granted
 * (in uW).
 */
static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
	struct dtpm_cpu *dtpm_cpu = dtpm->private;
	struct em_perf_domain *pd;
	struct cpumask cpus;
	unsigned long freq;
	u64 power;
	int i, nr_cpus;

	pd = em_cpu_get(dtpm_cpu->cpu);

	cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));

	nr_cpus = cpumask_weight(&cpus);

	/* Walk up the perf states until one exceeds the limit. */
	for (i = 0; i < pd->nr_perf_states; i++) {

		power = pd->table[i].power * MICROWATT_PER_MILLIWATT * nr_cpus;

		if (power > power_limit)
			break;
	}

	/*
	 * If even the lowest OPP exceeds the limit, fall back to the
	 * lowest OPP rather than reading table[-1]. The dtpm core is
	 * expected to clamp power_limit to [power_min, power_max], but
	 * don't rely on that here.
	 */
	if (i == 0)
		i = 1;

	freq = pd->table[i - 1].frequency;

	freq_qos_update_request(&dtpm_cpu->qos_req, freq);

	power_limit = pd->table[i - 1].power *
		MICROWATT_PER_MILLIWATT * nr_cpus;

	return power_limit;
}
105*4882a593Smuzhiyun 
get_pd_power_uw(struct dtpm * dtpm)106*4882a593Smuzhiyun static u64 get_pd_power_uw(struct dtpm *dtpm)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun 	struct dtpm_cpu *dtpm_cpu = dtpm->private;
109*4882a593Smuzhiyun 	struct em_perf_domain *pd;
110*4882a593Smuzhiyun 	struct cpumask cpus;
111*4882a593Smuzhiyun 	unsigned long freq;
112*4882a593Smuzhiyun 	int i, nr_cpus;
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 	pd = em_cpu_get(dtpm_cpu->cpu);
115*4882a593Smuzhiyun 	freq = cpufreq_quick_get(dtpm_cpu->cpu);
116*4882a593Smuzhiyun 	cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
117*4882a593Smuzhiyun 	nr_cpus = cpumask_weight(&cpus);
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 	for (i = 0; i < pd->nr_perf_states; i++) {
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun 		if (pd->table[i].frequency < freq)
122*4882a593Smuzhiyun 			continue;
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 		return pd->table[i].power *
125*4882a593Smuzhiyun 			MICROWATT_PER_MILLIWATT * nr_cpus;
126*4882a593Smuzhiyun 	}
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 	return 0;
129*4882a593Smuzhiyun }
130*4882a593Smuzhiyun 
pd_release(struct dtpm * dtpm)131*4882a593Smuzhiyun static void pd_release(struct dtpm *dtpm)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun 	struct dtpm_cpu *dtpm_cpu = dtpm->private;
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun 	if (freq_qos_request_active(&dtpm_cpu->qos_req))
136*4882a593Smuzhiyun 		freq_qos_remove_request(&dtpm_cpu->qos_req);
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun 	kfree(dtpm_cpu);
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun static struct dtpm_ops dtpm_ops = {
142*4882a593Smuzhiyun 	.set_power_uw = set_pd_power_limit,
143*4882a593Smuzhiyun 	.get_power_uw = get_pd_power_uw,
144*4882a593Smuzhiyun 	.release = pd_release,
145*4882a593Smuzhiyun };
146*4882a593Smuzhiyun 
cpuhp_dtpm_cpu_offline(unsigned int cpu)147*4882a593Smuzhiyun static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
148*4882a593Smuzhiyun {
149*4882a593Smuzhiyun 	struct cpufreq_policy *policy;
150*4882a593Smuzhiyun 	struct em_perf_domain *pd;
151*4882a593Smuzhiyun 	struct dtpm *dtpm;
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	policy = cpufreq_cpu_get(cpu);
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	if (!policy)
156*4882a593Smuzhiyun 		return 0;
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 	pd = em_cpu_get(cpu);
159*4882a593Smuzhiyun 	if (!pd)
160*4882a593Smuzhiyun 		return -EINVAL;
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 	dtpm = per_cpu(dtpm_per_cpu, cpu);
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	power_sub(dtpm, pd);
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun 	if (cpumask_weight(policy->cpus) != 1)
167*4882a593Smuzhiyun 		return 0;
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	for_each_cpu(cpu, policy->related_cpus)
170*4882a593Smuzhiyun 		per_cpu(dtpm_per_cpu, cpu) = NULL;
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	dtpm_unregister(dtpm);
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	return 0;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun 
cpuhp_dtpm_cpu_online(unsigned int cpu)177*4882a593Smuzhiyun static int cpuhp_dtpm_cpu_online(unsigned int cpu)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun 	struct dtpm *dtpm;
180*4882a593Smuzhiyun 	struct dtpm_cpu *dtpm_cpu;
181*4882a593Smuzhiyun 	struct cpufreq_policy *policy;
182*4882a593Smuzhiyun 	struct em_perf_domain *pd;
183*4882a593Smuzhiyun 	char name[CPUFREQ_NAME_LEN];
184*4882a593Smuzhiyun 	int ret = -ENOMEM;
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	policy = cpufreq_cpu_get(cpu);
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	if (!policy)
189*4882a593Smuzhiyun 		return 0;
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun 	pd = em_cpu_get(cpu);
192*4882a593Smuzhiyun 	if (!pd)
193*4882a593Smuzhiyun 		return -EINVAL;
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun 	dtpm = per_cpu(dtpm_per_cpu, cpu);
196*4882a593Smuzhiyun 	if (dtpm)
197*4882a593Smuzhiyun 		return power_add(dtpm, pd);
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 	dtpm = dtpm_alloc(&dtpm_ops);
200*4882a593Smuzhiyun 	if (!dtpm)
201*4882a593Smuzhiyun 		return -EINVAL;
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 	dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
204*4882a593Smuzhiyun 	if (!dtpm_cpu)
205*4882a593Smuzhiyun 		goto out_kfree_dtpm;
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	dtpm->private = dtpm_cpu;
208*4882a593Smuzhiyun 	dtpm_cpu->cpu = cpu;
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 	for_each_cpu(cpu, policy->related_cpus)
211*4882a593Smuzhiyun 		per_cpu(dtpm_per_cpu, cpu) = dtpm;
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 	sprintf(name, "cpu%d", dtpm_cpu->cpu);
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	ret = dtpm_register(name, dtpm, __parent);
216*4882a593Smuzhiyun 	if (ret)
217*4882a593Smuzhiyun 		goto out_kfree_dtpm_cpu;
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 	ret = power_add(dtpm, pd);
220*4882a593Smuzhiyun 	if (ret)
221*4882a593Smuzhiyun 		goto out_dtpm_unregister;
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 	ret = freq_qos_add_request(&policy->constraints,
224*4882a593Smuzhiyun 				   &dtpm_cpu->qos_req, FREQ_QOS_MAX,
225*4882a593Smuzhiyun 				   pd->table[pd->nr_perf_states - 1].frequency);
226*4882a593Smuzhiyun 	if (ret)
227*4882a593Smuzhiyun 		goto out_power_sub;
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	return 0;
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun out_power_sub:
232*4882a593Smuzhiyun 	power_sub(dtpm, pd);
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun out_dtpm_unregister:
235*4882a593Smuzhiyun 	dtpm_unregister(dtpm);
236*4882a593Smuzhiyun 	dtpm_cpu = NULL;
237*4882a593Smuzhiyun 	dtpm = NULL;
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun out_kfree_dtpm_cpu:
240*4882a593Smuzhiyun 	for_each_cpu(cpu, policy->related_cpus)
241*4882a593Smuzhiyun 		per_cpu(dtpm_per_cpu, cpu) = NULL;
242*4882a593Smuzhiyun 	kfree(dtpm_cpu);
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun out_kfree_dtpm:
245*4882a593Smuzhiyun 	kfree(dtpm);
246*4882a593Smuzhiyun 	return ret;
247*4882a593Smuzhiyun }
248*4882a593Smuzhiyun 
dtpm_register_cpu(struct dtpm * parent)249*4882a593Smuzhiyun int dtpm_register_cpu(struct dtpm *parent)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun 	__parent = parent;
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	return cpuhp_setup_state(CPUHP_AP_DTPM_CPU_ONLINE,
254*4882a593Smuzhiyun 				 "dtpm_cpu:online",
255*4882a593Smuzhiyun 				 cpuhp_dtpm_cpu_online,
256*4882a593Smuzhiyun 				 cpuhp_dtpm_cpu_offline);
257*4882a593Smuzhiyun }
258