xref: /OK3568_Linux_fs/kernel/drivers/cpufreq/cpufreq_ondemand.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *  drivers/cpufreq/cpufreq_ondemand.c
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *  Copyright (C)  2001 Russell King
6*4882a593Smuzhiyun  *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
7*4882a593Smuzhiyun  *                      Jun Nakajima <jun.nakajima@intel.com>
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/cpu.h>
13*4882a593Smuzhiyun #include <linux/percpu-defs.h>
14*4882a593Smuzhiyun #include <linux/slab.h>
15*4882a593Smuzhiyun #include <linux/tick.h>
16*4882a593Smuzhiyun #include <linux/sched/cpufreq.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include "cpufreq_ondemand.h"
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun /* On-demand governor macros */
21*4882a593Smuzhiyun #define DEF_FREQUENCY_UP_THRESHOLD		(80)
22*4882a593Smuzhiyun #define DEF_SAMPLING_DOWN_FACTOR		(1)
23*4882a593Smuzhiyun #define MAX_SAMPLING_DOWN_FACTOR		(100000)
24*4882a593Smuzhiyun #define MICRO_FREQUENCY_UP_THRESHOLD		(95)
25*4882a593Smuzhiyun #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
26*4882a593Smuzhiyun #define MIN_FREQUENCY_UP_THRESHOLD		(1)
27*4882a593Smuzhiyun #define MAX_FREQUENCY_UP_THRESHOLD		(100)
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun static struct od_ops od_ops;
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun static unsigned int default_powersave_bias;
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun /*
34*4882a593Smuzhiyun  * Not all CPUs want IO time to be accounted as busy; this depends on how
35*4882a593Smuzhiyun  * efficient idling at a higher frequency/voltage is.
36*4882a593Smuzhiyun  * Pavel Machek says this is not so for various generations of AMD and old
37*4882a593Smuzhiyun  * Intel systems.
38*4882a593Smuzhiyun  * Mike Chan (android.com) claims this is also not true for ARM.
39*4882a593Smuzhiyun  * Because of this, whitelist specific known (series) of CPUs by default, and
40*4882a593Smuzhiyun  * leave all others up to the user.
41*4882a593Smuzhiyun  */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/* Intel family 6, model 15 (Core 2) and newer idle efficiently. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	/* Everything else: leave the decision to the user. */
	return 0;
}
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun /*
57*4882a593Smuzhiyun  * Find right freq to be set now with powersave_bias on.
58*4882a593Smuzhiyun  * Returns the freq_hi to be used right now and will set freq_hi_delay_us,
59*4882a593Smuzhiyun  * freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
60*4882a593Smuzhiyun  */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index;
	unsigned int delay_hi_us;
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;

	/* Without a frequency table there is nothing to average over. */
	if (!freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_next;
	}

	/* Reduce the requested frequency by powersave_bias (0.1% units). */
	index = cpufreq_frequency_table_target(policy, freq_next, relation);
	freq_req = freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = cpufreq_table_find_index_h(policy, freq_avg);
	freq_lo = freq_table[index].frequency;
	index = cpufreq_table_find_index_l(policy, freq_avg);
	freq_hi = freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		/* freq_avg hit an exact table entry: no alternation needed. */
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_lo;
	}
	/*
	 * Split the sampling period between freq_hi and freq_lo so that the
	 * time-weighted average frequency comes out at freq_avg; the
	 * "(freq_hi - freq_lo) / 2" term rounds to the nearest microsecond.
	 */
	delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
	delay_hi_us += (freq_hi - freq_lo) / 2;
	delay_hi_us /= freq_hi - freq_lo;
	dbs_info->freq_hi_delay_us = delay_hi_us;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
	return freq_hi;
}
105*4882a593Smuzhiyun 
ondemand_powersave_bias_init(struct cpufreq_policy * policy)106*4882a593Smuzhiyun static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun 	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	dbs_info->freq_lo = 0;
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun 
dbs_freq_increase(struct cpufreq_policy * policy,unsigned int freq)113*4882a593Smuzhiyun static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
114*4882a593Smuzhiyun {
115*4882a593Smuzhiyun 	struct policy_dbs_info *policy_dbs = policy->governor_data;
116*4882a593Smuzhiyun 	struct dbs_data *dbs_data = policy_dbs->dbs_data;
117*4882a593Smuzhiyun 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 	if (od_tuners->powersave_bias)
120*4882a593Smuzhiyun 		freq = od_ops.powersave_bias_target(policy, freq,
121*4882a593Smuzhiyun 				CPUFREQ_RELATION_H);
122*4882a593Smuzhiyun 	else if (policy->cur == policy->max)
123*4882a593Smuzhiyun 		return;
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
126*4882a593Smuzhiyun 			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
127*4882a593Smuzhiyun }
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun /*
130*4882a593Smuzhiyun  * Every sampling_rate, we check, if current idle time is less than 20%
131*4882a593Smuzhiyun  * (default), then we try to increase frequency. Else, we adjust the frequency
132*4882a593Smuzhiyun  * proportional to load.
133*4882a593Smuzhiyun  */
static void od_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	/* Forget any low-frequency phase left over from a previous sample. */
	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			policy_dbs->rate_mult = dbs_data->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		policy_dbs->rate_mult = 1;

		/* With powersave_bias set, alternate around the target. */
		if (od_tuners->powersave_bias)
			freq_next = od_ops.powersave_bias_target(policy,
								 freq_next,
								 CPUFREQ_RELATION_L);

		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}
169*4882a593Smuzhiyun 
/*
 * Governor callback: take a sample and return the time, in microseconds,
 * to wait before the next sample.
 */
static unsigned int od_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	int sample_type = dbs_info->sample_type;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	/*
	 * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
	 * it then.
	 */
	if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
		/* Second half of a powersave_bias period: drop to freq_lo. */
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
		return dbs_info->freq_lo_delay_us;
	}

	od_update(policy);

	if (dbs_info->freq_lo) {
		/* Setup SUB_SAMPLE */
		dbs_info->sample_type = OD_SUB_SAMPLE;
		return dbs_info->freq_hi_delay_us;
	}

	/* Plain sample: wait a full (possibly stretched) sampling period. */
	return dbs_data->sampling_rate * policy_dbs->rate_mult;
}
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun /************************** sysfs interface ************************/
201*4882a593Smuzhiyun static struct dbs_governor od_dbs_gov;
202*4882a593Smuzhiyun 
store_io_is_busy(struct gov_attr_set * attr_set,const char * buf,size_t count)203*4882a593Smuzhiyun static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
204*4882a593Smuzhiyun 				size_t count)
205*4882a593Smuzhiyun {
206*4882a593Smuzhiyun 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
207*4882a593Smuzhiyun 	unsigned int input;
208*4882a593Smuzhiyun 	int ret;
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 	ret = sscanf(buf, "%u", &input);
211*4882a593Smuzhiyun 	if (ret != 1)
212*4882a593Smuzhiyun 		return -EINVAL;
213*4882a593Smuzhiyun 	dbs_data->io_is_busy = !!input;
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	/* we need to re-evaluate prev_cpu_idle */
216*4882a593Smuzhiyun 	gov_update_cpu_data(dbs_data);
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 	return count;
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun 
store_up_threshold(struct gov_attr_set * attr_set,const char * buf,size_t count)221*4882a593Smuzhiyun static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
222*4882a593Smuzhiyun 				  const char *buf, size_t count)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
225*4882a593Smuzhiyun 	unsigned int input;
226*4882a593Smuzhiyun 	int ret;
227*4882a593Smuzhiyun 	ret = sscanf(buf, "%u", &input);
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
230*4882a593Smuzhiyun 			input < MIN_FREQUENCY_UP_THRESHOLD) {
231*4882a593Smuzhiyun 		return -EINVAL;
232*4882a593Smuzhiyun 	}
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 	dbs_data->up_threshold = input;
235*4882a593Smuzhiyun 	return count;
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun 
store_sampling_down_factor(struct gov_attr_set * attr_set,const char * buf,size_t count)238*4882a593Smuzhiyun static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
239*4882a593Smuzhiyun 					  const char *buf, size_t count)
240*4882a593Smuzhiyun {
241*4882a593Smuzhiyun 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
242*4882a593Smuzhiyun 	struct policy_dbs_info *policy_dbs;
243*4882a593Smuzhiyun 	unsigned int input;
244*4882a593Smuzhiyun 	int ret;
245*4882a593Smuzhiyun 	ret = sscanf(buf, "%u", &input);
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
248*4882a593Smuzhiyun 		return -EINVAL;
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 	dbs_data->sampling_down_factor = input;
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	/* Reset down sampling multiplier in case it was active */
253*4882a593Smuzhiyun 	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
254*4882a593Smuzhiyun 		/*
255*4882a593Smuzhiyun 		 * Doing this without locking might lead to using different
256*4882a593Smuzhiyun 		 * rate_mult values in od_update() and od_dbs_update().
257*4882a593Smuzhiyun 		 */
258*4882a593Smuzhiyun 		mutex_lock(&policy_dbs->update_mutex);
259*4882a593Smuzhiyun 		policy_dbs->rate_mult = 1;
260*4882a593Smuzhiyun 		mutex_unlock(&policy_dbs->update_mutex);
261*4882a593Smuzhiyun 	}
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun 	return count;
264*4882a593Smuzhiyun }
265*4882a593Smuzhiyun 
store_ignore_nice_load(struct gov_attr_set * attr_set,const char * buf,size_t count)266*4882a593Smuzhiyun static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
267*4882a593Smuzhiyun 				      const char *buf, size_t count)
268*4882a593Smuzhiyun {
269*4882a593Smuzhiyun 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
270*4882a593Smuzhiyun 	unsigned int input;
271*4882a593Smuzhiyun 	int ret;
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	ret = sscanf(buf, "%u", &input);
274*4882a593Smuzhiyun 	if (ret != 1)
275*4882a593Smuzhiyun 		return -EINVAL;
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	if (input > 1)
278*4882a593Smuzhiyun 		input = 1;
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun 	if (input == dbs_data->ignore_nice_load) { /* nothing to do */
281*4882a593Smuzhiyun 		return count;
282*4882a593Smuzhiyun 	}
283*4882a593Smuzhiyun 	dbs_data->ignore_nice_load = input;
284*4882a593Smuzhiyun 
285*4882a593Smuzhiyun 	/* we need to re-evaluate prev_cpu_idle */
286*4882a593Smuzhiyun 	gov_update_cpu_data(dbs_data);
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	return count;
289*4882a593Smuzhiyun }
290*4882a593Smuzhiyun 
store_powersave_bias(struct gov_attr_set * attr_set,const char * buf,size_t count)291*4882a593Smuzhiyun static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
292*4882a593Smuzhiyun 				    const char *buf, size_t count)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
295*4882a593Smuzhiyun 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
296*4882a593Smuzhiyun 	struct policy_dbs_info *policy_dbs;
297*4882a593Smuzhiyun 	unsigned int input;
298*4882a593Smuzhiyun 	int ret;
299*4882a593Smuzhiyun 	ret = sscanf(buf, "%u", &input);
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	if (ret != 1)
302*4882a593Smuzhiyun 		return -EINVAL;
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	if (input > 1000)
305*4882a593Smuzhiyun 		input = 1000;
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun 	od_tuners->powersave_bias = input;
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
310*4882a593Smuzhiyun 		ondemand_powersave_bias_init(policy_dbs->policy);
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	return count;
313*4882a593Smuzhiyun }
314*4882a593Smuzhiyun 
/* show() handlers for the tunables shared with the other dbs governors. */
gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(io_is_busy);
/* show() handler for the ondemand-specific powersave_bias tunable. */
gov_show_one(od, powersave_bias);

/* Read-write sysfs attributes tying the show/store pairs together. */
gov_attr_rw(sampling_rate);
gov_attr_rw(io_is_busy);
gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);

/* NULL-terminated attribute list installed via the governor's kobj_type. */
static struct attribute *od_attributes[] = {
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun /************************** sysfs end ************************/
340*4882a593Smuzhiyun 
od_alloc(void)341*4882a593Smuzhiyun static struct policy_dbs_info *od_alloc(void)
342*4882a593Smuzhiyun {
343*4882a593Smuzhiyun 	struct od_policy_dbs_info *dbs_info;
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
346*4882a593Smuzhiyun 	return dbs_info ? &dbs_info->policy_dbs : NULL;
347*4882a593Smuzhiyun }
348*4882a593Smuzhiyun 
/* Free per-policy governor data allocated by od_alloc(). */
static void od_free(struct policy_dbs_info *policy_dbs)
{
	struct od_policy_dbs_info *info = to_dbs_info(policy_dbs);

	kfree(info);
}
353*4882a593Smuzhiyun 
od_init(struct dbs_data * dbs_data)354*4882a593Smuzhiyun static int od_init(struct dbs_data *dbs_data)
355*4882a593Smuzhiyun {
356*4882a593Smuzhiyun 	struct od_dbs_tuners *tuners;
357*4882a593Smuzhiyun 	u64 idle_time;
358*4882a593Smuzhiyun 	int cpu;
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
361*4882a593Smuzhiyun 	if (!tuners)
362*4882a593Smuzhiyun 		return -ENOMEM;
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 	cpu = get_cpu();
365*4882a593Smuzhiyun 	idle_time = get_cpu_idle_time_us(cpu, NULL);
366*4882a593Smuzhiyun 	put_cpu();
367*4882a593Smuzhiyun 	if (idle_time != -1ULL) {
368*4882a593Smuzhiyun 		/* Idle micro accounting is supported. Use finer thresholds */
369*4882a593Smuzhiyun 		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
370*4882a593Smuzhiyun 	} else {
371*4882a593Smuzhiyun 		dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
372*4882a593Smuzhiyun 	}
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
375*4882a593Smuzhiyun 	dbs_data->ignore_nice_load = 0;
376*4882a593Smuzhiyun 	tuners->powersave_bias = default_powersave_bias;
377*4882a593Smuzhiyun 	dbs_data->io_is_busy = should_io_be_busy();
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	dbs_data->tuners = tuners;
380*4882a593Smuzhiyun 	return 0;
381*4882a593Smuzhiyun }
382*4882a593Smuzhiyun 
od_exit(struct dbs_data * dbs_data)383*4882a593Smuzhiyun static void od_exit(struct dbs_data *dbs_data)
384*4882a593Smuzhiyun {
385*4882a593Smuzhiyun 	kfree(dbs_data->tuners);
386*4882a593Smuzhiyun }
387*4882a593Smuzhiyun 
od_start(struct cpufreq_policy * policy)388*4882a593Smuzhiyun static void od_start(struct cpufreq_policy *policy)
389*4882a593Smuzhiyun {
390*4882a593Smuzhiyun 	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	dbs_info->sample_type = OD_NORMAL_SAMPLE;
393*4882a593Smuzhiyun 	ondemand_powersave_bias_init(policy);
394*4882a593Smuzhiyun }
395*4882a593Smuzhiyun 
/* Default ops; replaceable via od_register_powersave_bias_handler(). */
static struct od_ops od_ops = {
	.powersave_bias_target = generic_powersave_bias_target,
};

/* Governor descriptor hooked into the common dbs governor framework. */
static struct dbs_governor od_dbs_gov = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
	.kobj_type = { .default_attrs = od_attributes },
	.gov_dbs_update = od_dbs_update,
	.alloc = od_alloc,
	.free = od_free,
	.init = od_init,
	.exit = od_exit,
	.start = od_start,
};
410*4882a593Smuzhiyun 
411*4882a593Smuzhiyun #define CPU_FREQ_GOV_ONDEMAND	(od_dbs_gov.gov)
412*4882a593Smuzhiyun 
/*
 * Propagate a new default powersave_bias to every policy currently run by
 * the ondemand governor.  Each policy is visited once: the "done" mask
 * records CPUs whose (possibly shared) policy has already been updated.
 */
static void od_set_powersave_bias(unsigned int powersave_bias)
{
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	/* Hold off CPU hotplug while walking the online CPUs. */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct policy_dbs_info *policy_dbs;
		struct dbs_data *dbs_data;
		struct od_dbs_tuners *od_tuners;

		/* Skip CPUs whose shared policy was already handled. */
		if (cpumask_test_cpu(cpu, &done))
			continue;

		/* Only touch policies actually run by this governor. */
		policy = cpufreq_cpu_get_raw(cpu);
		if (!policy || policy->governor != &CPU_FREQ_GOV_ONDEMAND)
			continue;

		policy_dbs = policy->governor_data;
		if (!policy_dbs)
			continue;

		cpumask_or(&done, &done, policy->cpus);

		dbs_data = policy_dbs->dbs_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}
447*4882a593Smuzhiyun 
/*
 * od_register_powersave_bias_handler - install a custom bias handler.
 * @f: replacement for generic_powersave_bias_target()
 * @powersave_bias: new default bias, pushed to all ondemand policies
 *
 * Lets another module override how the biased target frequency is chosen.
 */
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
456*4882a593Smuzhiyun 
/*
 * od_unregister_powersave_bias_handler - restore the default bias handler
 * and reset powersave_bias to 0 on all ondemand policies.
 */
void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
465*4882a593Smuzhiyun MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
466*4882a593Smuzhiyun MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
467*4882a593Smuzhiyun 	"Low Latency Frequency Transition capable processors");
468*4882a593Smuzhiyun MODULE_LICENSE("GPL");
469*4882a593Smuzhiyun 
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
/* Let the cpufreq core pick ondemand as the boot-time default governor. */
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &CPU_FREQ_GOV_ONDEMAND;
}
#endif
476*4882a593Smuzhiyun 
477*4882a593Smuzhiyun cpufreq_governor_init(CPU_FREQ_GOV_ONDEMAND);
478*4882a593Smuzhiyun cpufreq_governor_exit(CPU_FREQ_GOV_ONDEMAND);
479