/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * drivers/cpufreq/cpufreq_governor.h
 *
 * Header file for CPUFreq governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 */

#ifndef _CPUFREQ_GOVERNOR_H
#define _CPUFREQ_GOVERNOR_H

#include <linux/atomic.h>
#include <linux/irq_work.h>
#include <linux/cpufreq.h>
#include <linux/sched/cpufreq.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>

/* Ondemand Sampling types */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};

/*
 * Abbreviations:
 * dbs: used as a shorthand for demand based switching. It helps to keep
 *	variable names smaller and simpler.
 * cdbs: common dbs
 * od_*: On-demand governor
 * cs_*: Conservative governor
 */

/* Governor demand based switching data (per-policy or global). */
struct dbs_data {
	struct gov_attr_set attr_set;
	void *tuners;
	unsigned int ignore_nice_load;
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int io_is_busy;
};

static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct dbs_data, attr_set);
}

#define gov_show_one(_gov, file_name)					\
static ssize_t show_##file_name						\
(struct gov_attr_set *attr_set, char *buf)				\
{									\
	struct dbs_data *dbs_data = to_dbs_data(attr_set);		\
	struct _gov##_dbs_tuners *tuners = dbs_data->tuners;		\
	return sprintf(buf, "%u\n", tuners->file_name);			\
}

#define gov_show_one_common(file_name)					\
static ssize_t show_##file_name						\
(struct gov_attr_set *attr_set, char *buf)				\
{									\
	struct dbs_data *dbs_data = to_dbs_data(attr_set);		\
	return sprintf(buf, "%u\n", dbs_data->file_name);		\
}
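
/*
 * Illustrative usage (not part of this header): each invocation expands to a
 * show_<name>() sysfs handler of the form
 * ssize_t show_<name>(struct gov_attr_set *attr_set, char *buf).
 * For instance, the ondemand governor exposes a common tunable and one of its
 * own tuners roughly like this:
 *
 *	gov_show_one_common(up_threshold);	// reads dbs_data->up_threshold
 *	gov_show_one(od, powersave_bias);	// reads od tuners->powersave_bias
 */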

#define gov_attr_ro(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0444, show_##_name, NULL)

#define gov_attr_rw(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0644, show_##_name, store_##_name)
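
/*
 * Sketch of how these are typically used (illustrative): once the show_/store_
 * handlers exist, a governor declares the matching sysfs attributes and lists
 * them in its attribute array, e.g.
 *
 *	gov_attr_rw(sampling_rate);		// needs show_ and store_ handlers
 *	gov_attr_ro(some_read_only_tunable);	// hypothetical name; needs show_ only
 */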
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun /* Common to all CPUs of a policy */
79*4882a593Smuzhiyun struct policy_dbs_info {
80*4882a593Smuzhiyun 	struct cpufreq_policy *policy;
81*4882a593Smuzhiyun 	/*
82*4882a593Smuzhiyun 	 * Per policy mutex that serializes load evaluation from limit-change
83*4882a593Smuzhiyun 	 * and work-handler.
84*4882a593Smuzhiyun 	 */
85*4882a593Smuzhiyun 	struct mutex update_mutex;
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	u64 last_sample_time;
88*4882a593Smuzhiyun 	s64 sample_delay_ns;
89*4882a593Smuzhiyun 	atomic_t work_count;
90*4882a593Smuzhiyun 	struct irq_work irq_work;
91*4882a593Smuzhiyun 	struct work_struct work;
92*4882a593Smuzhiyun 	/* dbs_data may be shared between multiple policy objects */
93*4882a593Smuzhiyun 	struct dbs_data *dbs_data;
94*4882a593Smuzhiyun 	struct list_head list;
95*4882a593Smuzhiyun 	/* Multiplier for increasing sample delay temporarily. */
96*4882a593Smuzhiyun 	unsigned int rate_mult;
97*4882a593Smuzhiyun 	unsigned int idle_periods;	/* For conservative */
98*4882a593Smuzhiyun 	/* Status indicators */
99*4882a593Smuzhiyun 	bool is_shared;		/* This object is used by multiple CPUs */
100*4882a593Smuzhiyun 	bool work_in_progress;	/* Work is being queued up or in progress */
101*4882a593Smuzhiyun };
102*4882a593Smuzhiyun 
gov_update_sample_delay(struct policy_dbs_info * policy_dbs,unsigned int delay_us)103*4882a593Smuzhiyun static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
104*4882a593Smuzhiyun 					   unsigned int delay_us)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun 	policy_dbs->sample_delay_ns = delay_us * NSEC_PER_USEC;
107*4882a593Smuzhiyun }
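
/*
 * For example, a delay_us of 10000 (a 10 ms sampling interval) sets
 * sample_delay_ns to 10,000,000 ns.
 */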

/* Per cpu structures */
struct cpu_dbs_info {
	u64 prev_cpu_idle;
	u64 prev_update_time;
	u64 prev_cpu_nice;
	/*
	 * Used to keep track of load in the previous interval. However, when
	 * explicitly set to zero, it is used as a flag to ensure that we copy
	 * the previous load to the current interval only once, upon the first
	 * wake-up from idle.
	 */
	unsigned int prev_load;
	struct update_util_data update_util;
	struct policy_dbs_info *policy_dbs;
};

/* Common Governor data across policies */
struct dbs_governor {
	struct cpufreq_governor gov;
	struct kobj_type kobj_type;

	/*
	 * Common data for platforms that don't set
	 * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
	 */
	struct dbs_data *gdbs_data;

	unsigned int (*gov_dbs_update)(struct cpufreq_policy *policy);
	struct policy_dbs_info *(*alloc)(void);
	void (*free)(struct policy_dbs_info *policy_dbs);
	int (*init)(struct dbs_data *dbs_data);
	void (*exit)(struct dbs_data *dbs_data);
	void (*start)(struct cpufreq_policy *policy);
};
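
/*
 * Summary of how the common dbs code (cpufreq_governor.c) is expected to use
 * the callbacks above; see that file for the authoritative behaviour:
 * alloc()/free() manage the governor-specific wrapper around policy_dbs_info,
 * init()/exit() set up and tear down the tuners attached to dbs_data,
 * start() resets per-policy state when the governor starts, and
 * gov_dbs_update() evaluates load and returns the delay in microseconds
 * until the next sample (fed into gov_update_sample_delay()).
 */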

static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy)
{
	return container_of(policy->governor, struct dbs_governor, gov);
}

/* Governor callback routines */
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy);
void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy);
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy);
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy);
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);

#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_)			\
	{								\
		.name = _name_,						\
		.flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,			\
		.owner = THIS_MODULE,					\
		.init = cpufreq_dbs_governor_init,			\
		.exit = cpufreq_dbs_governor_exit,			\
		.start = cpufreq_dbs_governor_start,			\
		.stop = cpufreq_dbs_governor_stop,			\
		.limits = cpufreq_dbs_governor_limits,			\
	}
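
/*
 * Usage sketch (illustrative; the ondemand and conservative governors follow
 * this pattern): a governor embeds the initializer in its dbs_governor
 * definition and wires up its own hooks, e.g.
 *
 *	static struct dbs_governor my_dbs_gov = {
 *		.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("mygov"),
 *		.kobj_type = { .default_attrs = my_attrs },
 *		.gov_dbs_update = my_dbs_update,
 *		.alloc = my_alloc,
 *		.free = my_free,
 *		.init = my_init,
 *		.exit = my_exit,
 *		.start = my_start,
 *	};
 *
 * "my_*" and "mygov" are placeholder names, not symbols defined anywhere in
 * the kernel.
 */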

/* Governor specific operations */
struct od_ops {
	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
			unsigned int freq_next, unsigned int relation);
};

unsigned int dbs_update(struct cpufreq_policy *policy);
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
			    size_t count);
void gov_update_cpu_data(struct dbs_data *dbs_data);
#endif /* _CPUFREQ_GOVERNOR_H */