1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * linux/include/linux/cpufreq.h
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2001 Russell King
6*4882a593Smuzhiyun * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun #ifndef _LINUX_CPUFREQ_H
9*4882a593Smuzhiyun #define _LINUX_CPUFREQ_H
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/clk.h>
12*4882a593Smuzhiyun #include <linux/cpumask.h>
13*4882a593Smuzhiyun #include <linux/completion.h>
14*4882a593Smuzhiyun #include <linux/kobject.h>
15*4882a593Smuzhiyun #include <linux/notifier.h>
16*4882a593Smuzhiyun #include <linux/pm_qos.h>
17*4882a593Smuzhiyun #include <linux/spinlock.h>
18*4882a593Smuzhiyun #include <linux/sysfs.h>
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun /*********************************************************************
21*4882a593Smuzhiyun * CPUFREQ INTERFACE *
22*4882a593Smuzhiyun *********************************************************************/
23*4882a593Smuzhiyun /*
24*4882a593Smuzhiyun * Frequency values here are CPU kHz
25*4882a593Smuzhiyun *
26*4882a593Smuzhiyun * Maximum transition latency is in nanoseconds - if it's unknown,
27*4882a593Smuzhiyun * CPUFREQ_ETERNAL shall be used.
28*4882a593Smuzhiyun */
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun #define CPUFREQ_ETERNAL (-1)
31*4882a593Smuzhiyun #define CPUFREQ_NAME_LEN 16
32*4882a593Smuzhiyun /* Print length for names. Extra 1 space for accommodating '\n' in prints */
33*4882a593Smuzhiyun #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun struct cpufreq_governor;
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun enum cpufreq_table_sorting {
38*4882a593Smuzhiyun CPUFREQ_TABLE_UNSORTED,
39*4882a593Smuzhiyun CPUFREQ_TABLE_SORTED_ASCENDING,
40*4882a593Smuzhiyun CPUFREQ_TABLE_SORTED_DESCENDING
41*4882a593Smuzhiyun };
42*4882a593Smuzhiyun
/* Hardware capabilities of a CPU: frequency range and switching latency. */
struct cpufreq_cpuinfo {
	unsigned int max_freq;	/* in kHz (see top-of-file note) */
	unsigned int min_freq;	/* in kHz */

	/* in 10^(-9) s = nanoseconds */
	unsigned int transition_latency;
};
50*4882a593Smuzhiyun
/*
 * One frequency-scaling policy: a group of CPUs that share a clock and must
 * therefore be coordinated in software, together with the current limits,
 * governor and bookkeeping the core needs for that group.
 */
struct cpufreq_policy {
	/* CPUs sharing clock, require sw coordination */
	cpumask_var_t cpus;	/* Online CPUs only */
	cpumask_var_t related_cpus; /* Online + Offline CPUs */
	cpumask_var_t real_cpus; /* Related and present */

	unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
				     should set cpufreq */
	unsigned int cpu;	/* cpu managing this policy, must be online */

	struct clk *clk;
	struct cpufreq_cpuinfo cpuinfo; /* hardware limits, see above */

	unsigned int min;	/* in kHz */
	unsigned int max;	/* in kHz */
	unsigned int cur;	/* in kHz, only needed if cpufreq
				 * governors are used */
	unsigned int restore_freq; /* = policy->cur before transition */
	unsigned int suspend_freq; /* freq to set during suspend */

	unsigned int policy;	/* CPUFREQ_POLICY_* value, see below */
	unsigned int last_policy; /* policy before unplug */
	struct cpufreq_governor *governor; /* see below */
	void *governor_data;	/* governor-private state */
	char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

	struct work_struct update; /* if update_policy() needs to be
				    * called, but you're in IRQ context */

	/* Aggregated frequency constraints and the requests feeding them. */
	struct freq_constraints constraints;
	struct freq_qos_request *min_freq_req;
	struct freq_qos_request *max_freq_req;

	struct cpufreq_frequency_table *freq_table;
	enum cpufreq_table_sorting freq_table_sorted;

	struct list_head policy_list;
	struct kobject kobj;
	struct completion kobj_unregister;

	/*
	 * The rules for this semaphore:
	 * - Any routine that wants to read from the policy structure will
	 *   do a down_read on this semaphore.
	 * - Any routine that will write to the policy structure and/or may take away
	 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
	 *   mode before doing so.
	 */
	struct rw_semaphore rwsem;

	/*
	 * Fast switch flags:
	 * - fast_switch_possible should be set by the driver if it can
	 *   guarantee that frequency can be changed on any CPU sharing the
	 *   policy and that the change will affect all of the policy CPUs then.
	 * - fast_switch_enabled is to be set by governors that support fast
	 *   frequency switching with the help of cpufreq_enable_fast_switch().
	 */
	bool fast_switch_possible;
	bool fast_switch_enabled;

	/*
	 * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
	 * governor.
	 */
	bool strict_target;

	/*
	 * Preferred average time interval between consecutive invocations of
	 * the driver to set the frequency for this policy. To be set by the
	 * scaling driver (0, which is the default, means no preference).
	 */
	unsigned int transition_delay_us;

	/*
	 * Remote DVFS flag (Not added to the driver structure as we don't want
	 * to access another structure from scheduler hotpath).
	 *
	 * Should be set if CPUs can do DVFS on behalf of other CPUs from
	 * different cpufreq policies.
	 */
	bool dvfs_possible_from_any_cpu;

	/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
	unsigned int cached_target_freq;
	unsigned int cached_resolved_idx;

	/* Synchronization for frequency transitions */
	bool transition_ongoing; /* Tracks transition status */
	spinlock_t transition_lock;
	wait_queue_head_t transition_wait;
	struct task_struct *transition_task; /* Task which is doing the transition */

	/* cpufreq-stats */
	struct cpufreq_stats *stats;

	/* For cpufreq driver's internal use */
	void *driver_data;

	/* Pointer to the cooling device if used for thermal mitigation */
	struct thermal_cooling_device *cdev;

	/* Notifiers for min/max frequency limit updates (see constraints above) */
	struct notifier_block nb_min;
	struct notifier_block nb_max;
};
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun /*
158*4882a593Smuzhiyun * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
159*4882a593Smuzhiyun * callback for sanitization. That callback is only expected to modify the min
160*4882a593Smuzhiyun * and max values, if necessary, and specifically it must not update the
161*4882a593Smuzhiyun * frequency table.
162*4882a593Smuzhiyun */
struct cpufreq_policy_data {
	struct cpufreq_cpuinfo cpuinfo;	/* hardware min/max limits */
	struct cpufreq_frequency_table *freq_table; /* must not be modified by ->verify() */
	unsigned int cpu;
	unsigned int min;	/* in kHz; may be adjusted by ->verify() */
	unsigned int max;	/* in kHz; may be adjusted by ->verify() */
};
170*4882a593Smuzhiyun
/*
 * Describes a single frequency transition; passed to
 * cpufreq_freq_transition_begin()/cpufreq_freq_transition_end() and hence
 * to the transition notifiers.
 */
struct cpufreq_freqs {
	struct cpufreq_policy *policy;
	unsigned int old;	/* previous frequency, in kHz */
	unsigned int new;	/* new frequency, in kHz */
	u8 flags;		/* flags of cpufreq_driver, see below. */
};
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun /* Only for ACPI */
179*4882a593Smuzhiyun #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
180*4882a593Smuzhiyun #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
181*4882a593Smuzhiyun #define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
182*4882a593Smuzhiyun #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun #ifdef CONFIG_CPU_FREQ
185*4882a593Smuzhiyun struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
186*4882a593Smuzhiyun struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
187*4882a593Smuzhiyun void cpufreq_cpu_put(struct cpufreq_policy *policy);
188*4882a593Smuzhiyun #else
/* CONFIG_CPU_FREQ=n stub: no policy objects exist. */
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) { return NULL; }
/* CONFIG_CPU_FREQ=n stub: there is never a policy to take a reference on. */
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { return NULL; }
cpufreq_cpu_put(struct cpufreq_policy * policy)197*4882a593Smuzhiyun static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
198*4882a593Smuzhiyun #endif
199*4882a593Smuzhiyun
/*
 * Return true if the policy currently covers no online CPUs (policy->cpus
 * holds online CPUs only), e.g. after hotplug removed them all.
 */
static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}
204*4882a593Smuzhiyun
/* Return true if more than one online CPU shares this policy's clock. */
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
	return cpumask_weight(policy->cpus) > 1;
}
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun #ifdef CONFIG_CPU_FREQ
211*4882a593Smuzhiyun unsigned int cpufreq_get(unsigned int cpu);
212*4882a593Smuzhiyun unsigned int cpufreq_quick_get(unsigned int cpu);
213*4882a593Smuzhiyun unsigned int cpufreq_quick_get_max(unsigned int cpu);
214*4882a593Smuzhiyun unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
215*4882a593Smuzhiyun void disable_cpufreq(void);
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
220*4882a593Smuzhiyun void cpufreq_cpu_release(struct cpufreq_policy *policy);
221*4882a593Smuzhiyun int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
222*4882a593Smuzhiyun void refresh_frequency_limits(struct cpufreq_policy *policy);
223*4882a593Smuzhiyun void cpufreq_update_policy(unsigned int cpu);
224*4882a593Smuzhiyun void cpufreq_update_limits(unsigned int cpu);
225*4882a593Smuzhiyun bool have_governor_per_policy(void);
226*4882a593Smuzhiyun bool cpufreq_supports_freq_invariance(void);
227*4882a593Smuzhiyun struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
228*4882a593Smuzhiyun void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
229*4882a593Smuzhiyun void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
230*4882a593Smuzhiyun #else
/* CONFIG_CPU_FREQ=n stub: no frequency information is available. */
static inline unsigned int cpufreq_get(unsigned int cpu) { return 0; }
/* CONFIG_CPU_FREQ=n stub: no frequency information is available. */
static inline unsigned int cpufreq_quick_get(unsigned int cpu) { return 0; }
/* CONFIG_CPU_FREQ=n stub: no maximum frequency is known. */
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) { return 0; }
/* CONFIG_CPU_FREQ=n stub: no hardware maximum frequency is known. */
static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu) { return 0; }
/* CONFIG_CPU_FREQ=n stub: frequency invariance cannot be supported. */
static inline bool cpufreq_supports_freq_invariance(void) { return false; }
disable_cpufreq(void)251*4882a593Smuzhiyun static inline void disable_cpufreq(void) { }
252*4882a593Smuzhiyun #endif
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun #ifdef CONFIG_CPU_FREQ_STAT
255*4882a593Smuzhiyun void cpufreq_stats_create_table(struct cpufreq_policy *policy);
256*4882a593Smuzhiyun void cpufreq_stats_free_table(struct cpufreq_policy *policy);
257*4882a593Smuzhiyun void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
258*4882a593Smuzhiyun unsigned int new_freq);
259*4882a593Smuzhiyun #else
cpufreq_stats_create_table(struct cpufreq_policy * policy)260*4882a593Smuzhiyun static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
cpufreq_stats_free_table(struct cpufreq_policy * policy)261*4882a593Smuzhiyun static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
/* CONFIG_CPU_FREQ_STAT=n stub: transitions are not recorded. */
static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
						   unsigned int new_freq)
{
}
264*4882a593Smuzhiyun #endif /* CONFIG_CPU_FREQ_STAT */
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun /*********************************************************************
267*4882a593Smuzhiyun * CPUFREQ DRIVER INTERFACE *
268*4882a593Smuzhiyun *********************************************************************/
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun #define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
271*4882a593Smuzhiyun #define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
272*4882a593Smuzhiyun #define CPUFREQ_RELATION_C 2 /* closest frequency to target */
273*4882a593Smuzhiyun
/*
 * sysfs attribute attached to a cpufreq policy: show/store receive the
 * policy instead of a raw kobject (see the cpufreq_freq_attr_* macros).
 */
struct freq_attr {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_policy *, char *);
	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun #define cpufreq_freq_attr_ro(_name) \
281*4882a593Smuzhiyun static struct freq_attr _name = \
282*4882a593Smuzhiyun __ATTR(_name, 0444, show_##_name, NULL)
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun #define cpufreq_freq_attr_ro_perm(_name, _perm) \
285*4882a593Smuzhiyun static struct freq_attr _name = \
286*4882a593Smuzhiyun __ATTR(_name, _perm, show_##_name, NULL)
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun #define cpufreq_freq_attr_rw(_name) \
289*4882a593Smuzhiyun static struct freq_attr _name = \
290*4882a593Smuzhiyun __ATTR(_name, 0644, show_##_name, store_##_name)
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun #define cpufreq_freq_attr_wo(_name) \
293*4882a593Smuzhiyun static struct freq_attr _name = \
294*4882a593Smuzhiyun __ATTR(_name, 0200, NULL, store_##_name)
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun #define define_one_global_ro(_name) \
297*4882a593Smuzhiyun static struct kobj_attribute _name = \
298*4882a593Smuzhiyun __ATTR(_name, 0444, show_##_name, NULL)
299*4882a593Smuzhiyun
300*4882a593Smuzhiyun #define define_one_global_rw(_name) \
301*4882a593Smuzhiyun static struct kobj_attribute _name = \
302*4882a593Smuzhiyun __ATTR(_name, 0644, show_##_name, store_##_name)
303*4882a593Smuzhiyun
304*4882a593Smuzhiyun
/*
 * Callbacks and capabilities of a cpufreq scaling driver, registered with
 * the core via cpufreq_register_driver().
 */
struct cpufreq_driver {
	char name[CPUFREQ_NAME_LEN];
	u16 flags;		/* CPUFREQ_* driver flags, see below */
	void *driver_data;

	/* needed by all drivers */
	int (*init)(struct cpufreq_policy *policy);
	int (*verify)(struct cpufreq_policy_data *policy);

	/* define one out of two */
	int (*setpolicy)(struct cpufreq_policy *policy);

	/*
	 * On failure, should always restore frequency to policy->restore_freq
	 * (i.e. old freq).
	 */
	int (*target)(struct cpufreq_policy *policy,
		      unsigned int target_freq,
		      unsigned int relation);	/* Deprecated */
	int (*target_index)(struct cpufreq_policy *policy,
			    unsigned int index);
	unsigned int (*fast_switch)(struct cpufreq_policy *policy,
				    unsigned int target_freq);

	/*
	 * Caches and returns the lowest driver-supported frequency greater than
	 * or equal to the target frequency, subject to any driver limitations.
	 * Does not set the frequency. Only to be implemented for drivers with
	 * target().
	 */
	unsigned int (*resolve_freq)(struct cpufreq_policy *policy,
				     unsigned int target_freq);

	/*
	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
	 * unset.
	 *
	 * get_intermediate should return a stable intermediate frequency
	 * platform wants to switch to and target_intermediate() should set CPU
	 * to that frequency, before jumping to the frequency corresponding
	 * to 'index'. Core will take care of sending notifications and driver
	 * doesn't have to handle them in target_intermediate() or
	 * target_index().
	 *
	 * Drivers can return '0' from get_intermediate() in case they don't
	 * wish to switch to intermediate frequency for some target frequency.
	 * In that case core will directly call ->target_index().
	 */
	unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
					 unsigned int index);
	int (*target_intermediate)(struct cpufreq_policy *policy,
				   unsigned int index);

	/* should be defined, if possible */
	unsigned int (*get)(unsigned int cpu);

	/* Called to update policy limits on firmware notifications. */
	void (*update_limits)(unsigned int cpu);

	/* optional */
	int (*bios_limit)(int cpu, unsigned int *limit);

	int (*online)(struct cpufreq_policy *policy);
	int (*offline)(struct cpufreq_policy *policy);
	int (*exit)(struct cpufreq_policy *policy);
	void (*stop_cpu)(struct cpufreq_policy *policy);
	int (*suspend)(struct cpufreq_policy *policy);
	int (*resume)(struct cpufreq_policy *policy);

	/* Will be called after the driver is fully initialized */
	void (*ready)(struct cpufreq_policy *policy);

	struct freq_attr **attr;	/* extra per-policy sysfs attributes */

	/* platform specific boost support code */
	bool boost_enabled;
	int (*set_boost)(struct cpufreq_policy *policy, int state);
};
383*4882a593Smuzhiyun
384*4882a593Smuzhiyun /* flags */
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun /* driver isn't removed even if all ->init() calls failed */
387*4882a593Smuzhiyun #define CPUFREQ_STICKY BIT(0)
388*4882a593Smuzhiyun
389*4882a593Smuzhiyun /* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
390*4882a593Smuzhiyun #define CPUFREQ_CONST_LOOPS BIT(1)
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun /* don't warn on suspend/resume speed mismatches */
393*4882a593Smuzhiyun #define CPUFREQ_PM_NO_WARN BIT(2)
394*4882a593Smuzhiyun
395*4882a593Smuzhiyun /*
396*4882a593Smuzhiyun * This should be set by platforms having multiple clock-domains, i.e.
397*4882a593Smuzhiyun * supporting multiple policies. With this sysfs directories of governor would
398*4882a593Smuzhiyun * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
399*4882a593Smuzhiyun * governor with different tunables for different clusters.
400*4882a593Smuzhiyun */
401*4882a593Smuzhiyun #define CPUFREQ_HAVE_GOVERNOR_PER_POLICY BIT(3)
402*4882a593Smuzhiyun
403*4882a593Smuzhiyun /*
404*4882a593Smuzhiyun * Driver will do POSTCHANGE notifications from outside of their ->target()
405*4882a593Smuzhiyun * routine and so must set cpufreq_driver->flags with this flag, so that core
406*4882a593Smuzhiyun * can handle them specially.
407*4882a593Smuzhiyun */
408*4882a593Smuzhiyun #define CPUFREQ_ASYNC_NOTIFICATION BIT(4)
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun /*
411*4882a593Smuzhiyun * Set by drivers which want cpufreq core to check if CPU is running at a
412*4882a593Smuzhiyun * frequency present in freq-table exposed by the driver. For these drivers if
413*4882a593Smuzhiyun * CPU is found running at an out of table freq, we will try to set it to a freq
414*4882a593Smuzhiyun * from the table. And if that fails, we will stop further boot process by
415*4882a593Smuzhiyun * issuing a BUG_ON().
416*4882a593Smuzhiyun */
417*4882a593Smuzhiyun #define CPUFREQ_NEED_INITIAL_FREQ_CHECK BIT(5)
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun /*
420*4882a593Smuzhiyun * Set by drivers to disallow use of governors with "dynamic_switching" flag
421*4882a593Smuzhiyun * set.
422*4882a593Smuzhiyun */
423*4882a593Smuzhiyun #define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
424*4882a593Smuzhiyun
425*4882a593Smuzhiyun /*
426*4882a593Smuzhiyun * Set by drivers that want the core to automatically register the cpufreq
427*4882a593Smuzhiyun * driver as a thermal cooling device.
428*4882a593Smuzhiyun */
429*4882a593Smuzhiyun #define CPUFREQ_IS_COOLING_DEV BIT(7)
430*4882a593Smuzhiyun
431*4882a593Smuzhiyun /*
 * Set by drivers that need to update internal upper and lower boundaries along
 * with the target frequency and so the core and governors should also invoke
 * the driver if the target frequency does not change, but the policy min or max
435*4882a593Smuzhiyun * may have changed.
436*4882a593Smuzhiyun */
437*4882a593Smuzhiyun #define CPUFREQ_NEED_UPDATE_LIMITS BIT(8)
438*4882a593Smuzhiyun
439*4882a593Smuzhiyun int cpufreq_register_driver(struct cpufreq_driver *driver_data);
440*4882a593Smuzhiyun int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun bool cpufreq_driver_test_flags(u16 flags);
443*4882a593Smuzhiyun const char *cpufreq_get_current_driver(void);
444*4882a593Smuzhiyun void *cpufreq_get_driver_data(void);
445*4882a593Smuzhiyun
cpufreq_thermal_control_enabled(struct cpufreq_driver * drv)446*4882a593Smuzhiyun static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
447*4882a593Smuzhiyun {
448*4882a593Smuzhiyun return IS_ENABLED(CONFIG_CPU_THERMAL) &&
449*4882a593Smuzhiyun (drv->flags & CPUFREQ_IS_COOLING_DEV);
450*4882a593Smuzhiyun }
451*4882a593Smuzhiyun
cpufreq_verify_within_limits(struct cpufreq_policy_data * policy,unsigned int min,unsigned int max)452*4882a593Smuzhiyun static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
453*4882a593Smuzhiyun unsigned int min,
454*4882a593Smuzhiyun unsigned int max)
455*4882a593Smuzhiyun {
456*4882a593Smuzhiyun if (policy->min < min)
457*4882a593Smuzhiyun policy->min = min;
458*4882a593Smuzhiyun if (policy->max < min)
459*4882a593Smuzhiyun policy->max = min;
460*4882a593Smuzhiyun if (policy->min > max)
461*4882a593Smuzhiyun policy->min = max;
462*4882a593Smuzhiyun if (policy->max > max)
463*4882a593Smuzhiyun policy->max = max;
464*4882a593Smuzhiyun if (policy->min > policy->max)
465*4882a593Smuzhiyun policy->min = policy->max;
466*4882a593Smuzhiyun return;
467*4882a593Smuzhiyun }
468*4882a593Smuzhiyun
469*4882a593Smuzhiyun static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data * policy)470*4882a593Smuzhiyun cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
471*4882a593Smuzhiyun {
472*4882a593Smuzhiyun cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
473*4882a593Smuzhiyun policy->cpuinfo.max_freq);
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun #ifdef CONFIG_CPU_FREQ
477*4882a593Smuzhiyun void cpufreq_suspend(void);
478*4882a593Smuzhiyun void cpufreq_resume(void);
479*4882a593Smuzhiyun int cpufreq_generic_suspend(struct cpufreq_policy *policy);
480*4882a593Smuzhiyun #else
cpufreq_suspend(void)481*4882a593Smuzhiyun static inline void cpufreq_suspend(void) {}
cpufreq_resume(void)482*4882a593Smuzhiyun static inline void cpufreq_resume(void) {}
483*4882a593Smuzhiyun #endif
484*4882a593Smuzhiyun
485*4882a593Smuzhiyun /*********************************************************************
486*4882a593Smuzhiyun * CPUFREQ NOTIFIER INTERFACE *
487*4882a593Smuzhiyun *********************************************************************/
488*4882a593Smuzhiyun
489*4882a593Smuzhiyun #define CPUFREQ_TRANSITION_NOTIFIER (0)
490*4882a593Smuzhiyun #define CPUFREQ_POLICY_NOTIFIER (1)
491*4882a593Smuzhiyun
492*4882a593Smuzhiyun /* Transition notifiers */
493*4882a593Smuzhiyun #define CPUFREQ_PRECHANGE (0)
494*4882a593Smuzhiyun #define CPUFREQ_POSTCHANGE (1)
495*4882a593Smuzhiyun
496*4882a593Smuzhiyun /* Policy Notifiers */
497*4882a593Smuzhiyun #define CPUFREQ_CREATE_POLICY (0)
498*4882a593Smuzhiyun #define CPUFREQ_REMOVE_POLICY (1)
499*4882a593Smuzhiyun
500*4882a593Smuzhiyun #ifdef CONFIG_CPU_FREQ
501*4882a593Smuzhiyun int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
502*4882a593Smuzhiyun int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
503*4882a593Smuzhiyun
504*4882a593Smuzhiyun void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
505*4882a593Smuzhiyun struct cpufreq_freqs *freqs);
506*4882a593Smuzhiyun void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
507*4882a593Smuzhiyun struct cpufreq_freqs *freqs, int transition_failed);
508*4882a593Smuzhiyun
509*4882a593Smuzhiyun #else /* CONFIG_CPU_FREQ */
/* CONFIG_CPU_FREQ=n stub: registration trivially succeeds. */
static inline int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	return 0;
}
/* CONFIG_CPU_FREQ=n stub: unregistration trivially succeeds. */
static inline int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	return 0;
}
520*4882a593Smuzhiyun #endif /* !CONFIG_CPU_FREQ */
521*4882a593Smuzhiyun
522*4882a593Smuzhiyun /**
523*4882a593Smuzhiyun * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
524*4882a593Smuzhiyun * safe)
525*4882a593Smuzhiyun * @old: old value
526*4882a593Smuzhiyun * @div: divisor
527*4882a593Smuzhiyun * @mult: multiplier
528*4882a593Smuzhiyun *
529*4882a593Smuzhiyun *
530*4882a593Smuzhiyun * new = old * mult / div
531*4882a593Smuzhiyun */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
					  u_int mult)
{
#if BITS_PER_LONG == 32
	/* 32-bit: widen to u64 first so old * mult cannot overflow. */
	u64 result = ((u64) old) * ((u64) mult);
	/* do_div() is used because 64/32 division needs a helper on 32-bit. */
	do_div(result, div);
	return (unsigned long) result;

#elif BITS_PER_LONG == 64
	/* 64-bit: unsigned long is 64 bits wide, so the product fits. */
	unsigned long result = old * ((u64) mult);
	result /= div;
	return result;
#endif
}
546*4882a593Smuzhiyun
547*4882a593Smuzhiyun /*********************************************************************
548*4882a593Smuzhiyun * CPUFREQ GOVERNORS *
549*4882a593Smuzhiyun *********************************************************************/
550*4882a593Smuzhiyun
551*4882a593Smuzhiyun #define CPUFREQ_POLICY_UNKNOWN (0)
552*4882a593Smuzhiyun /*
553*4882a593Smuzhiyun * If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
555*4882a593Smuzhiyun * two generic policies are available:
556*4882a593Smuzhiyun */
557*4882a593Smuzhiyun #define CPUFREQ_POLICY_POWERSAVE (1)
558*4882a593Smuzhiyun #define CPUFREQ_POLICY_PERFORMANCE (2)
559*4882a593Smuzhiyun
560*4882a593Smuzhiyun /*
561*4882a593Smuzhiyun * The polling frequency depends on the capability of the processor. Default
562*4882a593Smuzhiyun * polling frequency is 1000 times the transition latency of the processor. The
563*4882a593Smuzhiyun * ondemand governor will work on any processor with transition latency <= 10ms,
564*4882a593Smuzhiyun * using appropriate sampling rate.
565*4882a593Smuzhiyun */
566*4882a593Smuzhiyun #define LATENCY_MULTIPLIER (1000)
567*4882a593Smuzhiyun
/*
 * A cpufreq governor: decides what frequency to use within the policy
 * limits (see the comment above CPUFREQ_POLICY_POWERSAVE). Registered
 * with the core via cpufreq_register_governor().
 */
struct cpufreq_governor {
	char name[CPUFREQ_NAME_LEN];
	int (*init)(struct cpufreq_policy *policy);
	void (*exit)(struct cpufreq_policy *policy);
	int (*start)(struct cpufreq_policy *policy);
	void (*stop)(struct cpufreq_policy *policy);
	void (*limits)(struct cpufreq_policy *policy);
	/* Optional sysfs setspeed interface. */
	ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
					 char *buf);
	int (*store_setspeed) (struct cpufreq_policy *policy,
				       unsigned int freq);
	struct list_head governor_list;
	struct module *owner;
	u8 flags;		/* CPUFREQ_GOV_* flags, see below */
};
583*4882a593Smuzhiyun
584*4882a593Smuzhiyun /* Governor flags */
585*4882a593Smuzhiyun
586*4882a593Smuzhiyun /* For governors which change frequency dynamically by themselves */
587*4882a593Smuzhiyun #define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0)
588*4882a593Smuzhiyun
589*4882a593Smuzhiyun /* For governors wanting the target frequency to be set exactly */
590*4882a593Smuzhiyun #define CPUFREQ_GOV_STRICT_TARGET BIT(1)
591*4882a593Smuzhiyun
592*4882a593Smuzhiyun
593*4882a593Smuzhiyun /* Pass a target to the cpufreq driver */
594*4882a593Smuzhiyun unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
595*4882a593Smuzhiyun unsigned int target_freq);
596*4882a593Smuzhiyun int cpufreq_driver_target(struct cpufreq_policy *policy,
597*4882a593Smuzhiyun unsigned int target_freq,
598*4882a593Smuzhiyun unsigned int relation);
599*4882a593Smuzhiyun int __cpufreq_driver_target(struct cpufreq_policy *policy,
600*4882a593Smuzhiyun unsigned int target_freq,
601*4882a593Smuzhiyun unsigned int relation);
602*4882a593Smuzhiyun unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
603*4882a593Smuzhiyun unsigned int target_freq);
604*4882a593Smuzhiyun unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
605*4882a593Smuzhiyun int cpufreq_register_governor(struct cpufreq_governor *governor);
606*4882a593Smuzhiyun void cpufreq_unregister_governor(struct cpufreq_governor *governor);
607*4882a593Smuzhiyun int cpufreq_start_governor(struct cpufreq_policy *policy);
608*4882a593Smuzhiyun void cpufreq_stop_governor(struct cpufreq_policy *policy);
609*4882a593Smuzhiyun
/*
 * cpufreq_governor_init - emit a core_initcall() that registers @__governor.
 * Use in a governor's source file to hook registration into boot.
 */
#define cpufreq_governor_init(__governor)				\
static int __init __governor##_init(void)				\
{									\
	return cpufreq_register_governor(&__governor);			\
}									\
core_initcall(__governor##_init)
616*4882a593Smuzhiyun
/*
 * cpufreq_governor_exit - emit a module_exit() that unregisters @__governor.
 *
 * Note: cpufreq_unregister_governor() returns void, so the generated exit
 * function must not "return" its value - doing so is an ISO C constraint
 * violation (only accepted as a GCC extension) and is flagged by checkpatch.
 */
#define cpufreq_governor_exit(__governor)				\
static void __exit __governor##_exit(void)				\
{									\
	cpufreq_unregister_governor(&__governor);			\
}									\
module_exit(__governor##_exit)
623*4882a593Smuzhiyun
624*4882a593Smuzhiyun struct cpufreq_governor *cpufreq_default_governor(void);
625*4882a593Smuzhiyun struct cpufreq_governor *cpufreq_fallback_governor(void);
626*4882a593Smuzhiyun
cpufreq_policy_apply_limits(struct cpufreq_policy * policy)627*4882a593Smuzhiyun static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
628*4882a593Smuzhiyun {
629*4882a593Smuzhiyun if (policy->max < policy->cur)
630*4882a593Smuzhiyun __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
631*4882a593Smuzhiyun else if (policy->min > policy->cur)
632*4882a593Smuzhiyun __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
633*4882a593Smuzhiyun }
634*4882a593Smuzhiyun
635*4882a593Smuzhiyun /* Governor attribute set */
/* Governor attribute set */
struct gov_attr_set {
	struct kobject kobj;		/* backs the sysfs directory for the tunables */
	struct list_head policy_list;	/* policies sharing this attribute set */
	struct mutex update_lock;	/* serializes membership/tunable updates */
	int usage_count;		/* attachments; see gov_attr_set_get()/put() */
};
642*4882a593Smuzhiyun
643*4882a593Smuzhiyun /* sysfs ops for cpufreq governors */
644*4882a593Smuzhiyun extern const struct sysfs_ops governor_sysfs_ops;
645*4882a593Smuzhiyun
646*4882a593Smuzhiyun void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
647*4882a593Smuzhiyun void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
648*4882a593Smuzhiyun unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
649*4882a593Smuzhiyun
650*4882a593Smuzhiyun /* Governor sysfs attribute */
/*
 * Governor sysfs attribute: like struct attribute, but show/store operate
 * on the enclosing gov_attr_set rather than on a kobject directly.
 */
struct governor_attr {
	struct attribute attr;
	ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
			 size_t count);
};
657*4882a593Smuzhiyun
658*4882a593Smuzhiyun /*********************************************************************
659*4882a593Smuzhiyun * FREQUENCY TABLE HELPERS *
660*4882a593Smuzhiyun *********************************************************************/
661*4882a593Smuzhiyun
662*4882a593Smuzhiyun /* Special Values of .frequency field */
663*4882a593Smuzhiyun #define CPUFREQ_ENTRY_INVALID ~0u
664*4882a593Smuzhiyun #define CPUFREQ_TABLE_END ~1u
665*4882a593Smuzhiyun /* Special Values of .flags field */
666*4882a593Smuzhiyun #define CPUFREQ_BOOST_FREQ (1 << 0)
667*4882a593Smuzhiyun
struct cpufreq_frequency_table {
	unsigned int flags;		/* CPUFREQ_BOOST_FREQ or 0 */
	unsigned int driver_data;	/* driver specific data, not used by core */
	unsigned int frequency;		/* kHz - doesn't need to be in ascending
					 * order; CPUFREQ_ENTRY_INVALID skips an
					 * entry, CPUFREQ_TABLE_END terminates */
};
674*4882a593Smuzhiyun
675*4882a593Smuzhiyun #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
676*4882a593Smuzhiyun int dev_pm_opp_init_cpufreq_table(struct device *dev,
677*4882a593Smuzhiyun struct cpufreq_frequency_table **table);
678*4882a593Smuzhiyun void dev_pm_opp_free_cpufreq_table(struct device *dev,
679*4882a593Smuzhiyun struct cpufreq_frequency_table **table);
680*4882a593Smuzhiyun #else
/* Stub: CPU_FREQ and/or PM_OPP disabled, no OPP-backed table available. */
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
						struct cpufreq_frequency_table
						**table)
{
	return -EINVAL;
}
687*4882a593Smuzhiyun
/* Stub: nothing to free when OPP support is not built in. */
static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
						 struct cpufreq_frequency_table
						 **table)
{
}
693*4882a593Smuzhiyun #endif
694*4882a593Smuzhiyun
/**
 * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 *
 * Visits every entry up to (not including) the CPUFREQ_TABLE_END sentinel.
 */

#define cpufreq_for_each_entry(pos, table)	\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
703*4882a593Smuzhiyun
/**
 * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
 *	with index
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 * @idx: the table entry currently being processed
 *
 * Like cpufreq_for_each_entry(), additionally keeping @idx in step with @pos.
 */

#define cpufreq_for_each_entry_idx(pos, table, idx)	\
	for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
		pos++, idx++)
715*4882a593Smuzhiyun
/**
 * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
 *	excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 *
 * The trailing "else" binds the caller's loop body so that invalid entries
 * are skipped transparently.
 */

#define cpufreq_for_each_valid_entry(pos, table)			\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)	\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else
728*4882a593Smuzhiyun
/**
 * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq
 *	frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 * @idx: the table entry currently being processed
 *
 * Note: @idx still advances over invalid entries, so it remains the entry's
 * absolute position in @table.
 */

#define cpufreq_for_each_valid_entry_idx(pos, table, idx)		\
	cpufreq_for_each_entry_idx(pos, table, idx)			\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else
742*4882a593Smuzhiyun
743*4882a593Smuzhiyun
744*4882a593Smuzhiyun int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
745*4882a593Smuzhiyun struct cpufreq_frequency_table *table);
746*4882a593Smuzhiyun
747*4882a593Smuzhiyun int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
748*4882a593Smuzhiyun struct cpufreq_frequency_table *table);
749*4882a593Smuzhiyun int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
750*4882a593Smuzhiyun
751*4882a593Smuzhiyun int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
752*4882a593Smuzhiyun unsigned int target_freq,
753*4882a593Smuzhiyun unsigned int relation);
754*4882a593Smuzhiyun int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
755*4882a593Smuzhiyun unsigned int freq);
756*4882a593Smuzhiyun
757*4882a593Smuzhiyun ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
758*4882a593Smuzhiyun
759*4882a593Smuzhiyun #ifdef CONFIG_CPU_FREQ
760*4882a593Smuzhiyun int cpufreq_boost_trigger_state(int state);
761*4882a593Smuzhiyun int cpufreq_boost_enabled(void);
762*4882a593Smuzhiyun int cpufreq_enable_boost_support(void);
763*4882a593Smuzhiyun bool policy_has_boost_freq(struct cpufreq_policy *policy);
764*4882a593Smuzhiyun
765*4882a593Smuzhiyun /* Find lowest freq at or above target in a table in ascending order */
cpufreq_table_find_index_al(struct cpufreq_policy * policy,unsigned int target_freq)766*4882a593Smuzhiyun static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
767*4882a593Smuzhiyun unsigned int target_freq)
768*4882a593Smuzhiyun {
769*4882a593Smuzhiyun struct cpufreq_frequency_table *table = policy->freq_table;
770*4882a593Smuzhiyun struct cpufreq_frequency_table *pos;
771*4882a593Smuzhiyun unsigned int freq;
772*4882a593Smuzhiyun int idx, best = -1;
773*4882a593Smuzhiyun
774*4882a593Smuzhiyun cpufreq_for_each_valid_entry_idx(pos, table, idx) {
775*4882a593Smuzhiyun freq = pos->frequency;
776*4882a593Smuzhiyun
777*4882a593Smuzhiyun if (freq >= target_freq)
778*4882a593Smuzhiyun return idx;
779*4882a593Smuzhiyun
780*4882a593Smuzhiyun best = idx;
781*4882a593Smuzhiyun }
782*4882a593Smuzhiyun
783*4882a593Smuzhiyun return best;
784*4882a593Smuzhiyun }
785*4882a593Smuzhiyun
786*4882a593Smuzhiyun /* Find lowest freq at or above target in a table in descending order */
/*
 * Find lowest freq at or above target in a table in descending order.
 * Returns the table index of the match, or the index of the lowest valid
 * entry when every entry is below target.
 */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq > target_freq) {
			/* Still above target; remember and keep descending. */
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		/* First entry below target: previous candidate is the answer. */
		return best;
	}

	return best;
}
815*4882a593Smuzhiyun
816*4882a593Smuzhiyun /* Works only on sorted freq-tables */
cpufreq_table_find_index_l(struct cpufreq_policy * policy,unsigned int target_freq)817*4882a593Smuzhiyun static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
818*4882a593Smuzhiyun unsigned int target_freq)
819*4882a593Smuzhiyun {
820*4882a593Smuzhiyun target_freq = clamp_val(target_freq, policy->min, policy->max);
821*4882a593Smuzhiyun
822*4882a593Smuzhiyun if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
823*4882a593Smuzhiyun return cpufreq_table_find_index_al(policy, target_freq);
824*4882a593Smuzhiyun else
825*4882a593Smuzhiyun return cpufreq_table_find_index_dl(policy, target_freq);
826*4882a593Smuzhiyun }
827*4882a593Smuzhiyun
828*4882a593Smuzhiyun /* Find highest freq at or below target in a table in ascending order */
/*
 * Find highest freq at or below target in a table in ascending order.
 * Returns the table index of the match, or the index of the highest valid
 * entry when every entry is above target.
 */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq < target_freq) {
			/* Still below target; remember and keep ascending. */
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		/* First entry above target: previous candidate is the answer. */
		return best;
	}

	return best;
}
857*4882a593Smuzhiyun
858*4882a593Smuzhiyun /* Find highest freq at or below target in a table in descending order */
cpufreq_table_find_index_dh(struct cpufreq_policy * policy,unsigned int target_freq)859*4882a593Smuzhiyun static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
860*4882a593Smuzhiyun unsigned int target_freq)
861*4882a593Smuzhiyun {
862*4882a593Smuzhiyun struct cpufreq_frequency_table *table = policy->freq_table;
863*4882a593Smuzhiyun struct cpufreq_frequency_table *pos;
864*4882a593Smuzhiyun unsigned int freq;
865*4882a593Smuzhiyun int idx, best = -1;
866*4882a593Smuzhiyun
867*4882a593Smuzhiyun cpufreq_for_each_valid_entry_idx(pos, table, idx) {
868*4882a593Smuzhiyun freq = pos->frequency;
869*4882a593Smuzhiyun
870*4882a593Smuzhiyun if (freq <= target_freq)
871*4882a593Smuzhiyun return idx;
872*4882a593Smuzhiyun
873*4882a593Smuzhiyun best = idx;
874*4882a593Smuzhiyun }
875*4882a593Smuzhiyun
876*4882a593Smuzhiyun return best;
877*4882a593Smuzhiyun }
878*4882a593Smuzhiyun
879*4882a593Smuzhiyun /* Works only on sorted freq-tables */
cpufreq_table_find_index_h(struct cpufreq_policy * policy,unsigned int target_freq)880*4882a593Smuzhiyun static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
881*4882a593Smuzhiyun unsigned int target_freq)
882*4882a593Smuzhiyun {
883*4882a593Smuzhiyun target_freq = clamp_val(target_freq, policy->min, policy->max);
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
886*4882a593Smuzhiyun return cpufreq_table_find_index_ah(policy, target_freq);
887*4882a593Smuzhiyun else
888*4882a593Smuzhiyun return cpufreq_table_find_index_dh(policy, target_freq);
889*4882a593Smuzhiyun }
890*4882a593Smuzhiyun
891*4882a593Smuzhiyun /* Find closest freq to target in a table in ascending order */
/*
 * Find closest freq to target in a table in ascending order.
 * When target falls between two entries, the numerically closer one wins
 * (ties go to the lower entry).
 */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq < target_freq) {
			/* Still below target; remember and keep ascending. */
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		/* Choose the closest freq */
		if (target_freq - table[best].frequency > freq - target_freq)
			return idx;

		return best;
	}

	return best;
}
924*4882a593Smuzhiyun
925*4882a593Smuzhiyun /* Find closest freq to target in a table in descending order */
/*
 * Find closest freq to target in a table in descending order.
 * When target falls between two entries, the numerically closer one wins
 * (ties go to the higher entry).
 */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq > target_freq) {
			/* Still above target; remember and keep descending. */
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		/* Choose the closest freq */
		if (table[best].frequency - target_freq > target_freq - freq)
			return idx;

		return best;
	}

	return best;
}
958*4882a593Smuzhiyun
959*4882a593Smuzhiyun /* Works only on sorted freq-tables */
cpufreq_table_find_index_c(struct cpufreq_policy * policy,unsigned int target_freq)960*4882a593Smuzhiyun static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
961*4882a593Smuzhiyun unsigned int target_freq)
962*4882a593Smuzhiyun {
963*4882a593Smuzhiyun target_freq = clamp_val(target_freq, policy->min, policy->max);
964*4882a593Smuzhiyun
965*4882a593Smuzhiyun if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
966*4882a593Smuzhiyun return cpufreq_table_find_index_ac(policy, target_freq);
967*4882a593Smuzhiyun else
968*4882a593Smuzhiyun return cpufreq_table_find_index_dc(policy, target_freq);
969*4882a593Smuzhiyun }
970*4882a593Smuzhiyun
cpufreq_frequency_table_target(struct cpufreq_policy * policy,unsigned int target_freq,unsigned int relation)971*4882a593Smuzhiyun static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
972*4882a593Smuzhiyun unsigned int target_freq,
973*4882a593Smuzhiyun unsigned int relation)
974*4882a593Smuzhiyun {
975*4882a593Smuzhiyun if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
976*4882a593Smuzhiyun return cpufreq_table_index_unsorted(policy, target_freq,
977*4882a593Smuzhiyun relation);
978*4882a593Smuzhiyun
979*4882a593Smuzhiyun switch (relation) {
980*4882a593Smuzhiyun case CPUFREQ_RELATION_L:
981*4882a593Smuzhiyun return cpufreq_table_find_index_l(policy, target_freq);
982*4882a593Smuzhiyun case CPUFREQ_RELATION_H:
983*4882a593Smuzhiyun return cpufreq_table_find_index_h(policy, target_freq);
984*4882a593Smuzhiyun case CPUFREQ_RELATION_C:
985*4882a593Smuzhiyun return cpufreq_table_find_index_c(policy, target_freq);
986*4882a593Smuzhiyun default:
987*4882a593Smuzhiyun WARN_ON_ONCE(1);
988*4882a593Smuzhiyun return 0;
989*4882a593Smuzhiyun }
990*4882a593Smuzhiyun }
991*4882a593Smuzhiyun
cpufreq_table_count_valid_entries(const struct cpufreq_policy * policy)992*4882a593Smuzhiyun static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
993*4882a593Smuzhiyun {
994*4882a593Smuzhiyun struct cpufreq_frequency_table *pos;
995*4882a593Smuzhiyun int count = 0;
996*4882a593Smuzhiyun
997*4882a593Smuzhiyun if (unlikely(!policy->freq_table))
998*4882a593Smuzhiyun return 0;
999*4882a593Smuzhiyun
1000*4882a593Smuzhiyun cpufreq_for_each_valid_entry(pos, policy->freq_table)
1001*4882a593Smuzhiyun count++;
1002*4882a593Smuzhiyun
1003*4882a593Smuzhiyun return count;
1004*4882a593Smuzhiyun }
1005*4882a593Smuzhiyun #else
/* Stub: CONFIG_CPU_FREQ disabled, boost state changes are a no-op. */
static inline int cpufreq_boost_trigger_state(int state)
{
	return 0;
}
/* Stub: CONFIG_CPU_FREQ disabled, boost is never reported enabled. */
static inline int cpufreq_boost_enabled(void)
{
	return 0;
}
1014*4882a593Smuzhiyun
/* Stub: CONFIG_CPU_FREQ disabled, boost support cannot be enabled. */
static inline int cpufreq_enable_boost_support(void)
{
	return -EINVAL;
}
1019*4882a593Smuzhiyun
/* Stub: CONFIG_CPU_FREQ disabled, no boost frequencies exist. */
static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
	return false;
}
1024*4882a593Smuzhiyun #endif
1025*4882a593Smuzhiyun
1026*4882a593Smuzhiyun extern void arch_freq_prepare_all(void);
1027*4882a593Smuzhiyun extern unsigned int arch_freq_get_on_cpu(int cpu);
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun #ifndef arch_set_freq_scale
/*
 * Default no-op.  Architectures that implement frequency-invariant load
 * tracking define their own arch_set_freq_scale(), in which case the
 * surrounding #ifndef compiles this stub out.
 */
static __always_inline
void arch_set_freq_scale(const struct cpumask *cpus,
			 unsigned long cur_freq,
			 unsigned long max_freq)
{
}
1037*4882a593Smuzhiyun
1038*4882a593Smuzhiyun /* the following are really really optional */
1039*4882a593Smuzhiyun extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
1040*4882a593Smuzhiyun extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
1041*4882a593Smuzhiyun extern struct freq_attr *cpufreq_generic_attr[];
1042*4882a593Smuzhiyun int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun unsigned int cpufreq_generic_get(unsigned int cpu);
1045*4882a593Smuzhiyun void cpufreq_generic_init(struct cpufreq_policy *policy,
1046*4882a593Smuzhiyun struct cpufreq_frequency_table *table,
1047*4882a593Smuzhiyun unsigned int transition_latency);
1048*4882a593Smuzhiyun #endif /* _LINUX_CPUFREQ_H */
1049