// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpufreq_times.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
#include <trace/hooks/cpufreq.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)			\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static char default_governor[CPUFREQ_NAME_LEN];

/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
bool cpufreq_supports_freq_invariance(void)
{
	return static_branch_likely(&cpufreq_freq_invariance);
}

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

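/*
 * True when the registered driver selects frequencies itself, via either
 * the ->target() or the index-based ->target_index() callback.
 */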
static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * Each list is protected by its notifier head's own locking
 * (an rwsem for the blocking list, SRCU plus a mutex for the
 * transition list).
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

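/**
 * have_governor_per_policy - Check whether governor tunables are per policy.
 *
 * Returns true if the registered driver requested per-policy governor
 * tunables via CPUFREQ_HAVE_GOVERNOR_PER_POLICY, false when the tunables
 * are system wide.
 */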
bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

static struct kobject *cpufreq_global_kobject;

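/**
 * get_governor_parent_kobj - Parent kobject for governor sysfs attributes.
 * @policy: cpufreq policy to look up the parent kobject for.
 *
 * Returns the policy's own kobject when governor tunables are per policy,
 * or the global cpufreq kobject otherwise.
 */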
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	struct kernel_cpustat kcpustat;
	u64 cur_wall_time;
	u64 idle_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	kcpustat_cpu_fetch(&kcpustat, cpu);

	busy_time = kcpustat.cpustat[CPUTIME_USER];
	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
	busy_time += kcpustat.cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

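/**
 * get_cpu_idle_time - Microseconds @cpu has spent idle.
 * @cpu: CPU to query.
 * @wall: if non-NULL, filled with the wall time in microseconds.
 * @io_busy: if zero, time spent waiting for I/O is counted as idle.
 *
 * Prefers the scheduler's NO_HZ idle accounting and falls back to the
 * jiffies-based estimate when that is unavailable (-1ULL).
 */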
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

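/**
 * cpufreq_cpu_get_raw - Policy for a CPU without taking a reference.
 * @cpu: CPU to find the policy for.
 *
 * Returns the policy if @cpu is present in policy->cpus, or NULL otherwise.
 * No reference is taken, so the caller must not rely on the policy staying
 * around.
 */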
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

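/**
 * cpufreq_generic_get - Generic ->get() callback based on the policy clock.
 * @cpu: CPU to read the frequency of.
 *
 * Returns the rate of the policy's clk in kHz, or 0 when the policy or its
 * clock is unavailable.
 */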
unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy. Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}

/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 * Return the policy if it is active or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/*
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
						ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

/**
 * cpufreq_notify_transition - Notify frequency transition and adjust_jiffies.
 * @policy: cpufreq policy the transition applies to.
 * @freqs: contains details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		cpufreq_times_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
		trace_android_rvh_cpufreq_transition(policy);
	}
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

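/**
 * cpufreq_freq_transition_begin - Mark the start of a frequency transition.
 * @policy: cpufreq policy being updated.
 * @freqs: details of the frequency change.
 *
 * Serializes transitions on @policy (only one may be in flight at a time)
 * and sends the CPUFREQ_PRECHANGE notification. Must be paired with
 * cpufreq_freq_transition_end().
 */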
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
		&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

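/**
 * cpufreq_freq_transition_end - Mark the end of a frequency transition.
 * @policy: cpufreq policy being updated.
 * @freqs: details of the frequency change.
 * @transition_failed: non-zero if the transition did not complete.
 *
 * Sends the CPUFREQ_POSTCHANGE notification (rolling the change back in the
 * notifiers when @transition_failed is set), updates the arch frequency
 * scale factor and wakes up waiters of the transition slot.
 */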
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	arch_set_freq_scale(policy->related_cpus,
			    policy->cur,
			    policy->cpuinfo.max_freq);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);

/*
 * Fast frequency switching status count. Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers. Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	unsigned int old_target_freq = target_freq;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	trace_android_vh_cpufreq_resolve_freq(policy, target_freq, old_target_freq);
	policy->cached_target_freq = target_freq;

	if (cpufreq_driver->target_index) {
		unsigned int idx;

		idx = cpufreq_frequency_table_target(policy, target_freq,
						     CPUFREQ_RELATION_L);
		policy->cached_resolved_idx = idx;
		return policy->freq_table[idx].frequency;
	}

	if (cpufreq_driver->resolve_freq)
		return cpufreq_driver->resolve_freq(policy, target_freq);

	return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);

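/**
 * cpufreq_policy_transition_delay_us - Rate limit between frequency updates.
 * @policy: cpufreq policy to compute the delay for.
 *
 * Returns the driver-provided transition_delay_us when set; otherwise a
 * value derived from the hardware transition latency, capped at 10 ms.
 */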
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (< 10
		 * us), the above formula gives a decent transition delay. But
		 * for platforms where transition_latency is in milliseconds, it
		 * ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to be
		 * a reasonable amount of time after which we should reevaluate
		 * the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

static struct cpufreq_governor *get_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);
	t = find_governor(str_governor);
	if (!t)
		goto unlock;

	if (!try_module_get(t->owner))
		t = NULL;

unlock:
	mutex_unlock(&cpufreq_governor_mutex);

	return t;
}

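/**
 * cpufreq_parse_policy - Map a policy name to its CPUFREQ_POLICY_* value.
 * @str_governor: "performance" or "powersave".
 *
 * Used with setpolicy drivers; returns CPUFREQ_POLICY_UNKNOWN for any other
 * string.
 */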
static unsigned int cpufreq_parse_policy(char *str_governor)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_PERFORMANCE;

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_POWERSAVE;

	return CPUFREQ_POLICY_UNKNOWN;
}

/**
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
	struct cpufreq_governor *t;

	t = get_governor(str_governor);
	if (t)
		return t;

	if (request_module("cpufreq_%s", str_governor))
		return NULL;

	return get_governor(str_governor);
}

/*
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

static ssize_t show_cpuinfo_max_freq(struct cpufreq_policy *policy, char *buf)
{
	unsigned int max_freq = policy->cpuinfo.max_freq;

	trace_android_vh_show_max_freq(policy, &max_freq);
	trace_android_rvh_show_max_freq(policy, &max_freq);
	return sprintf(buf, "%u\n", max_freq);
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

/*
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = sscanf(buf, "%lu", &val);					\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = freq_qos_update_request(policy->object##_freq_req, val);	\
	return ret >= 0 ? count : ret;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}

/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	char str_governor[16];
	int ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		unsigned int new_pol;

		new_pol = cpufreq_parse_policy(str_governor);
		if (!new_pol)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, NULL, new_pol);
	} else {
		struct cpufreq_governor *new_gov;

		new_gov = cpufreq_parse_governor(str_governor);
		if (!new_gov)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, new_gov,
					 CPUFREQ_POLICY_UNKNOWN);

		module_put(new_gov->owner);
	}

	return ret ? ret : count;
}

/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	mutex_lock(&cpufreq_governor_mutex);
	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			break;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
	mutex_unlock(&cpufreq_governor_mutex);
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

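/**
 * cpufreq_show_cpus - Format a cpumask as a space-separated list of CPU ids.
 * @mask: cpumask to print.
 * @buf: sysfs buffer (at most PAGE_SIZE bytes are written).
 *
 * Returns the number of bytes written, including the trailing newline.
 */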
cpufreq_show_cpus(const struct cpumask * mask,char * buf)854*4882a593Smuzhiyun ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
855*4882a593Smuzhiyun {
856*4882a593Smuzhiyun ssize_t i = 0;
857*4882a593Smuzhiyun unsigned int cpu;
858*4882a593Smuzhiyun
859*4882a593Smuzhiyun for_each_cpu(cpu, mask) {
860*4882a593Smuzhiyun if (i)
861*4882a593Smuzhiyun i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
862*4882a593Smuzhiyun i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
863*4882a593Smuzhiyun if (i >= (PAGE_SIZE - 5))
864*4882a593Smuzhiyun break;
865*4882a593Smuzhiyun }
866*4882a593Smuzhiyun i += sprintf(&buf[i], "\n");
867*4882a593Smuzhiyun return i;
868*4882a593Smuzhiyun }
869*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
870*4882a593Smuzhiyun
871*4882a593Smuzhiyun /*
872*4882a593Smuzhiyun * show_related_cpus - show the CPUs affected by each transition even if
873*4882a593Smuzhiyun * hw coordination is in use
874*4882a593Smuzhiyun */
show_related_cpus(struct cpufreq_policy * policy,char * buf)875*4882a593Smuzhiyun static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
876*4882a593Smuzhiyun {
877*4882a593Smuzhiyun return cpufreq_show_cpus(policy->related_cpus, buf);
878*4882a593Smuzhiyun }
879*4882a593Smuzhiyun
880*4882a593Smuzhiyun /*
881*4882a593Smuzhiyun * show_affected_cpus - show the CPUs affected by each transition
882*4882a593Smuzhiyun */
show_affected_cpus(struct cpufreq_policy * policy,char * buf)883*4882a593Smuzhiyun static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
884*4882a593Smuzhiyun {
885*4882a593Smuzhiyun return cpufreq_show_cpus(policy->cpus, buf);
886*4882a593Smuzhiyun }
887*4882a593Smuzhiyun
store_scaling_setspeed(struct cpufreq_policy * policy,const char * buf,size_t count)888*4882a593Smuzhiyun static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
889*4882a593Smuzhiyun const char *buf, size_t count)
890*4882a593Smuzhiyun {
891*4882a593Smuzhiyun unsigned int freq = 0;
892*4882a593Smuzhiyun unsigned int ret;
893*4882a593Smuzhiyun
894*4882a593Smuzhiyun if (!policy->governor || !policy->governor->store_setspeed)
895*4882a593Smuzhiyun return -EINVAL;
896*4882a593Smuzhiyun
897*4882a593Smuzhiyun ret = sscanf(buf, "%u", &freq);
898*4882a593Smuzhiyun if (ret != 1)
899*4882a593Smuzhiyun return -EINVAL;
900*4882a593Smuzhiyun
901*4882a593Smuzhiyun policy->governor->store_setspeed(policy, freq);
902*4882a593Smuzhiyun
903*4882a593Smuzhiyun return count;
904*4882a593Smuzhiyun }
905*4882a593Smuzhiyun
show_scaling_setspeed(struct cpufreq_policy * policy,char * buf)906*4882a593Smuzhiyun static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
907*4882a593Smuzhiyun {
908*4882a593Smuzhiyun if (!policy->governor || !policy->governor->show_setspeed)
909*4882a593Smuzhiyun return sprintf(buf, "<unsupported>\n");
910*4882a593Smuzhiyun
911*4882a593Smuzhiyun return policy->governor->show_setspeed(policy, buf);
912*4882a593Smuzhiyun }
913*4882a593Smuzhiyun
914*4882a593Smuzhiyun /*
915*4882a593Smuzhiyun * show_bios_limit - show the current cpufreq HW/BIOS limitation
916*4882a593Smuzhiyun */
show_bios_limit(struct cpufreq_policy * policy,char * buf)917*4882a593Smuzhiyun static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
918*4882a593Smuzhiyun {
919*4882a593Smuzhiyun unsigned int limit;
920*4882a593Smuzhiyun int ret;
921*4882a593Smuzhiyun ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
922*4882a593Smuzhiyun if (!ret)
923*4882a593Smuzhiyun return sprintf(buf, "%u\n", limit);
924*4882a593Smuzhiyun return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
925*4882a593Smuzhiyun }
926*4882a593Smuzhiyun
927*4882a593Smuzhiyun cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
928*4882a593Smuzhiyun cpufreq_freq_attr_ro(cpuinfo_min_freq);
929*4882a593Smuzhiyun cpufreq_freq_attr_ro(cpuinfo_max_freq);
930*4882a593Smuzhiyun cpufreq_freq_attr_ro(cpuinfo_transition_latency);
931*4882a593Smuzhiyun cpufreq_freq_attr_ro(scaling_available_governors);
932*4882a593Smuzhiyun cpufreq_freq_attr_ro(scaling_driver);
933*4882a593Smuzhiyun cpufreq_freq_attr_ro(scaling_cur_freq);
934*4882a593Smuzhiyun cpufreq_freq_attr_ro(bios_limit);
935*4882a593Smuzhiyun cpufreq_freq_attr_ro(related_cpus);
936*4882a593Smuzhiyun cpufreq_freq_attr_ro(affected_cpus);
937*4882a593Smuzhiyun cpufreq_freq_attr_rw(scaling_min_freq);
938*4882a593Smuzhiyun cpufreq_freq_attr_rw(scaling_max_freq);
939*4882a593Smuzhiyun cpufreq_freq_attr_rw(scaling_governor);
940*4882a593Smuzhiyun cpufreq_freq_attr_rw(scaling_setspeed);
941*4882a593Smuzhiyun
942*4882a593Smuzhiyun static struct attribute *default_attrs[] = {
943*4882a593Smuzhiyun &cpuinfo_min_freq.attr,
944*4882a593Smuzhiyun &cpuinfo_max_freq.attr,
945*4882a593Smuzhiyun &cpuinfo_transition_latency.attr,
946*4882a593Smuzhiyun &scaling_min_freq.attr,
947*4882a593Smuzhiyun &scaling_max_freq.attr,
948*4882a593Smuzhiyun &affected_cpus.attr,
949*4882a593Smuzhiyun &related_cpus.attr,
950*4882a593Smuzhiyun &scaling_governor.attr,
951*4882a593Smuzhiyun &scaling_driver.attr,
952*4882a593Smuzhiyun &scaling_available_governors.attr,
953*4882a593Smuzhiyun &scaling_setspeed.attr,
954*4882a593Smuzhiyun NULL
955*4882a593Smuzhiyun };
956*4882a593Smuzhiyun
957*4882a593Smuzhiyun #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
958*4882a593Smuzhiyun #define to_attr(a) container_of(a, struct freq_attr, attr)
959*4882a593Smuzhiyun
show(struct kobject * kobj,struct attribute * attr,char * buf)960*4882a593Smuzhiyun static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
961*4882a593Smuzhiyun {
962*4882a593Smuzhiyun struct cpufreq_policy *policy = to_policy(kobj);
963*4882a593Smuzhiyun struct freq_attr *fattr = to_attr(attr);
964*4882a593Smuzhiyun ssize_t ret;
965*4882a593Smuzhiyun
966*4882a593Smuzhiyun if (!fattr->show)
967*4882a593Smuzhiyun return -EIO;
968*4882a593Smuzhiyun
969*4882a593Smuzhiyun down_read(&policy->rwsem);
970*4882a593Smuzhiyun ret = fattr->show(policy, buf);
971*4882a593Smuzhiyun up_read(&policy->rwsem);
972*4882a593Smuzhiyun
973*4882a593Smuzhiyun return ret;
974*4882a593Smuzhiyun }
975*4882a593Smuzhiyun
store(struct kobject * kobj,struct attribute * attr,const char * buf,size_t count)976*4882a593Smuzhiyun static ssize_t store(struct kobject *kobj, struct attribute *attr,
977*4882a593Smuzhiyun const char *buf, size_t count)
978*4882a593Smuzhiyun {
979*4882a593Smuzhiyun struct cpufreq_policy *policy = to_policy(kobj);
980*4882a593Smuzhiyun struct freq_attr *fattr = to_attr(attr);
981*4882a593Smuzhiyun ssize_t ret = -EINVAL;
982*4882a593Smuzhiyun
983*4882a593Smuzhiyun if (!fattr->store)
984*4882a593Smuzhiyun return -EIO;
985*4882a593Smuzhiyun
986*4882a593Smuzhiyun /*
987*4882a593Smuzhiyun * cpus_read_trylock() is used here to work around a circular lock
988*4882a593Smuzhiyun * dependency problem with respect to the cpufreq_register_driver().
989*4882a593Smuzhiyun */
990*4882a593Smuzhiyun if (!cpus_read_trylock())
991*4882a593Smuzhiyun return -EBUSY;
992*4882a593Smuzhiyun
993*4882a593Smuzhiyun if (cpu_online(policy->cpu)) {
994*4882a593Smuzhiyun down_write(&policy->rwsem);
995*4882a593Smuzhiyun ret = fattr->store(policy, buf, count);
996*4882a593Smuzhiyun up_write(&policy->rwsem);
997*4882a593Smuzhiyun }
998*4882a593Smuzhiyun
999*4882a593Smuzhiyun cpus_read_unlock();
1000*4882a593Smuzhiyun
1001*4882a593Smuzhiyun return ret;
1002*4882a593Smuzhiyun }
1003*4882a593Smuzhiyun
cpufreq_sysfs_release(struct kobject * kobj)1004*4882a593Smuzhiyun static void cpufreq_sysfs_release(struct kobject *kobj)
1005*4882a593Smuzhiyun {
1006*4882a593Smuzhiyun struct cpufreq_policy *policy = to_policy(kobj);
1007*4882a593Smuzhiyun pr_debug("last reference is dropped\n");
1008*4882a593Smuzhiyun complete(&policy->kobj_unregister);
1009*4882a593Smuzhiyun }
1010*4882a593Smuzhiyun
1011*4882a593Smuzhiyun static const struct sysfs_ops sysfs_ops = {
1012*4882a593Smuzhiyun .show = show,
1013*4882a593Smuzhiyun .store = store,
1014*4882a593Smuzhiyun };
1015*4882a593Smuzhiyun
1016*4882a593Smuzhiyun static struct kobj_type ktype_cpufreq = {
1017*4882a593Smuzhiyun .sysfs_ops = &sysfs_ops,
1018*4882a593Smuzhiyun .default_attrs = default_attrs,
1019*4882a593Smuzhiyun .release = cpufreq_sysfs_release,
1020*4882a593Smuzhiyun };
1021*4882a593Smuzhiyun
add_cpu_dev_symlink(struct cpufreq_policy * policy,unsigned int cpu,struct device * dev)1022*4882a593Smuzhiyun static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
1023*4882a593Smuzhiyun struct device *dev)
1024*4882a593Smuzhiyun {
1025*4882a593Smuzhiyun if (unlikely(!dev))
1026*4882a593Smuzhiyun return;
1027*4882a593Smuzhiyun
1028*4882a593Smuzhiyun if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1029*4882a593Smuzhiyun return;
1030*4882a593Smuzhiyun
1031*4882a593Smuzhiyun dev_dbg(dev, "%s: Adding symlink\n", __func__);
1032*4882a593Smuzhiyun if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1033*4882a593Smuzhiyun dev_err(dev, "cpufreq symlink creation failed\n");
1034*4882a593Smuzhiyun }
1035*4882a593Smuzhiyun
remove_cpu_dev_symlink(struct cpufreq_policy * policy,struct device * dev)1036*4882a593Smuzhiyun static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
1037*4882a593Smuzhiyun struct device *dev)
1038*4882a593Smuzhiyun {
1039*4882a593Smuzhiyun dev_dbg(dev, "%s: Removing symlink\n", __func__);
1040*4882a593Smuzhiyun sysfs_remove_link(&dev->kobj, "cpufreq");
1041*4882a593Smuzhiyun }
1042*4882a593Smuzhiyun
cpufreq_add_dev_interface(struct cpufreq_policy * policy)1043*4882a593Smuzhiyun static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1044*4882a593Smuzhiyun {
1045*4882a593Smuzhiyun struct freq_attr **drv_attr;
1046*4882a593Smuzhiyun int ret = 0;
1047*4882a593Smuzhiyun
1048*4882a593Smuzhiyun /* set up files for this cpu device */
1049*4882a593Smuzhiyun drv_attr = cpufreq_driver->attr;
1050*4882a593Smuzhiyun while (drv_attr && *drv_attr) {
1051*4882a593Smuzhiyun ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1052*4882a593Smuzhiyun if (ret)
1053*4882a593Smuzhiyun return ret;
1054*4882a593Smuzhiyun drv_attr++;
1055*4882a593Smuzhiyun }
1056*4882a593Smuzhiyun if (cpufreq_driver->get) {
1057*4882a593Smuzhiyun ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1058*4882a593Smuzhiyun if (ret)
1059*4882a593Smuzhiyun return ret;
1060*4882a593Smuzhiyun }
1061*4882a593Smuzhiyun
1062*4882a593Smuzhiyun ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1063*4882a593Smuzhiyun if (ret)
1064*4882a593Smuzhiyun return ret;
1065*4882a593Smuzhiyun
1066*4882a593Smuzhiyun if (cpufreq_driver->bios_limit) {
1067*4882a593Smuzhiyun ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1068*4882a593Smuzhiyun if (ret)
1069*4882a593Smuzhiyun return ret;
1070*4882a593Smuzhiyun }
1071*4882a593Smuzhiyun
1072*4882a593Smuzhiyun return 0;
1073*4882a593Smuzhiyun }
1074*4882a593Smuzhiyun
cpufreq_init_policy(struct cpufreq_policy * policy)1075*4882a593Smuzhiyun static int cpufreq_init_policy(struct cpufreq_policy *policy)
1076*4882a593Smuzhiyun {
1077*4882a593Smuzhiyun struct cpufreq_governor *gov = NULL;
1078*4882a593Smuzhiyun unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1079*4882a593Smuzhiyun int ret;
1080*4882a593Smuzhiyun
1081*4882a593Smuzhiyun if (has_target()) {
1082*4882a593Smuzhiyun /* Update policy governor to the one used before hotplug. */
1083*4882a593Smuzhiyun gov = get_governor(policy->last_governor);
1084*4882a593Smuzhiyun if (gov) {
1085*4882a593Smuzhiyun pr_debug("Restoring governor %s for cpu %d\n",
1086*4882a593Smuzhiyun gov->name, policy->cpu);
1087*4882a593Smuzhiyun } else {
1088*4882a593Smuzhiyun gov = get_governor(default_governor);
1089*4882a593Smuzhiyun }
1090*4882a593Smuzhiyun
1091*4882a593Smuzhiyun if (!gov) {
1092*4882a593Smuzhiyun gov = cpufreq_default_governor();
1093*4882a593Smuzhiyun __module_get(gov->owner);
1094*4882a593Smuzhiyun }
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun } else {
1097*4882a593Smuzhiyun
1098*4882a593Smuzhiyun /* Use the default policy if there is no last_policy. */
1099*4882a593Smuzhiyun if (policy->last_policy) {
1100*4882a593Smuzhiyun pol = policy->last_policy;
1101*4882a593Smuzhiyun } else {
1102*4882a593Smuzhiyun pol = cpufreq_parse_policy(default_governor);
1103*4882a593Smuzhiyun /*
1104*4882a593Smuzhiyun * In case the default governor is neither "performance"
1105*4882a593Smuzhiyun * nor "powersave", fall back to the initial policy
1106*4882a593Smuzhiyun * value set by the driver.
1107*4882a593Smuzhiyun */
1108*4882a593Smuzhiyun if (pol == CPUFREQ_POLICY_UNKNOWN)
1109*4882a593Smuzhiyun pol = policy->policy;
1110*4882a593Smuzhiyun }
1111*4882a593Smuzhiyun if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1112*4882a593Smuzhiyun pol != CPUFREQ_POLICY_POWERSAVE)
1113*4882a593Smuzhiyun return -ENODATA;
1114*4882a593Smuzhiyun }
1115*4882a593Smuzhiyun
1116*4882a593Smuzhiyun ret = cpufreq_set_policy(policy, gov, pol);
1117*4882a593Smuzhiyun if (gov)
1118*4882a593Smuzhiyun module_put(gov->owner);
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun return ret;
1121*4882a593Smuzhiyun }

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
	if (!policy_is_inactive(policy)) {
		pr_debug("updating policy for CPU %u\n", policy->cpu);

		cpufreq_set_policy(policy, policy->governor, policy->policy);
	}
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);

	pr_debug("handle_update for cpu %u called\n", policy->cpu);
	down_write(&policy->rwsem);
	refresh_frequency_limits(policy);
	up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

	schedule_work(&policy->update);
	return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

	schedule_work(&policy->update);
	return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct device *dev = get_cpu_device(cpu);
	int ret;

	if (!dev)
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	freq_constraints_init(&policy->constraints);

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
				    &policy->nb_min);
	if (ret) {
		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_kobj_remove;
	}

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
				    &policy->nb_max);
	if (ret) {
		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_min_qos_notifier;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_min_qos_notifier:
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);
err_kobj_remove:
	cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
				 &policy->nb_max);
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);

	/* Cancel any pending policy->update work before freeing the policy. */
	cancel_work_sync(&policy->update);

	if (policy->max_freq_req) {
		/*
		 * CPUFREQ_CREATE_POLICY notification is sent only after
		 * successfully adding max_freq_req request.
		 */
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);
		freq_qos_remove_request(policy->max_freq_req);
	}

	freq_qos_remove_request(policy->min_freq_req);
	kfree(policy->min_freq_req);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy. Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	if (!new_policy && cpufreq_driver->online) {
		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}

		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call driver. From then on the cpufreq must be able
		 * to accept all calls to ->verify and ->setpolicy for this CPU.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		/*
		 * The initialization has succeeded and the policy is online.
		 * If there is a problem with its frequency table, take it
		 * offline and drop it.
		 */
		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_offline_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	down_write(&policy->rwsem);
	/*
	 * Affected CPUs must always be the ones that are online. We aren't
	 * managing offline CPUs here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j, get_cpu_device(j));
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req)
			goto out_destroy_policy;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   FREQ_QOS_MIN_DEFAULT_VALUE);
		if (ret < 0) {
			/*
			 * So we don't call freq_qos_remove_request() for an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * freq_qos_remove_request() on uninitialized request in case
		 * of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			goto out_destroy_policy;
		}

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_CREATE_POLICY, policy);
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run at that frequency for a
	 * long duration, so it's better to set it to a frequency which is
	 * specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		unsigned int old_freq = policy->cur;

		/* Are we running at an unknown frequency? */
		ret = cpufreq_frequency_table_get_index(policy, old_freq);
		if (ret == -EINVAL) {
			ret = __cpufreq_driver_target(policy, old_freq - 1,
						      CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that the system will remain stable at the
			 * "unknown" frequency for a longer duration. Hence,
			 * a BUG_ON().
			 */
			BUG_ON(ret);
			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
				__func__, policy->cpu, old_freq, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);
		cpufreq_times_create_policy(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	if (cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

	up_write(&policy->rwsem);

out_offline_policy:
	if (cpufreq_driver->offline)
		cpufreq_driver->offline(policy);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}

/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu, dev);

	return 0;
}

static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
	} else if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}

/*
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus)) {
		/* We did light-weight exit earlier, do full tear down now */
		if (cpufreq_driver->offline)
			cpufreq_driver->exit(policy);

		cpufreq_policy_free(policy);
	}
}

/**
 * cpufreq_out_of_sync - Fix up the actual and saved CPU frequency difference.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	/*
	 * If fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case.
	 */
	if (policy->fast_switch_enabled || !has_target())
		return new_freq;

	if (policy->cur != new_freq) {
		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);
	}

	return new_freq;
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * The return value will be the same as what is shown in scaling_cur_freq in
 * sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
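
/*
 * Illustrative sketch (not part of this file's logic): a typical consumer
 * of cpufreq_quick_get(). Since the value comes from policy->cur (or the
 * driver's ->get() for setpolicy drivers), it can be stale; treat 0 as
 * "no policy for this CPU yet".
 *
 *	unsigned int khz = cpufreq_quick_get(cpu);
 *
 *	if (khz)
 *		pr_info("cpu%u: last known frequency %u kHz\n", cpu, khz);
 */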

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

/**
 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
 * @cpu: CPU number
 *
 * The default return value is the max_freq field of cpuinfo.
 */
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	if (unlikely(policy_is_inactive(policy)))
		return 0;

	return cpufreq_verify_current_freq(policy, true);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current frequency of the CPU as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
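
/*
 * Illustrative sketch (not part of this file's logic): unlike
 * cpufreq_quick_get(), cpufreq_get() queries the driver and may trigger an
 * out-of-sync fixup, so it is the one to use when an accurate hardware
 * reading matters and sleeping is acceptable.
 *
 *	unsigned int khz = cpufreq_get(cpu);	// 0 if no policy or no ->get()
 */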

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
		 policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
				      CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
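
/*
 * Illustrative sketch (not part of this file's logic): how a platform
 * driver opts into this helper. The driver sets policy->suspend_freq from
 * its ->init() callback and points ->suspend at cpufreq_generic_suspend();
 * the foo_* names and the frequency value below are hypothetical.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		...
 *		policy->suspend_freq = 800000;	// kHz, assumed platform value
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.init		= foo_cpufreq_init,
 *		.suspend	= cpufreq_generic_suspend,
 *		...
 *	};
 */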

/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system-wide suspend/hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle, because some of the devices (like i2c, regulators, etc.) they use for
 * changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %s\n", __func__,
			       cpufreq_driver->name);
	}

suspend:
	cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system-wide suspend/hibernate cycles for resuming governors
 * that were suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
			       policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}

/**
 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
 * @flags: Flags to test against the current cpufreq driver's flags.
 *
 * Assumes that the driver is there, so callers must ensure that this is the
 * case.
 */
bool cpufreq_driver_test_flags(u16 flags)
{
	return !!(cpufreq_driver->flags & flags);
}

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
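
/*
 * Illustrative sketch (not part of this file's logic): a driver can stash
 * private state in its cpufreq_driver at registration time and fetch it
 * later through cpufreq_get_driver_data(). The foo_* names are
 * hypothetical.
 *
 *	static struct foo_data foo;
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.driver_data	= &foo,
 *		...
 *	};
 *
 *	struct foo_data *d = cpufreq_get_driver_data();	// NULL if unloaded
 */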

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
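
/*
 * Illustrative sketch (not part of this file's logic): registering for
 * transition notifications. The callback runs once with CPUFREQ_PRECHANGE
 * and once with CPUFREQ_POSTCHANGE around each transition, with a
 * struct cpufreq_freqs pointer as @data. The foo_* names are hypothetical.
 *
 *	static int foo_trans_cb(struct notifier_block *nb,
 *				unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u: %u -> %u kHz\n",
 *				 freqs->policy->cpu, freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_trans_nb = {
 *		.notifier_call	= foo_trans_cb,
 *	};
 *
 *	ret = cpufreq_register_notifier(&foo_trans_nb,
 *					CPUFREQ_TRANSITION_NOTIFIER);
 */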

/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	unsigned int freq;
	unsigned int old_target_freq = target_freq;
	int cpu;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	trace_android_vh_cpufreq_fast_switch(policy, target_freq, old_target_freq);
	freq = cpufreq_driver->fast_switch(policy, target_freq);

	if (!freq)
		return 0;

	policy->cur = freq;
	arch_set_freq_scale(policy->related_cpus, freq,
			    policy->cpuinfo.max_freq);
	cpufreq_stats_record_transition(policy, freq);
	cpufreq_times_record_transition(policy, freq);
	trace_android_rvh_cpufreq_transition(policy);

	if (trace_cpu_frequency_enabled()) {
		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freq, cpu);
	}

	return freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
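
/*
 * Illustrative sketch (not part of this file's logic): how a governor's
 * frequency-update path might use this, modeled loosely on schedutil (the
 * computation of next_f is elided). The caller must respect the
 * constraints documented above and must not sleep.
 *
 *	if (policy->fast_switch_enabled) {
 *		freq = cpufreq_driver_fast_switch(policy, next_f);
 *		if (!freq)
 *			return;	// switch failed, hardware state preserved
 *	}
 */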

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int index;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	trace_android_vh_cpufreq_target(policy, target_freq, old_target_freq);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call, as we are checking it again
	 * after finding the index. But it is left intentionally for cases
	 * where exactly the same frequency is requested again, so we can save
	 * a few function calls.
	 */
	if (target_freq == policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
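
/*
 * Illustrative sketch (not part of this file's logic): requesting the
 * lowest available frequency at or above 1 GHz for a policy. This variant
 * takes policy->rwsem and may sleep, so it must run in process context.
 *
 *	ret = cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_L);
 *	if (ret)
 *		pr_err("cpufreq: failed to set target: %d\n", ret);
 */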
2265*4882a593Smuzhiyun
cpufreq_fallback_governor(void)2266*4882a593Smuzhiyun __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2267*4882a593Smuzhiyun {
2268*4882a593Smuzhiyun return NULL;
2269*4882a593Smuzhiyun }
2270*4882a593Smuzhiyun
cpufreq_init_governor(struct cpufreq_policy * policy)2271*4882a593Smuzhiyun static int cpufreq_init_governor(struct cpufreq_policy *policy)
2272*4882a593Smuzhiyun {
2273*4882a593Smuzhiyun int ret;
2274*4882a593Smuzhiyun
2275*4882a593Smuzhiyun /* Don't start any governor operations if we are entering suspend */
2276*4882a593Smuzhiyun if (cpufreq_suspended)
2277*4882a593Smuzhiyun return 0;
	/*
	 * The governor might not have been initialized here if an ACPI _PPC
	 * change notification occurred, so check for it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching? */
	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);

	return 0;
}

static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}

int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_verify_current_freq(policy, false);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}

void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
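
/*
 * Illustrative sketch (not part of the original file): the minimal shape of
 * a modular governor built on the registration API above. The example_gov
 * name and module hooks are hypothetical; a real governor would also
 * implement at least ->start()/->stop() or ->limits() to drive frequency
 * selection.
 *
 *	static struct cpufreq_governor example_gov = {
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_gov_init(void)
 *	{
 *		return cpufreq_register_governor(&example_gov);
 *	}
 *
 *	static void __exit example_gov_exit(void)
 *	{
 *		cpufreq_unregister_governor(&example_gov);
 *	}
 *
 *	module_init(example_gov_init);
 *	module_exit(example_gov_exit);
 */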


/*********************************************************************
 *                        POLICY INTERFACE                           *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
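
/*
 * Illustrative sketch (not part of the original file): a consumer taking a
 * snapshot of a CPU's policy to inspect its current limits. The variable
 * names are hypothetical; note that the copy is a one-time snapshot and is
 * not kept in sync with later policy updates.
 *
 *	struct cpufreq_policy snapshot;
 *
 *	if (!cpufreq_get_policy(&snapshot, cpu))
 *		pr_info("CPU%u: %u-%u kHz, cur %u kHz\n", cpu,
 *			snapshot.min, snapshot.max, snapshot.cur);
 */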

/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_gov: Policy governor pointer.
 * @new_pol: Policy value (for drivers with built-in governors).
 *
 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
 * limits to be set for the policy, update @policy with the verified limits
 * values and either invoke the driver's ->setpolicy() callback (if present) or
 * carry out a governor update for @policy. That is, run the current governor's
 * ->limits() callback (if @new_gov points to the same object as the one in
 * @policy) or replace the governor for @policy with @new_gov.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol)
{
	struct cpufreq_policy_data new_data;
	struct cpufreq_governor *old_gov;
	int ret;

	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
	new_data.freq_table = policy->freq_table;
	new_data.cpu = policy->cpu;
	/*
	 * The PM QoS framework collects all the requests from users and
	 * provides us with the final aggregated value here.
	 */
	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_data.cpu, new_data.min, new_data.max);

	/*
	 * Verify that the CPU speed can be set within these limits and make sure
	 * that min <= max.
	 */
	ret = cpufreq_driver->verify(&new_data);
	if (ret)
		return ret;

	policy->min = new_data.min;
	policy->max = new_data.max;
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_pol;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_gov == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_gov;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency_limits);

/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
 * for the policy in question, among other things.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && has_target() &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
		goto unlock;

	refresh_frequency_limits(policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
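
/*
 * Illustrative sketch (not part of the original file): platform code that
 * learns of a firmware-imposed limit change (for example an ACPI _PPC
 * notification) can nudge the core to re-evaluate a CPU's policy. The
 * handler name is hypothetical.
 *
 *	static void example_ppc_notify(unsigned int cpu)
 *	{
 *		cpufreq_update_policy(cpu);
 *	}
 */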

/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
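
/*
 * Illustrative sketch (not part of the original file): a driver that can
 * refresh its limits more cheaply than a full policy re-evaluation may wire
 * up ->update_limits. The callback and driver names are hypothetical.
 *
 *	static void example_update_limits(unsigned int cpu)
 *	{
 *		// re-read hardware limit registers for this CPU
 *	}
 *
 *	static struct cpufreq_driver example_driver = {
 *		.update_limits	= example_update_limits,
 *		// remaining callbacks omitted
 *	};
 */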

/*********************************************************************
 *                             BOOST                                 *
 *********************************************************************/
static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	int ret;

	if (!policy->freq_table)
		return -ENXIO;

	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
	if (ret) {
		pr_err("%s: Policy frequency update failed\n", __func__);
		return ret;
	}

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}

int cpufreq_boost_trigger_state(int state)
{
	struct cpufreq_policy *policy;
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	get_online_cpus();
	for_each_active_policy(policy) {
		ret = cpufreq_driver->set_boost(policy, state);
		if (ret)
			goto err_reset_state;
	}
	put_online_cpus();

	return 0;

err_reset_state:
	put_online_cpus();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = !state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_err("%s: Cannot %s BOOST\n",
	       __func__, state ? "enable" : "disable");

	return ret;
}

static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
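
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * frequency table carries CPUFREQ_BOOST_FREQ entries can opt in to the
 * software boost path from its ->init() callback; the function name is
 * hypothetical. Userspace can then toggle the feature through the global
 * "boost" sysfs attribute created above.
 *
 *	static int example_cpu_init(struct cpufreq_policy *policy)
 *	{
 *		// set up policy->freq_table, limits, etc., then:
 *		return cpufreq_enable_boost_support();
 *	}
 */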

/*********************************************************************
 *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * The cpufreq core depends heavily on the availability of device
	 * structures; make sure they are available before proceeding further.
	 */
	if (!get_cpu_device(0))
		return -EPROBE_DEFER;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
	      driver_data->target) ||
	    (driver_data->setpolicy && (driver_data->target_index ||
	       driver_data->target)) ||
	    (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	    (!driver_data->online != !driver_data->offline))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Mark support for the scheduler's frequency invariance engine for
	 * drivers that implement target(), target_index() or fast_switch().
	 */
	if (!cpufreq_driver->setpolicy) {
		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
		pr_debug("supports frequency invariance");
	}

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);

/*
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
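
/*
 * Illustrative sketch (not part of the original file): the usual shape of a
 * modular cpufreq driver pairing the register/unregister calls above in its
 * module init/exit paths. The example_driver ops table and callback names
 * are hypothetical; ->verify(), ->init() and one of ->setpolicy(),
 * ->target() or ->target_index() are the minimum the registration checks
 * above require.
 *
 *	static struct cpufreq_driver example_driver = {
 *		.name		= "example",
 *		.verify		= example_verify,
 *		.init		= example_cpu_init,
 *		.target_index	= example_target_index,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return cpufreq_register_driver(&example_driver);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		cpufreq_unregister_driver(&example_driver);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */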

static int __init cpufreq_core_init(void)
{
	struct cpufreq_governor *gov = cpufreq_default_governor();

	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	if (!strlen(default_governor))
		strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);

	return 0;
}
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);