/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010-2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#ifdef CONFIG_ARCH_ROCKCHIP
#include <linux/input.h>
#endif
#include <linux/irq_work.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/clock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

#define gov_attr_ro(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0444, show_##_name, NULL)

#define gov_attr_wo(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0200, NULL, store_##_name)

#define gov_attr_rw(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0644, show_##_name, store_##_name)

/* Separate instance required for each 'interactive' directory in sysfs */
struct interactive_tunables {
	struct gov_attr_set attr_set;

	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;

	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;

	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;

	/*
	 * The minimum amount of time to spend at a frequency before we can ramp
	 * down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;

	/* The sample rate of the timer used to increase frequency */
	unsigned long sampling_rate;

	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;

	/* Non-zero means indefinite speed boost active */
	int boost;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
#ifdef CONFIG_ARCH_ROCKCHIP
	/* Frequency to which a touch boost takes the cpus */
	unsigned long touchboost_freq;
	/* Duration of a touchboost pulse in usecs */
	int touchboostpulse_duration_val;
	/* End time of touchboost pulse in ktime converted to usecs */
	u64 touchboostpulse_endtime;
#endif
	bool boosted;

	/*
	 * Max additional time to wait in idle, beyond sampling_rate, at speeds
	 * above minimum before wakeup to reduce speed, or 0 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_SAMPLING_RATE)
	unsigned long timer_slack_delay;
	unsigned long timer_slack;
	bool io_is_busy;
};

/* Separate instance required for each 'struct cpufreq_policy' */
struct interactive_policy {
	struct cpufreq_policy *policy;
	struct interactive_tunables *tunables;
	struct list_head tunables_hook;
};

/* Separate instance required for each CPU */
struct interactive_cpu {
	struct update_util_data update_util;
	struct interactive_policy *ipolicy;

	struct irq_work irq_work;
	u64 last_sample_time;
	unsigned long next_sample_jiffies;
	bool work_in_progress;

	struct rw_semaphore enable_sem;
	struct timer_list slack_timer;

	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;

	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;

	unsigned int floor_freq;
	u64 pol_floor_val_time; /* policy floor_validate_time */
	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
	int cpu;
};

static DEFINE_PER_CPU(struct interactive_cpu, interactive_cpu);

/* Realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_SAMPLING_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_SAMPLING_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY
};

/* Iterate over interactive policies for tunables */
#define for_each_ipolicy(__ip)	\
	list_for_each_entry(__ip, &tunables->attr_set.policy_list, tunables_hook)

static struct interactive_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);
#ifdef CONFIG_ARCH_ROCKCHIP
static struct interactive_tunables backup_tunables[2];
#endif

static inline void update_slack_delay(struct interactive_tunables *tunables)
{
	tunables->timer_slack_delay = usecs_to_jiffies(tunables->timer_slack +
						       tunables->sampling_rate);
}

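/*
 * The slack timer is only needed while running above the policy minimum:
 * if the CPU goes idle at an elevated frequency, the timer wakes it so
 * load can be re-evaluated and the frequency lowered.  A timer_slack of
 * 0 disables it.
 */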
static bool timer_slack_required(struct interactive_cpu *icpu)
{
	struct interactive_policy *ipolicy = icpu->ipolicy;
	struct interactive_tunables *tunables = ipolicy->tunables;

	if (tunables->timer_slack == 0)
		return false;

	if (icpu->target_freq > ipolicy->policy->min)
		return true;

	return false;
}

static void gov_slack_timer_start(struct interactive_cpu *icpu, int cpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;

	icpu->slack_timer.expires = jiffies + tunables->timer_slack_delay;
	add_timer_on(&icpu->slack_timer, cpu);
}

static void gov_slack_timer_modify(struct interactive_cpu *icpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;

	mod_timer(&icpu->slack_timer, jiffies + tunables->timer_slack_delay);
}

static void slack_timer_resched(struct interactive_cpu *icpu, int cpu,
				bool modify)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
	unsigned long flags;

	spin_lock_irqsave(&icpu->load_lock, flags);

	icpu->time_in_idle = get_cpu_idle_time(cpu,
					       &icpu->time_in_idle_timestamp,
					       tunables->io_is_busy);
	icpu->cputime_speedadj = 0;
	icpu->cputime_speedadj_timestamp = icpu->time_in_idle_timestamp;

	if (timer_slack_required(icpu)) {
		if (modify)
			gov_slack_timer_modify(icpu);
		else
			gov_slack_timer_start(icpu, cpu);
	}

	spin_unlock_irqrestore(&icpu->load_lock, flags);
}

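/*
 * target_loads and above_hispeed_delay hold "value [freq value ...]"
 * lists as flat arrays: even indices are values, odd indices are the
 * frequency thresholds at which the next value takes effect.  The
 * lookups below therefore step two entries at a time until the next
 * threshold exceeds the requested frequency.
 */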
static unsigned int
freq_to_above_hispeed_delay(struct interactive_tunables *tunables,
			    unsigned int freq)
{
	unsigned long flags;
	unsigned int ret;
	int i;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
	     freq >= tunables->above_hispeed_delay[i + 1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return ret;
}

static unsigned int freq_to_targetload(struct interactive_tunables *tunables,
				       unsigned int freq)
{
	unsigned long flags;
	unsigned int ret;
	int i;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
	     freq >= tunables->target_loads[i + 1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct interactive_cpu *icpu,
				unsigned int loadadjfreq)
{
	struct cpufreq_policy *policy = icpu->ipolicy->policy;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	unsigned int prevfreq, freqmin = 0, freqmax = UINT_MAX, tl;
	unsigned int freq = policy->cur;
	int index;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(icpu->ipolicy->tunables, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		index = cpufreq_frequency_table_target(policy, loadadjfreq / tl,
						       CPUFREQ_RELATION_L);

		freq = freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low */
			freqmin = prevfreq;

			if (freq < freqmax)
				continue;

			/* Find highest frequency that is less than freqmax */
			index = cpufreq_frequency_table_target(policy,
					freqmax - 1, CPUFREQ_RELATION_H);

			freq = freq_table[index].frequency;

			if (freq == freqmin) {
				/*
				 * The first frequency below freqmax has already
				 * been found to be too low. freqmax is the
				 * lowest speed we found that is fast enough.
				 */
				freq = freqmax;
				break;
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq > freqmin)
				continue;

			/* Find lowest frequency that is higher than freqmin */
			index = cpufreq_frequency_table_target(policy,
					freqmin + 1, CPUFREQ_RELATION_L);

			freq = freq_table[index].frequency;

			/*
			 * If freqmax is the first frequency above
			 * freqmin then we have already found that
			 * this speed is fast enough.
			 */
			if (freq == freqmax)
				break;
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

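/*
 * Accumulate this CPU's busy time since the last sample into
 * cputime_speedadj, weighted by the frequency the policy was running
 * at.  eval_target_freq() later divides by wall time to obtain the
 * average frequency the load would have demanded over the interval.
 */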
static u64 update_load(struct interactive_cpu *icpu, int cpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
	u64 now_idle, now, active_time, delta_idle, delta_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (now_idle - icpu->time_in_idle);
	delta_time = (now - icpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	icpu->cputime_speedadj += active_time * icpu->ipolicy->policy->cur;

	icpu->time_in_idle = now_idle;
	icpu->time_in_idle_timestamp = now;

	return now;
}

/* Re-evaluate load to see if a frequency change is required or not */
static void eval_target_freq(struct interactive_cpu *icpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
	struct cpufreq_policy *policy = icpu->ipolicy->policy;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	u64 cputime_speedadj, now, max_fvtime;
	unsigned int new_freq, loadadjfreq, index, delta_time;
	unsigned long flags;
	int cpu_load;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&icpu->load_lock, flags);
	now = update_load(icpu, smp_processor_id());
	delta_time = (unsigned int)(now - icpu->cputime_speedadj_timestamp);
	cputime_speedadj = icpu->cputime_speedadj;
	spin_unlock_irqrestore(&icpu->load_lock, flags);

	if (!delta_time)
		return;

	spin_lock_irqsave(&icpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / policy->cur;
	tunables->boosted = tunables->boost ||
			    now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (policy->cur < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(icpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(icpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
		    policy->cur < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

#ifdef CONFIG_ARCH_ROCKCHIP
	if (now < tunables->touchboostpulse_endtime &&
	    new_freq < tunables->touchboost_freq) {
		new_freq = tunables->touchboost_freq;
	}
#endif
	if (policy->cur >= tunables->hispeed_freq &&
	    new_freq > policy->cur &&
	    now - icpu->pol_hispeed_val_time < freq_to_above_hispeed_delay(tunables, policy->cur)) {
		trace_cpufreq_interactive_notyet(cpu, cpu_load,
				icpu->target_freq, policy->cur, new_freq);
		goto exit;
	}

	icpu->loc_hispeed_val_time = now;

	index = cpufreq_frequency_table_target(policy, new_freq,
					       CPUFREQ_RELATION_L);
	new_freq = freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	max_fvtime = max(icpu->pol_floor_val_time, icpu->loc_floor_val_time);
	if (new_freq < icpu->floor_freq && icpu->target_freq >= policy->cur) {
		if (now - max_fvtime < tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(cpu, cpu_load,
				icpu->target_freq, policy->cur, new_freq);
			goto exit;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		icpu->floor_freq = new_freq;
		if (icpu->target_freq >= policy->cur || new_freq >= policy->cur)
			icpu->loc_floor_val_time = now;
	}

	if (icpu->target_freq == new_freq &&
	    icpu->target_freq <= policy->cur) {
		trace_cpufreq_interactive_already(cpu, cpu_load,
			icpu->target_freq, policy->cur, new_freq);
		goto exit;
	}

	trace_cpufreq_interactive_target(cpu, cpu_load, icpu->target_freq,
					 policy->cur, new_freq);

	icpu->target_freq = new_freq;
	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(cpu, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	wake_up_process(speedchange_task);
	return;

exit:
	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
}

static void cpufreq_interactive_update(struct interactive_cpu *icpu)
{
	eval_target_freq(icpu);
	slack_timer_resched(icpu, smp_processor_id(), true);
}

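/*
 * Called from the idle notifier on IDLE_END.  If a full sampling period
 * elapsed while this CPU was idle, sample load immediately instead of
 * waiting for the next scheduler-driven update.
 */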
static void cpufreq_interactive_idle_end(void)
{
	struct interactive_cpu *icpu = &per_cpu(interactive_cpu,
						smp_processor_id());
	unsigned long sampling_rate;

	if (!down_read_trylock(&icpu->enable_sem))
		return;

	if (icpu->ipolicy) {
		/*
		 * If load hasn't been sampled for more than sampling_rate,
		 * do it right now.
		 */
		if (time_after_eq(jiffies, icpu->next_sample_jiffies)) {
			sampling_rate = icpu->ipolicy->tunables->sampling_rate;
			icpu->last_sample_time = local_clock();
			icpu->next_sample_jiffies = usecs_to_jiffies(sampling_rate) + jiffies;
			cpufreq_interactive_update(icpu);
		}
	}

	up_read(&icpu->enable_sem);
}

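/*
 * Derive policy-wide values from per-CPU state: the highest target
 * frequency requested by any CPU in the policy, the latest floor
 * validation time, and the earliest hispeed validation time among the
 * CPUs requesting that highest frequency.
 */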
static void cpufreq_interactive_get_policy_info(struct cpufreq_policy *policy,
						unsigned int *pmax_freq,
						u64 *phvt, u64 *pfvt)
{
	struct interactive_cpu *icpu;
	u64 hvt = ~0ULL, fvt = 0;
	unsigned int max_freq = 0, i;

	for_each_cpu(i, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, i);

		fvt = max(fvt, icpu->loc_floor_val_time);
		if (icpu->target_freq > max_freq) {
			max_freq = icpu->target_freq;
			hvt = icpu->loc_hispeed_val_time;
		} else if (icpu->target_freq == max_freq) {
			hvt = min(hvt, icpu->loc_hispeed_val_time);
		}
	}

	*pmax_freq = max_freq;
	*phvt = hvt;
	*pfvt = fvt;
}

static void cpufreq_interactive_adjust_cpu(unsigned int cpu,
					   struct cpufreq_policy *policy)
{
	struct interactive_cpu *icpu;
	u64 hvt, fvt;
	unsigned int max_freq;
	int i;

	cpufreq_interactive_get_policy_info(policy, &max_freq, &hvt, &fvt);

	for_each_cpu(i, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, i);
		icpu->pol_floor_val_time = fvt;
	}

	if (max_freq != policy->cur) {
		__cpufreq_driver_target(policy, max_freq, CPUFREQ_RELATION_H);
		for_each_cpu(i, policy->cpus) {
			icpu = &per_cpu(interactive_cpu, i);
			icpu->pol_hispeed_val_time = hvt;
		}
	}

	trace_cpufreq_interactive_setspeed(cpu, max_freq, policy->cur);
}

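/*
 * SCHED_FIFO kthread that performs the actual frequency transitions.
 * Producers (sample evaluation, boosts, touch events) set bits in
 * speedchange_cpumask and wake this task; it drains the mask and applies
 * each policy's highest per-CPU target frequency under the policy rwsem.
 */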
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;

again:
	set_current_state(TASK_INTERRUPTIBLE);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	if (cpumask_empty(&speedchange_cpumask)) {
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
		schedule();

		if (kthread_should_stop())
			return 0;

		spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	}

	set_current_state(TASK_RUNNING);
	tmp_mask = speedchange_cpumask;
	cpumask_clear(&speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	for_each_cpu(cpu, &tmp_mask) {
		struct interactive_cpu *icpu = &per_cpu(interactive_cpu, cpu);
		struct cpufreq_policy *policy;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;

		down_write(&policy->rwsem);

		if (likely(down_read_trylock(&icpu->enable_sem))) {
			if (likely(icpu->ipolicy))
				cpufreq_interactive_adjust_cpu(cpu, policy);
			up_read(&icpu->enable_sem);
		}

		up_write(&policy->rwsem);
		cpufreq_cpu_put(policy);
	}

	goto again;
}

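/*
 * Raise every CPU in every policy attached to this set of tunables to at
 * least hispeed_freq, and let the speedchange task apply the new targets.
 */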
static void cpufreq_interactive_boost(struct interactive_tunables *tunables)
{
	struct interactive_policy *ipolicy;
	struct cpufreq_policy *policy;
	struct interactive_cpu *icpu;
	unsigned long flags[2];
	bool wakeup = false;
	int i;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_ipolicy(ipolicy) {
		policy = ipolicy->policy;

		for_each_cpu(i, policy->cpus) {
			icpu = &per_cpu(interactive_cpu, i);

			if (!down_read_trylock(&icpu->enable_sem))
				continue;

			if (!icpu->ipolicy) {
				up_read(&icpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&icpu->target_freq_lock, flags[1]);
			if (icpu->target_freq < tunables->hispeed_freq) {
				icpu->target_freq = tunables->hispeed_freq;
				cpumask_set_cpu(i, &speedchange_cpumask);
				icpu->pol_hispeed_val_time = ktime_to_us(ktime_get());
				wakeup = true;
			}
			spin_unlock_irqrestore(&icpu->target_freq_lock, flags[1]);

			up_read(&icpu->enable_sem);
		}
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (wakeup)
		wake_up_process(speedchange_task);
}

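/*
 * Frequency transition notifier: on every POSTCHANGE event, resample the
 * idle/busy accounting of each CPU in the policy so that the interval
 * accumulated into cputime_speedadj is closed out at the frequency
 * boundary.
 */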
static int cpufreq_interactive_notifier(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_policy *policy = freq->policy;
	struct interactive_cpu *icpu;
	unsigned long flags;
	int cpu;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		if (!down_read_trylock(&icpu->enable_sem))
			continue;

		if (!icpu->ipolicy) {
			up_read(&icpu->enable_sem);
			continue;
		}

		spin_lock_irqsave(&icpu->load_lock, flags);
		update_load(icpu, cpu);
		spin_unlock_irqrestore(&icpu->load_lock, flags);

		up_read(&icpu->enable_sem);
	}

	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

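/*
 * Parse a space/colon separated list of unsigned integers, as written to
 * the target_loads and above_hispeed_delay sysfs files.  The token count
 * must be odd (a value followed by zero or more "freq value" pairs);
 * returns a kcalloc()-allocated array or an ERR_PTR().
 */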
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp = buf;
	int ntokens = 1, i = 0;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kcalloc(ntokens, sizeof(*tokenized_data), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

/* Interactive governor sysfs interface */
static struct interactive_tunables *to_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct interactive_tunables, attr_set);
}

#define show_one(file_name, type)					\
static ssize_t show_##file_name(struct gov_attr_set *attr_set, char *buf) \
{									\
	struct interactive_tunables *tunables = to_tunables(attr_set);	\
	return sprintf(buf, type "\n", tunables->file_name);		\
}

static ssize_t show_target_loads(struct gov_attr_set *attr_set, char *buf)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long flags;
	ssize_t ret = 0;
	int i;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	return ret;
}

static ssize_t store_target_loads(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned int *new_target_loads;
	unsigned long flags;
	int ntokens;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_ERR(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	return count;
}

static ssize_t show_above_hispeed_delay(struct gov_attr_set *attr_set,
					char *buf)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long flags;
	ssize_t ret = 0;
	int i;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return ret;
}

static ssize_t store_above_hispeed_delay(struct gov_attr_set *attr_set,
					 const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;
	int ntokens;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_ERR(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return count;
}

static ssize_t store_hispeed_freq(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->hispeed_freq = val;

	return count;
}

static ssize_t store_go_hispeed_load(struct gov_attr_set *attr_set,
				     const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->go_hispeed_load = val;

	return count;
}

static ssize_t store_min_sample_time(struct gov_attr_set *attr_set,
				     const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->min_sample_time = val;

	return count;
}

static ssize_t show_timer_rate(struct gov_attr_set *attr_set, char *buf)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);

	return sprintf(buf, "%lu\n", tunables->sampling_rate);
}

static ssize_t store_timer_rate(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val, val_round;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
	if (val != val_round)
		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
			val_round);

	tunables->sampling_rate = val_round;

	return count;
}

static ssize_t store_timer_slack(struct gov_attr_set *attr_set, const char *buf,
				 size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack = val;
	update_slack_delay(tunables);

	return count;
}

static ssize_t store_boost(struct gov_attr_set *attr_set, const char *buf,
			   size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost = val;

	if (tunables->boost) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
					tunables->boostpulse_duration;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);

	return count;
}

static ssize_t store_boostpulse_duration(struct gov_attr_set *attr_set,
					 const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration = val;

	return count;
}

static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->io_is_busy = val;

	return count;
}

show_one(hispeed_freq, "%u");
show_one(go_hispeed_load, "%lu");
show_one(min_sample_time, "%lu");
show_one(timer_slack, "%lu");
show_one(boost, "%u");
show_one(boostpulse_duration, "%u");
show_one(io_is_busy, "%u");

gov_attr_rw(target_loads);
gov_attr_rw(above_hispeed_delay);
gov_attr_rw(hispeed_freq);
gov_attr_rw(go_hispeed_load);
gov_attr_rw(min_sample_time);
gov_attr_rw(timer_rate);
gov_attr_rw(timer_slack);
gov_attr_rw(boost);
gov_attr_wo(boostpulse);
gov_attr_rw(boostpulse_duration);
gov_attr_rw(io_is_busy);

static struct attribute *interactive_attributes[] = {
	&target_loads.attr,
	&above_hispeed_delay.attr,
	&hispeed_freq.attr,
	&go_hispeed_load.attr,
	&min_sample_time.attr,
	&timer_rate.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	&io_is_busy.attr,
	NULL
};

static struct kobj_type interactive_tunables_ktype = {
	.default_attrs = interactive_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	if (val == IDLE_END)
		cpufreq_interactive_idle_end();

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

/* Interactive Governor callbacks */
struct interactive_governor {
	struct cpufreq_governor gov;
	unsigned int usage_count;
};

static struct interactive_governor interactive_gov;

#define CPU_FREQ_GOV_INTERACTIVE	(&interactive_gov.gov)

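/*
 * irq_work callback, run on the CPU that queued it: performs the deferred
 * sample, then re-opens the gate for the next one.
 */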
static void irq_work(struct irq_work *irq_work)
{
	struct interactive_cpu *icpu = container_of(irq_work, struct
						    interactive_cpu, irq_work);

	cpufreq_interactive_update(icpu);
	icpu->work_in_progress = false;
}

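/*
 * Scheduler utilization hook.  This runs on every scheduler update for
 * the CPU, so it must stay cheap: it only rate-limits by sampling_rate
 * and defers the real work to the per-CPU irq_work.
 */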
static void update_util_handler(struct update_util_data *data, u64 time,
				unsigned int flags)
{
	struct interactive_cpu *icpu = container_of(data,
					struct interactive_cpu, update_util);
	struct interactive_policy *ipolicy = icpu->ipolicy;
	struct interactive_tunables *tunables = ipolicy->tunables;
	u64 delta_ns;

	/*
	 * The irq-work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (icpu->work_in_progress)
		return;

	delta_ns = time - icpu->last_sample_time;
	if ((s64)delta_ns < tunables->sampling_rate * NSEC_PER_USEC)
		return;

	icpu->last_sample_time = time;
	icpu->next_sample_jiffies = usecs_to_jiffies(tunables->sampling_rate) +
				    jiffies;

	icpu->work_in_progress = true;
	irq_work_queue_on(&icpu->irq_work, icpu->cpu);
}

static void gov_set_update_util(struct interactive_policy *ipolicy)
{
	struct cpufreq_policy *policy = ipolicy->policy;
	struct interactive_cpu *icpu;
	int cpu;

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		icpu->last_sample_time = 0;
		icpu->next_sample_jiffies = 0;
		cpufreq_add_update_util_hook(cpu, &icpu->update_util,
					     update_util_handler);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_rcu();
}

static void icpu_cancel_work(struct interactive_cpu *icpu)
{
	irq_work_sync(&icpu->irq_work);
	icpu->work_in_progress = false;
	del_timer_sync(&icpu->slack_timer);
}

static struct interactive_policy *
interactive_policy_alloc(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy;

	ipolicy = kzalloc(sizeof(*ipolicy), GFP_KERNEL);
	if (!ipolicy)
		return NULL;

	ipolicy->policy = policy;

	return ipolicy;
}

static void interactive_policy_free(struct interactive_policy *ipolicy)
{
	kfree(ipolicy);
}

static struct interactive_tunables *
interactive_tunables_alloc(struct interactive_policy *ipolicy)
{
	struct interactive_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (!tunables)
		return NULL;

	gov_attr_set_init(&tunables->attr_set, &ipolicy->tunables_hook);
	if (!have_governor_per_policy())
		global_tunables = tunables;

	ipolicy->tunables = tunables;

	return tunables;
}

static void interactive_tunables_free(struct interactive_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

#ifdef CONFIG_ARCH_ROCKCHIP
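/*
 * Touch boost: on any key, touch or relative-pointer event, extend the
 * touchboost pulse and raise all online CPUs to at least touchboost_freq.
 * A new pulse is ignored unless it extends the current end time by at
 * least 10 ms, so a stream of events does not rearm the pulse constantly.
 */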
static void cpufreq_interactive_input_event(struct input_handle *handle,
					    unsigned int type,
					    unsigned int code,
					    int value)
{
	u64 now, endtime;
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct interactive_cpu *pcpu;
	struct interactive_tunables *tunables;

	if (type != EV_ABS && type != EV_KEY && type != EV_REL)
		return;

	trace_cpufreq_interactive_boost("touch");
	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	now = ktime_to_us(ktime_get());
	for_each_online_cpu(i) {
		pcpu = &per_cpu(interactive_cpu, i);
		if (!down_read_trylock(&pcpu->enable_sem))
			continue;

		if (!pcpu->ipolicy) {
			up_read(&pcpu->enable_sem);
			continue;
		}

		tunables = pcpu->ipolicy->tunables;
		if (!tunables) {
			up_read(&pcpu->enable_sem);
			continue;
		}

		endtime = now + tunables->touchboostpulse_duration_val;
		if (endtime < (tunables->touchboostpulse_endtime +
			       10 * USEC_PER_MSEC)) {
			up_read(&pcpu->enable_sem);
			continue;
		}
		tunables->touchboostpulse_endtime = endtime;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->touchboost_freq) {
			pcpu->target_freq = tunables->touchboost_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->loc_hispeed_val_time =
					ktime_to_us(ktime_get());
			anyboost = 1;
		}

		pcpu->floor_freq = tunables->touchboost_freq;
		pcpu->loc_floor_val_time = ktime_to_us(ktime_get());

		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);

		up_read(&pcpu->enable_sem);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_input_connect(struct input_handler *handler,
					     struct input_dev *dev,
					     const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;
err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}

static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

static const struct input_device_id cpufreq_interactive_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			BIT_MASK(ABS_MT_POSITION_X) |
			BIT_MASK(ABS_MT_POSITION_Y) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ /* A mouse-like device: at least one button, two relative axes */
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT |
				INPUT_DEVICE_ID_MATCH_RELBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) },
		.keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) },
		.relbit = { BIT_MASK(REL_X) | BIT_MASK(REL_Y) },
	},
	{ /* A separate scrollwheel */
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_RELBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) },
		.relbit = { BIT_MASK(REL_WHEEL) },
	},
	{ },
};

static struct input_handler cpufreq_interactive_input_handler = {
	.event		= cpufreq_interactive_input_event,
	.connect	= cpufreq_interactive_input_connect,
	.disconnect	= cpufreq_interactive_input_disconnect,
	.name		= "cpufreq_interactive",
	.id_table	= cpufreq_interactive_ids,
};

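/*
 * Rockchip-specific defaults (frequencies in kHz).  The tunables are also
 * mirrored in backup_tunables so that values survive a governor exit/init
 * cycle: index 0 is used for the policy containing CPU0, index 1 for any
 * other policy.
 */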
static void rockchip_cpufreq_policy_init(struct interactive_policy *ipolicy)
{
	struct interactive_tunables *tunables = ipolicy->tunables;
	struct gov_attr_set attr_set;
	int index;

	tunables->min_sample_time = 40 * USEC_PER_MSEC;
	tunables->boostpulse_duration = 40 * USEC_PER_MSEC;
	if (ipolicy->policy->cpu == 0) {
		tunables->hispeed_freq = 1008000;
		tunables->touchboostpulse_duration_val = 500 * USEC_PER_MSEC;
		tunables->touchboost_freq = 1200000;
	} else {
		tunables->hispeed_freq = 816000;
	}

	index = (ipolicy->policy->cpu == 0) ? 0 : 1;
	if (!backup_tunables[index].sampling_rate) {
		backup_tunables[index] = *tunables;
	} else {
		attr_set = tunables->attr_set;
		*tunables = backup_tunables[index];
		tunables->attr_set = attr_set;
	}
}
#endif

int cpufreq_interactive_init(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy;
	struct interactive_tunables *tunables;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	ipolicy = interactive_policy_alloc(policy);
	if (!ipolicy)
		return -ENOMEM;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_int_policy;
		}

		policy->governor_data = ipolicy;
		ipolicy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set,
				 &ipolicy->tunables_hook);
		goto out;
	}

	tunables = interactive_tunables_alloc(ipolicy);
	if (!tunables) {
		ret = -ENOMEM;
		goto free_int_policy;
	}

	tunables->hispeed_freq = policy->max;
	tunables->above_hispeed_delay = default_above_hispeed_delay;
	tunables->nabove_hispeed_delay =
		ARRAY_SIZE(default_above_hispeed_delay);
	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	tunables->target_loads = default_target_loads;
	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	tunables->boostpulse_duration = DEFAULT_MIN_SAMPLE_TIME;
	tunables->sampling_rate = DEFAULT_SAMPLING_RATE;
	tunables->timer_slack = DEFAULT_TIMER_SLACK;
	update_slack_delay(tunables);

	spin_lock_init(&tunables->target_loads_lock);
	spin_lock_init(&tunables->above_hispeed_delay_lock);

	policy->governor_data = ipolicy;

#ifdef CONFIG_ARCH_ROCKCHIP
	rockchip_cpufreq_policy_init(ipolicy);
#endif
	ret = kobject_init_and_add(&tunables->attr_set.kobj,
				   &interactive_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   interactive_gov.gov.name);
	if (ret)
		goto fail;

	/* One time initialization for governor */
	if (!interactive_gov.usage_count++) {
		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(&cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
#ifdef CONFIG_ARCH_ROCKCHIP
		ret = input_register_handler(&cpufreq_interactive_input_handler);
#endif
	}

 out:
	mutex_unlock(&global_tunables_lock);
	return 0;

 fail:
	policy->governor_data = NULL;
	interactive_tunables_free(tunables);

 free_int_policy:
	mutex_unlock(&global_tunables_lock);

	interactive_policy_free(ipolicy);
	pr_err("governor initialization failed (%d)\n", ret);

	return ret;
}

void cpufreq_interactive_exit(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy = policy->governor_data;
	struct interactive_tunables *tunables = ipolicy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	/* Last policy using the governor? */
	if (!--interactive_gov.usage_count) {
		cpufreq_unregister_notifier(&cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
#ifdef CONFIG_ARCH_ROCKCHIP
		input_unregister_handler(&cpufreq_interactive_input_handler);
#endif
	}

	count = gov_attr_set_put(&tunables->attr_set, &ipolicy->tunables_hook);
	policy->governor_data = NULL;
	if (!count) {
#ifdef CONFIG_ARCH_ROCKCHIP
		if (policy->cpu == 0)
			backup_tunables[0] = *tunables;
		else
			backup_tunables[1] = *tunables;
#endif
		interactive_tunables_free(tunables);
	}

	mutex_unlock(&global_tunables_lock);

	interactive_policy_free(ipolicy);
}

int cpufreq_interactive_start(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy = policy->governor_data;
	struct interactive_cpu *icpu;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		icpu->target_freq = policy->cur;
		icpu->floor_freq = icpu->target_freq;
		icpu->pol_floor_val_time = ktime_to_us(ktime_get());
		icpu->loc_floor_val_time = icpu->pol_floor_val_time;
		icpu->pol_hispeed_val_time = icpu->pol_floor_val_time;
		icpu->loc_hispeed_val_time = icpu->pol_floor_val_time;
		icpu->cpu = cpu;

		down_write(&icpu->enable_sem);
		icpu->ipolicy = ipolicy;
		slack_timer_resched(icpu, cpu, false);
		up_write(&icpu->enable_sem);
	}

	gov_set_update_util(ipolicy);
	return 0;
}

void cpufreq_interactive_stop(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy = policy->governor_data;
	struct interactive_cpu *icpu;
	unsigned int cpu;

	gov_clear_update_util(ipolicy->policy);

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		down_write(&icpu->enable_sem);
		icpu_cancel_work(icpu);
		icpu->ipolicy = NULL;
		up_write(&icpu->enable_sem);
	}
}

void cpufreq_interactive_limits(struct cpufreq_policy *policy)
{
	struct interactive_cpu *icpu;
	unsigned int cpu;
	unsigned long flags;

	cpufreq_policy_apply_limits(policy);

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		spin_lock_irqsave(&icpu->target_freq_lock, flags);

		if (policy->max < icpu->target_freq)
			icpu->target_freq = policy->max;
		else if (policy->min > icpu->target_freq)
			icpu->target_freq = policy->min;

		spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
	}
}

static struct interactive_governor interactive_gov = {
	.gov = {
		.name			= "interactive",
		.owner			= THIS_MODULE,
		.init			= cpufreq_interactive_init,
		.exit			= cpufreq_interactive_exit,
		.start			= cpufreq_interactive_start,
		.stop			= cpufreq_interactive_stop,
		.limits			= cpufreq_interactive_limits,
	}
};

static void cpufreq_interactive_nop_timer(struct timer_list *t)
{
	/*
	 * The purpose of the slack timer is to wake the CPU from idle so
	 * that its frequency can be lowered if it is not already at the
	 * minimum.
	 *
	 * This matters on platforms where a CPU consumes more power at
	 * higher frequencies even while idle.
	 */
}

static int __init cpufreq_interactive_gov_init(void)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct interactive_cpu *icpu;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		icpu = &per_cpu(interactive_cpu, cpu);

		init_irq_work(&icpu->irq_work, irq_work);
		spin_lock_init(&icpu->load_lock);
		spin_lock_init(&icpu->target_freq_lock);
		init_rwsem(&icpu->enable_sem);

		/* Initialize per-cpu slack-timer */
		timer_setup(&icpu->slack_timer, cpufreq_interactive_nop_timer,
			    TIMER_PINNED);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task = kthread_create(cpufreq_interactive_speedchange_task,
					  NULL, "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_INTERACTIVE;
}

fs_initcall(cpufreq_interactive_gov_init);
#else
module_init(cpufreq_interactive_gov_init);
#endif

static void __exit cpufreq_interactive_gov_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_INTERACTIVE);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}
module_exit(cpufreq_interactive_gov_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A dynamic cpufreq governor for latency-sensitive workloads");
MODULE_LICENSE("GPL");