Lines matching refs: icpu
186 static bool timer_slack_required(struct interactive_cpu *icpu) in timer_slack_required() argument
188 struct interactive_policy *ipolicy = icpu->ipolicy; in timer_slack_required()
194 if (icpu->target_freq > ipolicy->policy->min) in timer_slack_required()
200 static void gov_slack_timer_start(struct interactive_cpu *icpu, int cpu) in gov_slack_timer_start() argument
202 struct interactive_tunables *tunables = icpu->ipolicy->tunables; in gov_slack_timer_start()
204 icpu->slack_timer.expires = jiffies + tunables->timer_slack_delay; in gov_slack_timer_start()
205 add_timer_on(&icpu->slack_timer, cpu); in gov_slack_timer_start()
208 static void gov_slack_timer_modify(struct interactive_cpu *icpu) in gov_slack_timer_modify() argument
210 struct interactive_tunables *tunables = icpu->ipolicy->tunables; in gov_slack_timer_modify()
212 mod_timer(&icpu->slack_timer, jiffies + tunables->timer_slack_delay); in gov_slack_timer_modify()
215 static void slack_timer_resched(struct interactive_cpu *icpu, int cpu, in slack_timer_resched() argument
218 struct interactive_tunables *tunables = icpu->ipolicy->tunables; in slack_timer_resched()
221 spin_lock_irqsave(&icpu->load_lock, flags); in slack_timer_resched()
223 icpu->time_in_idle = get_cpu_idle_time(cpu, in slack_timer_resched()
224 &icpu->time_in_idle_timestamp, in slack_timer_resched()
226 icpu->cputime_speedadj = 0; in slack_timer_resched()
227 icpu->cputime_speedadj_timestamp = icpu->time_in_idle_timestamp; in slack_timer_resched()
229 if (timer_slack_required(icpu)) { in slack_timer_resched()
231 gov_slack_timer_modify(icpu); in slack_timer_resched()
233 gov_slack_timer_start(icpu, cpu); in slack_timer_resched()
236 spin_unlock_irqrestore(&icpu->load_lock, flags); in slack_timer_resched()
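Taken together, the slack-timer fragments above form one small rescheduling path: reset the per-CPU load tracking, then arm (or re-arm) the slack timer only while the CPU is running above its policy minimum. The reconstruction below is a hedged sketch assembled from those lines; the name of the bool parameter (called modify here, inferred from the true/false call sites at lines 495 and 1516) and the io_is_busy argument to get_cpu_idle_time() are assumptions.

/* Hedged reconstruction of slack_timer_resched(); see caveats above. */
static void slack_timer_resched(struct interactive_cpu *icpu, int cpu,
				bool modify)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
	unsigned long flags;

	spin_lock_irqsave(&icpu->load_lock, flags);

	/* Restart load tracking from the current idle statistics. */
	icpu->time_in_idle = get_cpu_idle_time(cpu,
					       &icpu->time_in_idle_timestamp,
					       tunables->io_is_busy);
	icpu->cputime_speedadj = 0;
	icpu->cputime_speedadj_timestamp = icpu->time_in_idle_timestamp;

	/* Only keep a slack timer armed above the policy minimum frequency. */
	if (timer_slack_required(icpu)) {
		if (modify)
			gov_slack_timer_modify(icpu);
		else
			gov_slack_timer_start(icpu, cpu);
	}

	spin_unlock_irqrestore(&icpu->load_lock, flags);
}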
282 static unsigned int choose_freq(struct interactive_cpu *icpu, in choose_freq() argument
285 struct cpufreq_policy *policy = icpu->ipolicy->policy; in choose_freq()
293 tl = freq_to_targetload(icpu->ipolicy->tunables, freq); in choose_freq()
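Only the head of choose_freq() is visible above. For orientation: the governor converts the observed load into a frequency by comparing a load-adjusted frequency (load times current frequency) against a per-frequency target load from freq_to_targetload(). The snippet below illustrates only that target-load idea in simplified form; it is not the governor's actual convergence loop, which iterates the frequency table, and the helper name is made up for this example.

/*
 * Simplified illustration of the target-load idea behind choose_freq().
 * Not the real algorithm: the actual function walks the frequency table
 * until the chosen frequency and its target load agree.
 */
static unsigned int choose_freq_illustration(struct interactive_cpu *icpu,
					     unsigned int loadadjfreq)
{
	struct cpufreq_policy *policy = icpu->ipolicy->policy;
	unsigned int tl = freq_to_targetload(icpu->ipolicy->tunables,
					     policy->cur);

	/*
	 * loadadjfreq is cpu_load (percent) * policy->cur, so dividing by the
	 * target load in percent gives the frequency at which the observed
	 * load would sit exactly at the target.
	 */
	return clamp(loadadjfreq / tl, policy->min, policy->max);
}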
355 static u64 update_load(struct interactive_cpu *icpu, int cpu) in update_load() argument
357 struct interactive_tunables *tunables = icpu->ipolicy->tunables; in update_load()
361 delta_idle = (now_idle - icpu->time_in_idle); in update_load()
362 delta_time = (now - icpu->time_in_idle_timestamp); in update_load()
369 icpu->cputime_speedadj += active_time * icpu->ipolicy->policy->cur; in update_load()
371 icpu->time_in_idle = now_idle; in update_load()
372 icpu->time_in_idle_timestamp = now; in update_load()
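Reassembled, update_load() measures how much of the interval since the last sample was busy time and accumulates that busy time weighted by the frequency it ran at; this running sum (cputime_speedadj) is what eval_target_freq() later divides by wall time. A hedged reconstruction from the lines above; the io_is_busy argument and the guard against idle time outrunning wall time are assumptions.

/* Hedged reconstruction of update_load(); see caveats above. */
static u64 update_load(struct interactive_cpu *icpu, int cpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
	u64 now_idle, now, active_time, delta_idle, delta_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (now_idle - icpu->time_in_idle);
	delta_time = (now - icpu->time_in_idle_timestamp);

	/* Clamp in case idle time outruns wall time (assumed guard). */
	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	/* Weight busy time by the frequency the CPU was running at. */
	icpu->cputime_speedadj += active_time * icpu->ipolicy->policy->cur;

	icpu->time_in_idle = now_idle;
	icpu->time_in_idle_timestamp = now;

	return now;
}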
378 static void eval_target_freq(struct interactive_cpu *icpu) in eval_target_freq() argument
380 struct interactive_tunables *tunables = icpu->ipolicy->tunables; in eval_target_freq()
381 struct cpufreq_policy *policy = icpu->ipolicy->policy; in eval_target_freq()
389 spin_lock_irqsave(&icpu->load_lock, flags); in eval_target_freq()
390 now = update_load(icpu, smp_processor_id()); in eval_target_freq()
391 delta_time = (unsigned int)(now - icpu->cputime_speedadj_timestamp); in eval_target_freq()
392 cputime_speedadj = icpu->cputime_speedadj; in eval_target_freq()
393 spin_unlock_irqrestore(&icpu->load_lock, flags); in eval_target_freq()
398 spin_lock_irqsave(&icpu->target_freq_lock, flags); in eval_target_freq()
409 new_freq = choose_freq(icpu, loadadjfreq); in eval_target_freq()
415 new_freq = choose_freq(icpu, loadadjfreq); in eval_target_freq()
429 now - icpu->pol_hispeed_val_time < freq_to_above_hispeed_delay(tunables, policy->cur)) { in eval_target_freq()
431 icpu->target_freq, policy->cur, new_freq); in eval_target_freq()
435 icpu->loc_hispeed_val_time = now; in eval_target_freq()
445 max_fvtime = max(icpu->pol_floor_val_time, icpu->loc_floor_val_time); in eval_target_freq()
446 if (new_freq < icpu->floor_freq && icpu->target_freq >= policy->cur) { in eval_target_freq()
449 icpu->target_freq, policy->cur, new_freq); in eval_target_freq()
463 icpu->floor_freq = new_freq; in eval_target_freq()
464 if (icpu->target_freq >= policy->cur || new_freq >= policy->cur) in eval_target_freq()
465 icpu->loc_floor_val_time = now; in eval_target_freq()
468 if (icpu->target_freq == new_freq && in eval_target_freq()
469 icpu->target_freq <= policy->cur) { in eval_target_freq()
471 icpu->target_freq, policy->cur, new_freq); in eval_target_freq()
475 trace_cpufreq_interactive_target(cpu, cpu_load, icpu->target_freq, in eval_target_freq()
478 icpu->target_freq = new_freq; in eval_target_freq()
479 spin_unlock_irqrestore(&icpu->target_freq_lock, flags); in eval_target_freq()
489 spin_unlock_irqrestore(&icpu->target_freq_lock, flags); in eval_target_freq()
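The eval_target_freq() references are the densest block in this listing. Read in order they follow one shape: snapshot the frequency-weighted busy time under load_lock (lines 389-393), convert it into a load percentage, pick a candidate frequency (hispeed_freq on high load or boost, otherwise choose_freq(), lines 409 and 415), then refuse the change while the above-hispeed-delay window (line 429) or the floor/minimum-sample-time window (lines 445-446) is still open, all under target_freq_lock. The small helper below is a hedged sketch of just the load arithmetic between the snapshot and the choose_freq() calls; the *100 scaling and the do_div() usage follow the upstream interactive governor, and the helper name is made up for this example.

/*
 * Hedged sketch of the load arithmetic in eval_target_freq().
 * cputime_speedadj accumulates active_time * frequency, so dividing by wall
 * time and scaling by 100 yields a load-adjusted frequency, and dividing
 * that by the current frequency yields the load in percent.
 */
static unsigned int speedadj_to_load(u64 cputime_speedadj, u64 delta_time,
				     unsigned int cur_freq,
				     unsigned int *loadadjfreq)
{
	do_div(cputime_speedadj, delta_time);
	*loadadjfreq = (unsigned int)cputime_speedadj * 100;

	return *loadadjfreq / cur_freq;		/* cpu_load in percent */
}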
492 static void cpufreq_interactive_update(struct interactive_cpu *icpu) in cpufreq_interactive_update() argument
494 eval_target_freq(icpu); in cpufreq_interactive_update()
495 slack_timer_resched(icpu, smp_processor_id(), true); in cpufreq_interactive_update()
500 struct interactive_cpu *icpu = &per_cpu(interactive_cpu, in cpufreq_interactive_idle_end() local
504 if (!down_read_trylock(&icpu->enable_sem)) in cpufreq_interactive_idle_end()
507 if (icpu->ipolicy) { in cpufreq_interactive_idle_end()
512 if (time_after_eq(jiffies, icpu->next_sample_jiffies)) { in cpufreq_interactive_idle_end()
513 sampling_rate = icpu->ipolicy->tunables->sampling_rate; in cpufreq_interactive_idle_end()
514 icpu->last_sample_time = local_clock(); in cpufreq_interactive_idle_end()
515 icpu->next_sample_jiffies = usecs_to_jiffies(sampling_rate) + jiffies; in cpufreq_interactive_idle_end()
516 cpufreq_interactive_update(icpu); in cpufreq_interactive_idle_end()
520 up_read(&icpu->enable_sem); in cpufreq_interactive_idle_end()
527 struct interactive_cpu *icpu; in cpufreq_interactive_get_policy_info() local
532 icpu = &per_cpu(interactive_cpu, i); in cpufreq_interactive_get_policy_info()
534 fvt = max(fvt, icpu->loc_floor_val_time); in cpufreq_interactive_get_policy_info()
535 if (icpu->target_freq > max_freq) { in cpufreq_interactive_get_policy_info()
536 max_freq = icpu->target_freq; in cpufreq_interactive_get_policy_info()
537 hvt = icpu->loc_hispeed_val_time; in cpufreq_interactive_get_policy_info()
538 } else if (icpu->target_freq == max_freq) { in cpufreq_interactive_get_policy_info()
539 hvt = min(hvt, icpu->loc_hispeed_val_time); in cpufreq_interactive_get_policy_info()
551 struct interactive_cpu *icpu; in cpufreq_interactive_adjust_cpu() local
559 icpu = &per_cpu(interactive_cpu, i); in cpufreq_interactive_adjust_cpu()
560 icpu->pol_floor_val_time = fvt; in cpufreq_interactive_adjust_cpu()
566 icpu = &per_cpu(interactive_cpu, i); in cpufreq_interactive_adjust_cpu()
567 icpu->pol_hispeed_val_time = hvt; in cpufreq_interactive_adjust_cpu()
600 struct interactive_cpu *icpu = &per_cpu(interactive_cpu, cpu); in cpufreq_interactive_speedchange_task() local
609 if (likely(down_read_trylock(&icpu->enable_sem))) { in cpufreq_interactive_speedchange_task()
610 if (likely(icpu->ipolicy)) in cpufreq_interactive_speedchange_task()
612 up_read(&icpu->enable_sem); in cpufreq_interactive_speedchange_task()
626 struct interactive_cpu *icpu; in cpufreq_interactive_boost() local
639 icpu = &per_cpu(interactive_cpu, i); in cpufreq_interactive_boost()
641 if (!down_read_trylock(&icpu->enable_sem)) in cpufreq_interactive_boost()
644 if (!icpu->ipolicy) { in cpufreq_interactive_boost()
645 up_read(&icpu->enable_sem); in cpufreq_interactive_boost()
649 spin_lock_irqsave(&icpu->target_freq_lock, flags[1]); in cpufreq_interactive_boost()
650 if (icpu->target_freq < tunables->hispeed_freq) { in cpufreq_interactive_boost()
651 icpu->target_freq = tunables->hispeed_freq; in cpufreq_interactive_boost()
653 icpu->pol_hispeed_val_time = ktime_to_us(ktime_get()); in cpufreq_interactive_boost()
656 spin_unlock_irqrestore(&icpu->target_freq_lock, flags[1]); in cpufreq_interactive_boost()
658 up_read(&icpu->enable_sem); in cpufreq_interactive_boost()
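The boost path walks every CPU of the policy and pulls any CPU still below hispeed_freq up to it, restarting its hispeed hold time. The helper below is a hedged refactoring of the per-CPU body shown above into a standalone function for readability; the name is made up here, and the surrounding for_each_cpu() loop, the outer lock that flags[0] belongs to, and the decision to wake the speedchange task afterwards are not visible in this listing.

/* Hedged refactoring of the per-CPU boost body; see caveats above. */
static bool boost_one_cpu(struct interactive_cpu *icpu,
			  struct interactive_tunables *tunables)
{
	unsigned long flags;
	bool boosted = false;

	/* Skip CPUs whose governor instance is not currently attached. */
	if (!down_read_trylock(&icpu->enable_sem))
		return false;

	if (!icpu->ipolicy) {
		up_read(&icpu->enable_sem);
		return false;
	}

	spin_lock_irqsave(&icpu->target_freq_lock, flags);
	if (icpu->target_freq < tunables->hispeed_freq) {
		/* Pull the CPU up to hispeed_freq and restart its hold time. */
		icpu->target_freq = tunables->hispeed_freq;
		icpu->pol_hispeed_val_time = ktime_to_us(ktime_get());
		boosted = true;
	}
	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);

	up_read(&icpu->enable_sem);
	return boosted;
}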
673 struct interactive_cpu *icpu; in cpufreq_interactive_notifier() local
681 icpu = &per_cpu(interactive_cpu, cpu); in cpufreq_interactive_notifier()
683 if (!down_read_trylock(&icpu->enable_sem)) in cpufreq_interactive_notifier()
686 if (!icpu->ipolicy) { in cpufreq_interactive_notifier()
687 up_read(&icpu->enable_sem); in cpufreq_interactive_notifier()
691 spin_lock_irqsave(&icpu->load_lock, flags); in cpufreq_interactive_notifier()
692 update_load(icpu, cpu); in cpufreq_interactive_notifier()
693 spin_unlock_irqrestore(&icpu->load_lock, flags); in cpufreq_interactive_notifier()
695 up_read(&icpu->enable_sem); in cpufreq_interactive_notifier()
1079 struct interactive_cpu *icpu = container_of(irq_work, struct in irq_work() local
1082 cpufreq_interactive_update(icpu); in irq_work()
1083 icpu->work_in_progress = false; in irq_work()
1089 struct interactive_cpu *icpu = container_of(data, in update_util_handler() local
1091 struct interactive_policy *ipolicy = icpu->ipolicy; in update_util_handler()
1101 if (icpu->work_in_progress) in update_util_handler()
1104 delta_ns = time - icpu->last_sample_time; in update_util_handler()
1108 icpu->last_sample_time = time; in update_util_handler()
1109 icpu->next_sample_jiffies = usecs_to_jiffies(tunables->sampling_rate) + in update_util_handler()
1112 icpu->work_in_progress = true; in update_util_handler()
1113 irq_work_queue_on(&icpu->irq_work, icpu->cpu); in update_util_handler()
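update_util_handler() is the scheduler-side entry point: it runs from the cpufreq utilization hook, so it only rate-limits and then defers the real re-evaluation to per-CPU irq_work, which in turn calls cpufreq_interactive_update() (lines 1079-1083). A hedged reconstruction from the lines above; the exact form of the sampling_rate comparison is an assumption.

/* Hedged reconstruction of update_util_handler(); see caveats above. */
static void update_util_handler(struct update_util_data *data, u64 time,
				unsigned int flags)
{
	struct interactive_cpu *icpu = container_of(data,
					struct interactive_cpu, update_util);
	struct interactive_policy *ipolicy = icpu->ipolicy;
	struct interactive_tunables *tunables = ipolicy->tunables;
	u64 delta_ns;

	/* A previously queued sample is still being processed. */
	if (icpu->work_in_progress)
		return;

	/* Rate-limit to one sample per sampling_rate microseconds (assumed). */
	delta_ns = time - icpu->last_sample_time;
	if ((s64)delta_ns < tunables->sampling_rate * NSEC_PER_USEC)
		return;

	icpu->last_sample_time = time;
	icpu->next_sample_jiffies = usecs_to_jiffies(tunables->sampling_rate) +
				    jiffies;

	icpu->work_in_progress = true;
	irq_work_queue_on(&icpu->irq_work, icpu->cpu);
}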
1119 struct interactive_cpu *icpu; in gov_set_update_util() local
1123 icpu = &per_cpu(interactive_cpu, cpu); in gov_set_update_util()
1125 icpu->last_sample_time = 0; in gov_set_update_util()
1126 icpu->next_sample_jiffies = 0; in gov_set_update_util()
1127 cpufreq_add_update_util_hook(cpu, &icpu->update_util, in gov_set_update_util()
1142 static void icpu_cancel_work(struct interactive_cpu *icpu) in icpu_cancel_work() argument
1144 irq_work_sync(&icpu->irq_work); in icpu_cancel_work()
1145 icpu->work_in_progress = false; in icpu_cancel_work()
1146 del_timer_sync(&icpu->slack_timer); in icpu_cancel_work()
1500 struct interactive_cpu *icpu; in cpufreq_interactive_start() local
1504 icpu = &per_cpu(interactive_cpu, cpu); in cpufreq_interactive_start()
1506 icpu->target_freq = policy->cur; in cpufreq_interactive_start()
1507 icpu->floor_freq = icpu->target_freq; in cpufreq_interactive_start()
1508 icpu->pol_floor_val_time = ktime_to_us(ktime_get()); in cpufreq_interactive_start()
1509 icpu->loc_floor_val_time = icpu->pol_floor_val_time; in cpufreq_interactive_start()
1510 icpu->pol_hispeed_val_time = icpu->pol_floor_val_time; in cpufreq_interactive_start()
1511 icpu->loc_hispeed_val_time = icpu->pol_floor_val_time; in cpufreq_interactive_start()
1512 icpu->cpu = cpu; in cpufreq_interactive_start()
1514 down_write(&icpu->enable_sem); in cpufreq_interactive_start()
1515 icpu->ipolicy = ipolicy; in cpufreq_interactive_start()
1516 slack_timer_resched(icpu, cpu, false); in cpufreq_interactive_start()
1517 up_write(&icpu->enable_sem); in cpufreq_interactive_start()
1527 struct interactive_cpu *icpu; in cpufreq_interactive_stop() local
1533 icpu = &per_cpu(interactive_cpu, cpu); in cpufreq_interactive_stop()
1535 down_write(&icpu->enable_sem); in cpufreq_interactive_stop()
1536 icpu_cancel_work(icpu); in cpufreq_interactive_stop()
1537 icpu->ipolicy = NULL; in cpufreq_interactive_stop()
1538 up_write(&icpu->enable_sem); in cpufreq_interactive_stop()
1544 struct interactive_cpu *icpu; in cpufreq_interactive_limits() local
1551 icpu = &per_cpu(interactive_cpu, cpu); in cpufreq_interactive_limits()
1553 spin_lock_irqsave(&icpu->target_freq_lock, flags); in cpufreq_interactive_limits()
1555 if (policy->max < icpu->target_freq) in cpufreq_interactive_limits()
1556 icpu->target_freq = policy->max; in cpufreq_interactive_limits()
1557 else if (policy->min > icpu->target_freq) in cpufreq_interactive_limits()
1558 icpu->target_freq = policy->min; in cpufreq_interactive_limits()
1560 spin_unlock_irqrestore(&icpu->target_freq_lock, flags); in cpufreq_interactive_limits()
1590 struct interactive_cpu *icpu; in cpufreq_interactive_gov_init() local
1594 icpu = &per_cpu(interactive_cpu, cpu); in cpufreq_interactive_gov_init()
1596 init_irq_work(&icpu->irq_work, irq_work); in cpufreq_interactive_gov_init()
1597 spin_lock_init(&icpu->load_lock); in cpufreq_interactive_gov_init()
1598 spin_lock_init(&icpu->target_freq_lock); in cpufreq_interactive_gov_init()
1599 init_rwsem(&icpu->enable_sem); in cpufreq_interactive_gov_init()
1602 timer_setup(&icpu->slack_timer, cpufreq_interactive_nop_timer, in cpufreq_interactive_gov_init()
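Collected from every reference above, the per-CPU state the governor keeps looks roughly like this. Only the field names are taken from the listing; the types, ordering, and comments are assumptions, and any fields this listing never touches are missing.

/* Sketch of struct interactive_cpu as implied by the references above. */
struct interactive_cpu {
	struct update_util_data update_util;	/* hook set in gov_set_update_util() */
	struct interactive_policy *ipolicy;	/* NULL while the governor is stopped */

	struct irq_work irq_work;		/* queued from update_util_handler() */
	bool work_in_progress;

	struct rw_semaphore enable_sem;		/* write-held across start/stop */
	struct timer_list slack_timer;		/* cpufreq_interactive_nop_timer */

	spinlock_t load_lock;			/* protects the load-tracking fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;

	spinlock_t target_freq_lock;		/* protects target/floor frequency state */
	unsigned int target_freq;
	unsigned int floor_freq;

	u64 pol_floor_val_time;			/* policy-wide floor validity time */
	u64 loc_floor_val_time;			/* per-CPU floor validity time */
	u64 pol_hispeed_val_time;		/* policy-wide hispeed validity time */
	u64 loc_hispeed_val_time;		/* per-CPU hispeed validity time */

	u64 last_sample_time;			/* rate limiting in update_util_handler() */
	unsigned long next_sample_jiffies;	/* sampling gate in idle_end() */

	int cpu;
};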