Searched refs:SCHED_CAPACITY_SHIFT (Results 1 – 13 of 13) sorted by relevance
150 >> (SCHED_CAPACITY_SHIFT+1); in parse_dt_topology()
153 >> (SCHED_CAPACITY_SHIFT-1)) + 1; in parse_dt_topology()
140 ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT); in validate_cpu_freq_invariance_counters()
262 scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT, in topology_scale_freq_tick()
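These two arm64 AMU hits show a two-stage scaling: at boot the constant-timer rate is shifted up by 2 * SCHED_CAPACITY_SHIFT and divided by the maximum frequency, and each tick one of those two shifts is taken back out, leaving a 1024-based frequency scale. A rough user-space sketch of the arithmetic, with illustrative rates not taken from the source:

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    int main(void)
    {
        /* Illustrative rates: 25 MHz constant timer, 2.0 GHz max core clock. */
        uint64_t timer_rate = 25000000, max_freq_hz = 2000000000;

        /* Boot time: the ratio carries 2*SHIFT bits of precision, as in
         * validate_cpu_freq_invariance_counters(). */
        uint64_t ratio = (timer_rate << (2 * SCHED_CAPACITY_SHIFT)) / max_freq_hz;

        /* Per tick: counter deltas over a 1 ms window for a CPU running
         * at 1.0 GHz, i.e. half of its maximum frequency. */
        uint64_t d_const = 25000, d_core = 1000000;

        /* One shift comes back out here; the divide removes the timer rate. */
        uint64_t scale = (d_core * ratio >> SCHED_CAPACITY_SHIFT) / d_const;

        printf("scale=%llu\n", (unsigned long long)scale); /* ~512 */
        return 0;
    }

Carrying 2 * SCHED_CAPACITY_SHIFT bits through the boot-time divide is what keeps the per-tick result accurate after the final shift down.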
223 sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT; in accumulate_sum()
225 sa->util_sum += contrib << SCHED_CAPACITY_SHIFT; in accumulate_sum()
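accumulate_sum() stores util_sum and runnable_sum pre-shifted by SCHED_CAPACITY_SHIFT; consumers such as update_tg_cfs_load() (result below) shift back down when converting to capacity units. A simplified sketch of that up/down pairing, assuming SCHED_CAPACITY_SHIFT = 10:

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    int main(void)
    {
        uint64_t util_sum = 0;

        /* Accumulate three running-time contributions, pre-shifted. */
        for (int i = 0; i < 3; i++) {
            uint32_t contrib = 1024; /* e.g. a fully busy 1024 us segment */
            util_sum += (uint64_t)contrib << SCHED_CAPACITY_SHIFT;
        }

        /* Consumers shift back down, as update_tg_cfs_load() does. */
        printf("running_sum=%llu\n",
               (unsigned long long)(util_sum >> SCHED_CAPACITY_SHIFT)); /* 3072 */
        return 0;
    }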
123 u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX; in update_idle_rq_clock_pelt()
427 boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT; in sugov_iowait_apply()
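sugov_iowait_apply() treats iowait_boost as a fraction of SCHED_CAPACITY_SCALE, so multiplying by max and shifting right by SCHED_CAPACITY_SHIFT yields a boost in the same units as max. A minimal sketch with made-up values:

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10
    #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        long max = 1024;                              /* max capacity/utilization */
        long iowait_boost = SCHED_CAPACITY_SCALE / 2; /* a 50% boost level */
        long boost = (iowait_boost * max) >> SCHED_CAPACITY_SHIFT;

        printf("%ld\n", boost); /* 512 */
        return 0;
    }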
203 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
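cap_scale() is the elementary fixed-point multiply behind most of these hits: scale v by s, where s is a factor out of 1024. For example (values illustrative):

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10
    #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        /* Scale 1 ms of runtime by a frequency factor of 768/1024 (75%). */
        unsigned long delta_ns = 1000000, freq_factor = 768;

        printf("%lu\n", cap_scale(delta_ns, freq_factor)); /* 750000 */
        return 0;
    }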
117 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT; in dl_bw_capacity()
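For deadline-bandwidth admission, each full-capacity CPU contributes SCHED_CAPACITY_SCALE, so on a uniform system the total capacity is simply the CPU count shifted left by SCHED_CAPACITY_SHIFT. A trivial sketch:

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    int main(void)
    {
        int nr_cpus = 8;

        /* 8 full-capacity CPUs -> 8 * 1024 = 8192 capacity units. */
        printf("%d\n", nr_cpus << SCHED_CAPACITY_SHIFT);
        return 0;
    }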
3556 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
3711 -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT); in update_cfs_rq_load_avg()
8202 req.util = req.percent << SCHED_CAPACITY_SHIFT; in capacity_from_percent()
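capacity_from_percent() maps a percentage to capacity units; shifting up by SCHED_CAPACITY_SHIFT before dividing by the percent scale preserves precision. A hedged sketch of that order of operations (the 100 divisor is a simplification; the kernel's internal percent scale may differ):

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    /* Shift up first, divide second, so no precision is lost to the divide. */
    static uint64_t util_from_percent(uint64_t percent, uint64_t percent_scale)
    {
        return (percent << SCHED_CAPACITY_SHIFT) / percent_scale;
    }

    int main(void)
    {
        /* 75% of full capacity -> 768 capacity units. */
        printf("%llu\n", (unsigned long long)util_from_percent(75, 100));
        return 0;
    }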
55 scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq; in topology_set_freq_scale()
169 capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT, in topology_normalize_cpu_scale()
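topology_set_freq_scale() computes the frequency-invariance factor as cur_freq * 1024 / max_freq, so a CPU at half its maximum frequency reports a scale of 512; topology_normalize_cpu_scale() applies the analogous normalization to raw capacities. Sketch:

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    /* cur/max in 1024-based fixed point, as in topology_set_freq_scale(). */
    static unsigned long freq_scale(unsigned long cur_freq, unsigned long max_freq)
    {
        return (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
    }

    int main(void)
    {
        printf("%lu\n", freq_scale(1400000, 2800000)); /* 512 */
        return 0;
    }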
662 return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf); in get_max_boost_ratio()
837 policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT; in acpi_cpufreq_cpu_init()
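acpi-cpufreq does a full fixed-point round trip: get_max_boost_ratio() encodes highest_perf/nominal_perf as a 1024-based ratio, and acpi_cpufreq_cpu_init() multiplies the nominal frequency by that ratio and shifts back down. A sketch with illustrative CPPC numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    int main(void)
    {
        uint64_t highest_perf = 280, nominal_perf = 200; /* illustrative values */
        uint64_t freq = 2000000;                         /* nominal freq in kHz */

        /* ratio = highest/nominal in fixed point: 280/200 -> 1433/1024, ~1.4 */
        uint64_t ratio = (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;
        /* Maximum boosted frequency, ~2.8 GHz. */
        uint64_t max_freq = freq * ratio >> SCHED_CAPACITY_SHIFT;

        printf("ratio=%llu max_freq=%llu\n",
               (unsigned long long)ratio, (unsigned long long)max_freq);
        return 0;
    }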
324 # define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT macro
325 # define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
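These two defines anchor everything above: SCHED_FIXEDPOINT_SHIFT is 10, so SCHED_CAPACITY_SCALE is 1024 and capacities are 10-bit fixed-point fractions of one fully-capable CPU. A minimal restatement in user space:

    #include <stdio.h>

    #define SCHED_FIXEDPOINT_SHIFT 10
    #define SCHED_CAPACITY_SHIFT   SCHED_FIXEDPOINT_SHIFT
    #define SCHED_CAPACITY_SCALE   (1L << SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        /* A CPU at 60% of the biggest CPU's capacity. */
        long capacity = 6 * SCHED_CAPACITY_SCALE / 10; /* 614 */

        printf("scale=%ld capacity=%ld\n", SCHED_CAPACITY_SCALE, capacity);
        return 0;
    }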
2095 if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt)) in arch_scale_freq_tick()
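On x86, arch_scale_freq_tick() shifts the APERF delta up by 2 * SCHED_CAPACITY_SHIFT = 20 bits before dividing, and check_shl_overflow() aborts the computation when those bits would not fit. A user-space sketch of the same guard; shl_overflows() here is a hypothetical stand-in for the kernel helper:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    /* Hypothetical stand-in for the kernel's check_shl_overflow():
     * returns true when (v << shift) would lose high bits. */
    static bool shl_overflows(uint64_t v, unsigned int shift, uint64_t *out)
    {
        if (shift >= 64) {
            *out = 0;
            return true;
        }
        *out = v << shift;
        return (*out >> shift) != v;
    }

    int main(void)
    {
        uint64_t acnt = UINT64_C(1) << 50; /* large APERF delta */

        if (shl_overflows(acnt, 2 * SCHED_CAPACITY_SHIFT, &acnt))
            printf("overflow: skip freq scale this tick\n");
        else
            printf("acnt=%llu\n", (unsigned long long)acnt);
        return 0;
    }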