Searched refs:SCHED_CAPACITY_SCALE (Results 1 – 13 of 13) sorted by relevance
46 set_uclamp_util_min_rt(SCHED_CAPACITY_SCALE); in update_perf_level_locked()
101 int cpub_min_cap = SCHED_CAPACITY_SCALE - (SCHED_CAPACITY_SCALE >> 3); in rockchip_perf_init()
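
These first hits are from a Rockchip vendor (BSP) driver rather than mainline code. With SCHED_CAPACITY_SCALE at its value of 1024 (1 << 10), the second hit pins the big-cluster minimum capacity at 1024 - (1024 >> 3) = 1024 - 128 = 896, i.e. 7/8 of full scale.
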
1824 static u64 arch_turbo_freq_ratio = SCHED_CAPACITY_SCALE;
1825 static u64 arch_max_freq_ratio = SCHED_CAPACITY_SCALE;
1829 arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE : in arch_set_max_freq_ratio()
2019 turbo_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE, base_freq); in intel_set_max_freq_ratio()
2075 DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
2079 u64 freq_scale = SCHED_CAPACITY_SCALE; in arch_scale_freq_tick()
2105 if (freq_scale > SCHED_CAPACITY_SCALE) in arch_scale_freq_tick()
2106 freq_scale = SCHED_CAPACITY_SCALE; in arch_scale_freq_tick()
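
These hits are the x86 frequency-invariance code (arch/x86/kernel/smpboot.c in kernels of this vintage): each tick, the APERF/MPERF delta ratio is converted into a capacity-scaled frequency ratio and clamped to SCHED_CAPACITY_SCALE. Below is a minimal user-space sketch of that fixed-point arithmetic; the counter deltas are made up, and the intermediate shift/multiply follows the kernel's shape rather than quoting it exactly.

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)

int main(void)
{
	/* Made-up per-tick deltas: the CPU ran at 75% of its reference clock. */
	uint64_t acnt = 3000000;	/* APERF delta: cycles actually executed */
	uint64_t mcnt = 4000000;	/* MPERF delta: reference-clock cycles */
	uint64_t max_freq_ratio = SCHED_CAPACITY_SCALE;	/* turbo-disabled case */

	/* Scale the numerator up and weight the denominator by the
	 * max-to-base frequency ratio, then divide: one fixed-point step. */
	acnt <<= 2 * SCHED_CAPACITY_SHIFT;
	mcnt *= max_freq_ratio;

	uint64_t freq_scale = acnt / mcnt;
	if (freq_scale > SCHED_CAPACITY_SCALE)	/* the clamp at lines 2105-2106 */
		freq_scale = SCHED_CAPACITY_SCALE;

	printf("freq_scale = %llu of %ld\n",
	       (unsigned long long)freq_scale, SCHED_CAPACITY_SCALE);
	return 0;
}

With these deltas the CPU ran at 75% of its reference frequency, so freq_scale comes out at 768 of 1024.
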
107 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) in sched_domain_debug_one()
926 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); in init_overlap_sched_group()
927 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
928 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
1092 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); in get_group()
1093 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in get_group()
1094 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in get_group()
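
These initializers in kernel/sched/topology.c give a freshly built sched group a nominal capacity of SCHED_CAPACITY_SCALE per spanned CPU, so a group spanning four CPUs starts at 4 * 1024 = 4096; the load-balancing code later overwrites this with measured per-CPU capacities.
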
17 #define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
370 min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE); in sugov_iowait_boost()
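
The schedutil hits show the iowait boost ramp (kernel/sched/cpufreq_schedutil.c): the boost starts at SCHED_CAPACITY_SCALE / 8 and doubles on each consecutive iowait wakeup, saturating at full scale. A minimal sketch of that ramp, with the min_t() clamp spelled out as an if:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024U
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

int main(void)
{
	unsigned int boost = IOWAIT_BOOST_MIN;

	while (boost < SCHED_CAPACITY_SCALE) {
		printf("boost = %u\n", boost);
		boost <<= 1;				/* sg_cpu->iowait_boost << 1 */
		if (boost > SCHED_CAPACITY_SCALE)	/* the min_t() clamp */
			boost = SCHED_CAPACITY_SCALE;
	}
	printf("boost = %u (saturated)\n", boost);
	return 0;
}

Three doublings (128 -> 256 -> 512 -> 1024) are enough to reach full scale.
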
877 unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
878 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
2132 return SCHED_CAPACITY_SCALE; in arch_scale_freq_capacity()
2608 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; in cpu_bw_dl()
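
The cpu_bw_dl() hit (kernel/sched/sched.h) converts between two fixed-point scales: deadline bandwidth is tracked in 1/2^BW_SHIFT units (BW_SHIFT is 20), and multiplying by SCHED_CAPACITY_SCALE then shifting right by BW_SHIFT rescales it to the 0..1024 capacity range. A small sketch with a made-up 10 ms / 100 ms reservation:

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT		20
#define SCHED_CAPACITY_SCALE	1024ULL

int main(void)
{
	uint64_t runtime = 10000000ULL;		/* 10 ms, in ns */
	uint64_t period  = 100000000ULL;	/* 100 ms, in ns */

	/* to_ratio()-style fixed point: bandwidth in 2^-20 units */
	uint64_t running_bw = (runtime << BW_SHIFT) / period;

	/* cpu_bw_dl(): rescale bandwidth units to capacity units */
	uint64_t cap = (running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;

	printf("running_bw = %llu -> %llu of %llu capacity units\n",
	       (unsigned long long)running_bw, (unsigned long long)cap,
	       (unsigned long long)SCHED_CAPACITY_SCALE);
	return 0;
}

A 10% reservation lands at 102 of 1024, as expected.
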
925 unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
928 unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
945 unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
972 #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
986 return SCHED_CAPACITY_SCALE; in uclamp_none()
1433 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE || in sysctl_sched_uclamp_handler()
1434 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) { in sysctl_sched_uclamp_handler()
1488 if (util_min + 1 > SCHED_CAPACITY_SCALE + 1) in uclamp_validate()
1495 if (util_max + 1 > SCHED_CAPACITY_SCALE + 1) in uclamp_validate()
7586 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; in sched_init()
[all …]
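
These are the uclamp sysctl defaults and validation in kernel/sched/core.c. UCLAMP_BUCKET_DELTA splits the 0..SCHED_CAPACITY_SCALE range into UCLAMP_BUCKETS buckets, and the odd-looking `util_min + 1 > SCHED_CAPACITY_SCALE + 1` form lets a -1 sentinel ("leave unchanged") wrap to 0 and pass the check. Below is a sketch of the bucket mapping, mirroring the kernel's uclamp_bucket_id() with the default of 5 buckets; DIV_ROUND_CLOSEST here is the positive-operand form of the kernel macro:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
#define UCLAMP_BUCKETS		5	/* CONFIG_UCLAMP_BUCKETS_COUNT default */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define UCLAMP_BUCKET_DELTA	DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

static unsigned int uclamp_bucket_id(unsigned int value)
{
	unsigned int id = value / UCLAMP_BUCKET_DELTA;

	return id < UCLAMP_BUCKETS - 1 ? id : UCLAMP_BUCKETS - 1;
}

int main(void)
{
	printf("bucket delta = %d\n", UCLAMP_BUCKET_DELTA);	/* 205 */
	printf("value    0 -> bucket %u\n", uclamp_bucket_id(0));
	printf("value  512 -> bucket %u\n", uclamp_bucket_id(512));
	printf("value 1024 -> bucket %u\n", uclamp_bucket_id(1024));
	return 0;
}
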
3986 #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
8711 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / in update_sg_lb_stats()
8968 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / in update_sg_wakeup_stats()
9395 local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) / in calculate_imbalance()
9398 sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) / in calculate_imbalance()
9422 ) / SCHED_CAPACITY_SCALE; in calculate_imbalance()
9525 sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) / in find_busiest_group()
11683 SCHED_CAPACITY_SCALE in sched_trace_rq_cpu_capacity()
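
The fair.c hits all repeat one normalization: avg_load = load * SCHED_CAPACITY_SCALE / capacity, which keeps load comparable across groups of unequal capacity. A sketch with two made-up groups:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

/* avg_load = group_load * SCHED_CAPACITY_SCALE / group_capacity */
static unsigned long avg_load(unsigned long group_load,
			      unsigned long group_capacity)
{
	return group_load * SCHED_CAPACITY_SCALE / group_capacity;
}

int main(void)
{
	/* Big group: two CPUs at full capacity, carrying load 1500. */
	printf("big    avg_load = %lu\n", avg_load(1500, 2 * 1024));
	/* Little group: two CPUs at capacity 512, carrying load 900. */
	printf("little avg_load = %lu\n", avg_load(900, 2 * 512));
	return 0;
}

Despite carrying less raw load, the little group's normalized avg_load (900) exceeds the big group's (750), so it is the busier of the two.
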
116 capacity_orig_of(i) == SCHED_CAPACITY_SCALE) { in dl_bw_capacity()
135 return SCHED_CAPACITY_SCALE; in dl_bw_capacity()
35 DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
63 DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
250 return SCHED_CAPACITY_SCALE; in arch_scale_cpu_capacity()
265 scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE); in topology_scale_freq_tick()
325 # define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) macro
628 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
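
This is the definition itself (include/linux/sched.h): with SCHED_CAPACITY_SHIFT equal to SCHED_FIXEDPOINT_SHIFT (10), the scale is 1 << 10 = 1024. bits_per(1024) is 11, so the uclamp bitfields seen at lines 628 and 877-878 reserve eleven bits, the minimum that can hold the inclusive 0..1024 range.
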
821 will be SCHED_CAPACITY_SCALE/UCLAMP_BUCKETS_COUNT. The higher the
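
This last hit is the UCLAMP_BUCKETS_COUNT help text (init/Kconfig). With the default of 5 buckets, each bucket spans roughly 1024 / 5 ≈ 205 utilization units; at the Kconfig maximum of 20 buckets the granularity tightens to about 51.
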