Lines Matching refs:cpu_map
285 static void perf_domain_debug(const struct cpumask *cpu_map, in perf_domain_debug() argument
291 printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map)); in perf_domain_debug()
350 static bool build_perf_domains(const struct cpumask *cpu_map) in build_perf_domains() argument
352 int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map); in build_perf_domains()
354 int cpu = cpumask_first(cpu_map); in build_perf_domains()
369 cpumask_pr_args(cpu_map)); in build_perf_domains()
377 cpumask_pr_args(cpu_map)); in build_perf_domains()
381 for_each_cpu(i, cpu_map) { in build_perf_domains()
404 cpumask_pr_args(cpu_map)); in build_perf_domains()
408 perf_domain_debug(cpu_map, pd); in build_perf_domains()
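
The build_perf_domains() references above follow a common cpumask idiom: size the map, pick a reference CPU, then walk every CPU in it. These references appear to come from the scheduler topology code (kernel/sched/topology.c in the Linux tree). Below is a minimal sketch of that idiom, not the kernel function itself; walk_cpu_map() is a made-up name and only standard <linux/cpumask.h> helpers are used.

	#include <linux/cpumask.h>
	#include <linux/printk.h>

	/* Sketch only: count the CPUs in cpu_map, pick a reference CPU,
	 * print the mask as a CPU list, then visit each CPU. */
	static void walk_cpu_map(const struct cpumask *cpu_map)
	{
		int i;
		int nr_cpus = cpumask_weight(cpu_map);	/* number of CPUs in the map */
		int first   = cpumask_first(cpu_map);	/* reference CPU, as in build_perf_domains() */

		printk(KERN_DEBUG "cpu_map %*pbl: %d CPUs, first=%d\n",
		       cpumask_pr_args(cpu_map), nr_cpus, first);

		for_each_cpu(i, cpu_map) {
			/* per-CPU work goes here */
		}
	}
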
1219 static void __sdt_free(const struct cpumask *cpu_map);
1220 static int __sdt_alloc(const struct cpumask *cpu_map);
1223 const struct cpumask *cpu_map) in __free_domain_allocs() argument
1234 __sdt_free(cpu_map); in __free_domain_allocs()
1242 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) in __visit_domain_allocation_hell() argument
1246 if (__sdt_alloc(cpu_map)) in __visit_domain_allocation_hell()
1316 const struct cpumask *cpu_map, in sd_init() argument
1372 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); in sd_init()
1737 static int __sdt_alloc(const struct cpumask *cpu_map) in __sdt_alloc() argument
1761 for_each_cpu(j, cpu_map) { in __sdt_alloc()
1806 static void __sdt_free(const struct cpumask *cpu_map) in __sdt_free() argument
1814 for_each_cpu(j, cpu_map) { in __sdt_free()
1843 const struct cpumask *cpu_map, struct sched_domain_attr *attr, in build_sched_domain() argument
1846 struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu); in build_sched_domain()
1877 const struct cpumask *cpu_map, int cpu) in topology_span_sane() argument
1891 for_each_cpu(i, cpu_map) { in topology_span_sane()
1913 *asym_cpu_capacity_level(const struct cpumask *cpu_map) in asym_cpu_capacity_level() argument
1921 cap = arch_scale_cpu_capacity(cpumask_first(cpu_map)); in asym_cpu_capacity_level()
1923 for_each_cpu(i, cpu_map) { in asym_cpu_capacity_level()
1938 for_each_cpu(i, cpu_map) { in asym_cpu_capacity_level()
1946 for_each_cpu_and(j, tl->mask(i), cpu_map) { in asym_cpu_capacity_level()
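
The asym_cpu_capacity_level() references show how cpu_map is scanned for asymmetric CPU capacities: the capacity of the first CPU serves as a reference and the remaining CPUs are compared against it. The sketch below illustrates that comparison, assuming the single-argument arch_scale_cpu_capacity() form used in the listing; cpu_map_is_asymmetric() is a hypothetical helper, not the kernel function.

	#include <linux/cpumask.h>
	#include <linux/sched/topology.h>

	/* Sketch: true if the CPUs in cpu_map do not all share one capacity. */
	static bool cpu_map_is_asymmetric(const struct cpumask *cpu_map)
	{
		unsigned long cap = arch_scale_cpu_capacity(cpumask_first(cpu_map));
		int i;

		for_each_cpu(i, cpu_map) {
			if (arch_scale_cpu_capacity(i) != cap)
				return true;	/* at least two distinct capacities */
		}
		return false;
	}
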
1972 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) in build_sched_domains() argument
1982 if (WARN_ON(cpumask_empty(cpu_map))) in build_sched_domains()
1985 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); in build_sched_domains()
1989 tl_asym = asym_cpu_capacity_level(cpu_map); in build_sched_domains()
1992 for_each_cpu(i, cpu_map) { in build_sched_domains()
2003 if (WARN_ON(!topology_span_sane(tl, cpu_map, i))) in build_sched_domains()
2006 sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i); in build_sched_domains()
2012 if (cpumask_equal(cpu_map, sched_domain_span(sd))) in build_sched_domains()
2018 for_each_cpu(i, cpu_map) { in build_sched_domains()
2033 if (!cpumask_test_cpu(i, cpu_map)) in build_sched_domains()
2044 for_each_cpu(i, cpu_map) { in build_sched_domains()
2061 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); in build_sched_domains()
2067 __free_domain_allocs(&d, alloc_state, cpu_map); in build_sched_domains()
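
The build_sched_domains() references combine three recurring guards on cpu_map: reject an empty map, stop stacking domains once one spans the whole map, and skip CPUs outside the map when walking a domain span. A simplified sketch of those guards follows; check_cpu_map() is an illustrative helper and omits the real allocation and group construction.

	#include <linux/cpumask.h>
	#include <linux/sched/topology.h>
	#include <linux/errno.h>
	#include <linux/bug.h>

	/* Sketch: validate cpu_map against one sched_domain's span. */
	static int check_cpu_map(const struct cpumask *cpu_map, struct sched_domain *sd)
	{
		int i;

		if (WARN_ON(cpumask_empty(cpu_map)))
			return -EINVAL;			/* refuse an empty map */

		if (cpumask_equal(cpu_map, sched_domain_span(sd)))
			return 1;			/* this domain already covers the whole map */

		for_each_cpu(i, sched_domain_span(sd)) {
			if (!cpumask_test_cpu(i, cpu_map))
				continue;		/* ignore CPUs outside the map */
			/* per-CPU group/capacity setup would go here */
		}
		return 0;
	}
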
2127 int sched_init_domains(const struct cpumask *cpu_map) in sched_init_domains() argument
2140 cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN)); in sched_init_domains()
2151 static void detach_destroy_domains(const struct cpumask *cpu_map) in detach_destroy_domains() argument
2153 unsigned int cpu = cpumask_any(cpu_map); in detach_destroy_domains()
2160 for_each_cpu(i, cpu_map) in detach_destroy_domains()
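
Finally, sched_init_domains() intersects the requested cpu_map with the housekeeping CPUs before any domains are built, and detach_destroy_domains() only needs cpumask_any() to pick some CPU from the map it is tearing down. Below is a minimal sketch of the masking step, assuming the HK_FLAG_DOMAIN spelling used in the listing (newer kernels spell it HK_TYPE_DOMAIN); restrict_to_housekeeping() is a hypothetical name.

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/sched/isolation.h>

	/* Sketch: keep only the housekeeping CPUs from the requested map. */
	static int restrict_to_housekeeping(const struct cpumask *cpu_map, struct cpumask *dst)
	{
		cpumask_and(dst, cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN));
		return cpumask_empty(dst) ? -ENODEV : 0;
	}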