/OK3568_Linux_fs/kernel/drivers/infiniband/hw/hfi1/
  affinity.c
     147  possible = cpumask_weight(&node_affinity.real_cpu_mask);  in init_real_cpu_mask()
     148  ht = cpumask_weight(topology_sibling_cpumask(  in init_real_cpu_mask()
     179  cpumask_weight(topology_sibling_cpumask(  in node_affinity_init()
     551  if (cpumask_weight(&entry->comp_vect_mask) == 1) {  in _dev_comp_vect_cpu_mask_init()
     557  cpumask_weight(&entry->comp_vect_mask) /  in _dev_comp_vect_cpu_mask_init()
     566  cpumask_weight(&entry->comp_vect_mask) %  in _dev_comp_vect_cpu_mask_init()
     670  possible = cpumask_weight(&entry->def_intr.mask);  in hfi1_dev_affinity_init()
     711  if (cpumask_weight(&entry->def_intr.mask) == 0)  in hfi1_dev_affinity_init()
     731  if (cpumask_weight(&entry->comp_vect_mask) == 0)  in hfi1_dev_affinity_init()
    1016  possible = cpumask_weight(hw_thread_mask);  in find_hw_thread_mask()
    [all …]
|
/OK3568_Linux_fs/kernel/kernel/irq/
  affinity.c
     146  ncpus = cpumask_weight(nmsk);  in alloc_nodes_vectors()
     261  if (!cpumask_weight(cpu_mask))  in __irq_build_affinity_masks()
     300  ncpus = cpumask_weight(nmsk);  in __irq_build_affinity_masks()
     510  set_vecs = cpumask_weight(cpu_possible_mask);  in irq_calc_affinity_vectors()
|
  ipi.c
      40  nr_irqs = cpumask_weight(dest);  in irq_reserve_ipi()
     143  nr_irqs = cpumask_weight(dest);  in irq_destroy_ipi()
|
/OK3568_Linux_fs/kernel/drivers/powercap/
  dtpm_cpu.c
      86  nr_cpus = cpumask_weight(&cpus);  in set_pd_power_limit()
     117  nr_cpus = cpumask_weight(&cpus);  in get_pd_power_uw()
     166  if (cpumask_weight(policy->cpus) != 1)  in cpuhp_dtpm_cpu_offline()
|
/OK3568_Linux_fs/kernel/arch/x86/include/asm/trace/
  hyperv.h
      21  TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
      67  TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
|
/OK3568_Linux_fs/kernel/include/linux/
  cpumask.h
     114  #define num_possible_cpus() cpumask_weight(cpu_possible_mask)
     115  #define num_present_cpus() cpumask_weight(cpu_present_mask)
     116  #define num_active_cpus() cpumask_weight(cpu_active_mask)
     561  static inline unsigned int cpumask_weight(const struct cpumask *srcp)  in cpumask_weight() function
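
Note: the cpumask.h hits above are the heart of the API: cpumask_weight() is a
population count over a struct cpumask, and num_possible_cpus(),
num_present_cpus() and num_active_cpus() are thin wrappers around it. A
minimal sketch of that relationship, assuming a hypothetical out-of-tree demo
module (the module and function names below are made up for illustration):

    /* Hypothetical demo module: logs the same counts the macros above
     * compute.  Not part of this tree. */
    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/cpumask.h>

    static int __init cpumask_weight_demo_init(void)
    {
            /* Equivalent to num_possible_cpus() / num_present_cpus() /
             * num_active_cpus() from include/linux/cpumask.h. */
            pr_info("possible CPUs: %u\n", cpumask_weight(cpu_possible_mask));
            pr_info("present  CPUs: %u\n", cpumask_weight(cpu_present_mask));
            pr_info("active   CPUs: %u\n", cpumask_weight(cpu_active_mask));
            return 0;
    }

    static void __exit cpumask_weight_demo_exit(void)
    {
    }

    module_init(cpumask_weight_demo_init);
    module_exit(cpumask_weight_demo_exit);
    MODULE_LICENSE("GPL");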
|
  topology.h
      39  #define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
|
/OK3568_Linux_fs/kernel/drivers/thermal/
  cpufreq_cooling.c
     217  u32 ncpus = cpumask_weight(policy->related_cpus);  in cpufreq_get_requested_power()
     275  num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);  in cpufreq_state2power()
     517  num_cpus = cpumask_weight(policy->related_cpus);  in __cpufreq_cooling_register()
|
/OK3568_Linux_fs/kernel/arch/x86/platform/uv/
  uv_nmi.c
     626  k = n - cpumask_weight(uv_nmi_cpu_mask);  in uv_nmi_wait_cpus()
     684  cpumask_weight(uv_nmi_cpu_mask),  in uv_nmi_wait()
     694  cpumask_weight(uv_nmi_cpu_mask),  in uv_nmi_wait()
     988  if (cpumask_weight(uv_nmi_cpu_mask))  in uv_handle_nmi()
|
/OK3568_Linux_fs/kernel/kernel/sched/
  topology.c
      82  if (!cpumask_weight(sched_group_span(group))) {  in sched_domain_debug_one()
     177  if (cpumask_weight(sched_domain_span(sd)) == 1)  in sd_degenerate()
     352  int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);  in build_perf_domains()
     642  size = cpumask_weight(sched_domain_span(sd));  in update_top_cache_domain()
     926  sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);  in init_overlap_sched_group()
    1092  sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));  in get_group()
    1161  sg->group_weight = cpumask_weight(sched_group_span(sg));  in init_sched_groups_capacity()
    1330  sd_weight = cpumask_weight(tl->mask(cpu));  in sd_init()
    2020  sd->span_weight = cpumask_weight(sched_domain_span(sd));  in build_sched_domains()
|
  deadline.c
      85  return cpumask_weight(rd->span);  in dl_bw_cpus()
     648  __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));  in dl_task_offline_migration()
     653  __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));  in dl_task_offline_migration()
    2393  __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));  in dl_add_task_root_domain()
    2840  trial_cpus = cpumask_weight(trial);  in dl_cpuset_cpumask_can_shrink()
|
/OK3568_Linux_fs/kernel/arch/powerpc/kernel/
  watchdog.c
     162  if (cpumask_weight(&wd_smp_cpus_pending) == 0)  in watchdog_smp_panic()
     384  if (cpumask_weight(&wd_cpus_enabled) == 1) {  in start_watchdog()
|
/OK3568_Linux_fs/kernel/arch/mips/kernel/
  crash.c
      75  while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {  in crash_kexec_prepare_cpus()
|
/OK3568_Linux_fs/kernel/drivers/infiniband/sw/siw/
  siw_main.c
      97  if (cpu % cpumask_weight(topology_sibling_cpumask(cpu)))  in siw_create_tx_threads()
     197  num_cpus = cpumask_weight(tx_cpumask);  in siw_get_tx_cpu()
     201  num_cpus = cpumask_weight(tx_cpumask);  in siw_get_tx_cpu()
|
/OK3568_Linux_fs/kernel/arch/ia64/include/asm/
  acpi.h
      88  low_cpu = cpumask_weight(&early_cpu_possible_map);  in per_cpu_scan_finalize()
|
/OK3568_Linux_fs/kernel/arch/ia64/kernel/
  setup.c
     579  per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?  in setup_arch()
     580  32 : cpumask_weight(&early_cpu_possible_map)),  in setup_arch()
     724  cpumask_weight(&cpu_core_map[cpunum]));  in show_cpuinfo()
|
/OK3568_Linux_fs/kernel/arch/x86/kernel/cpu/
  proc.c
      23  cpumask_weight(topology_core_cpumask(cpu)));  in show_cpuinfo_core()
|
/OK3568_Linux_fs/kernel/arch/x86/kernel/cpu/resctrl/
  rdtgroup.c
     341  if (cpumask_weight(tmpmask)) {  in cpus_mon_write()
     348  if (cpumask_weight(tmpmask)) {  in cpus_mon_write()
     359  if (cpumask_weight(tmpmask)) {  in cpus_mon_write()
     394  if (cpumask_weight(tmpmask)) {  in cpus_ctrl_write()
     413  if (cpumask_weight(tmpmask)) {  in cpus_ctrl_write()
     418  if (cpumask_weight(tmpmask1))  in cpus_ctrl_write()
     488  if (cpumask_weight(tmpmask)) {  in rdtgroup_cpus_write()
|
/OK3568_Linux_fs/kernel/drivers/net/wireguard/
  queueing.h
     113  cpu_index = id % cpumask_weight(cpu_online_mask);  in wg_cpumask_choose_online()
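
Note: the wg_cpumask_choose_online() hit shows a recurring idiom in this list:
reduce an id modulo cpumask_weight(cpu_online_mask), then walk the mask to
that bit, so work is spread evenly over whichever CPUs happen to be online
(padata_cpu_hash() further down does the same with pd->cpumask.pcpu). A
hedged sketch of the idiom, using a made-up helper name rather than the
WireGuard code itself:

    /* Sketch only: choose_online_cpu() is hypothetical, not from this tree. */
    #include <linux/cpumask.h>

    static unsigned int choose_online_cpu(unsigned int id)
    {
            unsigned int cpu, index, i = 0;

            /* At least one CPU is always online, so the modulo is safe. */
            index = id % cpumask_weight(cpu_online_mask);

            /* Return the index-th set bit of the online mask. */
            for_each_cpu(cpu, cpu_online_mask) {
                    if (i++ == index)
                            return cpu;
            }
            return cpumask_first(cpu_online_mask);
    }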
|
/OK3568_Linux_fs/kernel/arch/s390/kernel/
  processor.c
     163  seq_printf(m, "siblings : %d\n", cpumask_weight(topology_core_cpumask(n)));  in show_cpu_topology()
|
/OK3568_Linux_fs/kernel/kernel/
  padata.c
      80  int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);  in padata_cpu_hash()
     197  if (!cpumask_weight(pd->cpumask.cbcpu))  in padata_do_parallel()
     201  cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);  in padata_do_parallel()
|
/OK3568_Linux_fs/kernel/arch/alpha/kernel/
  process.c
     128  while (cpumask_weight(cpu_present_mask))  in common_shutdown_1()
|
/OK3568_Linux_fs/kernel/arch/x86/kernel/
  tsc_sync.c
     343  return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;  in loop_timeout()
|
  smpboot.c
     605  threads = cpumask_weight(topology_sibling_cpumask(cpu));  in set_cpu_sibling_map()
    1529  int threads = cpumask_weight(topology_sibling_cpumask(cpu));  in recompute_smt_state()
    1547  if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)  in remove_siblinginfo()
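
Note: several hits above (hfi1 affinity.c, siw_main.c, proc.c, tsc_sync.c and
these smpboot.c lines) take the weight of topology_sibling_cpumask(cpu), i.e.
the number of hardware threads sharing cpu's core; a weight of 1 means the
core has no SMT siblings. A hedged sketch of that test, with a hypothetical
helper name:

    /* Sketch only: cpu_has_smt_sibling() is hypothetical, not from this tree. */
    #include <linux/cpumask.h>
    #include <linux/topology.h>

    static bool cpu_has_smt_sibling(unsigned int cpu)
    {
            /* topology_sibling_cpumask() contains every hardware thread on
             * cpu's core, including cpu itself. */
            return cpumask_weight(topology_sibling_cpumask(cpu)) > 1;
    }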
|
/OK3568_Linux_fs/kernel/drivers/firmware/psci/
  psci_checker.c
      93  if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {  in down_and_up_cpus()
|