Lines matching refs: cpu (identifier references in the kernel's drivers/base/arch_topology.c; the line numbers appear to match a v5.9-era tree. Each hit ends with the enclosing function, and "argument"/"local" note how cpu is bound there.)
66 void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) in topology_set_cpu_scale() argument
68 per_cpu(cpu_scale, cpu) = capacity; in topology_set_cpu_scale()
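
These two hits are the entire setter: cpu_scale is a per-CPU variable, and the matching getter lives in include/linux/arch_topology.h. A minimal sketch of the pair, mirroring how this era declares them (the definition line itself has no standalone cpu identifier, so it is not a hit):

    DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

    /* include/linux/arch_topology.h: plain per-CPU read of the scale */
    static inline unsigned long topology_get_cpu_scale(int cpu)
    {
            return per_cpu(cpu_scale, cpu);
    }
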
77 int cpu; in topology_set_thermal_pressure() local
79 for_each_cpu(cpu, cpus) in topology_set_thermal_pressure()
80 WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); in topology_set_thermal_pressure()
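
topology_set_thermal_pressure() publishes the value with WRITE_ONCE() because the scheduler samples it locklessly during load balancing. A hedged sketch of the reader side, following the include/linux/arch_topology.h pattern of this era (the WRITE_ONCE() above guards against store tearing; the reader is a plain per-CPU load):

    DECLARE_PER_CPU(unsigned long, thermal_pressure);

    /* Lockless reader paired with the WRITE_ONCE() writer above. */
    static inline unsigned long topology_get_thermal_pressure(int cpu)
    {
            return per_cpu(thermal_pressure, cpu);
    }
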
88 struct cpu *cpu = container_of(dev, struct cpu, dev); in cpu_capacity_show() local
90 return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); in cpu_capacity_show()
101 struct device *cpu; in register_cpu_capacity_sysctl() local
104 cpu = get_cpu_device(i); in register_cpu_capacity_sysctl()
105 if (!cpu) { in register_cpu_capacity_sysctl()
110 device_create_file(cpu, &dev_attr_cpu_capacity); in register_cpu_capacity_sysctl()
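
cpu_capacity_show() above is the read callback behind /sys/devices/system/cpu/cpuN/cpu_capacity, and register_cpu_capacity_sysctl() attaches it to every CPU device at boot. A condensed sketch of the wiring, assuming the usual DEVICE_ATTR_RO() binding this file uses:

    static DEVICE_ATTR_RO(cpu_capacity);    /* binds cpu_capacity_show() by name */

    static int register_cpu_capacity_sysctl(void)
    {
            int i;
            struct device *cpu;

            for_each_possible_cpu(i) {
                    cpu = get_cpu_device(i);
                    if (!cpu) {
                            pr_err("%s: too early to get CPU%d device!\n",
                                   __func__, i);
                            continue;
                    }
                    device_create_file(cpu, &dev_attr_cpu_capacity);
            }

            return 0;
    }
    subsys_initcall(register_cpu_capacity_sysctl);
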
155 int cpu; in topology_normalize_cpu_scale() local
161 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
162 capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); in topology_normalize_cpu_scale()
167 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
168 capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); in topology_normalize_cpu_scale()
171 topology_set_cpu_scale(cpu, capacity); in topology_normalize_cpu_scale()
173 cpu, topology_get_cpu_scale(cpu)); in topology_normalize_cpu_scale()
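
Lines 161-173 are a two-pass normalization: pass one finds the largest raw_capacity * freq_factor product, pass two rescales every CPU against that maximum so the fastest CPU reads exactly SCHED_CAPACITY_SCALE (1024). A sketch of the arithmetic elided between the hits, assuming the v5.9-era body:

    u64 capacity, capacity_scale = 1;

    if (!raw_capacity)      /* nothing parsed from DT; keep defaults */
            return;

    for_each_possible_cpu(cpu) {
            capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
            capacity_scale = max(capacity, capacity_scale); /* pass 1: find max */
    }

    for_each_possible_cpu(cpu) {
            capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
            /* pass 2: scale into [0, 1024] */
            capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT, capacity_scale);
            topology_set_cpu_scale(cpu, capacity);
    }
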
177 bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) in topology_parse_cpu_capacity() argument
199 raw_capacity[cpu] = cpu_capacity; in topology_parse_cpu_capacity()
201 cpu_node, raw_capacity[cpu]); in topology_parse_cpu_capacity()
211 per_cpu(freq_factor, cpu) = in topology_parse_cpu_capacity()
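
The raw value stored at line 199 comes from the DT property "capacity-dmips-mhz"; the freq_factor assignment at line 211 folds in the CPU's clock rate (scaled down by 1000) so early-boot capacities already reflect frequency differences between clusters. Only the relative values across CPUs matter, since topology_normalize_cpu_scale() divides by the maximum. A hedged reconstruction of the surrounding body:

    u32 cpu_capacity;
    struct clk *cpu_clk;
    int ret;

    ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz", &cpu_capacity);
    if (!ret) {
            raw_capacity[cpu] = cpu_capacity;   /* raw_capacity allocation elided */

            /* Use the clk framework's rate when the DT describes a CPU clock. */
            cpu_clk = of_clk_get(cpu_node, 0);
            if (!PTR_ERR_OR_ZERO(cpu_clk)) {
                    per_cpu(freq_factor, cpu) = clk_get_rate(cpu_clk) / 1000;
                    clk_put(cpu_clk);
            }
    }
    return !ret;
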
239 int cpu; in init_cpu_capacity_callback() local
253 for_each_cpu(cpu, policy->related_cpus) in init_cpu_capacity_callback()
254 per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000; in init_cpu_capacity_callback()
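
init_cpu_capacity_callback() is a cpufreq policy notifier: once every possible CPU has a known policy, each CPU's freq_factor becomes policy->cpuinfo.max_freq / 1000 (kHz to MHz) and topology_normalize_cpu_scale() reruns with frequency-aware numbers. Worked example with made-up but representative values: a big core with capacity-dmips-mhz = 1024 at 2000 MHz yields 1024 * 2000 = 2048000, while a LITTLE core with 462 at 1500 MHz yields 462 * 1500 = 693000; after normalization the big core's cpu_scale is 1024 and the LITTLE core's is (693000 << 10) / 2048000 = 346.
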
323 int cpu; in get_cpu_for_node() local
329 cpu = of_cpu_node_to_id(cpu_node); in get_cpu_for_node()
330 if (cpu >= 0) in get_cpu_for_node()
331 topology_parse_cpu_capacity(cpu_node, cpu); in get_cpu_for_node()
337 return cpu; in get_cpu_for_node()
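
get_cpu_for_node() resolves a cpu-map node's "cpu" phandle to a logical CPU id; of_cpu_node_to_id() returns -ENODEV when the node exists but is not a possible CPU. A hedged sketch of the whole helper (the error print on the failure branch is elided):

    static int __init get_cpu_for_node(struct device_node *node)
    {
            struct device_node *cpu_node;
            int cpu;

            cpu_node = of_parse_phandle(node, "cpu", 0);
            if (!cpu_node)
                    return -1;

            cpu = of_cpu_node_to_id(cpu_node);
            if (cpu >= 0)
                    topology_parse_cpu_capacity(cpu_node, cpu);

            of_node_put(cpu_node);
            return cpu;
    }
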
346 int cpu; in parse_core() local
354 cpu = get_cpu_for_node(t); in parse_core()
355 if (cpu >= 0) { in parse_core()
356 cpu_topology[cpu].package_id = package_id; in parse_core()
357 cpu_topology[cpu].core_id = core_id; in parse_core()
358 cpu_topology[cpu].thread_id = i; in parse_core()
359 } else if (cpu != -ENODEV) { in parse_core()
369 cpu = get_cpu_for_node(core); in parse_core()
370 if (cpu >= 0) { in parse_core()
377 cpu_topology[cpu].package_id = package_id; in parse_core()
378 cpu_topology[cpu].core_id = core_id; in parse_core()
379 } else if (leaf && cpu != -ENODEV) { in parse_core()
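
parse_core() walks the DT cpu-map hierarchy: lines 354-358 handle explicit threadN children (SMT), lines 369-379 handle a leaf core with a direct cpu phandle, and -ENODEV is tolerated because cpu-map may reference CPUs the kernel is not bringing up. For reference, the shape of the binding being parsed (an illustrative DTS fragment shown as a comment; the labels are made up):

    /*
     *  cpu-map {
     *          cluster0 {                              // package_id 0
     *                  core0 { cpu = <&little0>; };    // leaf core
     *                  core1 { cpu = <&little1>; };
     *          };
     *          cluster1 {                              // package_id 1
     *                  core0 {                         // SMT: threads below
     *                          thread0 { cpu = <&big0_t0>; };
     *                          thread1 { cpu = <&big0_t1>; };
     *                  };
     *          };
     *  };
     */
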
459 int cpu; in parse_dt_topology() local
485 for_each_possible_cpu(cpu) in parse_dt_topology()
486 if (cpu_topology[cpu].package_id == -1) in parse_dt_topology()
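
After parsing, lines 485-486 scan for CPUs whose package_id is still -1: any hole means the cpu-map was incomplete, so parse_dt_topology() returns an error and the DT-described topology is discarded rather than used partially. Roughly, the tail these hits belong to:

    for_each_possible_cpu(cpu)
            if (cpu_topology[cpu].package_id == -1)
                    ret = -EINVAL;
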
503 const struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
505 const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); in cpu_coregroup_mask()
508 if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { in cpu_coregroup_mask()
510 core_mask = &cpu_topology[cpu].core_sibling; in cpu_coregroup_mask()
512 if (cpu_topology[cpu].llc_id != -1) { in cpu_coregroup_mask()
513 if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) in cpu_coregroup_mask()
514 core_mask = &cpu_topology[cpu].llc_sibling; in cpu_coregroup_mask()
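
cpu_coregroup_mask() starts from the NUMA node mask and narrows it: core_sibling wins if it fits inside the node, and llc_sibling wins if it is tighter still. The result feeds the scheduler's MC domain level; the generic table in kernel/sched/topology.c wires it up roughly like this (sketch):

    static struct sched_domain_topology_level default_topology[] = {
    #ifdef CONFIG_SCHED_SMT
            { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
    #endif
    #ifdef CONFIG_SCHED_MC
            { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
    #endif
            { cpu_cpu_mask, SD_INIT_NAME(DIE) },
            { NULL, },
    };
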
523 int cpu; in update_siblings_masks() local
526 for_each_online_cpu(cpu) { in update_siblings_masks()
527 cpu_topo = &cpu_topology[cpu]; in update_siblings_masks()
530 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); in update_siblings_masks()
538 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); in update_siblings_masks()
544 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); in update_siblings_masks()
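
Each test in update_siblings_masks() sets the masks symmetrically: the new CPU (cpuid) joins each matching sibling's mask and vice versa. The hits above only show one direction because the mirror calls index through cpu_topo/cpuid_topo rather than the identifier cpu. A condensed sketch of the loop body, assuming the v5.9-era field layout:

    for_each_online_cpu(cpu) {
            cpu_topo = &cpu_topology[cpu];

            if (cpuid_topo->llc_id == cpu_topo->llc_id) {
                    cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
                    cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
            }

            if (cpuid_topo->package_id != cpu_topo->package_id)
                    continue;

            cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
            cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

            if (cpuid_topo->core_id != cpu_topo->core_id)
                    continue;

            cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
            cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
    }
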
548 static void clear_cpu_topology(int cpu) in clear_cpu_topology() argument
550 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in clear_cpu_topology()
553 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); in clear_cpu_topology()
556 cpumask_set_cpu(cpu, &cpu_topo->core_sibling); in clear_cpu_topology()
558 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); in clear_cpu_topology()
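
"Clear" here means reset-to-self: each sibling mask is emptied and then repopulated with just the CPU itself, since a CPU is always its own sibling. The cpumask_clear() calls between these hits don't appear in the listing because they only reference cpu_topo, not cpu. Reconstructed body:

    static void clear_cpu_topology(int cpu)
    {
            struct cpu_topology *cpu_topo = &cpu_topology[cpu];

            cpumask_clear(&cpu_topo->llc_sibling);
            cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

            cpumask_clear(&cpu_topo->core_sibling);
            cpumask_set_cpu(cpu, &cpu_topo->core_sibling);

            cpumask_clear(&cpu_topo->thread_sibling);
            cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
    }
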
563 unsigned int cpu; in reset_cpu_topology() local
565 for_each_possible_cpu(cpu) { in reset_cpu_topology()
566 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in reset_cpu_topology()
573 clear_cpu_topology(cpu); in reset_cpu_topology()
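
reset_cpu_topology() wipes the identifiers too, not just the masks: the lines skipped between 566 and 573 set thread_id, core_id, package_id and llc_id to -1, the sentinel that parse_dt_topology() later checks at line 486. Roughly:

    for_each_possible_cpu(cpu) {
            struct cpu_topology *cpu_topo = &cpu_topology[cpu];

            cpu_topo->thread_id = -1;
            cpu_topo->core_id = -1;
            cpu_topo->package_id = -1;
            cpu_topo->llc_id = -1;

            clear_cpu_topology(cpu);
    }
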
577 void remove_cpu_topology(unsigned int cpu) in remove_cpu_topology() argument
581 for_each_cpu(sibling, topology_core_cpumask(cpu)) in remove_cpu_topology()
582 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); in remove_cpu_topology()
583 for_each_cpu(sibling, topology_sibling_cpumask(cpu)) in remove_cpu_topology()
584 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); in remove_cpu_topology()
585 for_each_cpu(sibling, topology_llc_cpumask(cpu)) in remove_cpu_topology()
586 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); in remove_cpu_topology()
588 clear_cpu_topology(cpu); in remove_cpu_topology()
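
remove_cpu_topology() is the hot-unplug inverse of update_siblings_masks(): the departing CPU is pruned from every sibling's core/thread/LLC mask first, then its own masks are reset to self-only. On arm64 this runs from the CPU-offline path; a heavily abridged sketch of that caller (the ordering and the elided steps are assumptions):

    /* arch/arm64/kernel/smp.c, abridged sketch */
    int __cpu_disable(void)
    {
            unsigned int cpu = smp_processor_id();

            /* ...platform-specific disable checks elided... */
            remove_cpu_topology(cpu);
            set_cpu_online(cpu, false);
            /* ...IRQ migration and cache maintenance elided... */
            return 0;
    }
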