Lines Matching refs:sgc (references to the struct sched_group_capacity pointer, sdd->sgc / sg->sgc, in kernel/sched/topology.c)
98 group->sgc->id, in sched_domain_debug_one()
107 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) in sched_domain_debug_one()
108 printk(KERN_CONT " cap=%lu", group->sgc->capacity); in sched_domain_debug_one()
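The two hits above come from the sched_domain_debug_one() dump: the group's sgc->id is always printed, while the capacity is printed only when it deviates from the default SCHED_CAPACITY_SCALE (1024, i.e. one fully available CPU). A minimal userspace sketch of that conditional-print idiom; toy_sgc and debug_one are hypothetical stand-ins, not the kernel's types:

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024UL  /* kernel default: one full CPU */

    struct toy_sgc { int id; unsigned long capacity; };  /* illustrative stand-in */

    static void debug_one(const struct toy_sgc *sgc)
    {
        printf(" %d:{", sgc->id);
        /* Print capacity only when it differs from the default scale. */
        if (sgc->capacity != SCHED_CAPACITY_SCALE)
            printf(" cap=%lu", sgc->capacity);
        printf(" }");
    }

    int main(void)
    {
        struct toy_sgc full = { 0, 1024 }, little = { 1, 446 };
        debug_one(&full);   /* prints " 0:{ }"          */
        debug_one(&little); /* prints " 1:{ cap=446 }"  */
        printf("\n");
        return 0;
    }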
574 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) in free_sched_groups()
575 kfree(sg->sgc); in free_sched_groups()
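free_sched_groups() drops one reference per group and frees the shared sgc only when atomic_dec_and_test() reports the count reached zero, so an sgc shared by several sched_groups is freed exactly once. A userspace analogue of that put-side pattern using C11 atomics (toy_sgc and toy_sgc_put are hypothetical names):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct toy_sgc { atomic_int ref; /* ... capacity fields ... */ };

    /* Drop one reference; free on the last one, like
     * atomic_dec_and_test(&sg->sgc->ref) in free_sched_groups(). */
    static void toy_sgc_put(struct toy_sgc *sgc)
    {
        /* fetch_sub returns the previous value: 1 means the count is
         * now zero, i.e. we held the last reference. */
        if (atomic_fetch_sub(&sgc->ref, 1) == 1)
            free(sgc);
    }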
914 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
915 if (atomic_inc_return(&sg->sgc->ref) == 1) in init_overlap_sched_group()
926 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); in init_overlap_sched_group()
927 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
928 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
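For overlapping (NUMA) topologies, each group points at shared per-CPU sgc storage, and atomic_inc_return(...) == 1 identifies the first group to claim a given sgc; that first claimant performs one-time setup (in the kernel, copying the balance mask). The capacity fields are then seeded with a safe estimate of span-weight CPUs at full scale, to be refined at runtime by update_group_capacity(). A sketch under those assumptions (attach_shared_sgc is a hypothetical name):

    #include <stdatomic.h>

    #define SCHED_CAPACITY_SCALE 1024UL

    struct toy_sgc {
        atomic_int ref;
        unsigned long capacity, min_capacity, max_capacity;
    };

    /* First-reference detection, mirroring the
     * atomic_inc_return(&sg->sgc->ref) == 1 test above. */
    static void attach_shared_sgc(struct toy_sgc *sgc, unsigned int span_weight)
    {
        if (atomic_fetch_add(&sgc->ref, 1) == 0) {
            /* first user: one-time setup (the kernel copies the
             * group balance mask here) */
        }
        /* Safe initial estimate: span_weight CPUs at full capacity;
         * refined later by the load-balancer's capacity updates. */
        sgc->capacity = SCHED_CAPACITY_SCALE * span_weight;
        sgc->min_capacity = SCHED_CAPACITY_SCALE;
        sgc->max_capacity = SCHED_CAPACITY_SCALE;
    }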
1073 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1078 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); in get_group()
1092 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); in get_group()
1093 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in get_group()
1094 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in get_group()
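get_group() follows the same claim-the-per-CPU-slot pattern, and the WARN_ON hit checks an invariant: sg and its sgc live in parallel per-CPU slots, so a repeat visit observed through one refcount must also be observed through the other. An assert()-based userspace analogue (take_paired_refs is a hypothetical name):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_obj { atomic_int ref; };

    /* Take a reference on two objects that should always be visited
     * together, checking (as the WARN_ON above does) that the
     * "seen before" verdicts agree. */
    static void take_paired_refs(struct toy_obj *sg, struct toy_obj *sgc)
    {
        bool already_visited = atomic_fetch_add(&sgc->ref, 1) > 0;

        assert(already_visited == (atomic_fetch_add(&sg->ref, 1) > 0));
    }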
1276 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) in claim_allocations()
1277 *per_cpu_ptr(sdd->sgc, cpu) = NULL; in claim_allocations()
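claim_allocations() implements an ownership transfer: every slot pre-allocated in __sdt_alloc() whose object picked up at least one reference during domain construction now belongs to the built topology, so the per-CPU slot is NULLed and the generic teardown path will skip it (kfree(NULL) is a no-op). A compact sketch of that claim idiom (claim_slot is a hypothetical name):

    #include <stdatomic.h>
    #include <stddef.h>

    struct toy_sgc { atomic_int ref; };

    /* If the object gained a reference during construction, the new
     * owner keeps it: clear the staging slot so generic teardown
     * will not free it out from under the topology. */
    static void claim_slot(struct toy_sgc **slot)
    {
        if (atomic_load(&(*slot)->ref))
            *slot = NULL;
    }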
1757 sdd->sgc = alloc_percpu(struct sched_group_capacity *); in __sdt_alloc()
1758 if (!sdd->sgc) in __sdt_alloc()
1765 struct sched_group_capacity *sgc; in __sdt_alloc() local
1790 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), in __sdt_alloc()
1792 if (!sgc) in __sdt_alloc()
1796 sgc->id = j; in __sdt_alloc()
1799 *per_cpu_ptr(sdd->sgc, j) = sgc; in __sdt_alloc()
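__sdt_alloc() builds a per-CPU array of sched_group_capacity pointers, then populates each slot with a zeroed, node-local allocation that is oversized by cpumask_size(): struct sched_group_capacity ends in a flexible cpumask[] member, so the group balance mask lives directly behind the struct in a single allocation. The sgc->id assignment is a debug aid (in the kernel it sits under CONFIG_SCHED_DEBUG). A userspace sketch of the oversized-allocation pattern (toy_sgc_alloc is a hypothetical name):

    #include <stdlib.h>

    /* Fixed header plus variable-size trailing bitmap, mirroring
     * kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), ...)
     * above; the struct's flexible cpumask[] member overlays the extra bytes. */
    struct toy_sgc {
        int id;
        unsigned long capacity;
        unsigned long cpumask[];    /* storage appended at alloc time */
    };

    static struct toy_sgc *toy_sgc_alloc(size_t mask_bytes, int id)
    {
        struct toy_sgc *sgc = calloc(1, sizeof(*sgc) + mask_bytes);

        if (!sgc)
            return NULL;    /* __sdt_alloc() bails to its error path here */
        sgc->id = id;       /* debug identifier, as at the hit above */
        return sgc;
    }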
1828 if (sdd->sgc) in __sdt_free()
1829 kfree(*per_cpu_ptr(sdd->sgc, j)); in __sdt_free()
1837 free_percpu(sdd->sgc); in __sdt_free()
1838 sdd->sgc = NULL; in __sdt_free()
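__sdt_free() mirrors the allocation: free each per-CPU object (slots claimed earlier were NULLed, and kfree(NULL) is a no-op), release the per-CPU pointer array itself, then clear sdd->sgc so the teardown is safe to repeat. A userspace analogue of that mirrored teardown (toy_sdt_free is a hypothetical name):

    #include <stdlib.h>

    struct toy_sgc { int id; };

    /* Teardown mirrors allocation: free every slot, then the slot
     * array, then NULL the handle so a repeated call is a no-op. */
    static void toy_sdt_free(struct toy_sgc ***table, int ncpus)
    {
        if (*table) {
            for (int j = 0; j < ncpus; j++)
                free((*table)[j]);  /* free(NULL) is fine for claimed slots */
            free(*table);
            *table = NULL;
        }
    }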