Lines matching refs:sgc (all references to sgc, the per-group struct sched_group_capacity pointer, in the scheduler's domain/group topology code)
5718 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { in sched_domain_debug_one()
5720 group->sgc->capacity); in sched_domain_debug_one()
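
The two hits at 5718 and 5720 bracket a debug printout in sched_domain_debug_one(): a group's capacity is only worth reporting when it differs from the default SCHED_CAPACITY_SCALE (1024, one CPU at full capacity). A minimal sketch of that context; the printk body is an assumption, since the intervening line contains no "sgc" and so does not appear in the match list:

	if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
		printk(KERN_CONT " (cpu_capacity = %lu)",	/* format assumed */
				group->sgc->capacity);
	}
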
5945 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) in free_sched_groups()
5946 kfree(sg->sgc); in free_sched_groups()
5964 kfree(sd->groups->sgc); in free_sched_domain()
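
Both free paths at 5945-5946 and 5964 show the ownership rule: an sgc can be shared by several groups, so it is reference-counted and kfree()d only when the last reference drops. A hedged sketch of free_sched_groups() around the matched lines (the circular-list walk is an assumption):

	static void free_sched_groups(struct sched_group *sg, int free_sgc)
	{
		struct sched_group *tmp, *first;

		if (!sg)
			return;

		first = sg;
		do {
			tmp = sg->next;	/* groups form a circular list */

			/* only the last reference actually frees the shared sgc */
			if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
				kfree(sg->sgc);

			kfree(sg);
			sg = tmp;
		} while (sg != first);
	}
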
6171 sg->sgc = *per_cpu_ptr(sdd->sgc, i); in build_overlap_sched_groups()
6172 if (atomic_inc_return(&sg->sgc->ref) == 1) in build_overlap_sched_groups()
6180 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); in build_overlap_sched_groups()
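
On the overlap path (6171-6180), the first group to take a reference on a per-cpu sgc slot performs its one-time setup, and capacity starts as a first-order estimate of one full SCHED_CAPACITY_SCALE unit per CPU in the span; the regular capacity-update machinery refines it later. A sketch, with the first-user body assumed:

	sg->sgc = *per_cpu_ptr(sdd->sgc, i);
	if (atomic_inc_return(&sg->sgc->ref) == 1)	/* first user? */
		build_group_mask(sd, sg);		/* assumed one-time setup */

	/*
	 * Rough initial estimate: every CPU in the span contributes one
	 * full capacity unit; later updates replace this with real values.
	 */
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
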
6218 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
6219 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */ in get_group()
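
On the regular (non-overlapping) path, get_group() at 6218-6219 wires each group to its pre-allocated per-cpu sgc and pins an initial reference, which is exactly what claim_allocations() later tests to decide ownership. A sketch; the surrounding NULL check and the sdd->sg lookup are assumptions:

	if (sg) {
		*sg = *per_cpu_ptr(sdd->sg, cpu);
		(*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);

		/* non-zero ref marks the slot as claimed for claim_allocations() */
		atomic_set(&(*sg)->sgc->ref, 1);
	}
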
6306 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight); in init_sched_groups_capacity()
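
The hit at 6306 seeds NOHZ bookkeeping: nr_busy_cpus starts out equal to the group's weight (every CPU presumed busy) and is decremented as CPUs go idle, which the nohz balance-kick logic consults. A sketch; the group_weight computation alongside it is an assumption:

	sg->group_weight = cpumask_weight(sched_group_cpus(sg));
	/* start pessimistic: count every CPU in the group as busy */
	atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
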
6397 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) in claim_allocations()
6398 *per_cpu_ptr(sdd->sgc, cpu) = NULL; in claim_allocations()
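
claim_allocations() at 6397-6398 is the handshake that prevents double-frees: a raised refcount means some sched_group took ownership of the slot (via get_group() or the overlap path), so the per-cpu pointer is NULLed and __sdt_free() will not free it again. The matched lines with the reasoning spelled out as comments:

	/* a live ref means a sched_group owns this sgc now */
	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
		*per_cpu_ptr(sdd->sgc, cpu) = NULL;	/* __sdt_free() sees NULL */
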
6863 sdd->sgc = alloc_percpu(struct sched_group_capacity *); in __sdt_alloc()
6864 if (!sdd->sgc) in __sdt_alloc()
6870 struct sched_group_capacity *sgc; in __sdt_alloc() local
6888 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), in __sdt_alloc()
6890 if (!sgc) in __sdt_alloc()
6893 *per_cpu_ptr(sdd->sgc, j) = sgc; in __sdt_alloc()
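
Allocation in __sdt_alloc() (6863-6893) follows the usual two-level pattern: one percpu array of sched_group_capacity pointers per topology level, then a node-local object for each CPU in the map. The "+ cpumask_size()" pays for a cpumask stored directly after the struct. A sketch stitched from the hits; the GFP flags, node argument, and error handling do not appear in the match list and are assumptions:

	sdd->sgc = alloc_percpu(struct sched_group_capacity *);
	if (!sdd->sgc)
		return -ENOMEM;			/* error path assumed */

	for_each_cpu(j, cpu_map) {
		struct sched_group_capacity *sgc;

		/* trailing cpumask storage follows the struct itself */
		sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
				GFP_KERNEL, cpu_to_node(j));	/* args assumed */
		if (!sgc)
			return -ENOMEM;

		*per_cpu_ptr(sdd->sgc, j) = sgc;
	}
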
6920 if (sdd->sgc) in __sdt_free()
6921 kfree(*per_cpu_ptr(sdd->sgc, j)); in __sdt_free()
6927 free_percpu(sdd->sgc); in __sdt_free()
6928 sdd->sgc = NULL; in __sdt_free()
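
Teardown in __sdt_free() (6920-6928) mirrors the allocation. Slots that claim_allocations() NULLed now belong to sched groups and are freed through free_sched_groups() instead; for the rest, kfree() runs here (kfree(NULL) is a harmless no-op), and finally the percpu array itself goes away. A sketch, with the per-CPU loop assumed:

	for_each_cpu(j, cpu_map) {
		if (sdd->sgc)
			kfree(*per_cpu_ptr(sdd->sgc, j));	/* NULL if claimed */
	}
	free_percpu(sdd->sgc);
	sdd->sgc = NULL;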