SCHED_CAPACITY_SCALE   24 drivers/base/arch_topology.c DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE   38 drivers/base/arch_topology.c DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
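Note: both per-CPU factors above default to SCHED_CAPACITY_SCALE (1 << SCHED_CAPACITY_SHIFT == 1024), the scheduler's fixed-point "100%". Architecture code lowers cpu_scale for smaller CPUs and freq_scale when a CPU runs below its maximum frequency; the generic arch_scale_cpu_capacity() fallback in include/linux/sched/topology.h (two entries below) simply returns the full scale. A minimal userspace sketch of how two such base-1024 factors compose; the function name here is illustrative, not an in-tree helper:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)	/* 1024 */

static unsigned long effective_capacity(unsigned long cpu_scale,
					unsigned long freq_scale)
{
	/* Fixed-point multiply: (a * b) >> 10 keeps the 1024 == 100% base. */
	return (cpu_scale * freq_scale) >> SCHED_CAPACITY_SHIFT;
}

int main(void)
{
	/* A little CPU at ~46% capacity, currently running at half clock. */
	printf("%lu\n", effective_capacity(474, SCHED_CAPACITY_SCALE / 2)); /* 237 */
	return 0;
}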
SCHED_CAPACITY_SCALE  596 include/linux/sched.h 	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
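Note: bits_per(SCHED_CAPACITY_SCALE) sizes this bitfield (and the struct uclamp_bucket fields at kernel/sched/sched.h:814-815 below) so it can hold the value 1024 itself, not just 0..1023, which takes 11 bits. A standalone sketch of that width calculation; the kernel's bits_per() macro additionally special-cases 0 and 1:

#include <stdio.h>

int main(void)
{
	unsigned int n = 1024, bits = 0;

	while (n >> bits)	/* smallest width such that 1024 >> bits == 0 */
		bits++;
	printf("%u\n", bits);	/* 11: 10 bits cover 0..1023, 1024 needs one more */
	return 0;
}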
SCHED_CAPACITY_SCALE  224 include/linux/sched/topology.h 	return SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE  789 kernel/sched/core.c unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE  792 kernel/sched/core.c unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE  798 kernel/sched/core.c #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
SCHED_CAPACITY_SCALE  817 kernel/sched/core.c 	return SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE 1132 kernel/sched/core.c 	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE) {
SCHED_CAPACITY_SCALE 1181 kernel/sched/core.c 	if (upper_bound > SCHED_CAPACITY_SCALE)
SCHED_CAPACITY_SCALE 6664 kernel/sched/core.c 		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE 7249 kernel/sched/core.c 		.util = SCHED_CAPACITY_SCALE,
SCHED_CAPACITY_SCALE 7331 kernel/sched/core.c 	if (util_clamp == SCHED_CAPACITY_SCALE) {
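Note: the core.c entries are the utilization-clamping (uclamp) plumbing: both sysctl limits default to the full scale, requests above SCHED_CAPACITY_SCALE are rejected, and UCLAMP_BUCKET_DELTA carves the [0..SCHED_CAPACITY_SCALE] clamp range into UCLAMP_BUCKETS buckets. A sketch of the resulting bucket mapping, assuming the default CONFIG_UCLAMP_BUCKETS_COUNT of 5; the final capping mirrors the kernel's uclamp_bucket_id():

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL
#define UCLAMP_BUCKETS		5
/* DIV_ROUND_CLOSEST(1024, 5) == 205 */
#define UCLAMP_BUCKET_DELTA	((SCHED_CAPACITY_SCALE + UCLAMP_BUCKETS / 2) / UCLAMP_BUCKETS)

static unsigned int bucket_id(unsigned long clamp_value)
{
	unsigned long id = clamp_value / UCLAMP_BUCKET_DELTA;

	/* Values near SCHED_CAPACITY_SCALE must still land in the last bucket. */
	return id < UCLAMP_BUCKETS ? id : UCLAMP_BUCKETS - 1;
}

int main(void)
{
	printf("%u %u %u\n", bucket_id(0), bucket_id(512), bucket_id(1024));
	/* prints: 0 2 4 */
	return 0;
}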
SCHED_CAPACITY_SCALE   16 kernel/sched/cpufreq_schedutil.c #define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)
SCHED_CAPACITY_SCALE  366 kernel/sched/cpufreq_schedutil.c 			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
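Note: schedutil's iowait boost starts at IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8 == 128) and doubles on each consecutive iowait wakeup until it saturates at SCHED_CAPACITY_SCALE, which is what the min_t() on line 366 implements. A sketch of just that escalation; the kernel also decays the boost again when iowait wakeups stop:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024U
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)	/* 128 */

static unsigned int iowait_boost_step(unsigned int boost)
{
	unsigned int doubled = boost ? boost << 1 : IOWAIT_BOOST_MIN;

	return doubled < SCHED_CAPACITY_SCALE ? doubled : SCHED_CAPACITY_SCALE;
}

int main(void)
{
	unsigned int boost = 0;

	for (int i = 0; i < 5; i++) {
		boost = iowait_boost_step(boost);
		printf("%u\n", boost);	/* 128 256 512 1024 1024 */
	}
	return 0;
}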
SCHED_CAPACITY_SCALE 3773 kernel/sched/fair.c 	if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
SCHED_CAPACITY_SCALE 5628 kernel/sched/fair.c 		avg_load = (avg_load * SCHED_CAPACITY_SCALE) /
SCHED_CAPACITY_SCALE 5630 kernel/sched/fair.c 		runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /
SCHED_CAPACITY_SCALE 8097 kernel/sched/fair.c 	sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
SCHED_CAPACITY_SCALE 8392 kernel/sched/fair.c 		(busiest->load_per_task * SCHED_CAPACITY_SCALE) /
SCHED_CAPACITY_SCALE 8411 kernel/sched/fair.c 	capa_now /= SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE 8422 kernel/sched/fair.c 	    busiest->load_per_task * SCHED_CAPACITY_SCALE) {
SCHED_CAPACITY_SCALE 8426 kernel/sched/fair.c 		tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
SCHED_CAPACITY_SCALE 8431 kernel/sched/fair.c 	capa_move /= SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE 8479 kernel/sched/fair.c 		load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE 8501 kernel/sched/fair.c 	) / SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE 8564 kernel/sched/fair.c 	sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
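Note: the fair.c load-balancing entries all perform the same fixed-point normalization: group load is multiplied by SCHED_CAPACITY_SCALE and divided by group capacity, so an avg_load of 1024 reads as "load equal to capacity" regardless of how big or small the CPUs are. A worked sketch:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

static unsigned long group_avg_load(unsigned long group_load,
				    unsigned long group_capacity)
{
	return (group_load * SCHED_CAPACITY_SCALE) / group_capacity;
}

int main(void)
{
	/* 2048 units of load on two full-capacity CPUs (capacity 2048): */
	printf("%lu\n", group_avg_load(2048, 2048));	/* 1024: exactly busy */
	/* The same load on two little CPUs of capacity 474 each (948 total): */
	printf("%lu\n", group_avg_load(2048, 948));	/* 2212: overloaded */
	return 0;
}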
SCHED_CAPACITY_SCALE  814 kernel/sched/sched.h 	unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
SCHED_CAPACITY_SCALE  815 kernel/sched/sched.h 	unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
SCHED_CAPACITY_SCALE 1990 kernel/sched/sched.h 	return SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE 2397 kernel/sched/sched.h 	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
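Note: cpu_bw_dl() on line 2397 converts the deadline class's running_bw, which carries BW_SHIFT (20) fractional bits, into capacity units by multiplying by SCHED_CAPACITY_SCALE and then shifting the fractional bits back out. A sketch with a 25% deadline reservation:

#include <stdio.h>

#define BW_SHIFT		20
#define BW_UNIT			(1ULL << BW_SHIFT)
#define SCHED_CAPACITY_SCALE	1024ULL

int main(void)
{
	/* Runtime 25ms every 100ms period: bandwidth == 0.25 in BW_SHIFT fixed point. */
	unsigned long long running_bw = BW_UNIT / 4;

	printf("%llu\n", (running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT);
	/* prints 256, i.e. one quarter of SCHED_CAPACITY_SCALE */
	return 0;
}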
SCHED_CAPACITY_SCALE   87 kernel/sched/topology.c 		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
SCHED_CAPACITY_SCALE  921 kernel/sched/topology.c 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
SCHED_CAPACITY_SCALE  922 kernel/sched/topology.c 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE  923 kernel/sched/topology.c 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE 1087 kernel/sched/topology.c 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
SCHED_CAPACITY_SCALE 1088 kernel/sched/topology.c 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
SCHED_CAPACITY_SCALE 1089 kernel/sched/topology.c 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
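Note: when sched domains are built, each group's capacity is seeded optimistically at SCHED_CAPACITY_SCALE per spanned CPU, with the per-CPU min/max capacities at the full scale, so a freshly built 4-CPU group starts at 4 * 1024 == 4096. update_group_capacity() later overwrites these defaults with measured values, which is why the debug check at topology.c:87 only prints a group capacity that deviates from SCHED_CAPACITY_SCALE.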