Lines Matching refs:busiest
Cross-reference hits for the symbol busiest in the kernel's CFS load balancer (kernel/sched/fair.c): each entry gives the source line number, the matched line, and the enclosing function.
5961 struct sched_group *busiest; /* Busiest group in this sd */ member
5980 .busiest = NULL, in init_sd_lb_stats()
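
The first two hits are the bookkeeping: struct sd_lb_stats carries a pointer to the busiest group plus a snapshot of its statistics, and init_sd_lb_stats() clears both before each balancing pass. A minimal userspace sketch, with the field sets trimmed to what these hits show (the real structs carry much more):

    #include <stddef.h>

    struct sched_group;                     /* opaque in this sketch */

    struct sg_lb_stats {
            unsigned long avg_load;         /* group load per unit of capacity */
            unsigned int  sum_nr_running;   /* runnable tasks in the group */
    };

    struct sd_lb_stats {
            struct sched_group *busiest;    /* Busiest group in this sd */
            struct sg_lb_stats  busiest_stat;
    };

    static void init_sd_lb_stats(struct sd_lb_stats *sds)
    {
            sds->busiest = NULL;                            /* no candidate yet */
            sds->busiest_stat = (struct sg_lb_stats){ 0 };  /* zeroed snapshot */
    }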
6327 struct sg_lb_stats *busiest = &sds->busiest_stat; in update_sd_pick_busiest() local
6329 if (sgs->group_type > busiest->group_type) in update_sd_pick_busiest()
6332 if (sgs->group_type < busiest->group_type) in update_sd_pick_busiest()
6335 if (sgs->avg_load <= busiest->avg_load) in update_sd_pick_busiest()
6348 if (!sds->busiest) in update_sd_pick_busiest()
6351 if (group_first_cpu(sds->busiest) > group_first_cpu(sg)) in update_sd_pick_busiest()
6444 sds->busiest = sg; in update_sd_lb_stats()
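
update_sd_pick_busiest() (lines 6327-6351) decides whether a freshly scanned group displaces the current candidate, and update_sd_lb_stats() (line 6444) records the winner. A compilable sketch of the comparison under reduced types; the SD_ASYM_PACKING tie-break at lines 6348-6351 is omitted:

    #include <stdbool.h>

    enum group_type { group_other, group_imbalanced, group_overloaded };

    struct sg_lb_stats {
            enum group_type group_type;
            unsigned long   avg_load;
    };

    struct sd_lb_stats { struct sg_lb_stats busiest_stat; };

    /*
     * Does @sgs describe a busier group than the current candidate?
     * A worse group_type always wins; within the same type, strictly
     * higher avg_load wins, mirroring lines 6329-6335.
     */
    static bool update_sd_pick_busiest(struct sd_lb_stats *sds,
                                       struct sg_lb_stats *sgs)
    {
            struct sg_lb_stats *busiest = &sds->busiest_stat;

            if (sgs->group_type > busiest->group_type)
                    return true;
            if (sgs->group_type < busiest->group_type)
                    return false;
            if (sgs->avg_load <= busiest->avg_load)
                    return false;
            return true;
    }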
6497 if (!sds->busiest) in check_asym_packing()
6500 busiest_cpu = group_first_cpu(sds->busiest); in check_asym_packing()
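
check_asym_packing() (lines 6497-6500) handles SD_ASYM_PACKING domains, where work should be packed onto low-numbered CPUs. A sketch under simplified types: group_first_cpu() is stubbed, and the flag test is reduced to a plain field:

    #define SCHED_CAPACITY_SCALE 1024UL

    struct sched_group;
    struct sg_lb_stats { unsigned long avg_load, group_capacity; };
    struct sd_lb_stats {
            struct sched_group *busiest;
            struct sg_lb_stats busiest_stat;
    };
    struct lb_env { int dst_cpu; unsigned long imbalance; int asym_packing; };

    /* stand-in for the kernel helper: first CPU in the group's span */
    static int group_first_cpu(struct sched_group *sg) { (void)sg; return 0; }

    static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
    {
            if (!env->asym_packing || !sds->busiest)
                    return 0;
            if (env->dst_cpu > group_first_cpu(sds->busiest))
                    return 0;   /* destination is not the preferred, lower CPU */

            /* force a pull: report the busiest group's whole load */
            env->imbalance = sds->busiest_stat.avg_load *
                             sds->busiest_stat.group_capacity /
                             SCHED_CAPACITY_SCALE;
            return 1;
    }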
6524 struct sg_lb_stats *local, *busiest; in fix_small_imbalance() local
6527 busiest = &sds->busiest_stat; in fix_small_imbalance()
6531 else if (busiest->load_per_task > local->load_per_task) in fix_small_imbalance()
6535 (busiest->load_per_task * SCHED_CAPACITY_SCALE) / in fix_small_imbalance()
6536 busiest->group_capacity; in fix_small_imbalance()
6538 if (busiest->avg_load + scaled_busy_load_per_task >= in fix_small_imbalance()
6540 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
6550 capa_now += busiest->group_capacity * in fix_small_imbalance()
6551 min(busiest->load_per_task, busiest->avg_load); in fix_small_imbalance()
6557 if (busiest->avg_load > scaled_busy_load_per_task) { in fix_small_imbalance()
6558 capa_move += busiest->group_capacity * in fix_small_imbalance()
6559 min(busiest->load_per_task, in fix_small_imbalance()
6560 busiest->avg_load - scaled_busy_load_per_task); in fix_small_imbalance()
6564 if (busiest->avg_load * busiest->group_capacity < in fix_small_imbalance()
6565 busiest->load_per_task * SCHED_CAPACITY_SCALE) { in fix_small_imbalance()
6566 tmp = (busiest->avg_load * busiest->group_capacity) / in fix_small_imbalance()
6569 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) / in fix_small_imbalance()
6578 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
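
fix_small_imbalance() (lines 6524-6578) covers the case where the computed imbalance is smaller than one task's load: it scales the busiest group's per-task load by group capacity and decides whether moving a single task still pays off. A sketch that fixes the kernel's imbn factor at 2 and reduces the capa_now/capa_move estimate (lines 6550-6569) to a comment:

    #define SCHED_CAPACITY_SCALE 1024UL

    struct sg_lb_stats {
            unsigned long avg_load, load_per_task, group_capacity;
    };

    /*
     * Returns the imbalance to use instead of the too-small computed one.
     * Assumes group_capacity != 0.
     */
    static unsigned long fix_small_imbalance(struct sg_lb_stats *local,
                                             struct sg_lb_stats *busiest)
    {
            unsigned long scaled_busy_load_per_task =
                    busiest->load_per_task * SCHED_CAPACITY_SCALE /
                    busiest->group_capacity;

            /* Would moving one task leave the groups roughly as unbalanced
             * as now? Then just target one task's load (lines 6538-6540). */
            if (busiest->avg_load + scaled_busy_load_per_task >=
                local->avg_load + 2 * scaled_busy_load_per_task)
                    return busiest->load_per_task;

            /*
             * Otherwise the kernel estimates the capacity actually used
             * before (capa_now) and after (capa_move) moving one task,
             * and only targets load_per_task if capa_move > capa_now.
             */
            return 0;
    }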
6590 struct sg_lb_stats *local, *busiest; in calculate_imbalance() local
6593 busiest = &sds->busiest_stat; in calculate_imbalance()
6595 if (busiest->group_type == group_imbalanced) { in calculate_imbalance()
6600 busiest->load_per_task = in calculate_imbalance()
6601 min(busiest->load_per_task, sds->avg_load); in calculate_imbalance()
6609 if (busiest->avg_load <= sds->avg_load || in calculate_imbalance()
6618 if (busiest->group_type == group_overloaded && in calculate_imbalance()
6620 load_above_capacity = busiest->sum_nr_running * in calculate_imbalance()
6622 if (load_above_capacity > busiest->group_capacity) in calculate_imbalance()
6623 load_above_capacity -= busiest->group_capacity; in calculate_imbalance()
6636 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity); in calculate_imbalance()
6640 max_pull * busiest->group_capacity, in calculate_imbalance()
6650 if (env->imbalance < busiest->load_per_task) in calculate_imbalance()
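
calculate_imbalance() (lines 6590-6650) computes how much load to pull: never more than what brings the busiest group down to the domain average, and never more than what fills the local group up to it, with both sides capacity-scaled. A sketch that omits the group_imbalanced clamping (lines 6595-6601) and the load_above_capacity cap (lines 6618-6623):

    #define SCHED_CAPACITY_SCALE 1024UL

    struct sg_lb_stats { unsigned long avg_load, group_capacity, load_per_task; };

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    static unsigned long calculate_imbalance(struct sg_lb_stats *local,
                                             struct sg_lb_stats *busiest,
                                             unsigned long domain_avg_load)
    {
            unsigned long max_pull;

            /* Busiest already at or below average, or local at or above it:
             * only the small fixup can apply (lines 6609ff). */
            if (busiest->avg_load <= domain_avg_load ||
                local->avg_load >= domain_avg_load)
                    return 0;   /* kernel: fix_small_imbalance() */

            /* Don't pull the busiest group below the domain average. */
            max_pull = busiest->avg_load - domain_avg_load;

            /* ...and don't fill the local group above it either; both sides
             * are capacity-scaled (lines 6636-6642). The caller falls back
             * to fix_small_imbalance() if the result is below load_per_task
             * (line 6650). */
            return min_ul(max_pull * busiest->group_capacity,
                          (domain_avg_load - local->avg_load) *
                          local->group_capacity) / SCHED_CAPACITY_SCALE;
    }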
6675 struct sg_lb_stats *local, *busiest; in find_busiest_group() local
6686 busiest = &sds.busiest_stat; in find_busiest_group()
6691 return sds.busiest; in find_busiest_group()
6694 if (!sds.busiest || busiest->sum_nr_running == 0) in find_busiest_group()
6705 if (busiest->group_type == group_imbalanced) in find_busiest_group()
6710 busiest->group_no_capacity) in find_busiest_group()
6717 if (local->avg_load >= busiest->avg_load) in find_busiest_group()
6735 if ((busiest->group_type != group_overloaded) && in find_busiest_group()
6736 (local->idle_cpus <= (busiest->idle_cpus + 1))) in find_busiest_group()
6743 if (100 * busiest->avg_load <= in find_busiest_group()
6751 return sds.busiest; in find_busiest_group()
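
find_busiest_group() (lines 6675-6751) is the decision ladder around those statistics: the force_balance and out_balanced gotos of the real function map to early returns in this sketch, which reuses the simplified stats from above. The forced case for an idle destination facing a no-capacity busiest group (line 6710) is omitted:

    #include <stdbool.h>

    enum group_type { group_other, group_imbalanced, group_overloaded };

    struct sg_lb_stats {
            enum group_type group_type;
            unsigned long   avg_load;
            unsigned int    sum_nr_running, idle_cpus;
    };

    /* true: balance against the busiest group; false: out_balanced. */
    static bool should_balance(struct sg_lb_stats *local,
                               struct sg_lb_stats *busiest,
                               bool have_busiest, bool dst_idle,
                               unsigned int imbalance_pct)
    {
            if (!have_busiest || busiest->sum_nr_running == 0)
                    return false;           /* nothing to pull (line 6694) */

            if (busiest->group_type == group_imbalanced)
                    return true;            /* affinity broke balance (6705) */

            if (local->avg_load >= busiest->avg_load)
                    return false;           /* no better off here (6717) */

            if (dst_idle) {
                    /* Idle destination: need a real gap in idle CPUs
                     * (lines 6735-6736). */
                    if (busiest->group_type != group_overloaded &&
                        local->idle_cpus <= busiest->idle_cpus + 1)
                            return false;
            } else {
                    /* Busy destination: demand imbalance_pct percent of
                     * headroom (line 6743). */
                    if (100 * busiest->avg_load <=
                        imbalance_pct * local->avg_load)
                            return false;
            }
            return true;
    }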
6764 struct rq *busiest = NULL, *rq; in find_busiest_queue() local
6824 busiest = rq; in find_busiest_queue()
6828 return busiest; in find_busiest_queue()
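
find_busiest_queue() (lines 6764-6828) then picks a runqueue inside the chosen group, keeping the one with the highest load relative to its capacity, so a loaded slow CPU can outrank a lightly loaded fast one. A self-contained sketch with a trimmed runqueue type; like the kernel, it cross-multiplies instead of dividing:

    #include <stddef.h>

    struct rq { unsigned long load, capacity; };

    static struct rq *find_busiest_queue(struct rq *rqs, int nr_cpus)
    {
            struct rq *busiest = NULL;
            unsigned long busiest_load = 0, busiest_capacity = 1;

            for (int i = 0; i < nr_cpus; i++) {
                    struct rq *rq = &rqs[i];

                    /* rq->load / rq->capacity > busiest_load / busiest_capacity,
                     * compared without division */
                    if (rq->load * busiest_capacity >
                        busiest_load * rq->capacity) {
                            busiest_load = rq->load;
                            busiest_capacity = rq->capacity;
                            busiest = rq;
                    }
            }
            return busiest;
    }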
6918 struct rq *busiest; in load_balance() local
6957 busiest = find_busiest_queue(&env, group); in load_balance()
6958 if (!busiest) { in load_balance()
6963 BUG_ON(busiest == env.dst_rq); in load_balance()
6967 env.src_cpu = busiest->cpu; in load_balance()
6968 env.src_rq = busiest; in load_balance()
6971 if (busiest->nr_running > 1) { in load_balance()
6979 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); in load_balance()
6982 raw_spin_lock_irqsave(&busiest->lock, flags); in load_balance()
6998 raw_spin_unlock(&busiest->lock); in load_balance()
7061 cpumask_clear_cpu(cpu_of(busiest), cpus); in load_balance()
7083 raw_spin_lock_irqsave(&busiest->lock, flags); in load_balance()
7090 tsk_cpus_allowed(busiest->curr))) { in load_balance()
7091 raw_spin_unlock_irqrestore(&busiest->lock, in load_balance()
7102 if (!busiest->active_balance) { in load_balance()
7103 busiest->active_balance = 1; in load_balance()
7104 busiest->push_cpu = this_cpu; in load_balance()
7107 raw_spin_unlock_irqrestore(&busiest->lock, flags); in load_balance()
7110 stop_one_cpu_nowait(cpu_of(busiest), in load_balance()
7111 active_load_balance_cpu_stop, busiest, in load_balance()
7112 &busiest->active_balance_work); in load_balance()
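
load_balance() (lines 6918-7112) ties it together: find the busiest group, find its busiest queue, pull tasks under busiest->lock, drop pinned CPUs from the candidate mask and retry (line 7061), and fall back to active balancing, where stop_one_cpu_nowait() makes the busiest CPU push its running task toward us (lines 7102-7112). A heavily stubbed sketch of that control flow; every *_stub() below is a stand-in for kernel machinery, not a kernel API:

    #include <stddef.h>

    struct rq { int cpu, nr_running, active_balance, push_cpu; };

    /* Stand-ins, assumed for this sketch only. */
    static struct rq *find_busiest_queue_stub(void) { return NULL; }
    static int detach_tasks_stub(struct rq *busiest) { (void)busiest; return 0; }
    static void stop_one_cpu_nowait_stub(int cpu) { (void)cpu; } /* stopper kick */

    static int load_balance_sketch(int this_cpu)
    {
            struct rq *busiest = find_busiest_queue_stub();
            int pulled = 0;

            if (!busiest)
                    return 0;                       /* lines 6958-6960 */

            if (busiest->nr_running > 1) {
                    /* Kernel: take busiest->lock, detach up to
                     * min(sysctl_sched_nr_migrate, nr_running) tasks,
                     * attach them here (lines 6971-6998). */
                    pulled = detach_tasks_stub(busiest);
            }

            if (!pulled && !busiest->active_balance) {
                    /* Nothing movable (e.g. the running task is pinned):
                     * ask the busiest CPU to push toward us instead
                     * (lines 7102-7112). */
                    busiest->active_balance = 1;
                    busiest->push_cpu = this_cpu;
                    stop_one_cpu_nowait_stub(busiest->cpu);
            }
            return pulled;
    }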