Lines Matching refs:busiest

6019 	struct sched_group *busiest;	/* Busiest group in this sd */  member
6038 .busiest = NULL, in init_sd_lb_stats()
6365 struct sg_lb_stats *busiest = &sds->busiest_stat; in update_sd_pick_busiest() local
6367 if (sgs->group_type > busiest->group_type) in update_sd_pick_busiest()
6370 if (sgs->group_type < busiest->group_type) in update_sd_pick_busiest()
6373 if (sgs->avg_load <= busiest->avg_load) in update_sd_pick_busiest()
6386 if (!sds->busiest) in update_sd_pick_busiest()
6389 if (group_first_cpu(sds->busiest) > group_first_cpu(sg)) in update_sd_pick_busiest()
6482 sds->busiest = sg; in update_sd_lb_stats()
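
The block above is the group-selection step: update_sd_pick_busiest() decides whether the group being scanned should replace the current sds->busiest, and update_sd_lb_stats() (line 6482) records the winner. A minimal standalone sketch of the ordering visible in those hits follows; the types and parameters are simplified stand-ins, not the kernel's own definitions.

#include <stdbool.h>

/* Simplified stand-in for the kernel's group_type ordering. */
enum group_type { group_other = 0, group_imbalanced, group_overloaded };

struct sg_stats {
	enum group_type group_type;
	unsigned long avg_load;
};

/*
 * Sketch of lines 6365-6389: a more severe group_type always wins, a less
 * severe one always loses, and within the same type the higher avg_load
 * wins.  Under SD_ASYM_PACKING a busier candidate only replaces the
 * current pick when its first CPU sits above the destination CPU and
 * below the current pick's first CPU (lines 6386-6389).
 */
static bool pick_busiest(const struct sg_stats *sgs, int sg_first_cpu,
			 const struct sg_stats *busiest, int busiest_first_cpu,
			 bool asym_packing, int dst_cpu)
{
	if (sgs->group_type > busiest->group_type)
		return true;
	if (sgs->group_type < busiest->group_type)
		return false;
	if (sgs->avg_load <= busiest->avg_load)
		return false;
	if (!asym_packing)
		return true;

	if (dst_cpu < sg_first_cpu) {
		if (busiest_first_cpu < 0)		/* nothing picked yet */
			return true;
		if (busiest_first_cpu > sg_first_cpu)
			return true;
	}
	return false;
}
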
6535 if (!sds->busiest) in check_asym_packing()
6538 busiest_cpu = group_first_cpu(sds->busiest); in check_asym_packing()
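
check_asym_packing() only has two hits here, but they show the gate: without a busiest group there is nothing to pack, and otherwise the busiest group's first CPU (line 6538) is compared against the destination CPU. The tiny helper below is not a kernel function; it just captures that gate under the assumption that packing proceeds only when the puller is numbered no higher than the busiest group's first CPU.

/*
 * Illustrative gate for lines 6535-6538: asymmetric packing only applies
 * when a busiest group exists and the pulling CPU is numbered no higher
 * than that group's first CPU, so load drains toward low-numbered CPUs.
 */
static int asym_packing_applies(int have_busiest, int busiest_first_cpu,
				int dst_cpu)
{
	if (!have_busiest)
		return 0;
	return dst_cpu <= busiest_first_cpu;
}
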
6562 struct sg_lb_stats *local, *busiest; in fix_small_imbalance() local
6565 busiest = &sds->busiest_stat; in fix_small_imbalance()
6569 else if (busiest->load_per_task > local->load_per_task) in fix_small_imbalance()
6573 (busiest->load_per_task * SCHED_CAPACITY_SCALE) / in fix_small_imbalance()
6574 busiest->group_capacity; in fix_small_imbalance()
6576 if (busiest->avg_load + scaled_busy_load_per_task >= in fix_small_imbalance()
6578 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
6588 capa_now += busiest->group_capacity * in fix_small_imbalance()
6589 min(busiest->load_per_task, busiest->avg_load); in fix_small_imbalance()
6595 if (busiest->avg_load > scaled_busy_load_per_task) { in fix_small_imbalance()
6596 capa_move += busiest->group_capacity * in fix_small_imbalance()
6597 min(busiest->load_per_task, in fix_small_imbalance()
6598 busiest->avg_load - scaled_busy_load_per_task); in fix_small_imbalance()
6602 if (busiest->avg_load * busiest->group_capacity < in fix_small_imbalance()
6603 busiest->load_per_task * SCHED_CAPACITY_SCALE) { in fix_small_imbalance()
6604 tmp = (busiest->avg_load * busiest->group_capacity) / in fix_small_imbalance()
6607 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) / in fix_small_imbalance()
6616 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
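
fix_small_imbalance() (lines 6562-6616) covers the case where the computed imbalance is smaller than one task's worth of load. The central step in these hits is the scaling at lines 6573-6574: the busiest group's per-task load is expressed relative to SCHED_CAPACITY_SCALE before any comparison with the local group. A self-contained illustration of just that scaling, with invented numbers:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

/* Express per-task load relative to one CPU's worth of capacity,
 * mirroring lines 6573-6574. */
static unsigned long scale_load_per_task(unsigned long load_per_task,
					 unsigned long group_capacity)
{
	return load_per_task * SCHED_CAPACITY_SCALE / group_capacity;
}

int main(void)
{
	/*
	 * Example (values are invented): a two-CPU group (capacity 2048)
	 * whose tasks carry load 600 each.  Scaled per-task load is
	 * 600 * 1024 / 2048 = 300, i.e. each task fills roughly 30% of
	 * one CPU.  Lines 6576-6578 then check whether simply moving one
	 * task's load is already the right answer.
	 */
	printf("%lu\n", scale_load_per_task(600, 2 * SCHED_CAPACITY_SCALE));
	return 0;
}
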
6628 struct sg_lb_stats *local, *busiest; in calculate_imbalance() local
6631 busiest = &sds->busiest_stat; in calculate_imbalance()
6633 if (busiest->group_type == group_imbalanced) { in calculate_imbalance()
6638 busiest->load_per_task = in calculate_imbalance()
6639 min(busiest->load_per_task, sds->avg_load); in calculate_imbalance()
6647 if (busiest->avg_load <= sds->avg_load || in calculate_imbalance()
6656 if (busiest->group_type == group_overloaded && in calculate_imbalance()
6658 load_above_capacity = busiest->sum_nr_running * in calculate_imbalance()
6660 if (load_above_capacity > busiest->group_capacity) in calculate_imbalance()
6661 load_above_capacity -= busiest->group_capacity; in calculate_imbalance()
6674 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity); in calculate_imbalance()
6678 max_pull * busiest->group_capacity, in calculate_imbalance()
6688 if (env->imbalance < busiest->load_per_task) in calculate_imbalance()
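
calculate_imbalance() (lines 6628-6688) turns the group statistics into env->imbalance. The fragments show the core bound at lines 6674-6678: the pull is limited both by how far the busiest group sits above the domain average (capped by load_above_capacity) and by how far the local group sits below it, each weighted by the respective group's capacity. A simplified rendering of that computation; the flattened parameter list is an assumption made to keep the sketch standalone.

#define SCHED_CAPACITY_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/*
 * Sketch of lines 6674-6678: take the smaller of (a) the busiest group's
 * excess over the domain average and (b) load_above_capacity, weight it
 * by the busiest group's capacity, cap it by the local group's shortfall
 * weighted by the local capacity, then scale back down.
 */
static unsigned long sketch_imbalance(unsigned long busiest_avg_load,
				      unsigned long busiest_capacity,
				      unsigned long local_avg_load,
				      unsigned long local_capacity,
				      unsigned long domain_avg_load,
				      unsigned long load_above_capacity)
{
	unsigned long max_pull;

	max_pull = min_ul(busiest_avg_load - domain_avg_load,
			  load_above_capacity);

	return min_ul(max_pull * busiest_capacity,
		      (domain_avg_load - local_avg_load) * local_capacity) /
	       SCHED_CAPACITY_SCALE;
}

If the result still ends up below busiest->load_per_task (line 6688), fix_small_imbalance() above takes over.
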
6713 struct sg_lb_stats *local, *busiest; in find_busiest_group() local
6724 busiest = &sds.busiest_stat; in find_busiest_group()
6729 return sds.busiest; in find_busiest_group()
6732 if (!sds.busiest || busiest->sum_nr_running == 0) in find_busiest_group()
6743 if (busiest->group_type == group_imbalanced) in find_busiest_group()
6748 busiest->group_no_capacity) in find_busiest_group()
6755 if (local->avg_load >= busiest->avg_load) in find_busiest_group()
6773 if ((busiest->group_type != group_overloaded) && in find_busiest_group()
6774 (local->idle_cpus <= (busiest->idle_cpus + 1))) in find_busiest_group()
6781 if (100 * busiest->avg_load <= in find_busiest_group()
6789 return sds.busiest; in find_busiest_group()
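
find_busiest_group() (lines 6713-6789) strings the preceding pieces together. From the hits alone, the decision chain is: bail out when there is no busiest group or it has nothing running (6732), force a balance for a group_imbalanced busiest (6743) or when the busiest group is out of capacity (6748), bail out when the local group is at least as loaded (6755), let an idle CPU skip a non-overloaded busiest group that is not clearly shorter on idle CPUs (6773-6774), and otherwise require the busiest group to exceed the local one by imbalance_pct (6781). The flattened sketch below compiles on its own; every parameter is a stand-in for the env or sg_lb_stats fields referenced above.

#include <stdbool.h>

/*
 * Flattened sketch of the find_busiest_group() decision chain; returns
 * true when a balance attempt should proceed.
 */
static bool should_balance(bool have_busiest, unsigned int busiest_nr_running,
			   bool busiest_imbalanced,
			   bool newly_idle, bool local_has_capacity,
			   bool busiest_no_capacity,
			   bool this_cpu_idle, bool busiest_overloaded,
			   unsigned int local_idle_cpus,
			   unsigned int busiest_idle_cpus,
			   unsigned long local_avg_load,
			   unsigned long busiest_avg_load,
			   unsigned int imbalance_pct)
{
	if (!have_busiest || busiest_nr_running == 0)	/* line 6732 */
		return false;

	if (busiest_imbalanced)				/* line 6743 */
		return true;

	/* line 6748: a newly idle CPU with spare capacity always helps a
	 * busiest group that has run out of capacity. */
	if (newly_idle && local_has_capacity && busiest_no_capacity)
		return true;

	if (local_avg_load >= busiest_avg_load)		/* line 6755 */
		return false;

	if (this_cpu_idle) {
		/* lines 6773-6774: an idle CPU leaves a merely-busy group
		 * alone unless it has clearly more idle CPUs itself. */
		if (!busiest_overloaded &&
		    local_idle_cpus <= busiest_idle_cpus + 1)
			return false;
	} else {
		/* line 6781: be conservative; e.g. imbalance_pct == 125
		 * demands the busiest group be at least 25% busier. */
		if (100 * busiest_avg_load <= imbalance_pct * local_avg_load)
			return false;
	}

	return true;
}
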
6802 struct rq *busiest = NULL, *rq; in find_busiest_queue() local
6862 busiest = rq; in find_busiest_queue()
6866 return busiest; in find_busiest_queue()
6956 struct rq *busiest; in load_balance() local
6995 busiest = find_busiest_queue(&env, group); in load_balance()
6996 if (!busiest) { in load_balance()
7001 BUG_ON(busiest == env.dst_rq); in load_balance()
7005 env.src_cpu = busiest->cpu; in load_balance()
7006 env.src_rq = busiest; in load_balance()
7009 if (busiest->nr_running > 1) { in load_balance()
7017 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); in load_balance()
7020 raw_spin_lock_irqsave(&busiest->lock, flags); in load_balance()
7036 raw_spin_unlock(&busiest->lock); in load_balance()
7099 cpumask_clear_cpu(cpu_of(busiest), cpus); in load_balance()
7121 raw_spin_lock_irqsave(&busiest->lock, flags); in load_balance()
7128 tsk_cpus_allowed(busiest->curr))) { in load_balance()
7129 raw_spin_unlock_irqrestore(&busiest->lock, in load_balance()
7140 if (!busiest->active_balance) { in load_balance()
7141 busiest->active_balance = 1; in load_balance()
7142 busiest->push_cpu = this_cpu; in load_balance()
7145 raw_spin_unlock_irqrestore(&busiest->lock, flags); in load_balance()
7148 stop_one_cpu_nowait(cpu_of(busiest), in load_balance()
7149 active_load_balance_cpu_stop, busiest, in load_balance()
7150 &busiest->active_balance_work); in load_balance()
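
load_balance() (line 6956 onwards) drives the whole pass: find_busiest_group(), then find_busiest_queue() within it, detaching and attaching tasks with busiest->lock held, plus two fallback paths visible in the hits. If every candidate task was pinned, the busiest CPU is dropped from the candidate mask and the search is redone (line 7099); if tasks could not be moved at all, the busiest CPU's stopper is kicked via stop_one_cpu_nowait() to push its running task toward this CPU (lines 7140-7150). The toy program below models only the redo path; the mask, the per-CPU counters and pick_busiest_cpu() are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Invented stand-in for find_busiest_queue(): highest nr_running among
 * CPUs still present in the candidate mask. */
static int pick_busiest_cpu(const bool *cpus, const int *nr_running)
{
	int cpu, busiest = -1;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpus[cpu])
			continue;
		if (busiest < 0 || nr_running[cpu] > nr_running[busiest])
			busiest = cpu;
	}
	return busiest;
}

int main(void)
{
	bool cpus[NR_CPUS] = { true, true, true, true, true, true, true, true };
	int nr_running[NR_CPUS] = { 0, 5, 1, 1, 4, 1, 1, 1 };
	/* Pretend every task on CPU 1 is pinned away from the puller. */
	bool all_pinned[NR_CPUS] = { false, true, false, false, false,
				     false, false, false };
	int busiest;

	while ((busiest = pick_busiest_cpu(cpus, nr_running)) >= 0) {
		if (!all_pinned[busiest])
			break;			/* pulled something, done */
		cpus[busiest] = false;		/* cf. line 7099: drop & redo */
	}

	printf("pulled from CPU %d\n", busiest);
	return 0;
}
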