Lines Matching refs:rq in kernel/sched/fair.c

249 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
251 return cfs_rq->rq; in rq_of()
322 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
323 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
379 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
381 return container_of(cfs_rq, struct rq, cfs); in rq_of()
397 struct rq *rq = task_rq(p); in cfs_rq_of() local
399 return &rq->cfs; in cfs_rq_of()
416 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
417 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
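
The two rq_of() variants above differ only in how they get from a cfs_rq back to its runqueue: with group scheduling each cfs_rq carries an explicit back-pointer (cfs_rq->rq), while without it the cfs_rq is embedded in struct rq and container_of() does the pointer arithmetic. A minimal user-space sketch of that second pattern, with invented struct layouts rather than the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cfs_rq { unsigned int nr_running; };
    struct rq     { int cpu; struct cfs_rq cfs; };

    /* step from the embedded cfs member back to the enclosing rq */
    static struct rq *rq_of(struct cfs_rq *cfs_rq)
    {
        return container_of(cfs_rq, struct rq, cfs);
    }

    int main(void)
    {
        struct rq rq = { .cpu = 3 };
        printf("cpu=%d\n", rq_of(&rq.cfs)->cpu);  /* prints cpu=3 */
        return 0;
    }
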
730 static void update_curr_fair(struct rq *rq) in update_curr_fair() argument
732 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
859 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
861 rq->nr_numa_running += (p->numa_preferred_nid != -1); in account_numa_enqueue()
862 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); in account_numa_enqueue()
865 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
867 rq->nr_numa_running -= (p->numa_preferred_nid != -1); in account_numa_dequeue()
868 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); in account_numa_dequeue()
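
Taken together, the two accounting hooks above keep per-runqueue counters that the NUMA-aware balancer consults later (see fbq_classify_rq() further down): a task contributes to nr_numa_running when it has any preferred node (numa_preferred_nid != -1) and to nr_preferred_running when it is already running on that node. A compilable stand-in with simplified types, assuming task_node(p) simply reports the node the task currently runs on:

    struct task    { int numa_preferred_nid; int node; };  /* node: where it runs now */
    struct numa_rq { unsigned int nr_numa_running, nr_preferred_running; };

    static void account_numa_enqueue(struct numa_rq *rq, const struct task *p)
    {
        rq->nr_numa_running      += (p->numa_preferred_nid != -1);
        rq->nr_preferred_running += (p->numa_preferred_nid == p->node);
    }

    static void account_numa_dequeue(struct numa_rq *rq, const struct task *p)
    {
        rq->nr_numa_running      -= (p->numa_preferred_nid != -1);
        rq->nr_preferred_running -= (p->numa_preferred_nid == p->node);
    }
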
1140 struct rq *rq = cpu_rq(cpu); in update_numa_stats() local
1142 ns->nr_running += rq->nr_running; in update_numa_stats()
1262 struct rq *src_rq = cpu_rq(env->src_cpu); in task_numa_compare()
1263 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
2244 void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
2276 static void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
2280 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
2284 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
2297 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue() local
2299 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
2300 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
2703 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) in update_rq_runnable_avg() argument
2705 __update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg, in update_rq_runnable_avg()
2707 __update_tg_runnable_avg(&rq->avg, &rq->cfs); in update_rq_runnable_avg()
2715 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {} in update_rq_runnable_avg() argument
2917 void idle_enter_fair(struct rq *this_rq) in idle_enter_fair()
2927 void idle_exit_fair(struct rq *this_rq) in idle_exit_fair()
2932 static int idle_balance(struct rq *this_rq);
2938 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {} in update_rq_runnable_avg() argument
2948 static inline int idle_balance(struct rq *rq) in idle_balance() argument
3603 struct rq *rq = data; in tg_unthrottle_up() local
3604 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up()
3610 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - in tg_unthrottle_up()
3620 struct rq *rq = data; in tg_throttle_down() local
3621 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down()
3625 cfs_rq->throttled_clock_task = rq_clock_task(rq); in tg_throttle_down()
3633 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq() local
3642 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
3661 sub_nr_running(rq, task_delta); in throttle_cfs_rq()
3664 cfs_rq->throttled_clock = rq_clock(rq); in throttle_cfs_rq()
3678 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq() local
3684 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
3688 update_rq_clock(rq); in unthrottle_cfs_rq()
3691 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
3696 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
3716 add_nr_running(rq, task_delta); in unthrottle_cfs_rq()
3719 if (rq->curr == rq->idle && rq->cfs.nr_running) in unthrottle_cfs_rq()
3720 resched_curr(rq); in unthrottle_cfs_rq()
3733 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime() local
3735 raw_spin_lock(&rq->lock); in distribute_cfs_runtime()
3752 raw_spin_unlock(&rq->lock); in distribute_cfs_runtime()
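
The throttle/unthrottle lines above show the bookkeeping the bandwidth code does under rq->lock: the runqueue clock is sampled when a cfs_rq is throttled, and the elapsed time is added to the bandwidth pool's throttled_time when it is unthrottled. A simplified, lock-free model of just that arithmetic (struct names and the plain counter clock are illustrative stand-ins, not the kernel's rq_clock()):

    typedef unsigned long long u64;

    struct cfs_bandwidth { u64 throttled_time; };                 /* per task-group pool  */
    struct cfs_rq_state  { u64 throttled_clock; int throttled; }; /* per-CPU cfs_rq slice */

    static void throttle(struct cfs_rq_state *cfs_rq, u64 now)
    {
        cfs_rq->throttled = 1;
        cfs_rq->throttled_clock = now;      /* cfs_rq->throttled_clock = rq_clock(rq) */
    }

    static void unthrottle(struct cfs_rq_state *cfs_rq, struct cfs_bandwidth *cfs_b, u64 now)
    {
        cfs_rq->throttled = 0;
        cfs_b->throttled_time += now - cfs_rq->throttled_clock;
    }
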
4084 static void __maybe_unused update_runtime_enabled(struct rq *rq) in update_runtime_enabled() argument
4088 for_each_leaf_cfs_rq(rq, cfs_rq) { in update_runtime_enabled()
4097 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) in unthrottle_offline_cfs_rqs() argument
4101 for_each_leaf_cfs_rq(rq, cfs_rq) { in unthrottle_offline_cfs_rqs()
4159 static inline void update_runtime_enabled(struct rq *rq) {} in update_runtime_enabled() argument
4160 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} in unthrottle_offline_cfs_rqs() argument
4169 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
4174 WARN_ON(task_rq(p) != rq); in hrtick_start_fair()
4182 if (rq->curr == p) in hrtick_start_fair()
4183 resched_curr(rq); in hrtick_start_fair()
4186 hrtick_start(rq, delta); in hrtick_start_fair()
4195 static void hrtick_update(struct rq *rq) in hrtick_update() argument
4197 struct task_struct *curr = rq->curr; in hrtick_update()
4199 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) in hrtick_update()
4203 hrtick_start_fair(rq, curr); in hrtick_update()
4207 hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
4211 static inline void hrtick_update(struct rq *rq) in hrtick_update() argument
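
The hrtick lines above arm a high-resolution timer so the current fair task is preempted exactly when its slice ends instead of at the next regular tick. A hedged sketch of the decision hrtick_start_fair() makes; the slice/runtime inputs and the enum are illustrative, not kernel API:

    #include <stdbool.h>

    typedef long long s64;

    enum hrtick_action { HRTICK_ARM, HRTICK_RESCHED, HRTICK_NONE };

    static enum hrtick_action hrtick_decision(s64 slice_ns, s64 ran_ns,
                                              bool task_is_current, s64 *delta_ns)
    {
        s64 delta = slice_ns - ran_ns;

        if (delta < 0)                    /* slice already used up */
            return task_is_current ? HRTICK_RESCHED : HRTICK_NONE;

        *delta_ns = delta;                /* arm the timer for the time remaining */
        return HRTICK_ARM;
    }
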
4222 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_fair() argument
4258 update_rq_runnable_avg(rq, rq->nr_running); in enqueue_task_fair()
4259 add_nr_running(rq, 1); in enqueue_task_fair()
4261 hrtick_update(rq); in enqueue_task_fair()
4271 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_fair() argument
4319 sub_nr_running(rq, 1); in dequeue_task_fair()
4320 update_rq_runnable_avg(rq, 1); in dequeue_task_fair()
4322 hrtick_update(rq); in dequeue_task_fair()
4341 struct rq *rq = cpu_rq(cpu); in source_load() local
4347 return min(rq->cpu_load[type-1], total); in source_load()
4356 struct rq *rq = cpu_rq(cpu); in target_load() local
4362 return max(rq->cpu_load[type-1], total); in target_load()
4377 struct rq *rq = cpu_rq(cpu); in cpu_avg_load_per_task() local
4378 unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running); in cpu_avg_load_per_task()
4379 unsigned long load_avg = rq->cfs.runnable_load_avg; in cpu_avg_load_per_task()
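
cpu_avg_load_per_task() above is just the cfs runnable load divided by the number of queued fair tasks, with a guard for an empty runqueue. A stand-alone version assuming the same semantics:

    static unsigned long cpu_avg_load_per_task(unsigned long runnable_load_avg,
                                               unsigned long h_nr_running)
    {
        return h_nr_running ? runnable_load_avg / h_nr_running : 0;
    }
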
4711 struct rq *rq = cpu_rq(i); in find_idlest_cpu() local
4712 struct cpuidle_state *idle = idle_get_state(rq); in find_idlest_cpu()
4720 latest_idle_timestamp = rq->idle_stamp; in find_idlest_cpu()
4723 rq->idle_stamp > latest_idle_timestamp) { in find_idlest_cpu()
4729 latest_idle_timestamp = rq->idle_stamp; in find_idlest_cpu()
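
The find_idlest_cpu() lines above choose among idle CPUs by preferring the shallowest idle state (cheapest to wake) and, on a tie, the CPU that went idle most recently, since its caches are most likely still warm. A simplified user-space model of that selection, ignoring the kernel's fallback path for non-idle CPUs (struct and field names here are illustrative):

    #include <limits.h>

    struct idle_cpu_info {
        int cpu;
        unsigned int exit_latency;          /* cost of leaving the idle state */
        unsigned long long idle_stamp;      /* when the CPU went idle */
    };

    static int pick_shallowest_idle_cpu(const struct idle_cpu_info *cpus, int n)
    {
        unsigned int min_exit_latency = UINT_MAX;
        unsigned long long latest_idle_timestamp = 0;
        int best = -1, i;

        for (i = 0; i < n; i++) {
            if (cpus[i].exit_latency < min_exit_latency) {
                /* shallower idle state wins outright */
                min_exit_latency = cpus[i].exit_latency;
                latest_idle_timestamp = cpus[i].idle_stamp;
                best = cpus[i].cpu;
            } else if (cpus[i].exit_latency == min_exit_latency &&
                       cpus[i].idle_stamp > latest_idle_timestamp) {
                /* same state: prefer the most recently idled CPU */
                latest_idle_timestamp = cpus[i].idle_stamp;
                best = cpus[i].cpu;
            }
        }
        return best;
    }
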
5013 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in check_preempt_wakeup() argument
5015 struct task_struct *curr = rq->curr; in check_preempt_wakeup()
5079 resched_curr(rq); in check_preempt_wakeup()
5089 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
5097 pick_next_task_fair(struct rq *rq, struct task_struct *prev) in pick_next_task_fair() argument
5099 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair()
5177 if (hrtick_enabled(rq)) in pick_next_task_fair()
5178 hrtick_start_fair(rq, p); in pick_next_task_fair()
5182 cfs_rq = &rq->cfs; in pick_next_task_fair()
5188 put_prev_task(rq, prev); in pick_next_task_fair()
5198 if (hrtick_enabled(rq)) in pick_next_task_fair()
5199 hrtick_start_fair(rq, p); in pick_next_task_fair()
5204 new_tasks = idle_balance(rq); in pick_next_task_fair()
5222 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) in put_prev_task_fair() argument
5238 static void yield_task_fair(struct rq *rq) in yield_task_fair() argument
5240 struct task_struct *curr = rq->curr; in yield_task_fair()
5247 if (unlikely(rq->nr_running == 1)) in yield_task_fair()
5253 update_rq_clock(rq); in yield_task_fair()
5263 rq_clock_skip_update(rq, true); in yield_task_fair()
5269 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) in yield_to_task_fair() argument
5280 yield_task_fair(rq); in yield_to_task_fair()
5416 struct rq *src_rq;
5420 struct rq *dst_rq;
5770 static void attach_task(struct rq *rq, struct task_struct *p) in attach_task() argument
5772 lockdep_assert_held(&rq->lock); in attach_task()
5774 BUG_ON(task_rq(p) != rq); in attach_task()
5776 activate_task(rq, p, 0); in attach_task()
5777 check_preempt_curr(rq, p, 0); in attach_task()
5784 static void attach_one_task(struct rq *rq, struct task_struct *p) in attach_one_task() argument
5786 raw_spin_lock(&rq->lock); in attach_one_task()
5787 attach_task(rq, p); in attach_one_task()
5788 raw_spin_unlock(&rq->lock); in attach_one_task()
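
attach_one_task() above is the locking wrapper: it takes rq->lock and calls attach_task(), which only asserts that the lock is already held before activating the task. A rough user-space analogue of that layering, with a pthread mutex and a flag standing in for rq->lock and lockdep:

    #include <assert.h>
    #include <pthread.h>

    static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
    static int rq_lock_held;

    static void attach_task(void)
    {
        assert(rq_lock_held);             /* stands in for lockdep_assert_held(&rq->lock) */
        /* activate_task(); check_preempt_curr(); */
    }

    static void attach_one_task(void)
    {
        pthread_mutex_lock(&rq_lock);
        rq_lock_held = 1;
        attach_task();
        rq_lock_held = 0;
        pthread_mutex_unlock(&rq_lock);
    }
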
5841 struct rq *rq = rq_of(cfs_rq); in __update_blocked_averages_cpu() local
5842 update_rq_runnable_avg(rq, rq->nr_running); in __update_blocked_averages_cpu()
5848 struct rq *rq = cpu_rq(cpu); in update_blocked_averages() local
5852 raw_spin_lock_irqsave(&rq->lock, flags); in update_blocked_averages()
5853 update_rq_clock(rq); in update_blocked_averages()
5858 for_each_leaf_cfs_rq(rq, cfs_rq) { in update_blocked_averages()
5864 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu); in update_blocked_averages()
5867 raw_spin_unlock_irqrestore(&rq->lock, flags); in update_blocked_averages()
5877 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load() local
5878 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
6035 struct rq *rq = cpu_rq(cpu); in scale_rt_capacity() local
6043 age_stamp = ACCESS_ONCE(rq->age_stamp); in scale_rt_capacity()
6044 avg = ACCESS_ONCE(rq->rt_avg); in scale_rt_capacity()
6045 delta = __rq_clock_broken(rq) - age_stamp; in scale_rt_capacity()
6110 struct rq *rq = cpu_rq(cpu); in update_group_capacity() local
6123 if (unlikely(!rq->sd)) { in update_group_capacity()
6128 sgc = rq->sd->groups->sgc; in update_group_capacity()
6153 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) in check_cpu_capacity() argument
6155 return ((rq->cpu_capacity * sd->imbalance_pct) < in check_cpu_capacity()
6156 (rq->cpu_capacity_orig * 100)); in check_cpu_capacity()
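
check_cpu_capacity() above reports a CPU as capacity-pressured when the capacity left for fair tasks drops below cpu_capacity_orig * 100 / imbalance_pct. A stand-alone version with purely illustrative numbers (imbalance_pct = 125 is an example value, not necessarily this domain's setting):

    #include <stdbool.h>
    #include <stdio.h>

    static bool check_cpu_capacity(unsigned long capacity, unsigned long capacity_orig,
                                   unsigned int imbalance_pct)
    {
        return capacity * imbalance_pct < capacity_orig * 100;
    }

    int main(void)
    {
        /* 1024 original capacity, 790 left after RT/IRQ pressure:
         * 790 * 125 = 98750 < 1024 * 100 = 102400, so the CPU is flagged. */
        printf("%d\n", check_cpu_capacity(790, 1024, 125));
        return 0;
    }
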
6272 struct rq *rq = cpu_rq(i); in update_sg_lb_stats() local
6282 sgs->sum_nr_running += rq->cfs.h_nr_running; in update_sg_lb_stats()
6284 if (rq->nr_running > 1) in update_sg_lb_stats()
6288 sgs->nr_numa_running += rq->nr_numa_running; in update_sg_lb_stats()
6289 sgs->nr_preferred_running += rq->nr_preferred_running; in update_sg_lb_stats()
6368 static inline enum fbq_type fbq_classify_rq(struct rq *rq) in fbq_classify_rq() argument
6370 if (rq->nr_running > rq->nr_numa_running) in fbq_classify_rq()
6372 if (rq->nr_running > rq->nr_preferred_running) in fbq_classify_rq()
6382 static inline enum fbq_type fbq_classify_rq(struct rq *rq) in fbq_classify_rq() argument
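
fbq_classify_rq() above turns the counters kept by account_numa_enqueue()/account_numa_dequeue() into a coarse class the balancer can compare against groups: "regular" if some queued tasks have no NUMA preference at all, "remote" if some prefer another node, "all" if every task already sits on its preferred node. A hedged stand-alone model of that ordering:

    enum fbq_type { regular, remote, all };

    static enum fbq_type classify_rq(unsigned int nr_running,
                                     unsigned int nr_numa_running,
                                     unsigned int nr_preferred_running)
    {
        if (nr_running > nr_numa_running)
            return regular;               /* some tasks have no preferred node */
        if (nr_running > nr_preferred_running)
            return remote;                /* some tasks prefer a different node */
        return all;                       /* everything is already placed */
    }
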
6761 static struct rq *find_busiest_queue(struct lb_env *env, in find_busiest_queue()
6764 struct rq *busiest = NULL, *rq; in find_busiest_queue() local
6772 rq = cpu_rq(i); in find_busiest_queue()
6773 rt = fbq_classify_rq(rq); in find_busiest_queue()
6806 if (rq->nr_running == 1 && wl > env->imbalance && in find_busiest_queue()
6807 !check_cpu_capacity(rq, env->sd)) in find_busiest_queue()
6824 busiest = rq; in find_busiest_queue()
6911 static int load_balance(int this_cpu, struct rq *this_rq, in load_balance()
6918 struct rq *busiest; in load_balance()
7205 static int idle_balance(struct rq *this_rq) in idle_balance()
7314 struct rq *busiest_rq = data; in active_load_balance_cpu_stop()
7317 struct rq *target_rq = cpu_rq(target_cpu); in active_load_balance_cpu_stop()
7378 static inline int on_null_domain(struct rq *rq) in on_null_domain() argument
7380 return unlikely(!rcu_dereference_sched(rq->sd)); in on_null_domain()
7538 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) in rebalance_domains() argument
7541 int cpu = rq->cpu; in rebalance_domains()
7589 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { in rebalance_domains()
7613 rq->max_idle_balance_cost = in rebalance_domains()
7624 rq->next_balance = next_balance; in rebalance_domains()
7632 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) in nohz_idle_balance()
7635 struct rq *rq; in nohz_idle_balance() local
7654 rq = cpu_rq(balance_cpu); in nohz_idle_balance()
7660 if (time_after_eq(jiffies, rq->next_balance)) { in nohz_idle_balance()
7661 raw_spin_lock_irq(&rq->lock); in nohz_idle_balance()
7662 update_rq_clock(rq); in nohz_idle_balance()
7663 update_idle_cpu_load(rq); in nohz_idle_balance()
7664 raw_spin_unlock_irq(&rq->lock); in nohz_idle_balance()
7665 rebalance_domains(rq, CPU_IDLE); in nohz_idle_balance()
7668 if (time_after(this_rq->next_balance, rq->next_balance)) in nohz_idle_balance()
7669 this_rq->next_balance = rq->next_balance; in nohz_idle_balance()
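
The last two nohz_idle_balance() lines above propagate the earliest pending balance time back to the CPU doing the work on behalf of its idle siblings, using a jiffies wrap-safe comparison. A small model of that propagation; time_after() is modelled after the kernel's jiffies helper with its type checking dropped:

    #define time_after(a, b)  ((long)((b) - (a)) < 0)   /* wrap-safe: true if a is later than b */

    static unsigned long earliest_next_balance(unsigned long this_next_balance,
                                               unsigned long rq_next_balance)
    {
        if (time_after(this_next_balance, rq_next_balance))
            this_next_balance = rq_next_balance;
        return this_next_balance;
    }
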
7687 static inline bool nohz_kick_needed(struct rq *rq) in nohz_kick_needed() argument
7692 int nr_busy, cpu = rq->cpu; in nohz_kick_needed()
7695 if (unlikely(rq->idle_balance)) in nohz_kick_needed()
7715 if (rq->nr_running >= 2) in nohz_kick_needed()
7731 sd = rcu_dereference(rq->sd); in nohz_kick_needed()
7733 if ((rq->cfs.h_nr_running >= 1) && in nohz_kick_needed()
7734 check_cpu_capacity(rq, sd)) { in nohz_kick_needed()
7752 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { } in nohz_idle_balance()
7761 struct rq *this_rq = this_rq(); in run_rebalance_domains()
7780 void trigger_load_balance(struct rq *rq) in trigger_load_balance() argument
7783 if (unlikely(on_null_domain(rq))) in trigger_load_balance()
7786 if (time_after_eq(jiffies, rq->next_balance)) in trigger_load_balance()
7789 if (nohz_kick_needed(rq)) in trigger_load_balance()
7794 static void rq_online_fair(struct rq *rq) in rq_online_fair() argument
7798 update_runtime_enabled(rq); in rq_online_fair()
7801 static void rq_offline_fair(struct rq *rq) in rq_offline_fair() argument
7806 unthrottle_offline_cfs_rqs(rq); in rq_offline_fair()
7814 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) in task_tick_fair() argument
7825 task_tick_numa(rq, curr); in task_tick_fair()
7827 update_rq_runnable_avg(rq, 1); in task_tick_fair()
7840 struct rq *rq = this_rq(); in task_fork_fair() local
7843 raw_spin_lock_irqsave(&rq->lock, flags); in task_fork_fair()
7845 update_rq_clock(rq); in task_fork_fair()
7872 resched_curr(rq); in task_fork_fair()
7877 raw_spin_unlock_irqrestore(&rq->lock, flags); in task_fork_fair()
7885 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_fair() argument
7895 if (rq->curr == p) { in prio_changed_fair()
7897 resched_curr(rq); in prio_changed_fair()
7899 check_preempt_curr(rq, p, 0); in prio_changed_fair()
7902 static void switched_from_fair(struct rq *rq, struct task_struct *p) in switched_from_fair() argument
7941 static void switched_to_fair(struct rq *rq, struct task_struct *p) in switched_to_fair() argument
7959 if (rq->curr == p) in switched_to_fair()
7960 resched_curr(rq); in switched_to_fair()
7962 check_preempt_curr(rq, p, 0); in switched_to_fair()
7970 static void set_curr_task_fair(struct rq *rq) in set_curr_task_fair() argument
7972 struct sched_entity *se = &rq->curr->se; in set_curr_task_fair()
8108 struct rq *rq = cpu_rq(cpu); in unregister_fair_sched_group() local
8118 raw_spin_lock_irqsave(&rq->lock, flags); in unregister_fair_sched_group()
8120 raw_spin_unlock_irqrestore(&rq->lock, flags); in unregister_fair_sched_group()
8127 struct rq *rq = cpu_rq(cpu); in init_tg_cfs_entry() local
8130 cfs_rq->rq = rq; in init_tg_cfs_entry()
8141 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
8175 struct rq *rq = cpu_rq(i); in sched_group_set_shares() local
8180 raw_spin_lock_irqsave(&rq->lock, flags); in sched_group_set_shares()
8183 update_rq_clock(rq); in sched_group_set_shares()
8186 raw_spin_unlock_irqrestore(&rq->lock, flags); in sched_group_set_shares()
8207 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) in get_rr_interval_fair() argument
8216 if (rq->cfs.load.weight) in get_rr_interval_fair()