Lines matching refs:rq (references to struct rq in kernel/sched/core.c; each hit shows the source line number, the matched line, and the enclosing function)

114 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
116 static void update_rq_clock_task(struct rq *rq, s64 delta);
118 void update_rq_clock(struct rq *rq) in update_rq_clock() argument
122 lockdep_assert_held(&rq->lock); in update_rq_clock()
124 if (rq->clock_skip_update & RQCF_ACT_SKIP) in update_rq_clock()
127 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock()
130 rq->clock += delta; in update_rq_clock()
131 update_rq_clock_task(rq, delta); in update_rq_clock()
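
Taken together, the update_rq_clock() hits above reconstruct most of the helper. A hedged sketch of how the fragments fit; the early return on a negative delta is an assumption about the two lines the search did not match:

    void update_rq_clock(struct rq *rq)
    {
        s64 delta;

        lockdep_assert_held(&rq->lock);            /* caller must hold the runqueue lock */

        if (rq->clock_skip_update & RQCF_ACT_SKIP) /* __schedule() can suppress redundant updates */
            return;

        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
        if (delta < 0)                             /* assumed guard against the clock going backwards */
            return;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);           /* also advance clock_task, minus irq/steal time */
    }
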
315 static struct rq *this_rq_lock(void) in this_rq_lock()
316 __acquires(rq->lock) in this_rq_lock()
318 struct rq *rq; in this_rq_lock() local
321 rq = this_rq(); in this_rq_lock()
322 raw_spin_lock(&rq->lock); in this_rq_lock()
324 return rq; in this_rq_lock()
332 static void hrtick_clear(struct rq *rq) in hrtick_clear() argument
334 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
335 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
344 struct rq *rq = container_of(timer, struct rq, hrtick_timer); in hrtick() local
346 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); in hrtick()
348 raw_spin_lock(&rq->lock); in hrtick()
349 update_rq_clock(rq); in hrtick()
350 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
351 raw_spin_unlock(&rq->lock); in hrtick()
358 static int __hrtick_restart(struct rq *rq) in __hrtick_restart() argument
360 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart()
371 struct rq *rq = arg; in __hrtick_start() local
373 raw_spin_lock(&rq->lock); in __hrtick_start()
374 __hrtick_restart(rq); in __hrtick_start()
375 rq->hrtick_csd_pending = 0; in __hrtick_start()
376 raw_spin_unlock(&rq->lock); in __hrtick_start()
384 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
386 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start()
399 if (rq == this_rq()) { in hrtick_start()
400 __hrtick_restart(rq); in hrtick_start()
401 } else if (!rq->hrtick_csd_pending) { in hrtick_start()
402 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
403 rq->hrtick_csd_pending = 1; in hrtick_start()
436 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
443 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, in hrtick_start()
452 static void init_rq_hrtick(struct rq *rq) in init_rq_hrtick() argument
455 rq->hrtick_csd_pending = 0; in init_rq_hrtick()
457 rq->hrtick_csd.flags = 0; in init_rq_hrtick()
458 rq->hrtick_csd.func = __hrtick_start; in init_rq_hrtick()
459 rq->hrtick_csd.info = rq; in init_rq_hrtick()
462 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in init_rq_hrtick()
463 rq->hrtick_timer.function = hrtick; in init_rq_hrtick()
466 static inline void hrtick_clear(struct rq *rq) in hrtick_clear() argument
470 static inline void init_rq_hrtick(struct rq *rq) in init_rq_hrtick() argument
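
The SMP variant of hrtick_start() (lines 384-403 above) does not always arm the timer itself. A short annotated sketch of that decision, using only the calls shown in the listing:

    /* rq->lock is held by the caller.  Timer expiry runs hrtick(), which
     * re-takes rq->lock and calls task_tick() with the queued argument set to 1.
     */
    if (rq == this_rq()) {
        __hrtick_restart(rq);                      /* local CPU: re-arm rq->hrtick_timer directly */
    } else if (!rq->hrtick_csd_pending) {
        /* remote CPU: bounce through the csd so the hrtimer is armed on its own CPU */
        smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
        rq->hrtick_csd_pending = 1;                /* cleared again by __hrtick_start() on the target */
    }
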
551 void resched_curr(struct rq *rq) in resched_curr() argument
553 struct task_struct *curr = rq->curr; in resched_curr()
556 lockdep_assert_held(&rq->lock); in resched_curr()
561 cpu = cpu_of(rq); in resched_curr()
577 struct rq *rq = cpu_rq(cpu); in resched_cpu() local
580 if (!raw_spin_trylock_irqsave(&rq->lock, flags)) in resched_cpu()
582 resched_curr(rq); in resched_cpu()
583 raw_spin_unlock_irqrestore(&rq->lock, flags); in resched_cpu()
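
Only the locking-related lines of resched_curr() match, so most of its body is hidden here. A hedged approximation of what such a helper has to do, built from primitives that do appear in this listing (set_nr_and_not_polling() at line 635, test_tsk_need_resched() at line 1015) plus the generic smp_send_reschedule(); this is not the verbatim kernel body:

    static void resched_curr_sketch(struct rq *rq)
    {
        struct task_struct *curr = rq->curr;
        int cpu;

        lockdep_assert_held(&rq->lock);            /* matches the assertion at line 556 */

        if (test_tsk_need_resched(curr))           /* already flagged: nothing to do */
            return;

        cpu = cpu_of(rq);
        if (cpu == smp_processor_id()) {
            set_tsk_need_resched(curr);            /* local CPU: set TIF_NEED_RESCHED and return */
            return;
        }

        if (set_nr_and_not_polling(curr))          /* remote CPU not polling: kick it with an IPI */
            smp_send_reschedule(cpu);
    }
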
630 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu() local
635 if (set_nr_and_not_polling(rq->idle)) in wake_up_idle_cpu()
724 void sched_avg_update(struct rq *rq) in sched_avg_update() argument
728 while ((s64)(rq_clock(rq) - rq->age_stamp) > period) { in sched_avg_update()
734 asm("" : "+rm" (rq->age_stamp)); in sched_avg_update()
735 rq->age_stamp += period; in sched_avg_update()
736 rq->rt_avg /= 2; in sched_avg_update()
805 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
807 update_rq_clock(rq); in enqueue_task()
808 sched_info_queued(rq, p); in enqueue_task()
809 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
812 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
814 update_rq_clock(rq); in dequeue_task()
815 sched_info_dequeued(rq, p); in dequeue_task()
816 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
819 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
822 rq->nr_uninterruptible--; in activate_task()
824 enqueue_task(rq, p, flags); in activate_task()
827 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
830 rq->nr_uninterruptible++; in deactivate_task()
832 dequeue_task(rq, p, flags); in deactivate_task()
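
enqueue_task()/dequeue_task() update the clock and schedstats before dispatching to the task's class, while activate_task()/deactivate_task() additionally keep rq->nr_uninterruptible in sync. A minimal usage sketch, taken from the wake-up and sleep paths that appear later in this listing (wake_up_new_task() at lines 2094-2098, __schedule() at line 2773):

    /* Waking a freshly forked task: put it on the runqueue and check for preemption.
     * rq->lock is held via __task_rq_lock().
     */
    activate_task(rq, p, 0);
    check_preempt_curr(rq, p, WF_FORK);

    /* A task blocking inside __schedule(): remove it from the runqueue.
     * DEQUEUE_SLEEP tells the class this is a voluntary sleep, not a migration.
     */
    deactivate_task(rq, prev, DEQUEUE_SLEEP);
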
835 static void update_rq_clock_task(struct rq *rq, s64 delta) in update_rq_clock_task() argument
845 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
865 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
870 steal = paravirt_steal_clock(cpu_of(rq)); in update_rq_clock_task()
871 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
876 rq->prev_steal_time_rq += steal; in update_rq_clock_task()
881 rq->clock_task += delta; in update_rq_clock_task()
885 sched_rt_avg_update(rq, irq_delta + steal); in update_rq_clock_task()
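
update_rq_clock_task() is the reason rq->clock and rq->clock_task can drift apart: hard/soft-IRQ time and paravirt steal time are subtracted before clock_task is advanced. A hedged sketch of that arithmetic (the config #ifdefs are omitted, and the clamping of each component to the elapsed delta is an assumption about the unmatched lines):

    irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
    if (irq_delta > delta)
        irq_delta = delta;                         /* never account more IRQ time than elapsed time */
    rq->prev_irq_time += irq_delta;
    delta -= irq_delta;

    steal = paravirt_steal_clock(cpu_of(rq)) - rq->prev_steal_time_rq;
    if (steal > delta)
        steal = delta;                             /* same clamp for hypervisor steal time */
    rq->prev_steal_time_rq += steal;
    delta -= steal;

    rq->clock_task += delta;                       /* only "real" task execution time remains */
    sched_rt_avg_update(rq, irq_delta + steal);    /* fold the lost time into the rt_avg estimate */
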
981 static inline void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
987 prev_class->switched_from(rq, p); in check_class_changed()
989 p->sched_class->switched_to(rq, p); in check_class_changed()
991 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
994 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr() argument
998 if (p->sched_class == rq->curr->sched_class) { in check_preempt_curr()
999 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
1002 if (class == rq->curr->sched_class) in check_preempt_curr()
1005 resched_curr(rq); in check_preempt_curr()
1015 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) in check_preempt_curr()
1016 rq_clock_skip_update(rq, true); in check_preempt_curr()
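
check_preempt_curr() only delegates to the class callback when the woken task and rq->curr are in the same class; across classes, the fixed priority order of the scheduling classes decides. A hedged sketch of the walk implied by lines 998-1005:

    const struct sched_class *class;

    if (p->sched_class == rq->curr->sched_class) {
        /* same class: let the class decide (e.g. vruntime or priority comparison) */
        rq->curr->sched_class->check_preempt_curr(rq, p, flags);
    } else {
        /* classes are ordered highest to lowest (stop, dl, rt, fair, idle) */
        for_each_class(class) {
            if (class == rq->curr->sched_class)
                break;                             /* current's class ranks higher: no preemption */
            if (class == p->sched_class) {
                resched_curr(rq);                  /* woken task's class ranks higher: preempt */
                break;
            }
        }
    }
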
1061 struct rq *src_rq, *dst_rq; in __migrate_swap_task()
1088 struct rq *src_rq, *dst_rq; in migrate_swap_stop()
1188 struct rq *rq; in wait_task_inactive() local
1197 rq = task_rq(p); in wait_task_inactive()
1210 while (task_running(rq, p)) { in wait_task_inactive()
1221 rq = task_rq_lock(p, &flags); in wait_task_inactive()
1223 running = task_running(rq, p); in wait_task_inactive()
1228 task_rq_unlock(rq, p, &flags); in wait_task_inactive()
1412 struct rq *rq = this_rq(); in ttwu_stat() local
1418 schedstat_inc(rq, ttwu_local); in ttwu_stat()
1439 schedstat_inc(rq, ttwu_count); in ttwu_stat()
1448 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) in ttwu_activate() argument
1450 activate_task(rq, p, en_flags); in ttwu_activate()
1455 wq_worker_waking_up(p, cpu_of(rq)); in ttwu_activate()
1462 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in ttwu_do_wakeup() argument
1464 check_preempt_curr(rq, p, wake_flags); in ttwu_do_wakeup()
1470 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
1472 if (rq->idle_stamp) { in ttwu_do_wakeup()
1473 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_wakeup()
1474 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_wakeup()
1476 update_avg(&rq->avg_idle, delta); in ttwu_do_wakeup()
1478 if (rq->avg_idle > max) in ttwu_do_wakeup()
1479 rq->avg_idle = max; in ttwu_do_wakeup()
1481 rq->idle_stamp = 0; in ttwu_do_wakeup()
1487 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) in ttwu_do_activate() argument
1491 rq->nr_uninterruptible--; in ttwu_do_activate()
1494 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); in ttwu_do_activate()
1495 ttwu_do_wakeup(rq, p, wake_flags); in ttwu_do_activate()
1506 struct rq *rq; in ttwu_remote() local
1509 rq = __task_rq_lock(p); in ttwu_remote()
1512 update_rq_clock(rq); in ttwu_remote()
1513 ttwu_do_wakeup(rq, p, wake_flags); in ttwu_remote()
1516 __task_rq_unlock(rq); in ttwu_remote()
1524 struct rq *rq = this_rq(); in sched_ttwu_pending() local
1525 struct llist_node *llist = llist_del_all(&rq->wake_list); in sched_ttwu_pending()
1532 raw_spin_lock_irqsave(&rq->lock, flags); in sched_ttwu_pending()
1537 ttwu_do_activate(rq, p, 0); in sched_ttwu_pending()
1540 raw_spin_unlock_irqrestore(&rq->lock, flags); in sched_ttwu_pending()
1583 struct rq *rq = cpu_rq(cpu); in ttwu_queue_remote() local
1586 if (!set_nr_if_polling(rq->idle)) in ttwu_queue_remote()
1595 struct rq *rq = cpu_rq(cpu); in wake_up_if_idle() local
1600 if (!is_idle_task(rcu_dereference(rq->curr))) in wake_up_if_idle()
1603 if (set_nr_if_polling(rq->idle)) { in wake_up_if_idle()
1606 raw_spin_lock_irqsave(&rq->lock, flags); in wake_up_if_idle()
1607 if (is_idle_task(rq->curr)) in wake_up_if_idle()
1610 raw_spin_unlock_irqrestore(&rq->lock, flags); in wake_up_if_idle()
1625 struct rq *rq = cpu_rq(cpu); in ttwu_queue() local
1635 raw_spin_lock(&rq->lock); in ttwu_queue()
1636 ttwu_do_activate(rq, p, 0); in ttwu_queue()
1637 raw_spin_unlock(&rq->lock); in ttwu_queue()
1722 struct rq *rq = task_rq(p); in try_to_wake_up_local() local
1724 if (WARN_ON_ONCE(rq != this_rq()) || in try_to_wake_up_local()
1728 lockdep_assert_held(&rq->lock); in try_to_wake_up_local()
1731 raw_spin_unlock(&rq->lock); in try_to_wake_up_local()
1733 raw_spin_lock(&rq->lock); in try_to_wake_up_local()
1740 ttwu_activate(rq, p, ENQUEUE_WAKEUP); in try_to_wake_up_local()
1742 ttwu_do_wakeup(rq, p, 0); in try_to_wake_up_local()
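
The try-to-wake-up fragments above form one chain: ttwu_queue() (locally) or sched_ttwu_pending() (for wake-ups queued on a remote rq->wake_list) takes rq->lock and calls ttwu_do_activate(), which enqueues the task and runs the common wake-up epilogue. A hedged sketch of that middle step; the sched_contributes_to_load test is an assumption about the unmatched line before 1491:

    static void ttwu_do_activate_sketch(struct rq *rq, struct task_struct *p, int wake_flags)
    {
    #ifdef CONFIG_SMP
        if (p->sched_contributes_to_load)
            rq->nr_uninterruptible--;              /* undo the accounting done when it blocked */
    #endif
        /* enqueue via the class and notify the workqueue code (wq_worker_waking_up()) */
        ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);

        /* mark the task running: check_preempt_curr(), task_woken(), idle-balance stats */
        ttwu_do_wakeup(rq, p, wake_flags);
    }
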
2080 struct rq *rq; in wake_up_new_task() local
2094 rq = __task_rq_lock(p); in wake_up_new_task()
2095 activate_task(rq, p, 0); in wake_up_new_task()
2098 check_preempt_curr(rq, p, WF_FORK); in wake_up_new_task()
2101 p->sched_class->task_woken(rq, p); in wake_up_new_task()
2103 task_rq_unlock(rq, p, &flags); in wake_up_new_task()
2176 prepare_task_switch(struct rq *rq, struct task_struct *prev, in prepare_task_switch() argument
2180 sched_info_switch(rq, prev, next); in prepare_task_switch()
2183 prepare_lock_switch(rq, next); in prepare_task_switch()
2206 static struct rq *finish_task_switch(struct task_struct *prev) in finish_task_switch()
2207 __releases(rq->lock) in finish_task_switch()
2209 struct rq *rq = this_rq(); in finish_task_switch() local
2210 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
2213 rq->prev_mm = NULL; in finish_task_switch()
2230 finish_lock_switch(rq, prev); in finish_task_switch()
2249 return rq; in finish_task_switch()
2255 static inline void post_schedule(struct rq *rq) in post_schedule() argument
2257 if (rq->post_schedule) { in post_schedule()
2260 raw_spin_lock_irqsave(&rq->lock, flags); in post_schedule()
2261 if (rq->curr->sched_class->post_schedule) in post_schedule()
2262 rq->curr->sched_class->post_schedule(rq); in post_schedule()
2263 raw_spin_unlock_irqrestore(&rq->lock, flags); in post_schedule()
2265 rq->post_schedule = 0; in post_schedule()
2271 static inline void post_schedule(struct rq *rq) in post_schedule() argument
2282 __releases(rq->lock) in schedule_tail()
2284 struct rq *rq; in schedule_tail() local
2288 rq = finish_task_switch(prev); in schedule_tail()
2289 post_schedule(rq); in schedule_tail()
2299 static inline struct rq *
2300 context_switch(struct rq *rq, struct task_struct *prev, in context_switch() argument
2305 prepare_task_switch(rq, prev, next); in context_switch()
2325 rq->prev_mm = oldmm; in context_switch()
2333 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); in context_switch()
2401 struct rq *this = cpu_rq(cpu); in nr_iowait_cpu()
2407 struct rq *this = this_rq(); in get_iowait_load()
2456 struct rq *rq; in task_sched_runtime() local
2475 rq = task_rq_lock(p, &flags); in task_sched_runtime()
2481 if (task_current(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
2482 update_rq_clock(rq); in task_sched_runtime()
2483 p->sched_class->update_curr(rq); in task_sched_runtime()
2486 task_rq_unlock(rq, p, &flags); in task_sched_runtime()
2498 struct rq *rq = cpu_rq(cpu); in scheduler_tick() local
2499 struct task_struct *curr = rq->curr; in scheduler_tick()
2503 raw_spin_lock(&rq->lock); in scheduler_tick()
2504 update_rq_clock(rq); in scheduler_tick()
2505 curr->sched_class->task_tick(rq, curr, 0); in scheduler_tick()
2506 update_cpu_load_active(rq); in scheduler_tick()
2507 raw_spin_unlock(&rq->lock); in scheduler_tick()
2512 rq->idle_balance = idle_cpu(cpu); in scheduler_tick()
2513 trigger_load_balance(rq); in scheduler_tick()
2515 rq_last_tick_reset(rq); in scheduler_tick()
2534 struct rq *rq = this_rq(); in scheduler_tick_max_deferment() local
2537 next = rq->last_sched_tick + HZ; in scheduler_tick_max_deferment()
2664 pick_next_task(struct rq *rq, struct task_struct *prev) in pick_next_task() argument
2674 rq->nr_running == rq->cfs.h_nr_running)) { in pick_next_task()
2675 p = fair_sched_class.pick_next_task(rq, prev); in pick_next_task()
2681 p = idle_sched_class.pick_next_task(rq, prev); in pick_next_task()
2688 p = class->pick_next_task(rq, prev); in pick_next_task()
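
pick_next_task() has a fast path: when the previous task was a CFS task and every runnable task on this rq is in CFS (line 2674), it asks the fair class directly and only falls back to the idle class when CFS has nothing; otherwise it walks the classes from highest to lowest. A hedged sketch of that structure (the first half of the fast-path condition and the RETRY_TASK retry are assumptions about the unmatched lines):

    static struct task_struct *pick_next_task_sketch(struct rq *rq, struct task_struct *prev)
    {
        const struct sched_class *class = &fair_sched_class;
        struct task_struct *p;

        /* Fast path: all runnable tasks are CFS tasks, so ask CFS directly. */
        if (likely(prev->sched_class == class &&
                   rq->nr_running == rq->cfs.h_nr_running)) {
            p = fair_sched_class.pick_next_task(rq, prev);
            if (unlikely(p == RETRY_TASK))         /* assumed: retry after a class change */
                goto again;
            if (!p)                                /* CFS came up empty: run the idle task */
                p = idle_sched_class.pick_next_task(rq, prev);
            return p;
        }

    again:
        /* Slow path: the highest-priority class with a runnable task wins. */
        for_each_class(class) {
            p = class->pick_next_task(rq, prev);
            if (p) {
                if (unlikely(p == RETRY_TASK))
                    goto again;
                return p;
            }
        }
        BUG();                                     /* the idle class always has a runnable task */
    }
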
2744 struct rq *rq; in __schedule() local
2749 rq = cpu_rq(cpu); in __schedule()
2751 prev = rq->curr; in __schedule()
2756 hrtick_clear(rq); in __schedule()
2764 raw_spin_lock_irq(&rq->lock); in __schedule()
2766 rq->clock_skip_update <<= 1; /* promote REQ to ACT */ in __schedule()
2773 deactivate_task(rq, prev, DEQUEUE_SLEEP); in __schedule()
2793 update_rq_clock(rq); in __schedule()
2795 next = pick_next_task(rq, prev); in __schedule()
2798 rq->clock_skip_update = 0; in __schedule()
2801 rq->nr_switches++; in __schedule()
2802 rq->curr = next; in __schedule()
2805 rq = context_switch(rq, prev, next); /* unlocks the rq */ in __schedule()
2806 cpu = cpu_of(rq); in __schedule()
2808 raw_spin_unlock_irq(&rq->lock); in __schedule()
2810 post_schedule(rq); in __schedule()
3001 struct rq *rq; in rt_mutex_setprio() local
3006 rq = __task_rq_lock(p); in rt_mutex_setprio()
3020 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
3021 WARN_ON(p != rq->curr); in rt_mutex_setprio()
3030 running = task_current(rq, p); in rt_mutex_setprio()
3032 dequeue_task(rq, p, 0); in rt_mutex_setprio()
3034 put_prev_task(rq, p); in rt_mutex_setprio()
3072 p->sched_class->set_curr_task(rq); in rt_mutex_setprio()
3074 enqueue_task(rq, p, enqueue_flag); in rt_mutex_setprio()
3076 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
3078 __task_rq_unlock(rq); in rt_mutex_setprio()
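
rt_mutex_setprio() is the first of several sites in this listing (see also __sched_setscheduler(), sched_setnuma(), normalize_task() and sched_move_task() below) that change a task's scheduling attributes with the same dequeue/put_prev ... set_curr/enqueue dance while holding rq->lock. A hedged sketch of that recurring pattern, with variable names as in rt_mutex_setprio():

    /* p's runqueue lock is held (task_rq_lock()/__task_rq_lock()) for the whole sequence. */
    queued = task_on_rq_queued(p);
    running = task_current(rq, p);
    if (queued)
        dequeue_task(rq, p, 0);                    /* take it off the runqueue first */
    if (running)
        put_prev_task(rq, p);                      /* and out of the "currently running" slot */

    /* ... change p->prio, p->sched_class, policy or task group here ... */

    if (running)
        p->sched_class->set_curr_task(rq);         /* reinstall as current under the new class */
    if (queued)
        enqueue_task(rq, p, enqueue_flag);         /* requeue; ENQUEUE_HEAD when boosting (cf. line 3633) */

    check_class_changed(rq, p, prev_class, oldprio);   /* switched_from/switched_to/prio_changed hooks */
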
3086 struct rq *rq; in set_user_nice() local
3094 rq = task_rq_lock(p, &flags); in set_user_nice()
3107 dequeue_task(rq, p, 0); in set_user_nice()
3116 enqueue_task(rq, p, 0); in set_user_nice()
3121 if (delta < 0 || (delta > 0 && task_running(rq, p))) in set_user_nice()
3122 resched_curr(rq); in set_user_nice()
3125 task_rq_unlock(rq, p, &flags); in set_user_nice()
3199 struct rq *rq = cpu_rq(cpu); in idle_cpu() local
3201 if (rq->curr != rq->idle) in idle_cpu()
3204 if (rq->nr_running) in idle_cpu()
3208 if (!llist_empty(&rq->wake_list)) in idle_cpu()
3309 static void __setscheduler(struct rq *rq, struct task_struct *p, in __setscheduler() argument
3424 struct rq *rq; in __sched_setscheduler() local
3523 rq = task_rq_lock(p, &flags); in __sched_setscheduler()
3528 if (p == rq->stop) { in __sched_setscheduler()
3529 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3546 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3560 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3566 cpumask_t *span = rq->rd->span; in __sched_setscheduler()
3574 rq->rd->dl_bw.bw == 0) { in __sched_setscheduler()
3575 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3585 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3595 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3612 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
3617 running = task_current(rq, p); in __sched_setscheduler()
3619 dequeue_task(rq, p, 0); in __sched_setscheduler()
3621 put_prev_task(rq, p); in __sched_setscheduler()
3624 __setscheduler(rq, p, attr, true); in __sched_setscheduler()
3627 p->sched_class->set_curr_task(rq); in __sched_setscheduler()
3633 enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); in __sched_setscheduler()
3636 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
3637 task_rq_unlock(rq, p, &flags); in __sched_setscheduler()
4214 struct rq *rq = this_rq_lock(); in SYSCALL_DEFINE0() local
4216 schedstat_inc(rq, yld_count); in SYSCALL_DEFINE0()
4217 current->sched_class->yield_task(rq); in SYSCALL_DEFINE0()
4223 __release(rq->lock); in SYSCALL_DEFINE0()
4224 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); in SYSCALL_DEFINE0()
4225 do_raw_spin_unlock(&rq->lock); in SYSCALL_DEFINE0()
4332 struct rq *rq, *p_rq; in yield_to() local
4337 rq = this_rq(); in yield_to()
4345 if (rq->nr_running == 1 && p_rq->nr_running == 1) { in yield_to()
4350 double_rq_lock(rq, p_rq); in yield_to()
4352 double_rq_unlock(rq, p_rq); in yield_to()
4365 yielded = curr->sched_class->yield_to_task(rq, p, preempt); in yield_to()
4367 schedstat_inc(rq, yld_count); in yield_to()
4372 if (preempt && rq != p_rq) in yield_to()
4377 double_rq_unlock(rq, p_rq); in yield_to()
4395 struct rq *rq; in io_schedule_timeout() local
4402 rq = raw_rq(); in io_schedule_timeout()
4403 atomic_inc(&rq->nr_iowait); in io_schedule_timeout()
4406 atomic_dec(&rq->nr_iowait); in io_schedule_timeout()
4483 struct rq *rq; in SYSCALL_DEFINE2() local
4500 rq = task_rq_lock(p, &flags); in SYSCALL_DEFINE2()
4503 time_slice = p->sched_class->get_rr_interval(rq, p); in SYSCALL_DEFINE2()
4504 task_rq_unlock(rq, p, &flags); in SYSCALL_DEFINE2()
4605 struct rq *rq = cpu_rq(cpu); in init_idle() local
4608 raw_spin_lock_irqsave(&rq->lock, flags); in init_idle()
4629 rq->curr = rq->idle = idle; in init_idle()
4634 raw_spin_unlock_irqrestore(&rq->lock, flags); in init_idle()
4734 static struct rq *move_queued_task(struct task_struct *p, int new_cpu) in move_queued_task()
4736 struct rq *rq = task_rq(p); in move_queued_task() local
4738 lockdep_assert_held(&rq->lock); in move_queued_task()
4740 dequeue_task(rq, p, 0); in move_queued_task()
4743 raw_spin_unlock(&rq->lock); in move_queued_task()
4745 rq = cpu_rq(new_cpu); in move_queued_task()
4747 raw_spin_lock(&rq->lock); in move_queued_task()
4750 enqueue_task(rq, p, 0); in move_queued_task()
4751 check_preempt_curr(rq, p, 0); in move_queued_task()
4753 return rq; in move_queued_task()
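
move_queued_task() migrates a task that is currently enqueued: dequeue under the old runqueue's lock, retarget the task, then enqueue under the new runqueue's lock and check for preemption there. A hedged sketch including the step the matches skip over; the set_task_cpu() call between lines 4740 and 4743 is an assumption:

    static struct rq *move_queued_task_sketch(struct task_struct *p, int new_cpu)
    {
        struct rq *rq = task_rq(p);

        lockdep_assert_held(&rq->lock);            /* caller holds the source rq's lock */

        dequeue_task(rq, p, 0);
        set_task_cpu(p, new_cpu);                  /* assumed: repoint task_rq(p) at the new CPU */
        raw_spin_unlock(&rq->lock);

        rq = cpu_rq(new_cpu);

        raw_spin_lock(&rq->lock);
        enqueue_task(rq, p, 0);
        check_preempt_curr(rq, p, 0);              /* it may preempt whatever runs there now */

        return rq;                                 /* caller unlocks the destination rq */
    }
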
4791 struct rq *rq; in set_cpus_allowed_ptr() local
4795 rq = task_rq_lock(p, &flags); in set_cpus_allowed_ptr()
4812 if (task_running(rq, p) || p->state == TASK_WAKING) { in set_cpus_allowed_ptr()
4815 task_rq_unlock(rq, p, &flags); in set_cpus_allowed_ptr()
4816 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); in set_cpus_allowed_ptr()
4820 rq = move_queued_task(p, dest_cpu); in set_cpus_allowed_ptr()
4822 task_rq_unlock(rq, p, &flags); in set_cpus_allowed_ptr()
4841 struct rq *rq; in __migrate_task() local
4847 rq = cpu_rq(src_cpu); in __migrate_task()
4850 raw_spin_lock(&rq->lock); in __migrate_task()
4864 rq = move_queued_task(p, dest_cpu); in __migrate_task()
4868 raw_spin_unlock(&rq->lock); in __migrate_task()
4898 struct rq *rq; in sched_setnuma() local
4902 rq = task_rq_lock(p, &flags); in sched_setnuma()
4904 running = task_current(rq, p); in sched_setnuma()
4907 dequeue_task(rq, p, 0); in sched_setnuma()
4909 put_prev_task(rq, p); in sched_setnuma()
4914 p->sched_class->set_curr_task(rq); in sched_setnuma()
4916 enqueue_task(rq, p, 0); in sched_setnuma()
4917 task_rq_unlock(rq, p, &flags); in sched_setnuma()
4972 static void calc_load_migrate(struct rq *rq) in calc_load_migrate() argument
4974 long delta = calc_load_fold_active(rq); in calc_load_migrate()
4979 static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) in put_prev_task_fake() argument
5005 struct rq *rq = cpu_rq(dead_cpu); in migrate_tasks() local
5006 struct task_struct *next, *stop = rq->stop; in migrate_tasks()
5018 rq->stop = NULL; in migrate_tasks()
5025 update_rq_clock(rq); in migrate_tasks()
5032 if (rq->nr_running == 1) in migrate_tasks()
5035 next = pick_next_task(rq, &fake_task); in migrate_tasks()
5037 next->sched_class->put_prev_task(rq, next); in migrate_tasks()
5041 raw_spin_unlock(&rq->lock); in migrate_tasks()
5045 raw_spin_lock(&rq->lock); in migrate_tasks()
5048 rq->stop = stop; in migrate_tasks()
5231 static void set_rq_online(struct rq *rq) in set_rq_online() argument
5233 if (!rq->online) { in set_rq_online()
5236 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
5237 rq->online = 1; in set_rq_online()
5241 class->rq_online(rq); in set_rq_online()
5246 static void set_rq_offline(struct rq *rq) in set_rq_offline() argument
5248 if (rq->online) { in set_rq_offline()
5253 class->rq_offline(rq); in set_rq_offline()
5256 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
5257 rq->online = 0; in set_rq_offline()
5270 struct rq *rq = cpu_rq(cpu); in migration_call() local
5275 rq->calc_load_update = calc_load_update; in migration_call()
5280 raw_spin_lock_irqsave(&rq->lock, flags); in migration_call()
5281 if (rq->rd) { in migration_call()
5282 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in migration_call()
5284 set_rq_online(rq); in migration_call()
5286 raw_spin_unlock_irqrestore(&rq->lock, flags); in migration_call()
5293 raw_spin_lock_irqsave(&rq->lock, flags); in migration_call()
5294 if (rq->rd) { in migration_call()
5295 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in migration_call()
5296 set_rq_offline(rq); in migration_call()
5299 BUG_ON(rq->nr_running != 1); /* the migration thread */ in migration_call()
5300 raw_spin_unlock_irqrestore(&rq->lock, flags); in migration_call()
5304 calc_load_migrate(rq); in migration_call()
5327 struct rq *rq = cpu_rq(cpu); in set_cpu_rq_start_time() local
5328 rq->age_stamp = sched_clock_cpu(cpu); in set_cpu_rq_start_time()
5578 static void rq_attach_root(struct rq *rq, struct root_domain *rd) in rq_attach_root() argument
5583 raw_spin_lock_irqsave(&rq->lock, flags); in rq_attach_root()
5585 if (rq->rd) { in rq_attach_root()
5586 old_rd = rq->rd; in rq_attach_root()
5588 if (cpumask_test_cpu(rq->cpu, old_rd->online)) in rq_attach_root()
5589 set_rq_offline(rq); in rq_attach_root()
5591 cpumask_clear_cpu(rq->cpu, old_rd->span); in rq_attach_root()
5603 rq->rd = rd; in rq_attach_root()
5605 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
5606 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) in rq_attach_root()
5607 set_rq_online(rq); in rq_attach_root()
5609 raw_spin_unlock_irqrestore(&rq->lock, flags); in rq_attach_root()
5773 struct rq *rq = cpu_rq(cpu); in cpu_attach_domain() local
5808 rq_attach_root(rq, rd); in cpu_attach_domain()
5809 tmp = rq->sd; in cpu_attach_domain()
5810 rcu_assign_pointer(rq->sd, sd); in cpu_attach_domain()
7167 struct rq *rq; in sched_init() local
7169 rq = cpu_rq(i); in sched_init()
7170 raw_spin_lock_init(&rq->lock); in sched_init()
7171 rq->nr_running = 0; in sched_init()
7172 rq->calc_load_active = 0; in sched_init()
7173 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
7174 init_cfs_rq(&rq->cfs); in sched_init()
7175 init_rt_rq(&rq->rt); in sched_init()
7176 init_dl_rq(&rq->dl); in sched_init()
7179 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
7200 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
7203 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; in sched_init()
7205 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
7209 rq->cpu_load[j] = 0; in sched_init()
7211 rq->last_load_update_tick = jiffies; in sched_init()
7214 rq->sd = NULL; in sched_init()
7215 rq->rd = NULL; in sched_init()
7216 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; in sched_init()
7217 rq->post_schedule = 0; in sched_init()
7218 rq->active_balance = 0; in sched_init()
7219 rq->next_balance = jiffies; in sched_init()
7220 rq->push_cpu = 0; in sched_init()
7221 rq->cpu = i; in sched_init()
7222 rq->online = 0; in sched_init()
7223 rq->idle_stamp = 0; in sched_init()
7224 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
7225 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
7227 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
7229 rq_attach_root(rq, &def_root_domain); in sched_init()
7231 rq->nohz_flags = 0; in sched_init()
7234 rq->last_sched_tick = 0; in sched_init()
7237 init_rq_hrtick(rq); in sched_init()
7238 atomic_set(&rq->nr_iowait, 0); in sched_init()
7347 static void normalize_task(struct rq *rq, struct task_struct *p) in normalize_task() argument
7358 dequeue_task(rq, p, 0); in normalize_task()
7359 __setscheduler(rq, p, &attr, false); in normalize_task()
7361 enqueue_task(rq, p, 0); in normalize_task()
7362 resched_curr(rq); in normalize_task()
7365 check_class_changed(rq, p, prev_class, old_prio); in normalize_task()
7372 struct rq *rq; in normalize_rt_tasks() local
7399 rq = task_rq_lock(p, &flags); in normalize_rt_tasks()
7400 normalize_task(rq, p); in normalize_rt_tasks()
7401 task_rq_unlock(rq, p, &flags); in normalize_rt_tasks()
7545 struct rq *rq; in sched_move_task() local
7547 rq = task_rq_lock(tsk, &flags); in sched_move_task()
7549 running = task_current(rq, tsk); in sched_move_task()
7553 dequeue_task(rq, tsk, 0); in sched_move_task()
7555 put_prev_task(rq, tsk); in sched_move_task()
7575 tsk->sched_class->set_curr_task(rq); in sched_move_task()
7577 enqueue_task(rq, tsk, 0); in sched_move_task()
7579 task_rq_unlock(rq, tsk, &flags); in sched_move_task()
8131 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth() local
8133 raw_spin_lock_irq(&rq->lock); in tg_set_cfs_bandwidth()
8139 raw_spin_unlock_irq(&rq->lock); in tg_set_cfs_bandwidth()