Lines matching refs:rq — references to the runqueue pointer rq in kernel/sched/rt.c
119 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
121 return rt_rq->rq; in rq_of_rt_rq()
129 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
133 return rt_rq->rq; in rq_of_rt_se()
158 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
162 rt_rq->rq = rq; in init_tg_rt_entry()
172 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
230 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
232 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
235 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
244 struct rq *rq = rq_of_rt_se(rt_se); in rt_rq_of_se() local
246 return &rq->rt; in rt_rq_of_se()
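The hits above cover both builds of the rq accessors: with CONFIG_RT_GROUP_SCHED every rt_rq stores an explicit back-pointer (rt_rq->rq), while without group scheduling the per-CPU rt_rq is embedded in struct rq and rq_of_rt_rq() recovers the enclosing runqueue with container_of(). A minimal userspace sketch of that second variant, using simplified stand-in structs rather than the kernel's definitions:

	/* Userspace model of the container_of() accessor seen at line 230.
	 * The struct layouts are simplified stand-ins, not the kernel's. */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct rt_rq { int rt_nr_running; };

	struct rq {
		int cpu;
		struct rt_rq rt;	/* embedded per-CPU RT runqueue */
	};

	/* Without CONFIG_RT_GROUP_SCHED the rt_rq lives inside struct rq,
	 * so the enclosing runqueue can be recovered from the rt_rq alone. */
	static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
	{
		return container_of(rt_rq, struct rq, rt);
	}

	int main(void)
	{
		struct rq rq = { .cpu = 3 };
		struct rt_rq *rt_rq = &rq.rt;

		printf("recovered cpu = %d\n", rq_of_rt_rq(rt_rq)->cpu);	/* prints 3 */
		return 0;
	}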
259 static int pull_rt_task(struct rq *this_rq);
261 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) in need_pull_rt_task() argument
264 return rq->rt.highest_prio.curr > prev->prio; in need_pull_rt_task()
267 static inline int rt_overloaded(struct rq *rq) in rt_overloaded() argument
269 return atomic_read(&rq->rd->rto_count); in rt_overloaded()
272 static inline void rt_set_overload(struct rq *rq) in rt_set_overload() argument
274 if (!rq->online) in rt_set_overload()
277 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); in rt_set_overload()
288 atomic_inc(&rq->rd->rto_count); in rt_set_overload()
291 static inline void rt_clear_overload(struct rq *rq) in rt_clear_overload() argument
293 if (!rq->online) in rt_clear_overload()
297 atomic_dec(&rq->rd->rto_count); in rt_clear_overload()
298 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); in rt_clear_overload()
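rt_set_overload() and rt_clear_overload() publish "this runqueue has more than one runnable RT task" by setting or clearing the CPU's bit in the root domain's rto_mask and adjusting the atomic rto_count that rt_overloaded() reads. A hedged userspace model of that pair, with an unsigned long and a C11 atomic standing in for the kernel's cpumask and atomic_t:

	/* Simplified model of the rto_mask / rto_count pair read by
	 * rt_overloaded() at line 269.  Types here are assumptions. */
	#include <stdatomic.h>
	#include <stdio.h>

	struct root_domain {
		unsigned long rto_mask;		/* stand-in for the rto_mask cpumask */
		atomic_int rto_count;		/* how many CPUs are RT-overloaded */
	};

	static void rt_set_overload(struct root_domain *rd, int cpu)
	{
		rd->rto_mask |= 1UL << cpu;
		/* Make the mask bit visible before the count, mirroring the
		 * ordering the kernel enforces between the two updates. */
		atomic_fetch_add_explicit(&rd->rto_count, 1, memory_order_release);
	}

	static void rt_clear_overload(struct root_domain *rd, int cpu)
	{
		atomic_fetch_sub_explicit(&rd->rto_count, 1, memory_order_release);
		rd->rto_mask &= ~(1UL << cpu);
	}

	static int rt_overloaded(struct root_domain *rd)
	{
		return atomic_load(&rd->rto_count);
	}

	int main(void)
	{
		struct root_domain rd = { 0 };

		rt_set_overload(&rd, 2);
		printf("overloaded: %d, mask: %#lx\n", rt_overloaded(&rd), rd.rto_mask);
		rt_clear_overload(&rd, 2);
		printf("overloaded: %d, mask: %#lx\n", rt_overloaded(&rd), rd.rto_mask);
		return 0;
	}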
348 static inline int has_pushable_tasks(struct rq *rq) in has_pushable_tasks() argument
350 return !plist_head_empty(&rq->rt.pushable_tasks); in has_pushable_tasks()
353 static inline void set_post_schedule(struct rq *rq) in set_post_schedule() argument
359 rq->post_schedule = has_pushable_tasks(rq); in set_post_schedule()
362 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
364 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
366 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
369 if (p->prio < rq->rt.highest_prio.next) in enqueue_pushable_task()
370 rq->rt.highest_prio.next = p->prio; in enqueue_pushable_task()
373 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
375 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in dequeue_pushable_task()
378 if (has_pushable_tasks(rq)) { in dequeue_pushable_task()
379 p = plist_first_entry(&rq->rt.pushable_tasks, in dequeue_pushable_task()
381 rq->rt.highest_prio.next = p->prio; in dequeue_pushable_task()
383 rq->rt.highest_prio.next = MAX_RT_PRIO; in dequeue_pushable_task()
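enqueue_pushable_task() and dequeue_pushable_task() keep rq->rt.pushable_tasks as a priority-ordered plist and cache the best candidate in highest_prio.next, falling back to MAX_RT_PRIO once the list drains. A rough userspace sketch of that bookkeeping; the sorted singly linked list and the struct names are simplified assumptions, not the kernel's plist API:

	/* Simplified model of the pushable-task bookkeeping at lines 362-383:
	 * a priority-sorted list plus a cached "next best" priority. */
	#include <stdio.h>

	#define MAX_RT_PRIO 100

	struct task { int prio; struct task *next; };

	struct rt_rq {
		struct task *pushable_tasks;	/* sorted, lowest prio value first */
		int highest_prio_next;		/* prio of the best pushable task */
	};

	static void enqueue_pushable_task(struct rt_rq *rt, struct task *p)
	{
		struct task **pp = &rt->pushable_tasks;

		while (*pp && (*pp)->prio <= p->prio)	/* keep the list sorted */
			pp = &(*pp)->next;
		p->next = *pp;
		*pp = p;

		if (p->prio < rt->highest_prio_next)
			rt->highest_prio_next = p->prio;
	}

	static void dequeue_pushable_task(struct rt_rq *rt, struct task *p)
	{
		struct task **pp = &rt->pushable_tasks;

		while (*pp && *pp != p)
			pp = &(*pp)->next;
		if (*pp)
			*pp = p->next;

		/* Recompute the cache from the new head, as lines 378-383 do. */
		rt->highest_prio_next = rt->pushable_tasks ?
				rt->pushable_tasks->prio : MAX_RT_PRIO;
	}

	int main(void)
	{
		struct rt_rq rt = { .pushable_tasks = NULL, .highest_prio_next = MAX_RT_PRIO };
		struct task a = { .prio = 10 }, b = { .prio = 5 };

		enqueue_pushable_task(&rt, &a);
		enqueue_pushable_task(&rt, &b);
		printf("next pushable prio: %d\n", rt.highest_prio_next);	/* 5 */
		dequeue_pushable_task(&rt, &b);
		printf("next pushable prio: %d\n", rt.highest_prio_next);	/* 10 */
		return 0;
	}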
388 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
392 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
406 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) in need_pull_rt_task() argument
411 static inline int pull_rt_task(struct rq *this_rq) in pull_rt_task()
416 static inline void set_post_schedule(struct rq *rq) in set_post_schedule() argument
459 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
462 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
478 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue() local
481 int cpu = cpu_of(rq); in sched_rt_rq_enqueue()
492 resched_curr(rq); in sched_rt_rq_enqueue()
563 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
564 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
576 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue() local
582 resched_curr(rq); in sched_rt_rq_enqueue()
680 static void __disable_runtime(struct rq *rq) in __disable_runtime() argument
682 struct root_domain *rd = rq->rd; in __disable_runtime()
689 for_each_rt_rq(rt_rq, iter, rq) { in __disable_runtime()
762 static void __enable_runtime(struct rq *rq) in __enable_runtime() argument
773 for_each_rt_rq(rt_rq, iter, rq) { in __enable_runtime()
830 struct rq *rq = rq_of_rt_rq(rt_rq); in do_sched_rt_period_timer() local
832 raw_spin_lock(&rq->lock); in do_sched_rt_period_timer()
852 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
853 rq_clock_skip_update(rq, false); in do_sched_rt_period_timer()
868 raw_spin_unlock(&rq->lock); in do_sched_rt_period_timer()
936 static void update_curr_rt(struct rq *rq) in update_curr_rt() argument
938 struct task_struct *curr = rq->curr; in update_curr_rt()
945 delta_exec = rq_clock_task(rq) - curr->se.exec_start; in update_curr_rt()
955 curr->se.exec_start = rq_clock_task(rq); in update_curr_rt()
958 sched_rt_avg_update(rq, delta_exec); in update_curr_rt()
970 resched_curr(rq); in update_curr_rt()
979 struct rq *rq = rq_of_rt_rq(rt_rq); in dequeue_top_rt_rq() local
981 BUG_ON(&rq->rt != rt_rq); in dequeue_top_rt_rq()
986 BUG_ON(!rq->nr_running); in dequeue_top_rt_rq()
988 sub_nr_running(rq, rt_rq->rt_nr_running); in dequeue_top_rt_rq()
995 struct rq *rq = rq_of_rt_rq(rt_rq); in enqueue_top_rt_rq() local
997 BUG_ON(&rq->rt != rt_rq); in enqueue_top_rt_rq()
1004 add_nr_running(rq, rt_rq->rt_nr_running); in enqueue_top_rt_rq()
1013 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_prio_smp() local
1019 if (&rq->rt != rt_rq) in inc_rt_prio_smp()
1022 if (rq->online && prio < prev_prio) in inc_rt_prio_smp()
1023 cpupri_set(&rq->rd->cpupri, rq->cpu, prio); in inc_rt_prio_smp()
1029 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_prio_smp() local
1035 if (&rq->rt != rt_rq) in dec_rt_prio_smp()
1038 if (rq->online && rt_rq->highest_prio.curr != prev_prio) in dec_rt_prio_smp()
1039 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); in dec_rt_prio_smp()
1226 struct rq *rq = rq_of_rt_se(rt_se); in enqueue_rt_entity() local
1231 enqueue_top_rt_rq(&rq->rt); in enqueue_rt_entity()
1236 struct rq *rq = rq_of_rt_se(rt_se); in dequeue_rt_entity() local
1246 enqueue_top_rt_rq(&rq->rt); in dequeue_rt_entity()
1253 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_rt() argument
1262 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_rt()
1263 enqueue_pushable_task(rq, p); in enqueue_task_rt()
1266 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_rt() argument
1270 update_curr_rt(rq); in dequeue_task_rt()
1273 dequeue_pushable_task(rq, p); in dequeue_task_rt()
1294 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) in requeue_task_rt() argument
1305 static void yield_task_rt(struct rq *rq) in yield_task_rt() argument
1307 requeue_task_rt(rq, rq->curr, 0); in yield_task_rt()
1317 struct rq *rq; in select_task_rq_rt() local
1323 rq = cpu_rq(cpu); in select_task_rq_rt()
1326 curr = ACCESS_ONCE(rq->curr); /* unlocked access */ in select_task_rq_rt()
1369 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) in check_preempt_equal_prio() argument
1375 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_prio()
1376 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) in check_preempt_equal_prio()
1384 && cpupri_find(&rq->rd->cpupri, p, NULL)) in check_preempt_equal_prio()
1392 requeue_task_rt(rq, p, 1); in check_preempt_equal_prio()
1393 resched_curr(rq); in check_preempt_equal_prio()
1401 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_rt() argument
1403 if (p->prio < rq->curr->prio) { in check_preempt_curr_rt()
1404 resched_curr(rq); in check_preempt_curr_rt()
1421 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) in check_preempt_curr_rt()
1422 check_preempt_equal_prio(rq, p); in check_preempt_curr_rt()
1426 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, in pick_next_rt_entity() argument
1443 static struct task_struct *_pick_next_task_rt(struct rq *rq) in _pick_next_task_rt() argument
1447 struct rt_rq *rt_rq = &rq->rt; in _pick_next_task_rt()
1450 rt_se = pick_next_rt_entity(rq, rt_rq); in _pick_next_task_rt()
1456 p->se.exec_start = rq_clock_task(rq); in _pick_next_task_rt()
1462 pick_next_task_rt(struct rq *rq, struct task_struct *prev) in pick_next_task_rt() argument
1465 struct rt_rq *rt_rq = &rq->rt; in pick_next_task_rt()
1467 if (need_pull_rt_task(rq, prev)) { in pick_next_task_rt()
1468 pull_rt_task(rq); in pick_next_task_rt()
1474 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) || in pick_next_task_rt()
1475 rq->dl.dl_nr_running)) in pick_next_task_rt()
1484 update_curr_rt(rq); in pick_next_task_rt()
1489 put_prev_task(rq, prev); in pick_next_task_rt()
1491 p = _pick_next_task_rt(rq); in pick_next_task_rt()
1494 dequeue_pushable_task(rq, p); in pick_next_task_rt()
1496 set_post_schedule(rq); in pick_next_task_rt()
1501 static void put_prev_task_rt(struct rq *rq, struct task_struct *p) in put_prev_task_rt() argument
1503 update_curr_rt(rq); in put_prev_task_rt()
1510 enqueue_pushable_task(rq, p); in put_prev_task_rt()
1518 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) in pick_rt_task() argument
1520 if (!task_running(rq, p) && in pick_rt_task()
1530 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu) in pick_highest_pushable_task() argument
1532 struct plist_head *head = &rq->rt.pushable_tasks; in pick_highest_pushable_task()
1535 if (!has_pushable_tasks(rq)) in pick_highest_pushable_task()
1539 if (pick_rt_task(rq, p, cpu)) in pick_highest_pushable_task()
1623 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) in find_lock_lowest_rq() argument
1625 struct rq *lowest_rq = NULL; in find_lock_lowest_rq()
1632 if ((cpu == -1) || (cpu == rq->cpu)) in find_lock_lowest_rq()
1648 if (double_lock_balance(rq, lowest_rq)) { in find_lock_lowest_rq()
1655 if (unlikely(task_rq(task) != rq || in find_lock_lowest_rq()
1658 task_running(rq, task) || in find_lock_lowest_rq()
1661 double_unlock_balance(rq, lowest_rq); in find_lock_lowest_rq()
1672 double_unlock_balance(rq, lowest_rq); in find_lock_lowest_rq()
1679 static struct task_struct *pick_next_pushable_task(struct rq *rq) in pick_next_pushable_task() argument
1683 if (!has_pushable_tasks(rq)) in pick_next_pushable_task()
1686 p = plist_first_entry(&rq->rt.pushable_tasks, in pick_next_pushable_task()
1689 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_task()
1690 BUG_ON(task_current(rq, p)); in pick_next_pushable_task()
1704 static int push_rt_task(struct rq *rq) in push_rt_task() argument
1707 struct rq *lowest_rq; in push_rt_task()
1710 if (!rq->rt.overloaded) in push_rt_task()
1713 next_task = pick_next_pushable_task(rq); in push_rt_task()
1718 if (unlikely(next_task == rq->curr)) { in push_rt_task()
1728 if (unlikely(next_task->prio < rq->curr->prio)) { in push_rt_task()
1729 resched_curr(rq); in push_rt_task()
1737 lowest_rq = find_lock_lowest_rq(next_task, rq); in push_rt_task()
1748 task = pick_next_pushable_task(rq); in push_rt_task()
1749 if (task_cpu(next_task) == rq->cpu && task == next_task) { in push_rt_task()
1771 deactivate_task(rq, next_task, 0); in push_rt_task()
1778 double_unlock_balance(rq, lowest_rq); in push_rt_task()
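find_lock_lowest_rq() and push_rt_task() need both runqueue locks; double_lock_balance() may drop the local rq->lock in order to take the two locks in a safe order, which is why the callers revalidate that the task is still on this runqueue and still pushable before migrating it. A userspace sketch of that deadlock-avoiding double-lock idiom with pthread mutexes (the revalidation is represented only by the return value; build with -pthread):

	/* Sketch of the "lock two runqueues without deadlocking" idiom behind
	 * double_lock_balance() at lines 1648-1672.  Not the kernel helper. */
	#include <pthread.h>
	#include <stdio.h>

	struct rq {
		pthread_mutex_t lock;
		int cpu;
	};

	/* Returns 1 if this_rq->lock had to be released (caller must recheck
	 * its state afterwards), 0 if the second lock was taken opportunistically. */
	static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
	{
		if (pthread_mutex_trylock(&busiest->lock) == 0)
			return 0;

		/* Couldn't get it cheaply: drop our lock and retake both in a
		 * global (address) order so two CPUs can't deadlock AB-BA. */
		pthread_mutex_unlock(&this_rq->lock);
		if (busiest < this_rq) {
			pthread_mutex_lock(&busiest->lock);
			pthread_mutex_lock(&this_rq->lock);
		} else {
			pthread_mutex_lock(&this_rq->lock);
			pthread_mutex_lock(&busiest->lock);
		}
		return 1;
	}

	int main(void)
	{
		struct rq a = { PTHREAD_MUTEX_INITIALIZER, 0 };
		struct rq b = { PTHREAD_MUTEX_INITIALIZER, 1 };

		pthread_mutex_lock(&a.lock);
		printf("dropped local lock: %d\n", double_lock_balance(&a, &b));
		pthread_mutex_unlock(&b.lock);
		pthread_mutex_unlock(&a.lock);
		return 0;
	}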
1786 static void push_rt_tasks(struct rq *rq) in push_rt_tasks() argument
1789 while (push_rt_task(rq)) in push_rt_tasks()
1803 static int rto_next_cpu(struct rq *rq) in rto_next_cpu() argument
1805 int prev_cpu = rq->rt.push_cpu; in rto_next_cpu()
1808 cpu = cpumask_next(prev_cpu, rq->rd->rto_mask); in rto_next_cpu()
1815 if (prev_cpu < rq->cpu) { in rto_next_cpu()
1816 if (cpu >= rq->cpu) in rto_next_cpu()
1825 cpu = cpumask_first(rq->rd->rto_mask); in rto_next_cpu()
1826 if (cpu >= rq->cpu) in rto_next_cpu()
1829 rq->rt.push_cpu = cpu; in rto_next_cpu()
1835 static int find_next_push_cpu(struct rq *rq) in find_next_push_cpu() argument
1837 struct rq *next_rq; in find_next_push_cpu()
1841 cpu = rto_next_cpu(rq); in find_next_push_cpu()
1847 if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr) in find_next_push_cpu()
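rto_next_cpu() walks rd->rto_mask circularly, starting just past the local CPU and stopping once the walk wraps back around to it; find_next_push_cpu() then skips CPUs whose best pushable task cannot beat the local highest priority. A small userspace model of the circular walk, assuming a plain bitmask in place of the kernel's cpumask helpers:

	/* Userspace model of the circular rto_mask walk in rto_next_cpu()
	 * (lines 1803-1829).  An unsigned long stands in for the cpumask. */
	#include <stdio.h>

	#define NR_CPUS 8

	static int mask_next(int prev, unsigned long mask)
	{
		for (int cpu = prev + 1; cpu < NR_CPUS; cpu++)
			if (mask & (1UL << cpu))
				return cpu;
		return NR_CPUS;		/* no further bit set */
	}

	/* Return the next overloaded CPU to IPI, or -1 once the walk has gone
	 * all the way around and come back to (or past) this_cpu. */
	static int rto_next_cpu(int this_cpu, int prev_cpu, unsigned long rto_mask)
	{
		int cpu = mask_next(prev_cpu, rto_mask);

		if (prev_cpu < this_cpu) {		/* already wrapped once */
			if (cpu >= this_cpu)
				return -1;
			return cpu;
		}

		if (cpu >= NR_CPUS) {			/* wrap to the start of the mask */
			cpu = mask_next(-1, rto_mask);
			if (cpu >= this_cpu || cpu >= NR_CPUS)
				return -1;
		}
		return cpu;
	}

	int main(void)
	{
		unsigned long rto_mask = (1UL << 1) | (1UL << 3) | (1UL << 6);
		int cpu = 4;				/* start the walk at this CPU */

		while ((cpu = rto_next_cpu(4, cpu, rto_mask)) >= 0)
			printf("push IPI -> CPU %d\n", cpu);	/* 6, then 1, then 3 */
		return 0;
	}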
1857 static void tell_cpu_to_push(struct rq *rq) in tell_cpu_to_push() argument
1861 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) { in tell_cpu_to_push()
1862 raw_spin_lock(&rq->rt.push_lock); in tell_cpu_to_push()
1864 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) { in tell_cpu_to_push()
1869 rq->rt.push_flags |= RT_PUSH_IPI_RESTART; in tell_cpu_to_push()
1870 raw_spin_unlock(&rq->rt.push_lock); in tell_cpu_to_push()
1873 raw_spin_unlock(&rq->rt.push_lock); in tell_cpu_to_push()
1878 rq->rt.push_cpu = rq->cpu; in tell_cpu_to_push()
1879 cpu = find_next_push_cpu(rq); in tell_cpu_to_push()
1883 rq->rt.push_flags = RT_PUSH_IPI_EXECUTING; in tell_cpu_to_push()
1885 irq_work_queue_on(&rq->rt.push_work, cpu); in tell_cpu_to_push()
1892 struct rq *rq, *src_rq; in try_to_push_tasks() local
1901 rq = cpu_rq(this_cpu); in try_to_push_tasks()
1905 if (has_pushable_tasks(rq)) { in try_to_push_tasks()
1906 raw_spin_lock(&rq->lock); in try_to_push_tasks()
1907 push_rt_task(rq); in try_to_push_tasks()
1908 raw_spin_unlock(&rq->lock); in try_to_push_tasks()
1936 if (unlikely(cpu == rq->cpu)) in try_to_push_tasks()
1951 static int pull_rt_task(struct rq *this_rq) in pull_rt_task()
1955 struct rq *src_rq; in pull_rt_task()
2041 static void post_schedule_rt(struct rq *rq) in post_schedule_rt() argument
2043 push_rt_tasks(rq); in post_schedule_rt()
2050 static void task_woken_rt(struct rq *rq, struct task_struct *p) in task_woken_rt() argument
2052 if (!task_running(rq, p) && in task_woken_rt()
2053 !test_tsk_need_resched(rq->curr) && in task_woken_rt()
2054 has_pushable_tasks(rq) && in task_woken_rt()
2056 (dl_task(rq->curr) || rt_task(rq->curr)) && in task_woken_rt()
2057 (rq->curr->nr_cpus_allowed < 2 || in task_woken_rt()
2058 rq->curr->prio <= p->prio)) in task_woken_rt()
2059 push_rt_tasks(rq); in task_woken_rt()
2065 struct rq *rq; in set_cpus_allowed_rt() local
2082 rq = task_rq(p); in set_cpus_allowed_rt()
2088 if (!task_current(rq, p)) in set_cpus_allowed_rt()
2089 dequeue_pushable_task(rq, p); in set_cpus_allowed_rt()
2090 BUG_ON(!rq->rt.rt_nr_migratory); in set_cpus_allowed_rt()
2091 rq->rt.rt_nr_migratory--; in set_cpus_allowed_rt()
2093 if (!task_current(rq, p)) in set_cpus_allowed_rt()
2094 enqueue_pushable_task(rq, p); in set_cpus_allowed_rt()
2095 rq->rt.rt_nr_migratory++; in set_cpus_allowed_rt()
2098 update_rt_migration(&rq->rt); in set_cpus_allowed_rt()
2102 static void rq_online_rt(struct rq *rq) in rq_online_rt() argument
2104 if (rq->rt.overloaded) in rq_online_rt()
2105 rt_set_overload(rq); in rq_online_rt()
2107 __enable_runtime(rq); in rq_online_rt()
2109 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); in rq_online_rt()
2113 static void rq_offline_rt(struct rq *rq) in rq_offline_rt() argument
2115 if (rq->rt.overloaded) in rq_offline_rt()
2116 rt_clear_overload(rq); in rq_offline_rt()
2118 __disable_runtime(rq); in rq_offline_rt()
2120 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); in rq_offline_rt()
2127 static void switched_from_rt(struct rq *rq, struct task_struct *p) in switched_from_rt() argument
2136 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) in switched_from_rt()
2139 if (pull_rt_task(rq)) in switched_from_rt()
2140 resched_curr(rq); in switched_from_rt()
2159 static void switched_to_rt(struct rq *rq, struct task_struct *p) in switched_to_rt() argument
2170 if (task_on_rq_queued(p) && rq->curr != p) { in switched_to_rt()
2172 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded && in switched_to_rt()
2174 push_rt_task(rq) && rq != task_rq(p)) in switched_to_rt()
2177 if (check_resched && p->prio < rq->curr->prio) in switched_to_rt()
2178 resched_curr(rq); in switched_to_rt()
2187 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_rt() argument
2192 if (rq->curr == p) { in prio_changed_rt()
2199 pull_rt_task(rq); in prio_changed_rt()
2206 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p) in prio_changed_rt()
2207 resched_curr(rq); in prio_changed_rt()
2211 resched_curr(rq); in prio_changed_rt()
2219 if (p->prio < rq->curr->prio) in prio_changed_rt()
2220 resched_curr(rq); in prio_changed_rt()
2224 static void watchdog(struct rq *rq, struct task_struct *p) in watchdog() argument
2246 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) in task_tick_rt() argument
2250 update_curr_rt(rq); in task_tick_rt()
2252 watchdog(rq, p); in task_tick_rt()
2272 requeue_task_rt(rq, p, 0); in task_tick_rt()
2273 resched_curr(rq); in task_tick_rt()
2279 static void set_curr_task_rt(struct rq *rq) in set_curr_task_rt() argument
2281 struct task_struct *p = rq->curr; in set_curr_task_rt()
2283 p->se.exec_start = rq_clock_task(rq); in set_curr_task_rt()
2286 dequeue_pushable_task(rq, p); in set_curr_task_rt()
2289 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) in get_rr_interval_rt() argument