Lines matching refs:curr in kernel/sched/deadline.c
78 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; in init_dl_rq()
616 if (dl_task(rq->curr)) in dl_task_timer()
656 struct task_struct *curr = rq->curr; in update_curr_dl() local
657 struct sched_dl_entity *dl_se = &curr->dl; in update_curr_dl()
660 if (!dl_task(curr) || !on_dl_rq(dl_se)) in update_curr_dl()
671 delta_exec = rq_clock_task(rq) - curr->se.exec_start; in update_curr_dl()
675 schedstat_set(curr->se.statistics.exec_max, in update_curr_dl()
676 max(curr->se.statistics.exec_max, delta_exec)); in update_curr_dl()
678 curr->se.sum_exec_runtime += delta_exec; in update_curr_dl()
679 account_group_exec_runtime(curr, delta_exec); in update_curr_dl()
681 curr->se.exec_start = rq_clock_task(rq); in update_curr_dl()
682 cpuacct_charge(curr, delta_exec); in update_curr_dl()
689 __dequeue_task_dl(rq, curr, 0); in update_curr_dl()
690 if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted))) in update_curr_dl()
691 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); in update_curr_dl()
693 if (!is_leftmost(curr, &rq->dl)) in update_curr_dl()
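
The update_curr_dl() hits above are the runtime-accounting path: the time the running deadline task has consumed since curr->se.exec_start becomes delta_exec, is folded into the scheduler statistics and cpuacct charging, and is subtracted from the entity's runtime budget; once the budget is exhausted the task is dequeued and either throttled until its deadline timer fires or replenished on the spot (the __dequeue_task_dl/start_dl_timer/enqueue_task_dl lines). A minimal user-space sketch of that bookkeeping, using invented toy_* names rather than the kernel's types, could look like this:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified model of the accounting done in update_curr_dl():
 * charge the time elapsed since exec_start against the entity's remaining
 * runtime budget and report when the budget runs out. */
struct toy_dl_entity {
	uint64_t exec_start;		/* clock value when the task last started running */
	int64_t  runtime;		/* remaining budget; may go negative, like dl_se->runtime */
	uint64_t sum_exec_runtime;
};

/* Returns nonzero when the budget is exhausted, i.e. when the real code
 * would dequeue the task and throttle or replenish it. */
static int toy_update_curr_dl(struct toy_dl_entity *dl_se, uint64_t now)
{
	uint64_t delta_exec = now - dl_se->exec_start;

	dl_se->sum_exec_runtime += delta_exec;
	dl_se->exec_start = now;	/* like curr->se.exec_start = rq_clock_task(rq) */
	dl_se->runtime -= delta_exec;

	return dl_se->runtime <= 0;
}

int main(void)
{
	struct toy_dl_entity e = { .exec_start = 1000, .runtime = 300 };

	printf("throttle=%d runtime=%lld\n", toy_update_curr_dl(&e, 1200), (long long)e.runtime);
	printf("throttle=%d runtime=%lld\n", toy_update_curr_dl(&e, 1450), (long long)e.runtime);
	return 0;
}
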
741 if (dl_rq->earliest_dl.curr == 0 || in inc_dl_deadline()
742 dl_time_before(deadline, dl_rq->earliest_dl.curr)) { in inc_dl_deadline()
749 dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr; in inc_dl_deadline()
750 dl_rq->earliest_dl.curr = deadline; in inc_dl_deadline()
773 dl_rq->earliest_dl.curr = 0; in dec_dl_deadline()
781 dl_rq->earliest_dl.curr = entry->deadline; in dec_dl_deadline()
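
inc_dl_deadline() and dec_dl_deadline() maintain the per-runqueue cache of the earliest queued deadline (earliest_dl.curr) plus a next-earliest hint (earliest_dl.next) that the balancing code compares across CPUs, with 0 meaning no deadline task is queued. Assuming dl_time_before() is the usual wrap-safe signed comparison of 64-bit clock values, the enqueue-side update can be sketched as below; the toy_* names and the else branch are simplifications of mine, not the kernel code.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Wrap-safe "a is earlier than b" on 64-bit clock values, in the style of
 * dl_time_before(). */
static bool toy_dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

/* Hypothetical mirror of the earliest_dl cache kept per runqueue. */
struct toy_earliest_dl {
	uint64_t curr;	/* earliest queued deadline, 0 if none */
	uint64_t next;	/* next-earliest hint */
};

/* Roughly what the inc_dl_deadline() hits above do on enqueue: a deadline
 * that beats the cached earliest becomes the new earliest, demoting the old
 * one to the 'next' slot.  The else branch is a simplification; the kernel
 * maintains the next-earliest value differently. */
static void toy_inc_dl_deadline(struct toy_earliest_dl *e, uint64_t deadline)
{
	if (e->curr == 0 || toy_dl_time_before(deadline, e->curr)) {
		e->next = e->curr;
		e->curr = deadline;
	} else if (e->next == 0 || toy_dl_time_before(deadline, e->next)) {
		e->next = deadline;
	}
}

int main(void)
{
	struct toy_earliest_dl e = { 0, 0 };

	toy_inc_dl_deadline(&e, 500);
	toy_inc_dl_deadline(&e, 200);
	printf("curr=%llu next=%llu\n",
	       (unsigned long long)e.curr, (unsigned long long)e.next);
	return 0;
}
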
960 struct task_struct *p = rq->curr; in yield_task_dl()
969 rq->curr->dl.dl_yielded = 1; in yield_task_dl()
989 struct task_struct *curr; in select_task_rq_dl() local
998 curr = ACCESS_ONCE(rq->curr); /* unlocked access */ in select_task_rq_dl()
1009 if (unlikely(dl_task(curr)) && in select_task_rq_dl()
1010 (curr->nr_cpus_allowed < 2 || in select_task_rq_dl()
1011 !dl_entity_preempt(&p->dl, &curr->dl)) && in select_task_rq_dl()
1030 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_dl()
1031 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1) in check_preempt_equal_dl()
1056 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { in check_preempt_curr_dl()
1066 if ((p->dl.deadline == rq->curr->dl.deadline) && in check_preempt_curr_dl()
1067 !test_tsk_need_resched(rq->curr)) in check_preempt_curr_dl()
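
check_preempt_curr_dl() is the core EDF preemption decision: the waking task forces a resched only when its absolute deadline is strictly earlier than rq->curr's, while an exact tie falls through to check_preempt_equal_dl(), which tries to migrate one of the two tasks instead. Assuming dl_entity_preempt() reduces to an earlier-absolute-deadline test, the decision can be modelled as follows (toy_* names are illustrative):

#include <stdint.h>
#include <stdio.h>

enum toy_preempt_decision { TOY_NO_RESCHED, TOY_RESCHED, TOY_TRY_EQUAL_DL };

/* Illustrative decision tree for check_preempt_curr_dl(), assuming
 * dl_entity_preempt() boils down to an earlier-deadline comparison. */
static enum toy_preempt_decision
toy_check_preempt(uint64_t waking_deadline, uint64_t curr_deadline)
{
	if ((int64_t)(waking_deadline - curr_deadline) < 0)
		return TOY_RESCHED;		/* EDF: strictly earlier deadline wins */
	if (waking_deadline == curr_deadline)
		return TOY_TRY_EQUAL_DL;	/* tie: see if one of the two can move away */
	return TOY_NO_RESCHED;
}

int main(void)
{
	printf("%d %d %d\n",
	       toy_check_preempt(100, 200),	/* 1: resched rq->curr */
	       toy_check_preempt(200, 200),	/* 2: equal-deadline path */
	       toy_check_preempt(300, 200));	/* 0: leave curr running */
	return 0;
}
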
1190 struct task_struct *p = rq->curr; in set_curr_task_dl()
1356 later_rq->dl.earliest_dl.curr)) in find_lock_later_rq()
1406 if (unlikely(next_task == rq->curr)) { in push_dl_task()
1416 if (dl_task(rq->curr) && in push_dl_task()
1417 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && in push_dl_task()
1418 rq->curr->nr_cpus_allowed > 1) { in push_dl_task()
1503 dl_time_before(this_rq->dl.earliest_dl.curr, in pull_dl_task()
1527 this_rq->dl.earliest_dl.curr))) { in pull_dl_task()
1528 WARN_ON(p == src_rq->curr); in pull_dl_task()
1536 src_rq->curr->dl.deadline)) in pull_dl_task()
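
The find_lock_later_rq()/push_dl_task()/pull_dl_task() hits are the balancing side: an overloaded runqueue pushes a pushable task toward a CPU whose cached earliest_dl.curr is later, and pull_dl_task() only bothers importing a task whose deadline beats this runqueue's own earliest, never the task that src_rq is currently running (hence the WARN_ON at 1528). A small sketch of that "is it worth pulling" check, using the same wrap-safe comparison as before and invented names; the kernel applies further conditions not shown here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool toy_dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

/* Illustrative core of the pull decision: importing 'candidate' only pays
 * off if it is earlier than everything the local runqueue already has
 * queued (a local earliest of 0 meaning no deadline tasks at all). */
static bool toy_worth_pulling(uint64_t candidate_deadline,
			      uint64_t local_earliest,
			      bool candidate_is_src_curr)
{
	if (candidate_is_src_curr)	/* never steal the task the source CPU is running */
		return false;
	return local_earliest == 0 ||
	       toy_dl_time_before(candidate_deadline, local_earliest);
}

int main(void)
{
	printf("%d\n", toy_worth_pulling(100, 200, false));	/* 1: pull it */
	printf("%d\n", toy_worth_pulling(300, 200, false));	/* 0: not worth it */
	return 0;
}
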
1567 !test_tsk_need_resched(rq->curr) && in task_woken_dl()
1570 dl_task(rq->curr) && in task_woken_dl()
1571 (rq->curr->nr_cpus_allowed < 2 || in task_woken_dl()
1572 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { in task_woken_dl()
1649 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1); in rq_online_dl()
1725 if (task_on_rq_queued(p) && rq->curr != p) { in switched_to_dl()
1733 if (dl_task(rq->curr)) in switched_to_dl()
1748 if (task_on_rq_queued(p) || rq->curr == p) { in prio_changed_dl()
1764 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) && in prio_changed_dl()
1765 rq->curr == p) in prio_changed_dl()