Lines Matching refs:curr

466 if (cfs_rq->curr) in update_min_vruntime()
467 vruntime = cfs_rq->curr->vruntime; in update_min_vruntime()
474 if (!cfs_rq->curr) in update_min_vruntime()
697 struct sched_entity *curr = cfs_rq->curr; in update_curr() local
701 if (unlikely(!curr)) in update_curr()
704 delta_exec = now - curr->exec_start; in update_curr()
708 curr->exec_start = now; in update_curr()
710 schedstat_set(curr->statistics.exec_max, in update_curr()
711 max(delta_exec, curr->statistics.exec_max)); in update_curr()
713 curr->sum_exec_runtime += delta_exec; in update_curr()
716 curr->vruntime += calc_delta_fair(delta_exec, curr); in update_curr()
719 if (entity_is_task(curr)) { in update_curr()
720 struct task_struct *curtask = task_of(curr); in update_curr()
722 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); in update_curr()
732 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
750 if (se != cfs_rq->curr) in update_stats_enqueue()
778 if (se != cfs_rq->curr) in update_stats_dequeue()
1274 cur = dst_rq->curr; in task_numa_compare()
1941 tsk = ACCESS_ONCE(cpu_rq(cpu)->curr); in task_numa_group()
2244 void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
2246 struct callback_head *work = &curr->numa_work; in task_tick_numa()
2252 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) in task_tick_numa()
2261 now = curr->se.sum_exec_runtime; in task_tick_numa()
2262 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; in task_tick_numa()
2264 if (now - curr->node_stamp > period) { in task_tick_numa()
2265 if (!curr->node_stamp) in task_tick_numa()
2266 curr->numa_scan_period = task_scan_min(curr); in task_tick_numa()
2267 curr->node_stamp += period; in task_tick_numa()
2269 if (!time_before(jiffies, curr->mm->numa_next_scan)) { in task_tick_numa()
2271 task_work_add(curr, work, true); in task_tick_numa()
2276 static void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
2366 if (cfs_rq->curr == se) in reweight_entity()
2797 cfs_rq->curr == se)) in update_entity_load_avg()
3089 if (se != cfs_rq->curr) in enqueue_entity()
3171 if (se != cfs_rq->curr) in dequeue_entity()
3195 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) in check_preempt_tick() argument
3201 ideal_runtime = sched_slice(cfs_rq, curr); in check_preempt_tick()
3202 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; in check_preempt_tick()
3209 clear_buddies(cfs_rq, curr); in check_preempt_tick()
3222 delta = curr->vruntime - se->vruntime; in check_preempt_tick()
3247 cfs_rq->curr = se; in set_next_entity()
3263 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3273 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) in pick_next_entity() argument
3282 if (!left || (curr && entity_before(curr, left))) in pick_next_entity()
3283 left = curr; in pick_next_entity()
3294 if (se == curr) { in pick_next_entity()
3298 if (!second || (curr && entity_before(curr, second))) in pick_next_entity()
3299 second = curr; in pick_next_entity()
3345 cfs_rq->curr = NULL; in put_prev_entity()
3349 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick() argument
3359 update_entity_load_avg(curr, 1); in entity_tick()
3381 check_preempt_tick(cfs_rq, curr); in entity_tick()
3559 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) in __account_cfs_rq_runtime()
3719 if (rq->curr == rq->idle && rq->cfs.nr_running) in unthrottle_cfs_rq()
3965 if (!cfs_rq->runtime_enabled || cfs_rq->curr) in check_enqueue_throttle()
4182 if (rq->curr == p) in hrtick_start_fair()
4197 struct task_struct *curr = rq->curr; in hrtick_update() local
4199 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) in hrtick_update()
4202 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) in hrtick_update()
4203 hrtick_start_fair(rq, curr); in hrtick_update()
4937 wakeup_gran(struct sched_entity *curr, struct sched_entity *se) in wakeup_gran() argument
4972 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) in wakeup_preempt_entity() argument
4974 s64 gran, vdiff = curr->vruntime - se->vruntime; in wakeup_preempt_entity()
4979 gran = wakeup_gran(curr, se); in wakeup_preempt_entity()
5015 struct task_struct *curr = rq->curr; in check_preempt_wakeup() local
5016 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup()
5017 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in check_preempt_wakeup()
5048 if (test_tsk_need_resched(curr)) in check_preempt_wakeup()
5052 if (unlikely(curr->policy == SCHED_IDLE) && in check_preempt_wakeup()
5089 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
5121 struct sched_entity *curr = cfs_rq->curr; in pick_next_task_fair() local
5129 if (curr) { in pick_next_task_fair()
5130 if (curr->on_rq) in pick_next_task_fair()
5133 curr = NULL; in pick_next_task_fair()
5145 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
5240 struct task_struct *curr = rq->curr; in yield_task_fair() local
5241 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in yield_task_fair()
5242 struct sched_entity *se = &curr->se; in yield_task_fair()
5252 if (curr->policy != SCHED_BATCH) { in yield_task_fair()
7090 tsk_cpus_allowed(busiest->curr))) { in load_balance()
7814 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) in task_tick_fair() argument
7817 struct sched_entity *se = &curr->se; in task_tick_fair()
7825 task_tick_numa(rq, curr); in task_tick_fair()
7838 struct sched_entity *se = &p->se, *curr; in task_fork_fair() local
7848 curr = cfs_rq->curr; in task_fork_fair()
7862 if (curr) in task_fork_fair()
7863 se->vruntime = curr->vruntime; in task_fork_fair()
7866 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { in task_fork_fair()
7871 swap(curr->vruntime, se->vruntime); in task_fork_fair()
7895 if (rq->curr == p) { in prio_changed_fair()
7959 if (rq->curr == p) in switched_to_fair()
7972 struct sched_entity *se = &rq->curr->se; in set_curr_task_fair()
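
The pattern that recurs throughout these matches — test cfs_rq->curr for NULL, charge the elapsed delta_exec to the running entity, then advance its weighted vruntime — shows up most clearly in the update_curr() lines above. Below is a minimal, self-contained userspace sketch of that pattern for illustration only: the simplified struct layouts, the 1024 nice-0 weight constant, the min_vruntime handling, and the main() driver are stand-ins, not the kernel's actual calc_delta_fair()/update_min_vruntime() implementation.

/* Userspace model of the accounting pattern visible in the update_curr()
 * matches above: check cfs_rq->curr, charge delta_exec, advance vruntime
 * by a weight-scaled amount. Simplified; not kernel code. */
#include <stdio.h>
#include <stdint.h>

struct sched_entity {
	uint64_t exec_start;        /* timestamp of last accounting */
	uint64_t sum_exec_runtime;  /* total wall-clock runtime charged */
	uint64_t vruntime;          /* weighted virtual runtime */
	unsigned int weight;        /* simplified load weight */
};

struct cfs_rq {
	struct sched_entity *curr;  /* currently running entity, may be NULL */
	uint64_t min_vruntime;
};

/* Rough stand-in for calc_delta_fair(): scale delta by the nice-0 weight
 * (1024 in the kernel) over the entity's own weight. */
static uint64_t calc_delta_fair(uint64_t delta, struct sched_entity *se)
{
	return delta * 1024 / se->weight;
}

static void update_curr(struct cfs_rq *cfs_rq, uint64_t now)
{
	struct sched_entity *curr = cfs_rq->curr;
	uint64_t delta_exec;

	if (!curr)                        /* nothing running on this cfs_rq */
		return;

	delta_exec = now - curr->exec_start;
	curr->exec_start = now;

	curr->sum_exec_runtime += delta_exec;
	curr->vruntime += calc_delta_fair(delta_exec, curr);

	/* Crude min_vruntime tracking; the kernel also considers the
	 * leftmost queued entity and enforces monotonicity. */
	if (curr->vruntime > cfs_rq->min_vruntime)
		cfs_rq->min_vruntime = curr->vruntime;
}

int main(void)
{
	struct sched_entity se = { .exec_start = 100, .weight = 2048 };
	struct cfs_rq rq = { .curr = &se };

	update_curr(&rq, 300);  /* 200 units elapsed, charged at half rate */
	printf("sum_exec=%llu vruntime=%llu\n",
	       (unsigned long long)se.sum_exec_runtime,
	       (unsigned long long)se.vruntime);
	return 0;
}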