/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic, or that try to execute more
 * than their reserved bandwidth, will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
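/*
 * For reference, userspace requests a -deadline reservation via the
 * sched_setattr() syscall. A minimal, hypothetical sketch (error
 * handling omitted), along the lines of
 * Documentation/scheduler/sched-deadline.txt:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	// 10ms worst-case budget
 *		.sched_deadline	=  30 * 1000 * 1000,	// 30ms relative deadline
 *		.sched_period	= 100 * 1000 * 1000,	// 100ms period
 *	};
 *
 *	syscall(__NR_sched_setattr, 0, &attr, 0);	// pid 0 == calling task
 */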
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}
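/*
 * Illustrative note (not code used here): to_ratio(), defined in
 * sched/core.c, stores bandwidths in ~20-bit fixed point, i.e.
 * roughly bw = (runtime << 20) / period. Assuming the default rt
 * limits of runtime = 950ms and period = 1s, the bw computed above
 * would be:
 *
 *	(950000000ULL << 20) / 1000000000ULL == 996147	(~0.95 in Q20)
 */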
71
72void init_dl_rq(struct dl_rq *dl_rq)
73{
74	dl_rq->rb_root = RB_ROOT;
75
76#ifdef CONFIG_SMP
77	/* zero means no -deadline tasks */
78	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
79
80	dl_rq->dl_nr_migratory = 0;
81	dl_rq->overloaded = 0;
82	dl_rq->pushable_dl_tasks_root = RB_ROOT;
83#else
84	init_dl_bw(&dl_rq->dl_bw);
85#endif
86}
87
88#ifdef CONFIG_SMP
89
90static inline int dl_overloaded(struct rq *rq)
91{
92	return atomic_read(&rq->rd->dlo_count);
93}
94
95static inline void dl_set_overload(struct rq *rq)
96{
97	if (!rq->online)
98		return;
99
100	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
101	/*
102	 * Must be visible before the overload count is
103	 * set (as in sched_rt.c).
104	 *
105	 * Matched by the barrier in pull_dl_task().
106	 */
107	smp_wmb();
108	atomic_inc(&rq->rd->dlo_count);
109}
110
111static inline void dl_clear_overload(struct rq *rq)
112{
113	if (!rq->online)
114		return;
115
116	atomic_dec(&rq->rd->dlo_count);
117	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
118}
119
120static void update_dl_migration(struct dl_rq *dl_rq)
121{
122	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
123		if (!dl_rq->overloaded) {
124			dl_set_overload(rq_of_dl_rq(dl_rq));
125			dl_rq->overloaded = 1;
126		}
127	} else if (dl_rq->overloaded) {
128		dl_clear_overload(rq_of_dl_rq(dl_rq));
129		dl_rq->overloaded = 0;
130	}
131}
132
133static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
134{
135	struct task_struct *p = dl_task_of(dl_se);
136
137	if (p->nr_cpus_allowed > 1)
138		dl_rq->dl_nr_migratory++;
139
140	update_dl_migration(dl_rq);
141}
142
143static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144{
145	struct task_struct *p = dl_task_of(dl_se);
146
147	if (p->nr_cpus_allowed > 1)
148		dl_rq->dl_nr_migratory--;
149
150	update_dl_migration(dl_rq);
151}
152
/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}

static inline void set_post_schedule(struct rq *rq)
{
	rq->post_schedule = has_pushable_dl_tasks(rq);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static void dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;
	bool fallback = false;

	later_rq = find_lock_later_rq(p, rq);

	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online cpu.
		 */
		fallback = true;
		cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable cpu.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	deactivate_task(rq, p, 0);
	set_task_cpu(p, later_rq->cpu);
	activate_task(later_rq, p, ENQUEUE_REPLENISH);

	if (!fallback)
		resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline int pull_dl_task(struct rq *rq)
{
	return 0;
}

static inline void set_post_schedule(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);
/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a
 * -deadline entity wants to (try to!) synchronize its behaviour with
 * that of the scheduler, and to (try to!) reconcile itself with its
 * own scheduling parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
				       struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
	dl_se->runtime = pi_se->dl_runtime;
	dl_se->dl_new = 0;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Such overruns typically happen
 * because an entity voluntarily tried to exceed its runtime, or simply
 * underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}
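/*
 * Worked example of the replenishment loop above (illustrative numbers
 * only): with dl_runtime = 10ms and dl_period = 100ms, an entity that
 * is replenished with runtime = -25ms (a 25ms overrun) iterates three
 * times:
 *
 *	runtime:  -25ms -> -15ms -> -5ms -> 5ms
 *	deadline: pushed 3 * 100ms = 300ms into the future
 *
 * which is what keeps the consumed bandwidth at dl_runtime/dl_period
 * no matter how large the overrun was.
 */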

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10 bits
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
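/*
 * Worked example (illustrative numbers only): a task with
 * dl_runtime = 10ms and dl_period = 100ms wakes up with runtime = 4ms
 * left and 5ms to its old absolute deadline. Then:
 *
 *	left  = dl_period * runtime          = 100ms * 4ms
 *	right = (deadline - t) * dl_runtime  =   5ms * 10ms
 *
 * right < left, i.e. 4ms/5ms > 10ms/100ms: recycling the old deadline
 * would let the task run at 80% bandwidth instead of the reserved 10%,
 * so update_dl_entity() hands out a fresh deadline and a full runtime.
 */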

/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
	if (dl_se->dl_new) {
		setup_new_dl_entity(dl_se, pi_se);
		return;
	}

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	ktime_t now, act;
	ktime_t soft, hard;
	unsigned long range;
	s64 delta;

	if (boosted)
		return 0;
	/*
	 * We want the timer to fire at the deadline, but we must
	 * account for the fact that the deadline comes from rq->clock
	 * and not from hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_se->deadline);
	now = hrtimer_cb_get_time(&dl_se->dl_timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	hrtimer_set_expires(&dl_se->dl_timer, act);

	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
	hard = hrtimer_get_expires(&dl_se->dl_timer);
	range = ktime_to_ns(ktime_sub(hard, soft));
	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
				 range, HRTIMER_MODE_ABS, 0);

	return hrtimer_active(&dl_se->dl_timer);
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clear dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);

	/*
	 * We need to take care of several possible races here:
	 *
	 *   - the task might have changed its scheduling policy
	 *     to something different than SCHED_DEADLINE
	 *   - the task might have changed its reservation parameters
	 *     (through sched_setattr())
	 *   - the task might have been boosted by someone else and
	 *     might be in the boosting/deboosting path
	 *
	 * In all these cases we bail out, as the task is already
	 * in the runqueue or is going to be enqueued back anyway.
	 */
	if (!dl_task(p) || dl_se->dl_new ||
	    dl_se->dl_boosted || !dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

#ifdef CONFIG_SMP
	/*
	 * If we find that the rq the task was on is no longer
	 * available, we need to select a new rq.
	 */
	if (unlikely(!rq->online)) {
		dl_task_offline_migration(rq, p);
		goto unlock;
	}
#endif

	/*
	 * If the throttle happened during sched-out, like:
	 *
	 *   schedule()
	 *     deactivate_task()
	 *       dequeue_task_dl()
	 *         update_curr_dl()
	 *           start_dl_timer()
	 *         __dequeue_task_dl()
	 *     prev->on_rq = 0;
	 *
	 * We can be both throttled and !queued. Replenish the counter
	 * but do not enqueue -- wait for our wakeup to do that.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se, dl_se);
		goto unlock;
	}

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);
#ifdef CONFIG_SMP
	/*
	 * Queueing this task back might have overloaded rq,
	 * check if we need to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq))
		push_dl_task(rq);
#endif
unlock:
	task_rq_unlock(rq, p, &flags);

	return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
	if (dl_runtime_exceeded(rq, dl_se)) {
		dl_se->dl_throttled = 1;
		__dequeue_task_dl(rq, curr, 0);
		if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us in line; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

#ifdef CONFIG_SMP

static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);

static inline u64 next_deadline(struct rq *rq)
{
	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);

	if (next && dl_prio(next->prio))
		return next->dl.deadline;
	else
		return 0;
}

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		/*
		 * If the dl_rq had no -deadline tasks, or if the new task
		 * has a shorter deadline than the current one on dl_rq, we
		 * know that the previous earliest becomes our next earliest,
		 * as the new task becomes the earliest itself.
		 */
		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
	} else if (dl_rq->earliest_dl.next == 0 ||
		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
		/*
		 * On the other hand, if the new -deadline task has a
		 * later deadline than the earliest one on dl_rq, but
		 * it is earlier than the next (if any), we must
		 * recompute the next-earliest.
		 */
		dl_rq->earliest_dl.next = next_deadline(rq);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		dl_rq->earliest_dl.next = next_deadline(rq);
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
		update_dl_entity(dl_se, pi_se);
	else if (flags & ENQUEUE_REPLENISH)
		replenish_dl_entity(dl_se, pi_se);

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if we have one and its (relative) deadline is
	 * smaller than ours... otherwise we keep our runtime and
	 * deadline.
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
		pi_se = &pi_task->dl;
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task
		 * that is going to be deboosted, but exceeds its
		 * runtime while doing so. No point in replenishing
		 * it, as it's going to return to its original
		 * scheduling class after this.
		 */
		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
		return;
	}

	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
		return;

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will then indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	if (p->dl.runtime > 0) {
		rq->curr->dl.dl_yielded = 1;
		p->dl.runtime = 0;
	}
	update_rq_clock(rq);
	update_curr_dl(rq);
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
	rq_clock_skip_update(rq, true);
}
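/*
 * A typical userspace counterpart of the above (hypothetical sketch,
 * error handling omitted) is a periodic loop that returns the leftover
 * budget once the job of the current instance is done:
 *
 *	for (;;) {
 *		do_instance_work();	// hypothetical per-instance job
 *		sched_yield();		// throttle until the next period
 *	}
 */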

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_curr(rq);
}

static int pull_dl_task(struct rq *this_rq);

#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}

struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (need_pull_dl_task(rq, prev)) {
		pull_dl_task(rq);
		/*
		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
		if (rq->stop && task_on_rq_queued(rq->stop))
			return RETRY_TASK;
	}

	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
	if (prev->sched_class == &dl_sched_class)
		update_curr_dl(rq);

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);

	set_post_schedule(rq);

	return p;
}

static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

	/*
	 * Even when we have runtime, update_curr_dl() might have resulted in us
	 * not being the leftmost task anymore. In that case NEED_RESCHED will
	 * be set and schedule() will start a new hrtick for the next task.
	 */
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
	    is_leftmost(p, &rq->dl))
		start_hrtick_dl(rq, p);
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork()
	 */
}

static void task_dead_dl(struct task_struct *p)
{
	struct hrtimer *timer = &p->dl.dl_timer;
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	/*
	 * Since we are TASK_DEAD we won't slip out of the domain!
	 */
	raw_spin_lock_irq(&dl_b->lock);
	/* XXX we should retain the bw until 0-lag */
	dl_b->total_bw -= p->dl.dl_bw;
	raw_spin_unlock_irq(&dl_b->lock);

	hrtimer_cancel(timer);
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
		return 1;
	return 0;
}

/* Returns the second earliest -deadline task, NULL otherwise */
static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.rb_leftmost;
	struct sched_dl_entity *dl_se;
	struct task_struct *p = NULL;

next_node:
	next_node = rb_next(next_node);
	if (next_node) {
		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
		p = dl_task_of(dl_se);

		if (pick_dl_task(rq, p, cpu))
			return p;

		goto next_node;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int best_cpu, cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	/*
	 * We have to consider system topology and task affinity
	 * first, then we can look for a suitable cpu.
	 */
	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
			task, later_mask);
	if (best_cpu == -1)
		return -1;

	/*
	 * If we are here, some target has been found,
	 * the most suitable of which is cached in best_cpu.
	 * This is, among the runqueues where the current tasks
	 * have later deadlines than the task's one, the rq
	 * with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			/*
			 * Last chance: if best_cpu is valid and is
			 * in the mask, that becomes our choice.
			 */
			if (best_cpu < nr_cpu_ids &&
			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}

/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!dl_task(p));

	return p;
}

/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;
	int ret = 0;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);
	ret = 1;

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_dl_tasks(struct rq *rq)
{
	/* Terminates as it moves a -deadline task */
	while (push_dl_task(rq))
		;
}

static int pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return 0;

	/*
	 * Match the barrier from dl_set_overload(); this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_next_earliest_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull only if p does not have an
			 * earlier deadline than the current task of
			 * its runqueue: if it does, p is about to
			 * run there anyway.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void post_schedule_dl(struct rq *rq)
{
	push_dl_tasks(rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_dl_tasks(rq) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
		push_dl_tasks(rq);
	}
}

static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	struct root_domain *src_rd;
	int weight;

	BUG_ON(!dl_task(p));

	rq = task_rq(p);
	src_rd = rq->rd;
	/*
	 * Migrating a SCHED_DEADLINE task between exclusive
	 * cpusets (different root_domains) entails a bandwidth
	 * update. We already made space for us in the destination
	 * domain (see cpuset_can_attach()).
	 */
	if (!cpumask_intersects(src_rd->span, new_mask)) {
		struct dl_bw *src_dl_b;

		src_dl_b = dl_bw_of(cpu_of(rq));
		/*
		 * We now free resources of the root_domain we are migrating
		 * off. In the worst case, sched_setattr() may temporarily fail
		 * until we complete the update.
		 */
		raw_spin_lock(&src_dl_b->lock);
		__dl_clear(src_dl_b, p->dl.dl_bw);
		raw_spin_unlock(&src_dl_b->lock);
	}

	/*
	 * Update only if the task is actually running (i.e.,
	 * it is on the rq AND it is not throttled).
	 */
	if (!on_dl_rq(&p->dl))
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the task's ability to migrate (i.e.,
	 * whether it is restricted to a single cpu) actually
	 * changes.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	/*
	 * The process used to be able to migrate OR it can now migrate
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_dl_task(rq, p);
		BUG_ON(!rq->dl.dl_nr_migratory);
		rq->dl.dl_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_dl_task(rq, p);
		rq->dl.dl_nr_migratory++;
	}

	update_dl_migration(&rq->dl);
}

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}

void init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */

/*
 * Ensure p's dl_timer is cancelled. May drop rq->lock for a while.
 */
static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
{
	struct hrtimer *dl_timer = &p->dl.dl_timer;

	/* Nobody will change task's class if pi_lock is held */
	lockdep_assert_held(&p->pi_lock);

	if (hrtimer_active(dl_timer)) {
		int ret = hrtimer_try_to_cancel(dl_timer);

		if (unlikely(ret == -1)) {
			/*
			 * Note, p may migrate OR new deadline tasks
			 * may appear in rq while we are unlocking it.
			 * Our caller must be fine with that.
			 */
			raw_spin_unlock(&rq->lock);
			hrtimer_cancel(dl_timer);
			raw_spin_lock(&rq->lock);
		}
	}
}

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	/* XXX we should retain the bw until 0-lag */
	cancel_dl_timer(rq, p);
	__dl_clear_params(p);

	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
		return;

	if (pull_dl_task(rq))
		resched_curr(rq);
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded &&
			push_dl_task(rq) && rq != task_rq(p))
			/* Only reschedule if pushing failed */
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched) {
			if (dl_task(rq->curr))
				check_preempt_curr_dl(rq, p, 0);
			else
				resched_curr(rq);
		}
	}
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't tell whether the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			pull_dl_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
		    rq->curr == p)
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_curr(rq);
#endif /* CONFIG_SMP */
	} else
		switched_to_dl(rq, p);
}

const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.set_cpus_allowed       = set_cpus_allowed_dl,
	.rq_online              = rq_online_dl,
	.rq_offline             = rq_offline_dl,
	.post_schedule		= post_schedule_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork              = task_fork_dl,
	.task_dead		= task_dead_dl,

	.prio_changed           = prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,

	.update_curr		= update_curr_dl,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);

void print_dl_stats(struct seq_file *m, int cpu)
{
	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */