// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
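
/*
 * Illustrative example (hypothetical numbers, not taken from this file):
 * a task declaring dl_runtime = 5 ms every dl_period = 20 ms reserves a
 * bandwidth of 5/20 = 0.25 of one CPU. As long as each instance really
 * needs at most 5 ms, EDF guarantees its deadlines; if an instance tries
 * to run longer, the CBS throttles it rather than letting it disturb
 * other tasks.
 */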
#include "sched.h"
#include "pelt.h"

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
        return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
        return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = task_rq(p);

        return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
        return !RB_EMPTY_NODE(&dl_se->rb_node);
}

#ifdef CONFIG_RT_MUTEXES
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
        return dl_se->pi_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
        return pi_of(dl_se) != dl_se;
}
#else
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
        return dl_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
        return false;
}
#endif

#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");
        return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
        struct root_domain *rd = cpu_rq(i)->rd;
        int cpus;

        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");

        if (cpumask_subset(rd->span, cpu_active_mask))
                return cpumask_weight(rd->span);

        cpus = 0;

        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cpus++;

        return cpus;
}

static inline unsigned long __dl_bw_capacity(int i)
{
        struct root_domain *rd = cpu_rq(i)->rd;
        unsigned long cap = 0;

        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");

        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cap += capacity_orig_of(i);

        return cap;
}

/*
 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 * of the CPU the task is running on rather than rd's \Sum CPU capacity.
 */
static inline unsigned long dl_bw_capacity(int i)
{
        if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
            capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
                return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
        } else {
                return __dl_bw_capacity(i);
        }
}
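
/*
 * Illustrative example (hypothetical numbers): on a symmetric 4-CPU
 * root domain with all CPUs fully active, dl_bw_capacity() returns
 * 4 << SCHED_CAPACITY_SHIFT = 4096. On an asymmetric (e.g. big.LITTLE)
 * system it instead sums capacity_orig_of() over the active CPUs, so
 * two 1024-capacity and two 512-capacity CPUs would yield 3072.
 */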
#else
static inline struct dl_bw *dl_bw_of(int i)
{
        return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
        return 1;
}

static inline unsigned long dl_bw_capacity(int i)
{
        return SCHED_CAPACITY_SCALE;
}
#endif

static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->running_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->running_bw -= dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
        if (dl_rq->running_bw > old)
                dl_rq->running_bw = 0;
        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->this_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->this_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->this_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->this_bw -= dl_bw;
        SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
        if (dl_rq->this_bw > old)
                dl_rq->this_bw = 0;
        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __sub_running_bw(dl_se->dl_bw, dl_rq);
}

static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
        struct rq *rq;

        BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);

        if (task_on_rq_queued(p))
                return;

        rq = task_rq(p);
        if (p->dl.dl_non_contending) {
                sub_running_bw(&p->dl, &rq->dl);
                p->dl.dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be cancelled, inactive_task_timer()
                 * will see that dl_non_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
                        put_task_struct(p);
        }
        __sub_rq_bw(p->dl.dl_bw, &rq->dl);
        __add_rq_bw(new_bw, &rq->dl);
}

/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is cancelled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *              wakeup          |      ACTIVE      |
 *          +------------------>+    contending    |
 *          |  add_running_bw   |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |                dequeue |      |
 * +--------+-------+                |      |
 * |                |   t >= 0-lag   |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                | sub_running_bw |      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |             t < 0-lag  |      |
 *          |                        |      |
 *          |                        V      |
 *          |                   +----+------+------+
 *          |  sub_running_bw   |      ACTIVE      |
 *          +-------------------+                  |
 *            inactive timer    |  non contending  |
 *            fired             +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
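
/*
 * Illustrative 0-lag computation (hypothetical numbers): a task with
 * dl_runtime = 10 ms and dl_period = 100 ms (bandwidth 0.1) blocks with
 * runtime = 2 ms left and its absolute deadline 30 ms away. Its 0-lag
 * time is deadline - runtime * dl_period / dl_runtime, i.e. 20 ms before
 * the deadline, so the inactive timer is armed 10 ms in the future. Had
 * the deadline been only 15 ms away, the 0-lag time would already have
 * passed and running_bw would be decreased immediately.
 */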
static void task_non_contending(struct task_struct *p)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct hrtimer *timer = &dl_se->inactive_timer;
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
        s64 zerolag_time;

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing
         */
        if (dl_se->dl_runtime == 0)
                return;

        if (dl_entity_is_special(dl_se))
                return;

        WARN_ON(dl_se->dl_non_contending);

        zerolag_time = dl_se->deadline -
                 div64_long((dl_se->runtime * dl_se->dl_period),
                        dl_se->dl_runtime);

        /*
         * Using relative times instead of the absolute "0-lag time"
         * allows us to simplify the code
         */
        zerolag_time -= rq_clock(rq);

        /*
         * If the "0-lag time" already passed, decrease the active
         * utilization now, instead of starting a timer
         */
        if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
                if (dl_task(p))
                        sub_running_bw(dl_se, dl_rq);
                if (!dl_task(p) || p->state == TASK_DEAD) {
                        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

                        if (p->state == TASK_DEAD)
                                sub_rq_bw(&p->dl, &rq->dl);
                        raw_spin_lock(&dl_b->lock);
                        __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
                        __dl_clear_params(p);
                        raw_spin_unlock(&dl_b->lock);
                }

                return;
        }

        dl_se->dl_non_contending = 1;
        get_task_struct(p);
        hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}

static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing
         */
        if (dl_se->dl_runtime == 0)
                return;

        if (flags & ENQUEUE_MIGRATED)
                add_rq_bw(dl_se, dl_rq);

        if (dl_se->dl_non_contending) {
                dl_se->dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be cancelled, inactive_task_timer()
                 * will see that dl_non_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
                        put_task_struct(dl_task_of(dl_se));
        } else {
                /*
                 * Since "dl_non_contending" is not set, the
                 * task's utilization has already been removed from
                 * active utilization (either when the task blocked
                 * or when the "inactive timer" fired).
                 * So, add it back.
                 */
                add_running_bw(dl_se, dl_rq);
        }
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
        struct sched_dl_entity *dl_se = &p->dl;

        return dl_rq->root.rb_leftmost == &dl_se->rb_node;
}

static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
        raw_spin_lock_init(&dl_b->dl_runtime_lock);
        dl_b->dl_period = period;
        dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
        raw_spin_lock_init(&dl_b->lock);
        raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
        if (global_rt_runtime() == RUNTIME_INF)
                dl_b->bw = -1;
        else
                dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
        raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
        dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq)
{
        dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
        /* zero means no -deadline tasks */
        dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

        dl_rq->dl_nr_migratory = 0;
        dl_rq->overloaded = 0;
        dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
        init_dl_bw(&dl_rq->dl_bw);
#endif

        dl_rq->running_bw = 0;
        dl_rq->this_bw = 0;
        init_dl_rq_bw_ratio(dl_rq);
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
        /*
         * Must be visible before the overload count is
         * set (as in sched_rt.c).
         *
         * Matched by the barrier in pull_dl_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        atomic_dec(&rq->rd->dlo_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
        if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
                if (!dl_rq->overloaded) {
                        dl_set_overload(rq_of_dl_rq(dl_rq));
                        dl_rq->overloaded = 1;
                }
        } else if (dl_rq->overloaded) {
                dl_clear_overload(rq_of_dl_rq(dl_rq));
                dl_rq->overloaded = 0;
        }
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory++;

        update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory--;

        update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;
        struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct task_struct *entry;
        bool leftmost = true;

        BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct task_struct,
                                 pushable_dl_tasks);
                if (dl_entity_preempt(&p->dl, &entry->dl))
                        link = &parent->rb_left;
                else {
                        link = &parent->rb_right;
                        leftmost = false;
                }
        }

        if (leftmost)
                dl_rq->earliest_dl.next = p->dl.deadline;

        rb_link_node(&p->pushable_dl_tasks, parent, link);
        rb_insert_color_cached(&p->pushable_dl_tasks,
                               &dl_rq->pushable_dl_tasks_root, leftmost);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;

        if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
                return;

        if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
                struct rb_node *next_node;

                next_node = rb_next(&p->pushable_dl_tasks);
                if (next_node) {
                        dl_rq->earliest_dl.next = rb_entry(next_node,
                                struct task_struct, pushable_dl_tasks)->dl.deadline;
                }
        }

        rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
        RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
        return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return dl_task(prev);
}

static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void deadline_queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_dl_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
        queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
        struct rq *later_rq = NULL;
        struct dl_bw *dl_b;

        later_rq = find_lock_later_rq(p, rq);
        if (!later_rq) {
                int cpu;

                /*
                 * If we cannot preempt any rq, fall back to pick any
                 * online CPU:
                 */
                cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
                if (cpu >= nr_cpu_ids) {
                        /*
                         * Failed to find any suitable CPU.
                         * The task will never come back!
                         */
                        BUG_ON(dl_bandwidth_enabled());

                        /*
                         * If admission control is disabled we
                         * try a little harder to let the task
                         * run.
                         */
                        cpu = cpumask_any(cpu_active_mask);
                }
                later_rq = cpu_rq(cpu);
                double_lock_balance(rq, later_rq);
        }

        if (p->dl.dl_non_contending || p->dl.dl_throttled) {
                /*
                 * Inactive timer is armed (or callback is running, but
                 * waiting for us to release rq locks). In any case, when it
                 * will fire (or continue), it will see running_bw of this
                 * task migrated to later_rq (and correctly handle it).
                 */
                sub_running_bw(&p->dl, &rq->dl);
                sub_rq_bw(&p->dl, &rq->dl);

                add_rq_bw(&p->dl, &later_rq->dl);
                add_running_bw(&p->dl, &later_rq->dl);
        } else {
                sub_rq_bw(&p->dl, &rq->dl);
                add_rq_bw(&p->dl, &later_rq->dl);
        }

        /*
         * And we finally need to fixup root_domain(s) bandwidth accounting,
         * since p is still hanging out in the old (now moved to default) root
         * domain.
         */
        dl_b = &rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);
        __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
        raw_spin_unlock(&dl_b->lock);

        dl_b = &later_rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);
        __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
        raw_spin_unlock(&dl_b->lock);

        set_task_cpu(p, later_rq->cpu);
        double_unlock_balance(later_rq, rq);

        return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The ability to specify such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        WARN_ON(is_dl_boosted(dl_se));
        WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

        /*
         * We are racing with the deadline timer. So, do nothing because
         * the deadline timer handler will take care of properly recharging
         * the runtime and postponing the deadline
         */
        if (dl_se->dl_throttled)
                return;

        /*
         * We use the regular wall clock time to set deadlines in the
         * future; in fact, we must consider execution overheads (time
         * spent on hardirq context, etc.).
         */
        dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
        dl_se->runtime = dl_se->dl_runtime;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        BUG_ON(pi_of(dl_se)->dl_runtime <= 0);

        /*
         * This could be the case for a !-dl task that is boosted.
         * Just go with full inherited parameters.
         */
        if (dl_se->dl_deadline == 0) {
                dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
                dl_se->runtime = pi_of(dl_se)->dl_runtime;
        }

        if (dl_se->dl_yielded && dl_se->runtime > 0)
                dl_se->runtime = 0;

        /*
         * We keep moving the deadline away until we get some
         * available runtime for the entity. This ensures correct
         * handling of situations where the runtime overrun is
         * arbitrarily large.
         */
        while (dl_se->runtime <= 0) {
                dl_se->deadline += pi_of(dl_se)->dl_period;
                dl_se->runtime += pi_of(dl_se)->dl_runtime;
        }

        /*
         * At this point, the deadline really should be "in
         * the future" with respect to rq->clock. If it's
         * not, we are, for some reason, lagging too much!
         * Anyway, after having warned userspace about that,
         * we still try to keep things running by
         * resetting the deadline and the budget of the
         * entity.
         */
        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
                printk_deferred_once("sched: DL replenish lagged too much\n");
                dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
                dl_se->runtime = pi_of(dl_se)->dl_runtime;
        }

        if (dl_se->dl_yielded)
                dl_se->dl_yielded = 0;
        if (dl_se->dl_throttled)
                dl_se->dl_throttled = 0;
}

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.rst for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * a task with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
{
        u64 left, right;

        /*
         * left and right are the two sides of the equation above,
         * after a bit of shuffling to use multiplications instead
         * of divisions.
         *
         * Note that none of the time values involved in the two
         * multiplications are absolute: dl_deadline and dl_runtime
         * are the relative deadline and the maximum runtime of each
         * instance, runtime is the runtime left for the last instance
         * and (deadline - t), since t is rq->clock, is the time left
         * to the (absolute) deadline. Even if overflowing the u64 type
         * is very unlikely to occur in both cases, here we scale down
         * as we want to avoid that risk at all. Scaling down by 10
         * means that we reduce granularity to 1us. We are fine with it,
         * since this is only a true/false check and, anyway, thinking
         * of anything below microseconds resolution is actually fiction
         * (but still we want to give the user that illusion >;).
         */
        left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
        right = ((dl_se->deadline - t) >> DL_SCALE) *
                (pi_of(dl_se)->dl_runtime >> DL_SCALE);

        return dl_time_before(right, left);
}
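
/*
 * Illustrative example (hypothetical numbers): dl_runtime = 10 ms and
 * dl_deadline = 30 ms give a bandwidth of 1/3. If the task wakes with
 * runtime = 5 ms left and 12 ms to its absolute deadline, then
 * left = 30 * 5 = 150 and right = 12 * 10 = 120 (ignoring the DL_SCALE
 * shifts), so 5/12 > 1/3 and dl_entity_overflow() returns true: the old
 * parameters cannot be recycled and the deadline must be pushed forward.
 */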

/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to avoid the task overrunning its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *    runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In this way, runtime will be equal to the maximum density
 * the task can use without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
        u64 laxity = dl_se->deadline - rq_clock(rq);

        /*
         * If the task has deadline < period, and the deadline is in the past,
         * it should already be throttled before this check.
         *
         * See update_dl_entity() comments for further details.
         */
        WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

        dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
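
/*
 * Illustrative example (hypothetical numbers): with dl_runtime = 10 ms
 * and dl_deadline = 40 ms the density is 1/4. If the task wakes up with
 * 20 ms of laxity (deadline - now), its remaining runtime is clamped to
 * density * laxity = 5 ms, so it can never consume more than 1/4 of the
 * CPU over the window that is left.
 */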

/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only for tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
        return dl_se->dl_deadline == dl_se->dl_period;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could cause the entity to overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function to
 * find more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
            dl_entity_overflow(dl_se, rq_clock(rq))) {

                if (unlikely(!dl_is_implicit(dl_se) &&
                             !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
                             !is_dl_boosted(dl_se))) {
                        update_dl_revised_wakeup(dl_se, rq);
                        return;
                }

                dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
                dl_se->runtime = pi_of(dl_se)->dl_runtime;
        }
}

static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
        return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}
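
/*
 * Illustrative example (hypothetical numbers): the current period started
 * at (deadline - dl_deadline), so the next one starts dl_period after
 * that. For an implicit task (dl_deadline == dl_period) this is simply
 * the current absolute deadline; for a constrained task with
 * dl_deadline = 40 ms and dl_period = 100 ms, the next period begins
 * 60 ms after the current absolute deadline.
 */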

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct hrtimer *timer = &dl_se->dl_timer;
        struct rq *rq = task_rq(p);
        ktime_t now, act;
        s64 delta;

        lockdep_assert_held(&rq->lock);

        /*
         * We want the timer to fire at the deadline, but considering
         * that it is actually coming from rq->clock and not from
         * hrtimer's time base reading.
         */
        act = ns_to_ktime(dl_next_period(dl_se));
        now = hrtimer_cb_get_time(timer);
        delta = ktime_to_ns(now) - rq_clock(rq);
        act = ktime_add_ns(act, delta);

        /*
         * If the expiry time already passed, e.g., because the value
         * chosen as the deadline is too small, don't even try to
         * start the timer in the past!
         */
        if (ktime_us_delta(act, now) < 0)
                return 0;

        /*
         * !enqueued will guarantee another callback; even if one is already in
         * progress. This ensures a balanced {get,put}_task_struct().
         *
         * The race against __run_timer() clearing the enqueued state is
         * harmless because we're holding task_rq()->lock, therefore the timer
         * expiring after we've done the check will wait on its task_rq_lock()
         * and observe our state.
         */
        if (!hrtimer_is_queued(timer)) {
                get_task_struct(p);
                hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
        }

        return 1;
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is still active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        /*
         * The task might have changed its scheduling policy to something
         * different than SCHED_DEADLINE (through switched_from_dl()).
         */
        if (!dl_task(p))
                goto unlock;

        /*
         * The task might have been boosted by someone else and might be in the
         * boosting/deboosting path, it's not throttled.
         */
        if (is_dl_boosted(dl_se))
                goto unlock;

        /*
         * Spurious timer due to start_dl_timer() race; or we already received
         * a replenishment from rt_mutex_setprio().
         */
        if (!dl_se->dl_throttled)
                goto unlock;

        sched_clock_tick();
        update_rq_clock(rq);

        /*
         * If the throttle happened during sched-out; like:
         *
         *   schedule()
         *     deactivate_task()
         *       dequeue_task_dl()
         *         update_curr_dl()
         *           start_dl_timer()
         *         __dequeue_task_dl()
         *     prev->on_rq = 0;
         *
         * We can be both throttled and !queued. Replenish the counter
         * but do not enqueue -- wait for our wakeup to do that.
         */
        if (!task_on_rq_queued(p)) {
                replenish_dl_entity(dl_se);
                goto unlock;
        }

#ifdef CONFIG_SMP
        if (unlikely(!rq->online)) {
                /*
                 * If the runqueue is no longer available, migrate the
                 * task elsewhere. This necessarily changes rq.
                 */
                lockdep_unpin_lock(&rq->lock, rf.cookie);
                rq = dl_task_offline_migration(rq, p);
                rf.cookie = lockdep_pin_lock(&rq->lock);
                update_rq_clock(rq);

                /*
                 * Now that the task has been migrated to the new RQ and we
                 * have that locked, proceed as normal and enqueue the task
                 * there.
                 */
        }
#endif

        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->curr))
                check_preempt_curr_dl(rq, p, 0);
        else
                resched_curr(rq);

#ifdef CONFIG_SMP
        /*
         * Queueing this task back might have overloaded rq, check if we need
         * to kick someone away.
         */
        if (has_pushable_dl_tasks(rq)) {
                /*
                 * Nothing relies on rq->lock after this, so it's safe to drop
                 * rq->lock.
                 */
                rq_unpin_lock(rq, &rf);
                push_dl_task(rq);
                rq_repin_lock(rq, &rf);
        }
#endif

unlock:
        task_rq_unlock(rq, p, &rf);

        /*
         * This can free the task_struct, including this hrtimer, do not touch
         * anything related to that after this.
         */
        put_task_struct(p);

        return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->dl_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        timer->function = dl_task_timer;
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
            dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
                if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
                        return;
                dl_se->dl_throttled = 1;
                if (dl_se->runtime > 0)
                        dl_se->runtime = 0;
        }
}
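
/*
 * Illustrative example (hypothetical numbers): a constrained task with
 * dl_deadline = 40 ms and dl_period = 100 ms wakes 50 ms into its period,
 * i.e. 10 ms past its absolute deadline but 50 ms before the next period.
 * Instead of being replenished (which would let it consume up to
 * runtime/deadline of the CPU), it is throttled here and the replenishment
 * timer is armed for the start of the next period via dl_next_period().
 */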

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
        return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * This function implements the GRUB accounting rule:
 * according to the GRUB reclaiming algorithm, the runtime is
 * not decreased as "dq = -dt", but as
 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * runqueue active utilization, and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
 * BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value
 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
 * So, overflow is not an issue here.
 */
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
        u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
        u64 u_act;
        u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

        /*
         * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
         * we compare u_inact + rq->dl.extra_bw with
         * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
         * u_inact + rq->dl.extra_bw can be larger than 1
         * (so, 1 - u_inact - rq->dl.extra_bw would be negative,
         * leading to wrong results)
         */
        if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
                u_act = u_act_min;
        else
                u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

        return (delta * u_act) >> BW_SHIFT;
}
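
/*
 * Illustrative example (hypothetical numbers, with BW_UNIT read as 1.0):
 * a task with u = 0.25 on a runqueue with u_inact = 0.5, extra_bw = 0.05
 * and bw_ratio ~= 1/0.95 has u_act_min ~= 0.263. Since 0.5 + 0.05 is not
 * larger than 1 - 0.263, u_act = 1 - 0.5 - 0.05 = 0.45, so 1 ms of
 * wall-clock execution is charged as only 0.45 ms of budget: the task
 * reclaims bandwidth that no other -deadline task is using.
 */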

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_dl_entity *dl_se = &curr->dl;
        u64 delta_exec, scaled_delta_exec;
        int cpu = cpu_of(rq);
        u64 now;

        if (!dl_task(curr) || !on_dl_rq(dl_se))
                return;

        /*
         * Consumed budget is computed considering the time as
         * observed by schedulable tasks (excluding time spent
         * in hardirq context, etc.). Deadlines are instead
         * computed using hard walltime. This seems to be the more
         * natural solution, but the full ramifications of this
         * approach need further study.
         */
        now = rq_clock_task(rq);
        delta_exec = now - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0)) {
                if (unlikely(dl_se->dl_yielded))
                        goto throttle;
                return;
        }

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = now;
        cgroup_account_cputime(curr, delta_exec);

        if (dl_entity_is_special(dl_se))
                return;

        /*
         * For tasks that participate in GRUB, we implement GRUB-PA: the
         * spare reclaimed bandwidth is used to clock down frequency.
         *
         * For the others, we still need to scale reservation parameters
         * according to current frequency and CPU maximum capacity.
         */
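        /*
         * Illustrative example (hypothetical numbers): a non-reclaiming task
         * running on a CPU at half its maximum frequency (scale_freq = 512,
         * scale_cpu = 1024) has a 1 ms wall-clock delta_exec charged as
         * roughly 0.5 ms, since at half speed it only completed half the
         * work its reservation assumed.
         */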
        if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
                scaled_delta_exec = grub_reclaim(delta_exec,
                                                 rq,
                                                 &curr->dl);
        } else {
                unsigned long scale_freq = arch_scale_freq_capacity(cpu);
                unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);

                scaled_delta_exec = cap_scale(delta_exec, scale_freq);
                scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
        }

        dl_se->runtime -= scaled_delta_exec;

throttle:
        if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
                dl_se->dl_throttled = 1;

                /* If requested, inform the user about runtime overruns. */
                if (dl_runtime_exceeded(dl_se) &&
                    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
                        dl_se->dl_overrun = 1;

                __dequeue_task_dl(rq, curr, 0);
                if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

                if (!is_leftmost(curr, &rq->dl))
                        resched_curr(rq);
        }

        /*
         * Because -- for now -- we share the rt bandwidth, we need to
         * account our runtime there too, otherwise actual rt tasks
         * would be able to exceed the shared quota.
         *
         * Account to the root rt group for now.
         *
         * The solution we're working towards is having the RT groups scheduled
         * using deadline servers -- however there's a few nasties to figure
         * out before that can happen.
         */
        if (rt_bandwidth_enabled()) {
                struct rt_rq *rt_rq = &rq->rt;

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We'll let actual RT tasks worry about the overflow here, we
                 * have our own CBS to keep us inline; only account when RT
                 * bandwidth is relevant.
                 */
                if (sched_rt_bandwidth_account(rt_rq))
                        rt_rq->rt_time += delta_exec;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
}

static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     inactive_timer);
        struct task_struct *p = dl_task_of(dl_se);
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        sched_clock_tick();
        update_rq_clock(rq);

        if (!dl_task(p) || p->state == TASK_DEAD) {
                struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

                if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
                        sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
                        sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
                        dl_se->dl_non_contending = 0;
                }

                raw_spin_lock(&dl_b->lock);
                __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
                raw_spin_unlock(&dl_b->lock);
                __dl_clear_params(p);

                goto unlock;
        }
        if (dl_se->dl_non_contending == 0)
                goto unlock;

        sub_running_bw(dl_se, &rq->dl);
        dl_se->dl_non_contending = 0;
unlock:
        task_rq_unlock(rq, p, &rf);
        put_task_struct(p);

        return HRTIMER_NORESTART;
}

void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->inactive_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        timer->function = inactive_task_timer;
}

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_rq->earliest_dl.curr == 0 ||
            dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
                dl_rq->earliest_dl.curr = deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
        }
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        /*
         * Since we may have removed our earliest (and/or next earliest)
         * task we must recompute them.
         */
        if (!dl_rq->dl_nr_running) {
                dl_rq->earliest_dl.curr = 0;
                dl_rq->earliest_dl.next = 0;
                cpudl_clear(&rq->rd->cpudl, rq->cpu);
        } else {
                struct rb_node *leftmost = dl_rq->root.rb_leftmost;
                struct sched_dl_entity *entry;

                entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
                dl_rq->earliest_dl.curr = entry->deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
        }
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;
        u64 deadline = dl_se->deadline;

        WARN_ON(!dl_prio(prio));
        dl_rq->dl_nr_running++;
        add_nr_running(rq_of_dl_rq(dl_rq), 1);

        inc_dl_deadline(dl_rq, deadline);
        inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;

        WARN_ON(!dl_prio(prio));
        WARN_ON(!dl_rq->dl_nr_running);
        dl_rq->dl_nr_running--;
        sub_nr_running(rq_of_dl_rq(dl_rq), 1);

        dec_dl_deadline(dl_rq, dl_se->deadline);
        dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rb_node **link = &dl_rq->root.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct sched_dl_entity *entry;
        int leftmost = 1;

        BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_dl_entity, rb_node);
                if (dl_time_before(dl_se->deadline, entry->deadline))
                        link = &parent->rb_left;
                else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        rb_link_node(&dl_se->rb_node, parent, link);
        rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);

        inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        if (RB_EMPTY_NODE(&dl_se->rb_node))
                return;

        rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
        RB_CLEAR_NODE(&dl_se->rb_node);

        dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
        BUG_ON(on_dl_rq(dl_se));

        /*
         * If this is a wakeup or a new instance, the scheduling
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
        if (flags & ENQUEUE_WAKEUP) {
                task_contending(dl_se, flags);
                update_dl_entity(dl_se);
        } else if (flags & ENQUEUE_REPLENISH) {
                replenish_dl_entity(dl_se);
        } else if ((flags & ENQUEUE_RESTORE) &&
                  dl_time_before(dl_se->deadline,
                                 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
                setup_new_dl_entity(dl_se);
        }

        __enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
        __dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        if (is_dl_boosted(&p->dl)) {
                /*
                 * Because of delays in the detection of the overrun of a
                 * thread's runtime, it might be the case that a thread
                 * goes to sleep in a rt mutex with negative runtime. As
                 * a consequence, the thread will be throttled.
                 *
                 * While waiting for the mutex, this thread can also be
                 * boosted via PI, resulting in a thread that is throttled
                 * and boosted at the same time.
                 *
                 * In this case, the boost overrides the throttle.
                 */
                if (p->dl.dl_throttled) {
                        /*
                         * The replenish timer needs to be canceled. No
                         * problem if it fires concurrently: boosted threads
                         * are ignored in dl_task_timer().
                         */
                        hrtimer_try_to_cancel(&p->dl.dl_timer);
                        p->dl.dl_throttled = 0;
                }
        } else if (!dl_prio(p->normal_prio)) {
                /*
                 * Special case in which we have a !SCHED_DEADLINE task that is going
                 * to be deboosted, but exceeds its runtime while doing so. No point in
                 * replenishing it, as it's going to return back to its original
                 * scheduling class after this. If it has been throttled, we need to
                 * clear the flag, otherwise the task may wake up as throttled after
                 * being boosted again with no means to replenish the runtime and clear
                 * the throttle.
                 */
                p->dl.dl_throttled = 0;
                if (!(flags & ENQUEUE_REPLENISH))
                        printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
                                             task_pid_nr(p));

                return;
        }

        /*
         * Check if a constrained deadline task was activated
         * after the deadline but before the next period.
         * If that is the case, the task will be throttled and
         * the replenishment timer will be set to the next period.
         */
        if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
                dl_check_constrained_dl(&p->dl);

        if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
                add_rq_bw(&p->dl, &rq->dl);
                add_running_bw(&p->dl, &rq->dl);
        }

        /*
         * If p is throttled, we do not enqueue it. In fact, if it exhausted
         * its budget it needs a replenishment and, since it now is on
         * its rq, the bandwidth timer callback (which clearly has not
         * run yet) will take care of this.
         * However, the active utilization does not depend on the fact
         * that the task is on the runqueue or not (but depends on the
         * task's state - in GRUB parlance, "inactive" vs "active contending").
         * In other words, even if a task is throttled its utilization must
         * be counted in the active utilization; hence, we need to call
         * add_running_bw().
         */
        if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
                if (flags & ENQUEUE_WAKEUP)
                        task_contending(&p->dl, flags);

                return;
        }

        enqueue_dl_entity(&p->dl, flags);

        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        dequeue_dl_entity(&p->dl);
        dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        update_curr_dl(rq);
        __dequeue_task_dl(rq, p, flags);

        if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
                sub_running_bw(&p->dl, &rq->dl);
                sub_rq_bw(&p->dl, &rq->dl);
        }

        /*
         * This check allows us to start the inactive timer (or to immediately
         * decrease the active utilization, if needed) in two cases:
         * when the task blocks and when it is terminating
         * (p->state == TASK_DEAD). We can handle the two cases in the same
         * way, because from GRUB's point of view the same thing is happening
         * (the task moves from "active contending" to "active non contending"
         * or "inactive")
         */
        if (flags & DEQUEUE_SLEEP)
                task_non_contending(p);
}
1640
1641 /*
1642 * Yield task semantic for -deadline tasks is:
1643 *
1644 * get off the CPU until our next instance, with
1645 * a new runtime. This is of little use now, since we
1646 * don't have a bandwidth reclaiming mechanism. Anyway,
1647 * bandwidth reclaiming is planned for the future, and
1648 * yield_task_dl will indicate that some spare budget
1649 * is available for other task instances to use.
1650 */
1651 static void yield_task_dl(struct rq *rq)
1652 {
1653 /*
1654 * We make the task go to sleep until its current deadline by
1655 * forcing its runtime to zero. This way, update_curr_dl() stops
1656 * it and the bandwidth timer will wake it up and will give it
1657 * new scheduling parameters (thanks to dl_yielded=1).
1658 */
1659 rq->curr->dl.dl_yielded = 1;
1660
1661 update_rq_clock(rq);
1662 update_curr_dl(rq);
1663 /*
1664 * Tell update_rq_clock() that we've just updated,
1665 * so we don't do microscopic update in schedule()
1666 * and double the fastpath cost.
1667 */
1668 rq_clock_skip_update(rq);
1669 }
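/*
 * Illustrative userspace view (not kernel code; job structure assumed):
 * a -deadline task that finishes its job early can hand back the
 * leftover budget and be descheduled until its next period:
 *
 *	while (!done) {
 *		do_instance_work();
 *		sched_yield();	(put to sleep until the next period)
 *	}
 */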
1670
1671 #ifdef CONFIG_SMP
1672
1673 static int find_later_rq(struct task_struct *task);
1674
1675 static int
1676 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1677 {
1678 struct task_struct *curr;
1679 bool select_rq;
1680 struct rq *rq;
1681
1682 if (sd_flag != SD_BALANCE_WAKE)
1683 goto out;
1684
1685 rq = cpu_rq(cpu);
1686
1687 rcu_read_lock();
1688 curr = READ_ONCE(rq->curr); /* unlocked access */
1689
1690 /*
1691 * If we are dealing with a -deadline task, we must
1692 * decide where to wake it up.
1693 * If it has a later deadline and the current task
1694 * on this rq can't move (provided the waking task
1695 * can!) we prefer to send it somewhere else. On the
1696 * other hand, if it has a shorter deadline, we
1697 * try to keep it here, since it might be important.
1698 */
1699 select_rq = unlikely(dl_task(curr)) &&
1700 (curr->nr_cpus_allowed < 2 ||
1701 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1702 p->nr_cpus_allowed > 1;
1703
1704 /*
1705 * Take the capacity of the CPU into account to
1706 * ensure it fits the requirement of the task.
1707 */
1708 if (static_branch_unlikely(&sched_asym_cpucapacity))
1709 select_rq |= !dl_task_fits_capacity(p, cpu);
1710
1711 if (select_rq) {
1712 int target = find_later_rq(p);
1713
1714 if (target != -1 &&
1715 (dl_time_before(p->dl.deadline,
1716 cpu_rq(target)->dl.earliest_dl.curr) ||
1717 (cpu_rq(target)->dl.dl_nr_running == 0)))
1718 cpu = target;
1719 }
1720 rcu_read_unlock();
1721
1722 out:
1723 return cpu;
1724 }
1725
1726 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1727 {
1728 struct rq *rq;
1729
1730 if (p->state != TASK_WAKING)
1731 return;
1732
1733 rq = task_rq(p);
1734 /*
1735 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1736 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1737 * rq->lock is not... So, lock it
1738 */
1739 raw_spin_lock(&rq->lock);
1740 if (p->dl.dl_non_contending) {
1741 update_rq_clock(rq);
1742 sub_running_bw(&p->dl, &rq->dl);
1743 p->dl.dl_non_contending = 0;
1744 /*
1745 * If the timer handler is currently running and the
1746 * timer cannot be cancelled, inactive_task_timer()
1747 * will see that dl_non_contending is not set, and
1748 * will not touch the rq's active utilization,
1749 * so we are still safe.
1750 */
1751 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1752 put_task_struct(p);
1753 }
1754 sub_rq_bw(&p->dl, &rq->dl);
1755 raw_spin_unlock(&rq->lock);
1756 }
1757
1758 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1759 {
1760 /*
1761 * Current can't be migrated, useless to reschedule,
1762 * let's hope p can move out.
1763 */
1764 if (rq->curr->nr_cpus_allowed == 1 ||
1765 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1766 return;
1767
1768 /*
1769 * p is migratable, so let's not schedule it and
1770 * see if it is pushed or pulled somewhere else.
1771 */
1772 if (p->nr_cpus_allowed != 1 &&
1773 cpudl_find(&rq->rd->cpudl, p, NULL))
1774 return;
1775
1776 resched_curr(rq);
1777 }
1778
1779 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1780 {
1781 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1782 /*
1783 * This is OK, because current is on_cpu, which avoids it being
1784 * picked for load-balance and preemption/IRQs are still
1785 * disabled avoiding further scheduler activity on it and we've
1786 * not yet started the picking loop.
1787 */
1788 rq_unpin_lock(rq, rf);
1789 pull_dl_task(rq);
1790 rq_repin_lock(rq, rf);
1791 }
1792
1793 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1794 }
1795 #endif /* CONFIG_SMP */
1796
1797 /*
1798 * Only called when both the current and waking task are -deadline
1799 * tasks.
1800 */
1801 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1802 int flags)
1803 {
1804 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1805 resched_curr(rq);
1806 return;
1807 }
1808
1809 #ifdef CONFIG_SMP
1810 /*
1811 * In the unlikely case current and p have the same deadline
1812 * let us try to decide what's the best thing to do...
1813 */
1814 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1815 !test_tsk_need_resched(rq->curr))
1816 check_preempt_equal_dl(rq, p);
1817 #endif /* CONFIG_SMP */
1818 }
1819
1820 #ifdef CONFIG_SCHED_HRTICK
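/*
 * Arm the high-resolution preemption tick to fire once the task's
 * remaining runtime would be exhausted.
 */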
1821 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1822 {
1823 hrtick_start(rq, p->dl.runtime);
1824 }
1825 #else /* !CONFIG_SCHED_HRTICK */
1826 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1827 {
1828 }
1829 #endif
1830
1831 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1832 {
1833 p->se.exec_start = rq_clock_task(rq);
1834
1835 /* You can't push away the running task */
1836 dequeue_pushable_dl_task(rq, p);
1837
1838 if (!first)
1839 return;
1840
1841 if (hrtick_enabled(rq))
1842 start_hrtick_dl(rq, p);
1843
1844 if (rq->curr->sched_class != &dl_sched_class)
1845 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1846
1847 deadline_queue_push_tasks(rq);
1848 }
1849
1850 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1851 struct dl_rq *dl_rq)
1852 {
1853 struct rb_node *left = rb_first_cached(&dl_rq->root);
1854
1855 if (!left)
1856 return NULL;
1857
1858 return rb_entry(left, struct sched_dl_entity, rb_node);
1859 }
1860
1861 static struct task_struct *pick_next_task_dl(struct rq *rq)
1862 {
1863 struct sched_dl_entity *dl_se;
1864 struct dl_rq *dl_rq = &rq->dl;
1865 struct task_struct *p;
1866
1867 if (!sched_dl_runnable(rq))
1868 return NULL;
1869
1870 dl_se = pick_next_dl_entity(rq, dl_rq);
1871 BUG_ON(!dl_se);
1872 p = dl_task_of(dl_se);
1873 set_next_task_dl(rq, p, true);
1874 return p;
1875 }
1876
1877 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1878 {
1879 update_curr_dl(rq);
1880
1881 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1882 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1883 enqueue_pushable_dl_task(rq, p);
1884 }
1885
1886 /*
1887 * scheduler tick hitting a task of our scheduling class.
1888 *
1889 * NOTE: This function can be called remotely by the tick offload that
1890 * goes along full dynticks. Therefore no local assumption can be made
1891 * and everything must be accessed through the @rq and @curr passed in
1892 * parameters.
1893 */
1894 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1895 {
1896 update_curr_dl(rq);
1897
1898 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1899 /*
1900 * Even when we have runtime, update_curr_dl() might have resulted in us
1901 * not being the leftmost task anymore. In that case NEED_RESCHED will
1902 * be set and schedule() will start a new hrtick for the next task.
1903 */
1904 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1905 is_leftmost(p, &rq->dl))
1906 start_hrtick_dl(rq, p);
1907 }
1908
1909 static void task_fork_dl(struct task_struct *p)
1910 {
1911 /*
1912 * SCHED_DEADLINE tasks cannot fork and this is enforced in
1913 * sched_fork().
1914 */
1915 }
1916
1917 #ifdef CONFIG_SMP
1918
1919 /* Only try algorithms three times */
1920 #define DL_MAX_TRIES 3
1921
1922 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1923 {
1924 if (!task_running(rq, p) &&
1925 cpumask_test_cpu(cpu, p->cpus_ptr))
1926 return 1;
1927 return 0;
1928 }
1929
1930 /*
1931 * Return the earliest pushable task on this rq that is suitable to
1932 * be executed on the CPU, or NULL if there is none:
1933 */
1934 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1935 {
1936 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1937 struct task_struct *p = NULL;
1938
1939 if (!has_pushable_dl_tasks(rq))
1940 return NULL;
1941
1942 next_node:
1943 if (next_node) {
1944 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1945
1946 if (pick_dl_task(rq, p, cpu))
1947 return p;
1948
1949 next_node = rb_next(next_node);
1950 goto next_node;
1951 }
1952
1953 return NULL;
1954 }
1955
1956 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1957
1958 static int find_later_rq(struct task_struct *task)
1959 {
1960 struct sched_domain *sd;
1961 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1962 int this_cpu = smp_processor_id();
1963 int cpu = task_cpu(task);
1964
1965 /* Make sure the mask is initialized first */
1966 if (unlikely(!later_mask))
1967 return -1;
1968
1969 if (task->nr_cpus_allowed == 1)
1970 return -1;
1971
1972 /*
1973 * We have to consider system topology and task affinity
1974 * first, then we can look for a suitable CPU.
1975 */
1976 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1977 return -1;
1978
1979 /*
1980 * If we are here, some targets have been found, including
1981 * the most suitable one: among the runqueues whose current
1982 * tasks have later deadlines than this task's, the rq
1983 * with the latest possible deadline.
1984 *
1985 * Now we check how well this matches with task's
1986 * affinity and system topology.
1987 *
1988 * The last CPU where the task ran is our first
1989 * guess, since it is most likely cache-hot there.
1990 */
1991 if (cpumask_test_cpu(cpu, later_mask))
1992 return cpu;
1993 /*
1994 * Check if this_cpu is to be skipped (i.e., it is
1995 * not in the mask) or not.
1996 */
1997 if (!cpumask_test_cpu(this_cpu, later_mask))
1998 this_cpu = -1;
1999
2000 rcu_read_lock();
2001 for_each_domain(cpu, sd) {
2002 if (sd->flags & SD_WAKE_AFFINE) {
2003 int best_cpu;
2004
2005 /*
2006 * If possible, preempting this_cpu is
2007 * cheaper than migrating.
2008 */
2009 if (this_cpu != -1 &&
2010 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2011 rcu_read_unlock();
2012 return this_cpu;
2013 }
2014
2015 best_cpu = cpumask_first_and(later_mask,
2016 sched_domain_span(sd));
2017 /*
2018 * Last chance: if a CPU in both later_mask and
2019 * the current sd span is valid, that becomes our
2020 * choice. Of course, the latest possible CPU is
2021 * already under consideration through later_mask.
2022 */
2023 if (best_cpu < nr_cpu_ids) {
2024 rcu_read_unlock();
2025 return best_cpu;
2026 }
2027 }
2028 }
2029 rcu_read_unlock();
2030
2031 /*
2032 * At this point, all our guesses have failed, so we just return
2033 * 'something' and let the caller sort things out.
2034 */
2035 if (this_cpu != -1)
2036 return this_cpu;
2037
2038 cpu = cpumask_any(later_mask);
2039 if (cpu < nr_cpu_ids)
2040 return cpu;
2041
2042 return -1;
2043 }
2044
2045 /* Locks the rq it finds */
2046 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2047 {
2048 struct rq *later_rq = NULL;
2049 int tries;
2050 int cpu;
2051
2052 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2053 cpu = find_later_rq(task);
2054
2055 if ((cpu == -1) || (cpu == rq->cpu))
2056 break;
2057
2058 later_rq = cpu_rq(cpu);
2059
2060 if (later_rq->dl.dl_nr_running &&
2061 !dl_time_before(task->dl.deadline,
2062 later_rq->dl.earliest_dl.curr)) {
2063 /*
2064 * Target rq has tasks of equal or earlier deadline;
2065 * retrying does not release any lock and is unlikely
2066 * to yield a different result.
2067 */
2068 later_rq = NULL;
2069 break;
2070 }
2071
2072 /* Retry if something changed. */
2073 if (double_lock_balance(rq, later_rq)) {
2074 if (unlikely(task_rq(task) != rq ||
2075 !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
2076 task_running(rq, task) ||
2077 !dl_task(task) ||
2078 !task_on_rq_queued(task))) {
2079 double_unlock_balance(rq, later_rq);
2080 later_rq = NULL;
2081 break;
2082 }
2083 }
2084
2085 /*
2086 * If the rq we found has no -deadline task, or
2087 * its earliest one has a later deadline than our
2088 * task, the rq is a good one.
2089 */
2090 if (!later_rq->dl.dl_nr_running ||
2091 dl_time_before(task->dl.deadline,
2092 later_rq->dl.earliest_dl.curr))
2093 break;
2094
2095 /* Otherwise we try again. */
2096 double_unlock_balance(rq, later_rq);
2097 later_rq = NULL;
2098 }
2099
2100 return later_rq;
2101 }
2102
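/*
 * Return the leftmost (i.e. earliest deadline) task on this rq's
 * pushable tree, or NULL if there is nothing to push. Unlike
 * pick_earliest_pushable_dl_task(), no CPU affinity filtering is done
 * here; find_lock_later_rq() later picks a target rq the task can run on.
 */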
2103 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2104 {
2105 struct task_struct *p;
2106
2107 if (!has_pushable_dl_tasks(rq))
2108 return NULL;
2109
2110 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2111 struct task_struct, pushable_dl_tasks);
2112
2113 BUG_ON(rq->cpu != task_cpu(p));
2114 BUG_ON(task_current(rq, p));
2115 BUG_ON(p->nr_cpus_allowed <= 1);
2116
2117 BUG_ON(!task_on_rq_queued(p));
2118 BUG_ON(!dl_task(p));
2119
2120 return p;
2121 }
2122
2123 /*
2124 * See if the non-running -deadline tasks on this rq
2125 * can be sent to some other CPU where they can preempt
2126 * and start executing.
2127 */
2128 static int push_dl_task(struct rq *rq)
2129 {
2130 struct task_struct *next_task;
2131 struct rq *later_rq;
2132 int ret = 0;
2133
2134 if (!rq->dl.overloaded)
2135 return 0;
2136
2137 next_task = pick_next_pushable_dl_task(rq);
2138 if (!next_task)
2139 return 0;
2140
2141 retry:
2142 if (WARN_ON(next_task == rq->curr))
2143 return 0;
2144
2145 /*
2146 * If next_task preempts rq->curr, and rq->curr
2147 * can move away, it makes sense to just reschedule
2148 * without going further in pushing next_task.
2149 */
2150 if (dl_task(rq->curr) &&
2151 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2152 rq->curr->nr_cpus_allowed > 1) {
2153 resched_curr(rq);
2154 return 0;
2155 }
2156
2157 /* We might release rq lock */
2158 get_task_struct(next_task);
2159
2160 /* Will lock the rq it'll find */
2161 later_rq = find_lock_later_rq(next_task, rq);
2162 if (!later_rq) {
2163 struct task_struct *task;
2164
2165 /*
2166 * We must check all this again, since
2167 * find_lock_later_rq releases rq->lock and it is
2168 * then possible that next_task has migrated.
2169 */
2170 task = pick_next_pushable_dl_task(rq);
2171 if (task == next_task) {
2172 /*
2173 * The task is still there. We don't try
2174 * again, some other CPU will pull it when ready.
2175 */
2176 goto out;
2177 }
2178
2179 if (!task)
2180 /* No more tasks */
2181 goto out;
2182
2183 put_task_struct(next_task);
2184 next_task = task;
2185 goto retry;
2186 }
2187
2188 deactivate_task(rq, next_task, 0);
2189 set_task_cpu(next_task, later_rq->cpu);
2190
2191 /*
2192 * Update the later_rq clock here, because the clock is used
2193 * by cpufreq_update_util() inside __add_running_bw().
2194 */
2195 update_rq_clock(later_rq);
2196 activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2197 ret = 1;
2198
2199 resched_curr(later_rq);
2200
2201 double_unlock_balance(rq, later_rq);
2202
2203 out:
2204 put_task_struct(next_task);
2205
2206 return ret;
2207 }
2208
2209 static void push_dl_tasks(struct rq *rq)
2210 {
2211 /* push_dl_task() will return true if it moved a -deadline task */
2212 while (push_dl_task(rq))
2213 ;
2214 }
2215
2216 static void pull_dl_task(struct rq *this_rq)
2217 {
2218 int this_cpu = this_rq->cpu, cpu;
2219 struct task_struct *p;
2220 bool resched = false;
2221 struct rq *src_rq;
2222 u64 dmin = LONG_MAX;
2223
2224 if (likely(!dl_overloaded(this_rq)))
2225 return;
2226
2227 /*
2228 * Match the barrier from dl_set_overloaded; this guarantees that if we
2229 * see overloaded we must also see the dlo_mask bit.
2230 */
2231 smp_rmb();
2232
2233 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2234 if (this_cpu == cpu)
2235 continue;
2236
2237 src_rq = cpu_rq(cpu);
2238
2239 /*
2240 * It looks racy, and it is! However, as in sched_rt.c,
2241 * we are fine with this.
2242 */
2243 if (this_rq->dl.dl_nr_running &&
2244 dl_time_before(this_rq->dl.earliest_dl.curr,
2245 src_rq->dl.earliest_dl.next))
2246 continue;
2247
2248 /* Might drop this_rq->lock */
2249 double_lock_balance(this_rq, src_rq);
2250
2251 /*
2252 * If there are no more pullable tasks on the
2253 * rq, we're done with it.
2254 */
2255 if (src_rq->dl.dl_nr_running <= 1)
2256 goto skip;
2257
2258 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2259
2260 /*
2261 * We found a task to be pulled if:
2262 * - it preempts our current (if there's one),
2263 * - it will preempt the last one we pulled (if any).
2264 */
2265 if (p && dl_time_before(p->dl.deadline, dmin) &&
2266 (!this_rq->dl.dl_nr_running ||
2267 dl_time_before(p->dl.deadline,
2268 this_rq->dl.earliest_dl.curr))) {
2269 WARN_ON(p == src_rq->curr);
2270 WARN_ON(!task_on_rq_queued(p));
2271
2272 /*
2273 * We only pull p if it does not have an earlier deadline than
2274 * src_rq's current task: otherwise p will preempt and run there anyway.
2275 */
2276 if (dl_time_before(p->dl.deadline,
2277 src_rq->curr->dl.deadline))
2278 goto skip;
2279
2280 resched = true;
2281
2282 deactivate_task(src_rq, p, 0);
2283 set_task_cpu(p, this_cpu);
2284 activate_task(this_rq, p, 0);
2285 dmin = p->dl.deadline;
2286
2287 /* Is there any other task even earlier? */
2288 }
2289 skip:
2290 double_unlock_balance(this_rq, src_rq);
2291 }
2292
2293 if (resched)
2294 resched_curr(this_rq);
2295 }
2296
2297 /*
2298 * Since the task is not running and a reschedule is not going to happen
2299 * anytime soon on its runqueue, we try pushing it away now.
2300 */
2301 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2302 {
2303 if (!task_running(rq, p) &&
2304 !test_tsk_need_resched(rq->curr) &&
2305 p->nr_cpus_allowed > 1 &&
2306 dl_task(rq->curr) &&
2307 (rq->curr->nr_cpus_allowed < 2 ||
2308 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2309 push_dl_tasks(rq);
2310 }
2311 }
2312
2313 static void set_cpus_allowed_dl(struct task_struct *p,
2314 const struct cpumask *new_mask)
2315 {
2316 struct root_domain *src_rd;
2317 struct rq *rq;
2318
2319 BUG_ON(!dl_task(p));
2320
2321 rq = task_rq(p);
2322 src_rd = rq->rd;
2323 /*
2324 * Migrating a SCHED_DEADLINE task between exclusive
2325 * cpusets (different root_domains) entails a bandwidth
2326 * update. We already made space for us in the destination
2327 * domain (see cpuset_can_attach()).
2328 */
2329 if (!cpumask_intersects(src_rd->span, new_mask)) {
2330 struct dl_bw *src_dl_b;
2331
2332 src_dl_b = dl_bw_of(cpu_of(rq));
2333 /*
2334 * We now free resources of the root_domain we are migrating
2335 * off. In the worst case, sched_setattr() may temporarily fail
2336 * until we complete the update.
2337 */
2338 raw_spin_lock(&src_dl_b->lock);
2339 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2340 raw_spin_unlock(&src_dl_b->lock);
2341 }
2342
2343 set_cpus_allowed_common(p, new_mask);
2344 }
2345
2346 /* Assumes rq->lock is held */
2347 static void rq_online_dl(struct rq *rq)
2348 {
2349 if (rq->dl.overloaded)
2350 dl_set_overload(rq);
2351
2352 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2353 if (rq->dl.dl_nr_running > 0)
2354 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2355 }
2356
2357 /* Assumes rq->lock is held */
2358 static void rq_offline_dl(struct rq *rq)
2359 {
2360 if (rq->dl.overloaded)
2361 dl_clear_overload(rq);
2362
2363 cpudl_clear(&rq->rd->cpudl, rq->cpu);
2364 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2365 }
2366
2367 void __init init_sched_dl_class(void)
2368 {
2369 unsigned int i;
2370
2371 for_each_possible_cpu(i)
2372 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2373 GFP_KERNEL, cpu_to_node(i));
2374 }
2375
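/*
 * Charge p's bandwidth to the root_domain it now belongs to; used when
 * root_domains are rebuilt (e.g. after cpuset changes) and the
 * bandwidth accounting has to be reconstructed task by task.
 */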
2376 void dl_add_task_root_domain(struct task_struct *p)
2377 {
2378 struct rq_flags rf;
2379 struct rq *rq;
2380 struct dl_bw *dl_b;
2381
2382 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2383 if (!dl_task(p)) {
2384 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2385 return;
2386 }
2387
2388 rq = __task_rq_lock(p, &rf);
2389
2390 dl_b = &rq->rd->dl_bw;
2391 raw_spin_lock(&dl_b->lock);
2392
2393 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2394
2395 raw_spin_unlock(&dl_b->lock);
2396
2397 task_rq_unlock(rq, p, &rf);
2398 }
2399
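/*
 * Zero rd's accounted -deadline bandwidth; the per-task contributions
 * are then re-added through dl_add_task_root_domain() while rebuilding.
 */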
2400 void dl_clear_root_domain(struct root_domain *rd)
2401 {
2402 unsigned long flags;
2403
2404 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2405 rd->dl_bw.total_bw = 0;
2406 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2407 }
2408
2409 #endif /* CONFIG_SMP */
2410
2411 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2412 {
2413 /*
2414 * task_non_contending() can start the "inactive timer" (if the 0-lag
2415 * time is in the future). If the task switches back to dl before
2416 * the "inactive timer" fires, it can continue to consume its current
2417 * runtime using its current deadline. If it stays outside of
2418 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2419 * will reset the task parameters.
2420 */
2421 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2422 task_non_contending(p);
2423
2424 if (!task_on_rq_queued(p)) {
2425 /*
2426 * The inactive timer is armed. However, p is leaving DEADLINE and
2427 * might migrate away from this rq while continuing to run in
2428 * some other class. We need to remove its contribution from
2429 * this rq running_bw now, or sub_rq_bw (below) will complain.
2430 */
2431 if (p->dl.dl_non_contending)
2432 sub_running_bw(&p->dl, &rq->dl);
2433 sub_rq_bw(&p->dl, &rq->dl);
2434 }
2435
2436 /*
2437 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2438 * at the 0-lag time, because the task could have been migrated
2439 * while running as SCHED_OTHER in the meantime.
2440 */
2441 if (p->dl.dl_non_contending)
2442 p->dl.dl_non_contending = 0;
2443
2444 /*
2445 * Since this might be the only -deadline task on the rq,
2446 * this is the right place to try to pull some other one
2447 * from an overloaded CPU, if any.
2448 */
2449 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2450 return;
2451
2452 deadline_queue_pull_task(rq);
2453 }
2454
2455 /*
2456 * When switching to -deadline, we may overload the rq, then
2457 * we try to push someone off, if possible.
2458 */
2459 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2460 {
2461 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2462 put_task_struct(p);
2463
2464 /* If p is not queued we will update its parameters at next wakeup. */
2465 if (!task_on_rq_queued(p)) {
2466 add_rq_bw(&p->dl, &rq->dl);
2467
2468 return;
2469 }
2470
2471 if (rq->curr != p) {
2472 #ifdef CONFIG_SMP
2473 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2474 deadline_queue_push_tasks(rq);
2475 #endif
2476 if (dl_task(rq->curr))
2477 check_preempt_curr_dl(rq, p, 0);
2478 else
2479 resched_curr(rq);
2480 } else {
2481 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2482 }
2483 }
2484
2485 /*
2486 * If the scheduling parameters of a -deadline task changed,
2487 * a push or pull operation might be needed.
2488 */
2489 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2490 int oldprio)
2491 {
2492 if (task_on_rq_queued(p) || rq->curr == p) {
2493 #ifdef CONFIG_SMP
2494 /*
2495 * This might be too much, but unfortunately
2496 * we don't have the old deadline value, and
2497 * we can't tell whether the task is raising
2498 * or lowering its prio, so...
2499 */
2500 if (!rq->dl.overloaded)
2501 deadline_queue_pull_task(rq);
2502
2503 /*
2504 * If we now have an earlier deadline task than p,
2505 * then reschedule, provided p is still on this
2506 * runqueue.
2507 */
2508 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2509 resched_curr(rq);
2510 #else
2511 /*
2512 * Again, we don't know if p has an earlier
2513 * or later deadline, so let's blindly set a
2514 * (maybe not needed) rescheduling point.
2515 */
2516 resched_curr(rq);
2517 #endif /* CONFIG_SMP */
2518 }
2519 }
2520
2521 const struct sched_class dl_sched_class
2522 __section("__dl_sched_class") = {
2523 .enqueue_task = enqueue_task_dl,
2524 .dequeue_task = dequeue_task_dl,
2525 .yield_task = yield_task_dl,
2526
2527 .check_preempt_curr = check_preempt_curr_dl,
2528
2529 .pick_next_task = pick_next_task_dl,
2530 .put_prev_task = put_prev_task_dl,
2531 .set_next_task = set_next_task_dl,
2532
2533 #ifdef CONFIG_SMP
2534 .balance = balance_dl,
2535 .select_task_rq = select_task_rq_dl,
2536 .migrate_task_rq = migrate_task_rq_dl,
2537 .set_cpus_allowed = set_cpus_allowed_dl,
2538 .rq_online = rq_online_dl,
2539 .rq_offline = rq_offline_dl,
2540 .task_woken = task_woken_dl,
2541 #endif
2542
2543 .task_tick = task_tick_dl,
2544 .task_fork = task_fork_dl,
2545
2546 .prio_changed = prio_changed_dl,
2547 .switched_from = switched_from_dl,
2548 .switched_to = switched_to_dl,
2549
2550 .update_curr = update_curr_dl,
2551 };
2552
2553 int sched_dl_global_validate(void)
2554 {
2555 u64 runtime = global_rt_runtime();
2556 u64 period = global_rt_period();
2557 u64 new_bw = to_ratio(period, runtime);
2558 struct dl_bw *dl_b;
2559 int cpu, cpus, ret = 0;
2560 unsigned long flags;
2561
2562 /*
2563 * Here we want to check that the bandwidth is not being set to a
2564 * value smaller than the currently allocated bandwidth in
2565 * any of the root_domains.
2566 *
2567 * FIXME: Cycling over all the CPUs is overkill, but simpler than
2568 * cycling over the root_domains... Discussion on different/better
2569 * solutions is welcome!
2570 */
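	/*
	 * Illustrative numbers (assuming the default RT limits,
	 * sched_rt_runtime_us = 950000 over sched_rt_period_us = 1000000):
	 * new_bw ~= 0.95 << BW_SHIFT, so a 4-CPU root_domain is refused
	 * (-EBUSY) iff its already admitted total_bw exceeds 3.8 CPUs
	 * worth of bandwidth.
	 */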
2571 for_each_possible_cpu(cpu) {
2572 rcu_read_lock_sched();
2573 dl_b = dl_bw_of(cpu);
2574 cpus = dl_bw_cpus(cpu);
2575
2576 raw_spin_lock_irqsave(&dl_b->lock, flags);
2577 if (new_bw * cpus < dl_b->total_bw)
2578 ret = -EBUSY;
2579 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2580
2581 rcu_read_unlock_sched();
2582
2583 if (ret)
2584 break;
2585 }
2586
2587 return ret;
2588 }
2589
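/*
 * Set up the GRUB reclaiming parameters of this rq from the global RT
 * limits. Worked example (assuming the default sysctls, runtime =
 * 950000us over a 1000000us period): extra_bw = to_ratio(period,
 * runtime) ~= 0.95 << BW_SHIFT, while bw_ratio ~= (1 / 0.95) <<
 * RATIO_SHIFT ~= 269, the inverse ratio used to scale reclaimed
 * bandwidth back up.
 */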
2590 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2591 {
2592 if (global_rt_runtime() == RUNTIME_INF) {
2593 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2594 dl_rq->extra_bw = 1 << BW_SHIFT;
2595 } else {
2596 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2597 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2598 dl_rq->extra_bw = to_ratio(global_rt_period(),
2599 global_rt_runtime());
2600 }
2601 }
2602
2603 void sched_dl_do_global(void)
2604 {
2605 u64 new_bw = -1;
2606 struct dl_bw *dl_b;
2607 int cpu;
2608 unsigned long flags;
2609
2610 def_dl_bandwidth.dl_period = global_rt_period();
2611 def_dl_bandwidth.dl_runtime = global_rt_runtime();
2612
2613 if (global_rt_runtime() != RUNTIME_INF)
2614 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2615
2616 /*
2617 * FIXME: As above...
2618 */
2619 for_each_possible_cpu(cpu) {
2620 rcu_read_lock_sched();
2621 dl_b = dl_bw_of(cpu);
2622
2623 raw_spin_lock_irqsave(&dl_b->lock, flags);
2624 dl_b->bw = new_bw;
2625 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2626
2627 rcu_read_unlock_sched();
2628 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2629 }
2630 }
2631
2632 /*
2633 * We must be sure that accepting a new task (or allowing changing the
2634 * parameters of an existing one) is consistent with the bandwidth
2635 * constraints. If so, this function also updates the currently
2636 * allocated bandwidth accordingly, to reflect the new situation.
2637 *
2638 * This function is called while holding p's rq->lock.
2639 */
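/*
 * Worked example (illustrative parameters): sched_runtime = 10ms with
 * sched_deadline = sched_period = 100ms requests new_bw =
 * to_ratio(100ms, 10ms) ~= 0.1 << BW_SHIFT; the request is admitted as
 * long as total_bw - old_bw + new_bw still fits within the root_domain
 * bandwidth limit scaled by its capacity (see __dl_overflow()).
 */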
2640 int sched_dl_overflow(struct task_struct *p, int policy,
2641 const struct sched_attr *attr)
2642 {
2643 u64 period = attr->sched_period ?: attr->sched_deadline;
2644 u64 runtime = attr->sched_runtime;
2645 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2646 int cpus, err = -1, cpu = task_cpu(p);
2647 struct dl_bw *dl_b = dl_bw_of(cpu);
2648 unsigned long cap;
2649
2650 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2651 return 0;
2652
2653 /* !deadline task may carry old deadline bandwidth */
2654 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2655 return 0;
2656
2657 /*
2658 * Whether a task enters, leaves, or stays -deadline but changes
2659 * its parameters, we may need to update the total allocated
2660 * bandwidth of the container accordingly.
2661 */
2662 raw_spin_lock(&dl_b->lock);
2663 cpus = dl_bw_cpus(cpu);
2664 cap = dl_bw_capacity(cpu);
2665
2666 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2667 !__dl_overflow(dl_b, cap, 0, new_bw)) {
2668 if (hrtimer_active(&p->dl.inactive_timer))
2669 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2670 __dl_add(dl_b, new_bw, cpus);
2671 err = 0;
2672 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2673 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2674 /*
2675 * XXX this is slightly incorrect: when the task
2676 * utilization decreases, we should delay the total
2677 * utilization change until the task's 0-lag point.
2678 * But this would require to set the task's "inactive
2679 * timer" when the task is not inactive.
2680 */
2681 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2682 __dl_add(dl_b, new_bw, cpus);
2683 dl_change_utilization(p, new_bw);
2684 err = 0;
2685 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2686 /*
2687 * Do not decrease the total deadline utilization here,
2688 * switched_from_dl() will take care to do it at the correct
2689 * (0-lag) time.
2690 */
2691 err = 0;
2692 }
2693 raw_spin_unlock(&dl_b->lock);
2694
2695 return err;
2696 }
2697
2698 /*
2699 * This function initializes the sched_dl_entity of a newly becoming
2700 * SCHED_DEADLINE task.
2701 *
2702 * Only the static values are considered here, the actual runtime and the
2703 * absolute deadline will be properly calculated when the task is enqueued
2704 * for the first time with its new policy.
2705 */
2706 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2707 {
2708 struct sched_dl_entity *dl_se = &p->dl;
2709
2710 dl_se->dl_runtime = attr->sched_runtime;
2711 dl_se->dl_deadline = attr->sched_deadline;
2712 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2713 dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
2714 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2715 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2716 }
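/*
 * For example (illustrative values): runtime = 2ms, deadline = 5ms and
 * period = 20ms yield dl_bw = to_ratio(20ms, 2ms) ~= 0.1 << BW_SHIFT
 * (the bandwidth used for admission control) and dl_density =
 * to_ratio(5ms, 2ms) = 0.4 << BW_SHIFT (used by dl_entity_overflow()
 * in the wakeup-time CBS check).
 */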
2717
2718 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2719 {
2720 struct sched_dl_entity *dl_se = &p->dl;
2721
2722 attr->sched_priority = p->rt_priority;
2723 attr->sched_runtime = dl_se->dl_runtime;
2724 attr->sched_deadline = dl_se->dl_deadline;
2725 attr->sched_period = dl_se->dl_period;
2726 attr->sched_flags &= ~SCHED_DL_FLAGS;
2727 attr->sched_flags |= dl_se->flags;
2728 }
2729
2730 /*
2731 * Default limits for DL period; on the top end we guard against small util
2732 * tasks still getting ridiculously long effective runtimes, on the bottom end we
2733 * guard against timer DoS.
2734 */
2735 unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
2736 unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */
2737
2738 /*
2739 * This function validates the new parameters of a -deadline task.
2740 * We require the deadline to be nonzero and greater than or equal
2741 * to the runtime, and the period to be zero or greater than or
2742 * equal to the deadline. Furthermore, we have to be sure that
2743 * user parameters are above the internal resolution of 1us (we
2744 * check sched_runtime only since it is always the smaller one) and
2745 * below 2^63 ns (we have to check both sched_deadline and
2746 * sched_period, as the latter can be zero).
2747 */
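/*
 * For example (illustrative values): sched_runtime = 2ms,
 * sched_deadline = 5ms and sched_period = 20ms passes every check,
 * while sched_runtime = 10ms with sched_deadline = 5ms fails the
 * runtime <= deadline test, and a 50us period is rejected for being
 * below sysctl_sched_dl_period_min.
 */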
2748 bool __checkparam_dl(const struct sched_attr *attr)
2749 {
2750 u64 period, max, min;
2751
2752 /* special dl tasks don't actually use any parameter */
2753 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2754 return true;
2755
2756 /* deadline != 0 */
2757 if (attr->sched_deadline == 0)
2758 return false;
2759
2760 /*
2761 * Since we truncate DL_SCALE bits, make sure we're at least
2762 * that big.
2763 */
2764 if (attr->sched_runtime < (1ULL << DL_SCALE))
2765 return false;
2766
2767 /*
2768 * Since we use the MSB for wrap-around and sign issues, make
2769 * sure it's not set (mind that period can be equal to zero).
2770 */
2771 if (attr->sched_deadline & (1ULL << 63) ||
2772 attr->sched_period & (1ULL << 63))
2773 return false;
2774
2775 period = attr->sched_period;
2776 if (!period)
2777 period = attr->sched_deadline;
2778
2779 /* runtime <= deadline <= period (if period != 0) */
2780 if (period < attr->sched_deadline ||
2781 attr->sched_deadline < attr->sched_runtime)
2782 return false;
2783
2784 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
2785 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
2786
2787 if (period < min || period > max)
2788 return false;
2789
2790 return true;
2791 }
2792
2793 /*
2794 * This function clears the sched_dl_entity static params.
2795 */
2796 void __dl_clear_params(struct task_struct *p)
2797 {
2798 struct sched_dl_entity *dl_se = &p->dl;
2799
2800 dl_se->dl_runtime = 0;
2801 dl_se->dl_deadline = 0;
2802 dl_se->dl_period = 0;
2803 dl_se->flags = 0;
2804 dl_se->dl_bw = 0;
2805 dl_se->dl_density = 0;
2806
2807 dl_se->dl_throttled = 0;
2808 dl_se->dl_yielded = 0;
2809 dl_se->dl_non_contending = 0;
2810 dl_se->dl_overrun = 0;
2811
2812 #ifdef CONFIG_RT_MUTEXES
2813 dl_se->pi_se = dl_se;
2814 #endif
2815 }
2816
2817 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2818 {
2819 struct sched_dl_entity *dl_se = &p->dl;
2820
2821 if (dl_se->dl_runtime != attr->sched_runtime ||
2822 dl_se->dl_deadline != attr->sched_deadline ||
2823 dl_se->dl_period != attr->sched_period ||
2824 dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
2825 return true;
2826
2827 return false;
2828 }
2829
2830 #ifdef CONFIG_SMP
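/*
 * Check whether the -deadline bandwidth admitted in cpuset @cur would
 * still fit if the cpuset were shrunk to @trial: the reserved total_bw
 * must not exceed bw * nr_cpus of the trial mask. Returns 1 if
 * shrinking is safe, 0 otherwise.
 */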
2831 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2832 const struct cpumask *trial)
2833 {
2834 int ret = 1, trial_cpus;
2835 struct dl_bw *cur_dl_b;
2836 unsigned long flags;
2837
2838 rcu_read_lock_sched();
2839 cur_dl_b = dl_bw_of(cpumask_any(cur));
2840 trial_cpus = cpumask_weight(trial);
2841
2842 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2843 if (cur_dl_b->bw != -1 &&
2844 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2845 ret = 0;
2846 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2847 rcu_read_unlock_sched();
2848
2849 return ret;
2850 }
2851
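/*
 * Check whether @cpu's root_domain can absorb @p's bandwidth (or, with
 * @p == NULL, whether it is overcommitted at all, as on CPU hotplug).
 * On success with @p != NULL the bandwidth is reserved immediately,
 * since callers cannot fail past this point. Returns -EBUSY on
 * overflow, 0 otherwise.
 */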
2852 int dl_cpu_busy(int cpu, struct task_struct *p)
2853 {
2854 unsigned long flags, cap;
2855 struct dl_bw *dl_b;
2856 bool overflow;
2857
2858 rcu_read_lock_sched();
2859 dl_b = dl_bw_of(cpu);
2860 raw_spin_lock_irqsave(&dl_b->lock, flags);
2861 cap = dl_bw_capacity(cpu);
2862 overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
2863
2864 if (!overflow && p) {
2865 /*
2866 * We reserve space for this task in the destination
2867 * root_domain, as we can't fail after this point.
2868 * We will free resources in the source root_domain
2869 * later on (see set_cpus_allowed_dl()).
2870 */
2871 __dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
2872 }
2873
2874 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2875 rcu_read_unlock_sched();
2876
2877 return overflow ? -EBUSY : 0;
2878 }
2879 #endif
2880
2881 #ifdef CONFIG_SCHED_DEBUG
2882 void print_dl_stats(struct seq_file *m, int cpu)
2883 {
2884 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2885 }
2886 #endif /* CONFIG_SCHED_DEBUG */
2887