/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't be able to jeopardize other
 * tasks' behaviour.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"

#include <linux/slab.h>
struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}
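
/*
 * Note on init_dl_bw() above: to_ratio() (kernel/sched/core.c) expresses
 * runtime/period as a fixed-point fraction scaled by 2^20, so the default
 * global limit of 950000us runtime every 1000000us period yields
 * (950000 << 20) / 1000000 ~= 996147, i.e. ~95% of each CPU; bw == -1
 * means admission control is disabled (unlimited RT runtime).
 */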

void init_dl_rq(struct dl_rq *dl_rq)
{
	dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}
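
/*
 * A runqueue counts as "overloaded" only when it has more than one runnable
 * -deadline task and at least one of them can migrate: only then can a
 * waiting (non-running) task usefully be pushed to another CPU.
 */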

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost) {
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
		dl_rq->earliest_dl.next = p->dl.deadline;
	}

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}
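
/*
 * earliest_dl.next thus caches the deadline of the leftmost (earliest
 * deadline) pushable task, which pull_dl_task() on other CPUs reads to
 * decide whether pulling from this runqueue can possibly be profitable,
 * without taking its lock.
 */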

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
		if (next_node) {
			dl_rq->earliest_dl.next = rb_entry(next_node,
				struct task_struct, pushable_dl_tasks)->dl.deadline;
		}
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}

static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_dl_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;

	later_rq = find_lock_later_rq(p, rq);
	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online cpu.
		 */
		cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable cpu.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	set_task_cpu(p, later_rq->cpu);
	double_unlock_balance(later_rq, rq);

	return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void queue_push_tasks(struct rq *rq)
{
}

static inline void queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * discipline.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(dl_se->dl_boosted);
	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline.
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
	dl_se->runtime = dl_se->dl_runtime;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to overcome its
 * runtime, or it just underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after warning userspace about that,
	 * we still try to keep things running by
	 * postponing the deadline and replenishing the runtime.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}
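
/*
 * For example, with dl_runtime = 10ms and dl_period = 100ms, an entity
 * throttled at runtime = -3ms gets a single pass through the loop above:
 * runtime becomes 7ms and the absolute deadline moves 100ms further out,
 * so the overrun is paid back out of the next instance's budget.
 */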

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the task's bandwidth, then we can
 * keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * tasks with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime of this instance of the
	 * task, and (deadline - t), since t is rq->clock, is the
	 * time left to the (absolute) deadline. Even if overflowing
	 * the u64 type is very unlikely to occur in both cases, here
	 * we scale down the values by DL_SCALE bits before multiplying,
	 * so that the products cannot overflow, at the price of a
	 * little precision.
	 */
	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
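
/*
 * For example, a task with dl_runtime = 10ms and dl_deadline = 100ms that
 * wakes up with runtime = 2ms left and 30ms to its old absolute deadline
 * keeps both (2/30 < 10/100), while waking with runtime = 4ms and the same
 * 30ms left would overflow (4/30 > 10/100) and forces a refill with a
 * fresh deadline.
 */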

/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}
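
/*
 * Since the absolute deadline sits dl_deadline after the start of the
 * current period, (deadline - dl_deadline) recovers that period's start;
 * adding dl_period then gives the start of the next period. E.g., with
 * dl_deadline = 40ms, dl_period = 100ms and an absolute deadline at
 * t = 140ms, the current period started at 100ms and the next one starts
 * at 200ms.
 */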

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->dl_timer;
	struct rq *rq = task_rq(p);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_held(&rq->lock);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_next_period(dl_se));
	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		get_task_struct(p);
		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
	}

	return 1;
}
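
/*
 * The conversion above is: act = next_period + (hrtimer_now - rq_clock),
 * i.e. the rq-clock target is translated into the hrtimer time base by
 * adding the (constant at this instant) offset between the two clocks,
 * so the timer fires when rq_clock() reaches dl_next_period().
 */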

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on the fact the task is active,
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the first case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the second, we
 * simply do nothing but clearing dl_throttled, so that runtime and
 * deadline updating (and the queueing back to dl_rq) will be done by
 * the next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
	if (!dl_task(p)) {
		__dl_clear_params(p);
		goto unlock;
	}

	/*
	 * The task might have been boosted by someone else and might be in
	 * the boosting/deboosting path; in that case it's not throttled.
	 */
	if (dl_se->dl_boosted)
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; or we already received
	 * a replenishment from rt_mutex_setprio().
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out; like:
	 *
	 *   schedule()
	 *     deactivate_task()
	 *       dequeue_task_dl()
	 *         update_curr_dl()
	 *           start_dl_timer()
	 *         __dequeue_task_dl()
	 *     prev->on_rq = 0;
	 *
	 * We can be both throttled and !queued. Replenish the counter
	 * now; a future wakeup will enqueue the task with the new
	 * runtime and deadline.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se, dl_se);
		goto unlock;
	}

#ifdef CONFIG_SMP
	if (unlikely(!rq->online)) {
		/*
		 * If the runqueue is no longer available, migrate the
		 * task elsewhere. This necessarily changes rq.
		 */
		lockdep_unpin_lock(&rq->lock, rf.cookie);
		rq = dl_task_offline_migration(rq, p);
		rf.cookie = lockdep_pin_lock(&rq->lock);
		update_rq_clock(rq);

		/*
		 * Now that the task has been migrated to the new RQ and we
		 * have that locked, proceed as normal and enqueue the task
		 * there.
		 */
	}
#endif

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);

#ifdef CONFIG_SMP
	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
		 * rq->lock.
		 */
		rq_unpin_lock(rq, &rf);
		push_dl_task(rq);
		rq_repin_lock(rq, &rf);
	}
#endif

unlock:
	task_rq_unlock(rq, p, &rf);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the begin of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
			return;
		dl_se->dl_throttled = 1;
	}
}
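
/*
 * Concretely: with dl_runtime = 5ms, dl_deadline = 40ms, dl_period = 100ms,
 * a wakeup 60ms into the period falls after the deadline (40ms) but before
 * the next period (100ms); instead of replenishing immediately -- which
 * could grant 5ms every 40ms instead of the reserved 5ms every 100ms --
 * the task is throttled until the next period begins.
 */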

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * on hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * legit time scale for charging the running task.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0)) {
		if (unlikely(dl_se->dl_yielded))
			goto throttle;
		return;
	}

	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	dl_se->runtime -= delta_exec;

throttle:
	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
		dl_se->dl_throttled = 1;
		__dequeue_task_dl(rq, curr, 0);
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_clear(&rq->rd->cpudl, rq->cpu);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (flags & ENQUEUE_WAKEUP)
		update_dl_entity(dl_se, pi_se);
	else if (flags & ENQUEUE_REPLENISH)
		replenish_dl_entity(dl_se, pi_se);

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline < dl_se->dl_period;
}
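
/*
 * A task with dl_deadline == dl_period is an "implicit deadline" task,
 * the model CBS was designed for; dl_deadline < dl_period is the
 * "constrained deadline" case that needs the extra throttling check in
 * dl_check_constrained_dl() above.
 */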

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if:
	 * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
	 * - our dl_boosted is set (i.e. the pi-waiter's deadline is
	 *   earlier than ours).
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
		pi_se = &pi_task->dl;
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task
		 * that is going to be deboosted, but exceeds its
		 * runtime while doing so. No point in replenishing
		 * it, as it's going to return back to its original
		 * scheduling class after this.
		 */
		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
		return;
	}

	/*
	 * Check if a constrained deadline task was activated
	 * after the deadline but before the next period.
	 * If that is the case, the task will be throttled and
	 * the replenishment timer will be set to the next period.
	 */
	if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
		dl_check_constrained_dl(&p->dl);

	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
		return;

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use it.
 */
static void yield_task_dl(struct rq *rq)
{
	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	rq->curr->dl.dl_yielded = 1;

	update_rq_clock(rq);
	update_curr_dl(rq);
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
	rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1 &&
		    (dl_time_before(p->dl.deadline,
				    cpu_rq(target)->dl.earliest_dl.curr) ||
		     (cpu_rq(target)->dl.dl_nr_running == 0)))
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_curr(rq);
}

#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}
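
/*
 * Since the rb-tree is ordered by absolute deadline and the leftmost node
 * is cached, the EDF pick -- the queued task with the earliest deadline --
 * is O(1) here.
 */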

struct task_struct *
pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (need_pull_dl_task(rq, prev)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we're
		 * being very careful to re-start the picking loop.
		 */
		rq_unpin_lock(rq, rf);
		pull_dl_task(rq);
		rq_repin_lock(rq, rf);
		/*
		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
		if (rq->stop && task_on_rq_queued(rq->stop))
			return RETRY_TASK;
	}

	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
	if (prev->sched_class == &dl_sched_class)
		update_curr_dl(rq);

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);

	queue_push_tasks(rq);

	return p;
}

static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

	/*
	 * Even when we have runtime, update_curr_dl() might have resulted in
	 * us not being the leftmost task anymore. In that case NEED_RESCHED
	 * will be set and schedule() will start a new hrtick for the next
	 * task.
	 */
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
	    is_leftmost(p, &rq->dl))
		start_hrtick_dl(rq, p);
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
}

static void task_dead_dl(struct task_struct *p)
{
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	/*
	 * Since we are TASK_DEAD we won't slip out of the domain!
	 */
	raw_spin_lock_irq(&dl_b->lock);
	/* XXX we should retain the bw until 0-lag */
	dl_b->total_bw -= p->dl.dl_bw;
	raw_spin_unlock_irq(&dl_b->lock);
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, &p->cpus_allowed))
		return 1;
	return 0;
}

/*
 * Return the earliest pushable rq's task, which is suitable to be executed
 * on the cpu, NULL otherwise:
 */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
	struct task_struct *p = NULL;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

next_node:
	if (next_node) {
		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);

		if (pick_dl_task(rq, p, cpu))
			return p;

		next_node = rb_next(next_node);
		goto next_node;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int best_cpu, cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	/*
	 * We have to consider system topology and task affinity
	 * first, then we can look for a suitable cpu.
	 */
	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
			task, later_mask);
	if (best_cpu == -1)
		return -1;

	/*
	 * If we are here, some target has been found,
	 * the most suitable of which is cached in best_cpu.
	 * This is, among the runqueues where the current tasks
	 * have later deadlines than the task's one, the rq
	 * with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			/*
			 * Last chance: if best_cpu is valid and is
			 * in the span of this domain, that becomes our choice.
			 */
			if (best_cpu < nr_cpu_ids &&
			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}

/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		if (later_rq->dl.dl_nr_running &&
		    !dl_time_before(task->dl.deadline,
				    later_rq->dl.earliest_dl.curr)) {
			/*
			 * Target rq has tasks of equal or earlier deadline,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			later_rq = NULL;
			break;
		}

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !dl_task(task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!dl_task(p));

	return p;
}

/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;
	int ret = 0;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);
	ret = 1;

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_dl_tasks(struct rq *rq)
{
	/* push_dl_task() will return true if it moved a -deadline task */
	while (push_dl_task(rq))
		;
}

static void pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	struct task_struct *p;
	bool resched = false;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return;

	/*
	 * Match the barrier from dl_set_overload; this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * We can pull only a task, which is pushable
		 * on its rq, and no others.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			resched = true;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	if (resched)
		resched_curr(this_rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
		push_dl_tasks(rq);
	}
}

static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct root_domain *src_rd;
	struct rq *rq;

	BUG_ON(!dl_task(p));

	rq = task_rq(p);
	src_rd = rq->rd;
	/*
	 * Migrating a SCHED_DEADLINE task between exclusive
	 * cpusets (different root_domains) entails a bandwidth
	 * update. We already made space for us in the destination
	 * domain (see cpuset_can_attach()).
	 */
	if (!cpumask_intersects(src_rd->span, new_mask)) {
		struct dl_bw *src_dl_b;

		src_dl_b = dl_bw_of(cpu_of(rq));
		/*
		 * We now free resources of the root_domain we are migrating
		 * off from.
		 */
		raw_spin_lock(&src_dl_b->lock);
		__dl_clear(src_dl_b, p->dl.dl_bw);
		raw_spin_unlock(&src_dl_b->lock);
	}

	set_cpus_allowed_common(p, new_mask);
}

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_clear(&rq->rd->cpudl, rq->cpu);
	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}

void __init init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}
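
/*
 * Each CPU gets its own scratch cpumask (local_cpu_mask_dl), consumed by
 * cpudl_find() from find_later_rq(); allocating it per-CPU at boot avoids
 * placing a potentially large cpumask on the stack on every push decision.
 */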

#endif /* CONFIG_SMP */

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Start the deadline timer; if we switch back to dl before this we'll
	 * continue consuming our current CBS slice. If we stay outside of
	 * SCHED_DEADLINE we'll eventually lose our bandwidth.
	 */
	if (!start_dl_timer(p))
		__dl_clear_params(p);

	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
		return;

	queue_pull_task(rq);
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	/* If p is not queued we will update its parameters at next wakeup. */
	if (!task_on_rq_queued(p))
		return;

	/*
	 * If p is boosted we already updated its params in
	 * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
	 * p's deadline being now already after rq_clock(rq).
	 */
	if (dl_time_before(p->dl.deadline, rq_clock(rq)))
		setup_new_dl_entity(&p->dl);

	if (rq->curr != p) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
			queue_push_tasks(rq);
#endif
		if (dl_task(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_curr(rq);
	}
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			queue_pull_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_curr(rq);
#endif /* CONFIG_SMP */
	}
}

const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,
	.task_dead		= task_dead_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,

	.update_curr		= update_curr_dl,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);

void print_dl_stats(struct seq_file *m, int cpu)
{
	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */