/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 */
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;
22
23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24{
25 return container_of(dl_se, struct task_struct, dl);
26}
27
28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29{
30 return container_of(dl_rq, struct rq, dl);
31}
32
33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34{
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
37
38 return &rq->dl;
39}
40
41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42{
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
44}
45
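/* Is @p the leftmost (i.e. earliest deadline) task in its dl_rq's rb-tree? */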
46static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
47{
48 struct sched_dl_entity *dl_se = &p->dl;
49
50 return dl_rq->rb_leftmost == &dl_se->rb_node;
51}
52
53void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
54{
55 raw_spin_lock_init(&dl_b->dl_runtime_lock);
56 dl_b->dl_period = period;
57 dl_b->dl_runtime = runtime;
58}
59
60void init_dl_bw(struct dl_bw *dl_b)
61{
62 raw_spin_lock_init(&dl_b->lock);
63 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
64 if (global_rt_runtime() == RUNTIME_INF)
65 dl_b->bw = -1;
66 else
67 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
68 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
69 dl_b->total_bw = 0;
70}
71
72void init_dl_rq(struct dl_rq *dl_rq)
73{
74 dl_rq->rb_root = RB_ROOT;
75
76#ifdef CONFIG_SMP
77
78 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
79
80 dl_rq->dl_nr_migratory = 0;
81 dl_rq->overloaded = 0;
82 dl_rq->pushable_dl_tasks_root = RB_ROOT;
83#else
84 init_dl_bw(&dl_rq->dl_bw);
85#endif
86}
87
88#ifdef CONFIG_SMP
89
90static inline int dl_overloaded(struct rq *rq)
91{
92 return atomic_read(&rq->rd->dlo_count);
93}
94
95static inline void dl_set_overload(struct rq *rq)
96{
97 if (!rq->online)
98 return;
99
100 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
101
	/*
	 * Must be visible before the overload count is updated
	 * (see update_dl_migration()).
	 *
	 * Matched by the smp_rmb() in pull_dl_task().
	 */
107 smp_wmb();
108 atomic_inc(&rq->rd->dlo_count);
109}
110
111static inline void dl_clear_overload(struct rq *rq)
112{
113 if (!rq->online)
114 return;
115
116 atomic_dec(&rq->rd->dlo_count);
117 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
118}
119
120static void update_dl_migration(struct dl_rq *dl_rq)
121{
122 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
123 if (!dl_rq->overloaded) {
124 dl_set_overload(rq_of_dl_rq(dl_rq));
125 dl_rq->overloaded = 1;
126 }
127 } else if (dl_rq->overloaded) {
128 dl_clear_overload(rq_of_dl_rq(dl_rq));
129 dl_rq->overloaded = 0;
130 }
131}
132
133static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
134{
135 struct task_struct *p = dl_task_of(dl_se);
136
137 if (p->nr_cpus_allowed > 1)
138 dl_rq->dl_nr_migratory++;
139
140 update_dl_migration(dl_rq);
141}
142
143static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144{
145 struct task_struct *p = dl_task_of(dl_se);
146
147 if (p->nr_cpus_allowed > 1)
148 dl_rq->dl_nr_migratory--;
149
150 update_dl_migration(dl_rq);
151}
152
/*
 * The list of pushable -deadline tasks is not a plist, like in sched_rt.c;
 * it is an rb-tree ordered by the tasks' deadlines, with the leftmost
 * (earliest deadline) element cached.
 */
157static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
158{
159 struct dl_rq *dl_rq = &rq->dl;
160 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
161 struct rb_node *parent = NULL;
162 struct task_struct *entry;
163 int leftmost = 1;
164
165 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
166
167 while (*link) {
168 parent = *link;
169 entry = rb_entry(parent, struct task_struct,
170 pushable_dl_tasks);
171 if (dl_entity_preempt(&p->dl, &entry->dl))
172 link = &parent->rb_left;
173 else {
174 link = &parent->rb_right;
175 leftmost = 0;
176 }
177 }
178
179 if (leftmost)
180 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
181
182 rb_link_node(&p->pushable_dl_tasks, parent, link);
183 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
184}
185
186static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
187{
188 struct dl_rq *dl_rq = &rq->dl;
189
190 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
191 return;
192
193 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
194 struct rb_node *next_node;
195
196 next_node = rb_next(&p->pushable_dl_tasks);
197 dl_rq->pushable_dl_tasks_leftmost = next_node;
198 }
199
200 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
201 RB_CLEAR_NODE(&p->pushable_dl_tasks);
202}
203
204static inline int has_pushable_dl_tasks(struct rq *rq)
205{
206 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
207}
208
209static int push_dl_task(struct rq *rq);
210
211static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
212{
213 return dl_task(prev);
214}
215
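/*
 * Per-CPU balance-callback heads: push/pull work is queued on these and
 * run once it is safe to drop this rq's lock.
 */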
216static DEFINE_PER_CPU(struct callback_head, dl_push_head);
217static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
218
219static void push_dl_tasks(struct rq *);
220static void pull_dl_task(struct rq *);
221
222static inline void queue_push_tasks(struct rq *rq)
223{
224 if (!has_pushable_dl_tasks(rq))
225 return;
226
227 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
228}
229
230static inline void queue_pull_task(struct rq *rq)
231{
232 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
233}
234
235static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
236
237static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
238{
239 struct rq *later_rq = NULL;
240 bool fallback = false;
241
242 later_rq = find_lock_later_rq(p, rq);
243
244 if (!later_rq) {
245 int cpu;
246
		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online cpu.
		 */
251 fallback = true;
252 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
253 if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable cpu.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we expect this
			 * situation, so keep the task running on any active
			 * cpu as a last resort rather than crashing.
			 */
265 cpu = cpumask_any(cpu_active_mask);
266 }
267 later_rq = cpu_rq(cpu);
268 double_lock_balance(rq, later_rq);
269 }
270
	/*
	 * By now the task is replenished and enqueued; migrate it.
	 */
274 deactivate_task(rq, p, 0);
275 set_task_cpu(p, later_rq->cpu);
276 activate_task(later_rq, p, 0);
277
278 if (!fallback)
279 resched_curr(later_rq);
280
281 double_unlock_balance(later_rq, rq);
282
283 return later_rq;
284}
285
286#else
287
288static inline
289void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
290{
291}
292
293static inline
294void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
295{
296}
297
298static inline
299void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
300{
301}
302
303static inline
304void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
305{
306}
307
308static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
309{
310 return false;
311}
312
313static inline void pull_dl_task(struct rq *rq)
314{
315}
316
317static inline void queue_push_tasks(struct rq *rq)
318{
319}
320
321static inline void queue_pull_task(struct rq *rq)
322{
323}
324#endif
325
326static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
327static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
328static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
329 int flags);
330
/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a
 * -deadline entity wants to (try to!) synchronize its behaviour with
 * the scheduler's one, and to (try to!) reconcile itself with its own
 * scheduling parameters.
 */
343static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
344 struct sched_dl_entity *pi_se)
345{
346 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
347 struct rq *rq = rq_of_dl_rq(dl_rq);
348
349 WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
350
	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
356 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
357 dl_se->runtime = pi_se->dl_runtime;
358 dl_se->dl_new = 0;
359}
360
/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of a task lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want a misbehaving task to affect the scheduling of all the other
 * tasks; therefore the Constant Bandwidth Server (CBS) budgeting strategy
 * is used to confine each task within its own bandwidth.
 *
 * This function deals with that: when the runtime of a task is
 * replenished, its deadline is also postponed, so that the overall
 * bandwidth consumed by the task never exceeds what it asked for.
 */
379static void replenish_dl_entity(struct sched_dl_entity *dl_se,
380 struct sched_dl_entity *pi_se)
381{
382 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
383 struct rq *rq = rq_of_dl_rq(dl_rq);
384
385 BUG_ON(pi_se->dl_runtime <= 0);
386
	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
391 if (dl_se->dl_deadline == 0) {
392 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
393 dl_se->runtime = pi_se->dl_runtime;
394 }
395
	/*
	 * We keep moving the deadline away until we get some available
	 * runtime for the entity. This ensures correct handling of
	 * situations where the runtime overrun is arbitrarily large.
	 */
402 while (dl_se->runtime <= 0) {
403 dl_se->deadline += pi_se->dl_period;
404 dl_se->runtime += pi_se->dl_runtime;
405 }
406
	/*
	 * At this point, the deadline really should be "in the future"
	 * with respect to rq->clock. If it's not, we are, for some
	 * reason, lagging too much! Anyway, after having warned
	 * userspace about that, we still try to keep things running by
	 * resetting the deadline and the budget of the entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
421
422 if (dl_se->dl_yielded)
423 dl_se->dl_yielded = 0;
424 if (dl_se->dl_throttled)
425 dl_se->dl_throttled = 0;
426}
427
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (the function returns true if it can't). We are in fact
 * applying one of the CBS rules: when a task wakes up, if the residual
 * runtime over residual deadline fits within the task's bandwidth, then
 * we can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
452static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
453 struct sched_dl_entity *pi_se, u64 t)
454{
455 u64 left, right;
456
	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fishy
	 * (but still we want to give the user that illusion >;).
	 */
475 left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
476 right = ((dl_se->deadline - t) >> DL_SCALE) *
477 (pi_se->dl_runtime >> DL_SCALE);
478
479 return dl_time_before(right, left);
480}
481
/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
491static void update_dl_entity(struct sched_dl_entity *dl_se,
492 struct sched_dl_entity *pi_se)
493{
494 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
495 struct rq *rq = rq_of_dl_rq(dl_rq);
496
	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
501 if (dl_se->dl_new) {
502 setup_new_dl_entity(dl_se, pi_se);
503 return;
504 }
505
506 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
507 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
508 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
509 dl_se->runtime = pi_se->dl_runtime;
510 }
511}
512
/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
523static int start_dl_timer(struct task_struct *p)
524{
525 struct sched_dl_entity *dl_se = &p->dl;
526 struct hrtimer *timer = &dl_se->dl_timer;
527 struct rq *rq = task_rq(p);
528 ktime_t now, act;
529 s64 delta;
530
531 lockdep_assert_held(&rq->lock);
532
	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
538 act = ns_to_ktime(dl_se->deadline);
539 now = hrtimer_cb_get_time(timer);
540 delta = ktime_to_ns(now) - rq_clock(rq);
541 act = ktime_add_ns(act, delta);
542
	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
548 if (ktime_us_delta(act, now) < 0)
549 return 0;
550
	/*
	 * Take a reference on the task only if the timer is not already
	 * queued: the timer callback drops it, so this keeps the
	 * get/put_task_struct() pairs balanced across timer firings.
	 */
560 if (!hrtimer_is_queued(timer)) {
561 get_task_struct(p);
562 hrtimer_start(timer, act, HRTIMER_MODE_ABS);
563 }
564
565 return 1;
566}
567
/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is still
 * active (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we
 * just replenish without enqueueing, and the queueing back to the dl_rq
 * will be done by the next call to enqueue_task_dl().
 */
581static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
582{
583 struct sched_dl_entity *dl_se = container_of(timer,
584 struct sched_dl_entity,
585 dl_timer);
586 struct task_struct *p = dl_task_of(dl_se);
587 unsigned long flags;
588 struct rq *rq;
589
590 rq = task_rq_lock(p, &flags);
591
	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
596 if (!dl_task(p)) {
597 __dl_clear_params(p);
598 goto unlock;
599 }
600
	/*
	 * This is possible if switched_from_dl() raced against a running
	 * callback that took the above !dl_task() path and we've since
	 * switched back into SCHED_DEADLINE.
	 *
	 * There's nothing to do: the new instance will set things up.
	 */
608 if (dl_se->dl_new)
609 goto unlock;
610
611
612
613
614
615 if (dl_se->dl_boosted)
616 goto unlock;
617
	/*
	 * Spurious timer due to start_dl_timer() race; or we already
	 * received a replenishment from rt_mutex_setprio().
	 */
622 if (!dl_se->dl_throttled)
623 goto unlock;
624
625 sched_clock_tick();
626 update_rq_clock(rq);
627
	/*
	 * If the throttle happened during sched-out, like:
	 *
	 *   schedule()
	 *     deactivate_task()
	 *       dequeue_task_dl()
	 *         update_curr_dl()
	 *           start_dl_timer()
	 *         __dequeue_task_dl()
	 *     prev->on_rq = 0;
	 *
	 * we can be both throttled and !queued. Replenish the counter
	 * but do not enqueue -- wait for our wakeup to do that.
	 */
642 if (!task_on_rq_queued(p)) {
643 replenish_dl_entity(dl_se, dl_se);
644 goto unlock;
645 }
646
647 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
648 if (dl_task(rq->curr))
649 check_preempt_curr_dl(rq, p, 0);
650 else
651 resched_curr(rq);
652
653#ifdef CONFIG_SMP
	/*
	 * Perform balancing operations here, after the replenishment.
	 *
	 * If we find that the rq the task was on is no longer available,
	 * we need to select a new rq.
	 */
664 if (unlikely(!rq->online))
665 rq = dl_task_offline_migration(rq, p);
666
	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to
		 * drop rq->lock.
		 */
676 lockdep_unpin_lock(&rq->lock);
677 push_dl_task(rq);
678 lockdep_pin_lock(&rq->lock);
679 }
680#endif
681
682unlock:
683 task_rq_unlock(rq, p, &flags);
684
	/*
	 * This can free the task_struct, including this hrtimer; do not
	 * touch anything related to that after this.
	 */
689 put_task_struct(p);
690
691 return HRTIMER_NORESTART;
692}
693
694void init_dl_task_timer(struct sched_dl_entity *dl_se)
695{
696 struct hrtimer *timer = &dl_se->dl_timer;
697
698 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
699 timer->function = dl_task_timer;
700}

/* Has the entity consumed all of the runtime of its current CBS instance? */
702static
703int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
704{
705 return (dl_se->runtime <= 0);
706}
707
708extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
709
/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
714static void update_curr_dl(struct rq *rq)
715{
716 struct task_struct *curr = rq->curr;
717 struct sched_dl_entity *dl_se = &curr->dl;
718 u64 delta_exec;
719
720 if (!dl_task(curr) || !on_dl_rq(dl_se))
721 return;
722
	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
731 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
732 if (unlikely((s64)delta_exec <= 0))
733 return;
734
735 schedstat_set(curr->se.statistics.exec_max,
736 max(curr->se.statistics.exec_max, delta_exec));
737
738 curr->se.sum_exec_runtime += delta_exec;
739 account_group_exec_runtime(curr, delta_exec);
740
741 curr->se.exec_start = rq_clock_task(rq);
742 cpuacct_charge(curr, delta_exec);
743
744 sched_rt_avg_update(rq, delta_exec);
745
746 dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
747 if (dl_runtime_exceeded(dl_se)) {
748 dl_se->dl_throttled = 1;
749 __dequeue_task_dl(rq, curr, 0);
750 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
751 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
752
753 if (!is_leftmost(curr, &rq->dl))
754 resched_curr(rq);
755 }
756
	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 */
768 if (rt_bandwidth_enabled()) {
769 struct rt_rq *rt_rq = &rq->rt;
770
771 raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here;
		 * we have our own CBS to keep us inline. Only account when
		 * RT bandwidth is relevant.
		 */
777 if (sched_rt_bandwidth_account(rt_rq))
778 rt_rq->rt_time += delta_exec;
779 raw_spin_unlock(&rt_rq->rt_runtime_lock);
780 }
781}
782
783#ifdef CONFIG_SMP
784
785static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
786
787static inline u64 next_deadline(struct rq *rq)
788{
789 struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
790
791 if (next && dl_prio(next->prio))
792 return next->dl.deadline;
793 else
794 return 0;
795}
796
797static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
798{
799 struct rq *rq = rq_of_dl_rq(dl_rq);
800
801 if (dl_rq->earliest_dl.curr == 0 ||
802 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		/*
		 * If the dl_rq had no -deadline tasks, or if the new task
		 * has a shorter deadline than the current earliest one,
		 * the previous earliest becomes our next earliest, as the
		 * new task becomes the earliest itself.
		 */
809 dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
810 dl_rq->earliest_dl.curr = deadline;
811 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
812 } else if (dl_rq->earliest_dl.next == 0 ||
813 dl_time_before(deadline, dl_rq->earliest_dl.next)) {
814
815
816
817
818
819
820 dl_rq->earliest_dl.next = next_deadline(rq);
821 }
822}
823
824static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
825{
826 struct rq *rq = rq_of_dl_rq(dl_rq);
827
	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task, we must recompute them.
	 */
832 if (!dl_rq->dl_nr_running) {
833 dl_rq->earliest_dl.curr = 0;
834 dl_rq->earliest_dl.next = 0;
835 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
836 } else {
837 struct rb_node *leftmost = dl_rq->rb_leftmost;
838 struct sched_dl_entity *entry;
839
840 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
841 dl_rq->earliest_dl.curr = entry->deadline;
842 dl_rq->earliest_dl.next = next_deadline(rq);
843 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
844 }
845}
846
847#else
848
849static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
850static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
851
852#endif
853
854static inline
855void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
856{
857 int prio = dl_task_of(dl_se)->prio;
858 u64 deadline = dl_se->deadline;
859
860 WARN_ON(!dl_prio(prio));
861 dl_rq->dl_nr_running++;
862 add_nr_running(rq_of_dl_rq(dl_rq), 1);
863
864 inc_dl_deadline(dl_rq, deadline);
865 inc_dl_migration(dl_se, dl_rq);
866}
867
868static inline
869void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
870{
871 int prio = dl_task_of(dl_se)->prio;
872
873 WARN_ON(!dl_prio(prio));
874 WARN_ON(!dl_rq->dl_nr_running);
875 dl_rq->dl_nr_running--;
876 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
877
878 dec_dl_deadline(dl_rq, dl_se->deadline);
879 dec_dl_migration(dl_se, dl_rq);
880}
881
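/*
 * Insert dl_se into its dl_rq's rb-tree, ordered by absolute deadline,
 * keeping the cached leftmost (earliest deadline) pointer up to date.
 */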
882static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
883{
884 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
885 struct rb_node **link = &dl_rq->rb_root.rb_node;
886 struct rb_node *parent = NULL;
887 struct sched_dl_entity *entry;
888 int leftmost = 1;
889
890 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
891
892 while (*link) {
893 parent = *link;
894 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
895 if (dl_time_before(dl_se->deadline, entry->deadline))
896 link = &parent->rb_left;
897 else {
898 link = &parent->rb_right;
899 leftmost = 0;
900 }
901 }
902
903 if (leftmost)
904 dl_rq->rb_leftmost = &dl_se->rb_node;
905
906 rb_link_node(&dl_se->rb_node, parent, link);
907 rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
908
909 inc_dl_tasks(dl_se, dl_rq);
910}
911
912static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
913{
914 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
915
916 if (RB_EMPTY_NODE(&dl_se->rb_node))
917 return;
918
919 if (dl_rq->rb_leftmost == &dl_se->rb_node) {
920 struct rb_node *next_node;
921
922 next_node = rb_next(&dl_se->rb_node);
923 dl_rq->rb_leftmost = next_node;
924 }
925
926 rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
927 RB_CLEAR_NODE(&dl_se->rb_node);
928
929 dec_dl_tasks(dl_se, dl_rq);
930}
931
932static void
933enqueue_dl_entity(struct sched_dl_entity *dl_se,
934 struct sched_dl_entity *pi_se, int flags)
935{
936 BUG_ON(on_dl_rq(dl_se));
937
	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
943 if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
944 update_dl_entity(dl_se, pi_se);
945 else if (flags & ENQUEUE_REPLENISH)
946 replenish_dl_entity(dl_se, pi_se);
947
948 __enqueue_dl_entity(dl_se);
949}
950
951static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
952{
953 __dequeue_dl_entity(dl_se);
954}
955
956static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
957{
958 struct task_struct *pi_task = rt_mutex_get_top_task(p);
959 struct sched_dl_entity *pi_se = &p->dl;
960
	/*
	 * Use the scheduling parameters of the top pi-waiter task if we
	 * have one and its (absolute) deadline is smaller than ours;
	 * otherwise we keep our own runtime and deadline.
	 */
967 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
968 pi_se = &pi_task->dl;
969 } else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task
		 * that is going to be deboosted, but exceeds its runtime
		 * while doing so. No point in replenishing it, as it is
		 * going back to its original scheduling class right after.
		 */
977 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
978 return;
979 }
980
	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
987 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
988 return;
989
990 enqueue_dl_entity(&p->dl, pi_se, flags);
991
992 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
993 enqueue_pushable_dl_task(rq, p);
994}
995
996static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
997{
998 dequeue_dl_entity(&p->dl);
999 dequeue_pushable_dl_task(rq, p);
1000}
1001
1002static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1003{
1004 update_curr_dl(rq);
1005 __dequeue_task_dl(rq, p, flags);
1006}
1007
/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Still
 *   it can be useful for period-aware applications, or,
 *   hopefully, in the future.
 */
1018static void yield_task_dl(struct rq *rq)
1019{
1020 struct task_struct *p = rq->curr;
1021
	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and give it new
	 * scheduling parameters (thanks to dl_yielded=1).
	 */
1028 if (p->dl.runtime > 0) {
1029 rq->curr->dl.dl_yielded = 1;
1030 p->dl.runtime = 0;
1031 }
1032 update_rq_clock(rq);
1033 update_curr_dl(rq);
1034
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
1039 rq_clock_skip_update(rq, true);
1040}
1041
1042#ifdef CONFIG_SMP
1043
1044static int find_later_rq(struct task_struct *task);
1045
1046static int
1047select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1048{
1049 struct task_struct *curr;
1050 struct rq *rq;
1051
1052 if (sd_flag != SD_BALANCE_WAKE)
1053 goto out;
1054
1055 rq = cpu_rq(cpu);
1056
1057 rcu_read_lock();
1058 curr = READ_ONCE(rq->curr);
1059
	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
1069 if (unlikely(dl_task(curr)) &&
1070 (curr->nr_cpus_allowed < 2 ||
1071 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1072 (p->nr_cpus_allowed > 1)) {
1073 int target = find_later_rq(p);
1074
1075 if (target != -1 &&
1076 (dl_time_before(p->dl.deadline,
1077 cpu_rq(target)->dl.earliest_dl.curr) ||
1078 (cpu_rq(target)->dl.dl_nr_running == 0)))
1079 cpu = target;
1080 }
1081 rcu_read_unlock();
1082
1083out:
1084 return cpu;
1085}
1086
1087static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1088{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
1093 if (rq->curr->nr_cpus_allowed == 1 ||
1094 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1095 return;
1096
	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
1101 if (p->nr_cpus_allowed != 1 &&
1102 cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1103 return;
1104
1105 resched_curr(rq);
1106}
1107
1108#endif
1109
/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
1114static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1115 int flags)
1116{
1117 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1118 resched_curr(rq);
1119 return;
1120 }
1121
1122#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
1127 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1128 !test_tsk_need_resched(rq->curr))
1129 check_preempt_equal_dl(rq, p);
1130#endif
1131}
1132
1133#ifdef CONFIG_SCHED_HRTICK
1134static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1135{
1136 hrtick_start(rq, p->dl.runtime);
1137}
1138#else
1139static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1140{
1141}
1142#endif
1143
1144static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1145 struct dl_rq *dl_rq)
1146{
1147 struct rb_node *left = dl_rq->rb_leftmost;
1148
1149 if (!left)
1150 return NULL;
1151
1152 return rb_entry(left, struct sched_dl_entity, rb_node);
1153}
1154
1155struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1156{
1157 struct sched_dl_entity *dl_se;
1158 struct task_struct *p;
1159 struct dl_rq *dl_rq;
1160
1161 dl_rq = &rq->dl;
1162
1163 if (need_pull_dl_task(rq, prev)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it
		 * being picked for load-balance, and preemption/IRQs are
		 * still disabled, avoiding further scheduler activity on
		 * it; and we're being very careful to re-start the
		 * picking loop.
		 */
1170 lockdep_unpin_lock(&rq->lock);
1171 pull_dl_task(rq);
1172 lockdep_pin_lock(&rq->lock);
1173
		/*
		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
1178 if (rq->stop && task_on_rq_queued(rq->stop))
1179 return RETRY_TASK;
1180 }
1181
	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
1186 if (prev->sched_class == &dl_sched_class)
1187 update_curr_dl(rq);
1188
1189 if (unlikely(!dl_rq->dl_nr_running))
1190 return NULL;
1191
1192 put_prev_task(rq, prev);
1193
1194 dl_se = pick_next_dl_entity(rq, dl_rq);
1195 BUG_ON(!dl_se);
1196
1197 p = dl_task_of(dl_se);
1198 p->se.exec_start = rq_clock_task(rq);
1199
1200
1201 dequeue_pushable_dl_task(rq, p);
1202
1203 if (hrtick_enabled(rq))
1204 start_hrtick_dl(rq, p);
1205
1206 queue_push_tasks(rq);
1207
1208 return p;
1209}
1210
1211static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1212{
1213 update_curr_dl(rq);
1214
1215 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1216 enqueue_pushable_dl_task(rq, p);
1217}
1218
1219static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1220{
1221 update_curr_dl(rq);
1222
1223
1224
1225
1226
1227
1228 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1229 is_leftmost(p, &rq->dl))
1230 start_hrtick_dl(rq, p);
1231}
1232
1233static void task_fork_dl(struct task_struct *p)
1234{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
1239}
1240
1241static void task_dead_dl(struct task_struct *p)
1242{
1243 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1244
1245
1246
1247
1248 raw_spin_lock_irq(&dl_b->lock);
1249
1250 dl_b->total_bw -= p->dl.dl_bw;
1251 raw_spin_unlock_irq(&dl_b->lock);
1252}
1253
1254static void set_curr_task_dl(struct rq *rq)
1255{
1256 struct task_struct *p = rq->curr;
1257
1258 p->se.exec_start = rq_clock_task(rq);
1259
1260
1261 dequeue_pushable_dl_task(rq, p);
1262}
1263
1264#ifdef CONFIG_SMP
1265
1266
1267#define DL_MAX_TRIES 3
1268
1269static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1270{
1271 if (!task_running(rq, p) &&
1272 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1273 return 1;
1274 return 0;
1275}
1276
1277
1278static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
1279{
1280 struct rb_node *next_node = rq->dl.rb_leftmost;
1281 struct sched_dl_entity *dl_se;
1282 struct task_struct *p = NULL;
1283
1284next_node:
1285 next_node = rb_next(next_node);
1286 if (next_node) {
1287 dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
1288 p = dl_task_of(dl_se);
1289
1290 if (pick_dl_task(rq, p, cpu))
1291 return p;
1292
1293 goto next_node;
1294 }
1295
1296 return NULL;
1297}
1298
/*
 * Return the earliest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise:
 */
1303static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1304{
1305 struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1306 struct task_struct *p = NULL;
1307
1308 if (!has_pushable_dl_tasks(rq))
1309 return NULL;
1310
1311next_node:
1312 if (next_node) {
1313 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1314
1315 if (pick_dl_task(rq, p, cpu))
1316 return p;
1317
1318 next_node = rb_next(next_node);
1319 goto next_node;
1320 }
1321
1322 return NULL;
1323}
1324
1325static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1326
1327static int find_later_rq(struct task_struct *task)
1328{
1329 struct sched_domain *sd;
1330 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1331 int this_cpu = smp_processor_id();
1332 int best_cpu, cpu = task_cpu(task);
1333
	/* Make sure the mask is initialized first */
1335 if (unlikely(!later_mask))
1336 return -1;
1337
1338 if (task->nr_cpus_allowed == 1)
1339 return -1;
1340
	/*
	 * We have to consider system topology and task affinity
	 * first, then we can look for a suitable cpu.
	 */
1345 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1346 task, later_mask);
1347 if (best_cpu == -1)
1348 return -1;
1349
	/*
	 * If we are here, some target has been found, the most suitable
	 * of which is cached in best_cpu. This is, among the runqueues
	 * where the current tasks have later deadlines than the task's
	 * one, the rq with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
1363 if (cpumask_test_cpu(cpu, later_mask))
1364 return cpu;
1365
1366
1367
1368
1369 if (!cpumask_test_cpu(this_cpu, later_mask))
1370 this_cpu = -1;
1371
1372 rcu_read_lock();
1373 for_each_domain(cpu, sd) {
1374 if (sd->flags & SD_WAKE_AFFINE) {
1375
1376
1377
1378
1379
1380 if (this_cpu != -1 &&
1381 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1382 rcu_read_unlock();
1383 return this_cpu;
1384 }
1385
1386
1387
1388
1389
1390 if (best_cpu < nr_cpu_ids &&
1391 cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1392 rcu_read_unlock();
1393 return best_cpu;
1394 }
1395 }
1396 }
1397 rcu_read_unlock();
1398
	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort things out.
	 */
1403 if (this_cpu != -1)
1404 return this_cpu;
1405
1406 cpu = cpumask_any(later_mask);
1407 if (cpu < nr_cpu_ids)
1408 return cpu;
1409
1410 return -1;
1411}
1412
1413
1414static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1415{
1416 struct rq *later_rq = NULL;
1417 int tries;
1418 int cpu;
1419
1420 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1421 cpu = find_later_rq(task);
1422
1423 if ((cpu == -1) || (cpu == rq->cpu))
1424 break;
1425
1426 later_rq = cpu_rq(cpu);
1427
1428 if (later_rq->dl.dl_nr_running &&
1429 !dl_time_before(task->dl.deadline,
1430 later_rq->dl.earliest_dl.curr)) {
			/*
			 * Target rq has tasks of equal or earlier deadline,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
1436 later_rq = NULL;
1437 break;
1438 }
1439
1440
1441 if (double_lock_balance(rq, later_rq)) {
1442 if (unlikely(task_rq(task) != rq ||
1443 !cpumask_test_cpu(later_rq->cpu,
1444 &task->cpus_allowed) ||
1445 task_running(rq, task) ||
1446 !task_on_rq_queued(task))) {
1447 double_unlock_balance(rq, later_rq);
1448 later_rq = NULL;
1449 break;
1450 }
1451 }
1452
		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
1458 if (!later_rq->dl.dl_nr_running ||
1459 dl_time_before(task->dl.deadline,
1460 later_rq->dl.earliest_dl.curr))
1461 break;
1462
1463
1464 double_unlock_balance(rq, later_rq);
1465 later_rq = NULL;
1466 }
1467
1468 return later_rq;
1469}
1470
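/*
 * Return the earliest-deadline task that can be pushed away from this rq,
 * i.e. the leftmost entry of the pushable tasks rb-tree, if any.
 */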
1471static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1472{
1473 struct task_struct *p;
1474
1475 if (!has_pushable_dl_tasks(rq))
1476 return NULL;
1477
1478 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1479 struct task_struct, pushable_dl_tasks);
1480
1481 BUG_ON(rq->cpu != task_cpu(p));
1482 BUG_ON(task_current(rq, p));
1483 BUG_ON(p->nr_cpus_allowed <= 1);
1484
1485 BUG_ON(!task_on_rq_queued(p));
1486 BUG_ON(!dl_task(p));
1487
1488 return p;
1489}
1490
/*
 * See if the non running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
1496static int push_dl_task(struct rq *rq)
1497{
1498 struct task_struct *next_task;
1499 struct rq *later_rq;
1500 int ret = 0;
1501
1502 if (!rq->dl.overloaded)
1503 return 0;
1504
1505 next_task = pick_next_pushable_dl_task(rq);
1506 if (!next_task)
1507 return 0;
1508
1509retry:
1510 if (unlikely(next_task == rq->curr)) {
1511 WARN_ON(1);
1512 return 0;
1513 }
1514
	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
1520 if (dl_task(rq->curr) &&
1521 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1522 rq->curr->nr_cpus_allowed > 1) {
1523 resched_curr(rq);
1524 return 0;
1525 }
1526
1527
1528 get_task_struct(next_task);
1529
1530
1531 later_rq = find_lock_later_rq(next_task, rq);
1532 if (!later_rq) {
1533 struct task_struct *task;
1534
		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
1540 task = pick_next_pushable_dl_task(rq);
1541 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1542
1543
1544
1545
1546 goto out;
1547 }
1548
1549 if (!task)
1550
1551 goto out;
1552
1553 put_task_struct(next_task);
1554 next_task = task;
1555 goto retry;
1556 }
1557
1558 deactivate_task(rq, next_task, 0);
1559 set_task_cpu(next_task, later_rq->cpu);
1560 activate_task(later_rq, next_task, 0);
1561 ret = 1;
1562
1563 resched_curr(later_rq);
1564
1565 double_unlock_balance(rq, later_rq);
1566
1567out:
1568 put_task_struct(next_task);
1569
1570 return ret;
1571}
1572
1573static void push_dl_tasks(struct rq *rq)
1574{
1575
1576 while (push_dl_task(rq))
1577 ;
1578}
1579
1580static void pull_dl_task(struct rq *this_rq)
1581{
1582 int this_cpu = this_rq->cpu, cpu;
1583 struct task_struct *p;
1584 bool resched = false;
1585 struct rq *src_rq;
1586 u64 dmin = LONG_MAX;
1587
1588 if (likely(!dl_overloaded(this_rq)))
1589 return;
1590
	/*
	 * Match the barrier from dl_set_overload(); this guarantees that
	 * if we see overloaded we must also see the dlo_mask bit.
	 */
1595 smp_rmb();
1596
1597 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1598 if (this_cpu == cpu)
1599 continue;
1600
1601 src_rq = cpu_rq(cpu);
1602
		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
1607 if (this_rq->dl.dl_nr_running &&
1608 dl_time_before(this_rq->dl.earliest_dl.curr,
1609 src_rq->dl.earliest_dl.next))
1610 continue;
1611
1612
1613 double_lock_balance(this_rq, src_rq);
1614
1615
1616
1617
1618
1619 if (src_rq->dl.dl_nr_running <= 1)
1620 goto skip;
1621
1622 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1623
		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
1629 if (p && dl_time_before(p->dl.deadline, dmin) &&
1630 (!this_rq->dl.dl_nr_running ||
1631 dl_time_before(p->dl.deadline,
1632 this_rq->dl.earliest_dl.curr))) {
1633 WARN_ON(p == src_rq->curr);
1634 WARN_ON(!task_on_rq_queued(p));
1635
1636
1637
1638
1639
1640 if (dl_time_before(p->dl.deadline,
1641 src_rq->curr->dl.deadline))
1642 goto skip;
1643
1644 resched = true;
1645
1646 deactivate_task(src_rq, p, 0);
1647 set_task_cpu(p, this_cpu);
1648 activate_task(this_rq, p, 0);
1649 dmin = p->dl.deadline;
1650
1651
1652 }
1653skip:
1654 double_unlock_balance(this_rq, src_rq);
1655 }
1656
1657 if (resched)
1658 resched_curr(this_rq);
1659}
1660
/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
1665static void task_woken_dl(struct rq *rq, struct task_struct *p)
1666{
1667 if (!task_running(rq, p) &&
1668 !test_tsk_need_resched(rq->curr) &&
1669 p->nr_cpus_allowed > 1 &&
1670 dl_task(rq->curr) &&
1671 (rq->curr->nr_cpus_allowed < 2 ||
1672 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1673 push_dl_tasks(rq);
1674 }
1675}
1676
1677static void set_cpus_allowed_dl(struct task_struct *p,
1678 const struct cpumask *new_mask)
1679{
1680 struct root_domain *src_rd;
1681 struct rq *rq;
1682
1683 BUG_ON(!dl_task(p));
1684
1685 rq = task_rq(p);
1686 src_rd = rq->rd;
1687
	/*
	 * Migrating a SCHED_DEADLINE task between exclusive
	 * cpusets (different root_domains) entails a bandwidth
	 * update. We already made space for us in the destination
	 * domain (see cpuset_can_attach()).
	 */
1693 if (!cpumask_intersects(src_rd->span, new_mask)) {
1694 struct dl_bw *src_dl_b;
1695
1696 src_dl_b = dl_bw_of(cpu_of(rq));
1697
		/*
		 * We now free resources of the root_domain we are migrating
		 * off from.
		 */
1702 raw_spin_lock(&src_dl_b->lock);
1703 __dl_clear(src_dl_b, p->dl.dl_bw);
1704 raw_spin_unlock(&src_dl_b->lock);
1705 }
1706
1707 set_cpus_allowed_common(p, new_mask);
1708}
1709
1710
1711static void rq_online_dl(struct rq *rq)
1712{
1713 if (rq->dl.overloaded)
1714 dl_set_overload(rq);
1715
1716 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1717 if (rq->dl.dl_nr_running > 0)
1718 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1719}
1720
1721
1722static void rq_offline_dl(struct rq *rq)
1723{
1724 if (rq->dl.overloaded)
1725 dl_clear_overload(rq);
1726
1727 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1728 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1729}
1730
1731void __init init_sched_dl_class(void)
1732{
1733 unsigned int i;
1734
1735 for_each_possible_cpu(i)
1736 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1737 GFP_KERNEL, cpu_to_node(i));
1738}
1739
1740#endif
1741
1742static void switched_from_dl(struct rq *rq, struct task_struct *p)
1743{
	/*
	 * Start the deadline timer; if we switch back to dl before this we'll
	 * continue consuming our current CBS slice. If we stay outside of
	 * SCHED_DEADLINE until the deadline passes, the timer will reset the
	 * task.
	 */
1750 if (!start_dl_timer(p))
1751 __dl_clear_params(p);
1752
	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
1758 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1759 return;
1760
1761 queue_pull_task(rq);
1762}
1763
/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
1768static void switched_to_dl(struct rq *rq, struct task_struct *p)
1769{
1770 if (task_on_rq_queued(p) && rq->curr != p) {
1771#ifdef CONFIG_SMP
1772 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
1773 queue_push_tasks(rq);
1774#else
1775 if (dl_task(rq->curr))
1776 check_preempt_curr_dl(rq, p, 0);
1777 else
1778 resched_curr(rq);
1779#endif
1780 }
1781}
1782
/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
1787static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1788 int oldprio)
1789{
1790 if (task_on_rq_queued(p) || rq->curr == p) {
1791#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
1798 if (!rq->dl.overloaded)
1799 queue_pull_task(rq);
1800
		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
1806 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1807 resched_curr(rq);
1808#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
1814 resched_curr(rq);
1815#endif
1816 } else
1817 switched_to_dl(rq, p);
1818}
1819
1820const struct sched_class dl_sched_class = {
1821 .next = &rt_sched_class,
1822 .enqueue_task = enqueue_task_dl,
1823 .dequeue_task = dequeue_task_dl,
1824 .yield_task = yield_task_dl,
1825
1826 .check_preempt_curr = check_preempt_curr_dl,
1827
1828 .pick_next_task = pick_next_task_dl,
1829 .put_prev_task = put_prev_task_dl,
1830
1831#ifdef CONFIG_SMP
1832 .select_task_rq = select_task_rq_dl,
1833 .set_cpus_allowed = set_cpus_allowed_dl,
1834 .rq_online = rq_online_dl,
1835 .rq_offline = rq_offline_dl,
1836 .task_woken = task_woken_dl,
1837#endif
1838
1839 .set_curr_task = set_curr_task_dl,
1840 .task_tick = task_tick_dl,
1841 .task_fork = task_fork_dl,
1842 .task_dead = task_dead_dl,
1843
1844 .prio_changed = prio_changed_dl,
1845 .switched_from = switched_from_dl,
1846 .switched_to = switched_to_dl,
1847
1848 .update_curr = update_curr_dl,
1849};
1850
1851#ifdef CONFIG_SCHED_DEBUG
1852extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1853
1854void print_dl_stats(struct seq_file *m, int cpu)
1855{
1856 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1857}
1858#endif
1859