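/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't be removed from their runqueue.
 */
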
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

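/*
 * GRUB bandwidth accounting: "this_bw" is the sum of the bandwidths of all
 * the tasks assigned to this runqueue, while "running_bw" only accounts
 * for the tasks that are currently contending for the CPU (i.e., not in
 * the GRUB "inactive" state). running_bw is therefore always <= this_bw.
 */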
static inline
void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
	if (dl_rq->running_bw > old)
		dl_rq->running_bw = 0;
}

static inline
void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
	if (dl_rq->this_bw > old)
		dl_rq->this_bw = 0;
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

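/*
 * Move a non-queued task's bandwidth from its old value to @new_bw in the
 * runqueue's "this_bw" accounting; for queued tasks the enqueue/dequeue
 * paths take care of this instead.
 */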
void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	struct rq *rq;

	if (task_on_rq_queued(p))
		return;

	rq = task_rq(p);
	if (p->dl.dl_non_contending) {
		sub_running_bw(p->dl.dl_bw, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 *
		 * Otherwise, the task reference taken when the timer
		 * was armed must be dropped here.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	sub_rq_bw(p->dl.dl_bw, &rq->dl);
	add_rq_bw(new_bw, &rq->dl);
}

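/*
 * The GRUB reclaiming algorithm distinguishes between "active" and
 * "inactive" tasks. When a task blocks, its utilization cannot be removed
 * from the active utilization ("running_bw") immediately: it has to stay
 * accounted until the task's "0-lag time", otherwise the guarantees of
 * already-admitted tasks could be broken.
 *
 * task_non_contending() is called when a task blocks: it computes the
 * 0-lag time and arms the "inactive timer" to fire at that instant, when
 * the utilization can finally be subtracted from running_bw. If the 0-lag
 * time is already in the past, the utilization is removed right away and
 * no timer is armed.
 */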
static void task_non_contending(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->inactive_timer;
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	s64 zerolag_time;

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing.
	 */
	if (dl_se->dl_runtime == 0)
		return;

	WARN_ON(hrtimer_active(&dl_se->inactive_timer));
	WARN_ON(dl_se->dl_non_contending);

	zerolag_time = dl_se->deadline -
		 div64_long((dl_se->runtime * dl_se->dl_period),
			dl_se->dl_runtime);

	/*
	 * Using relative times instead of the absolute "0-lag time"
	 * allows to simplify the code.
	 */
	zerolag_time -= rq_clock(rq);

	/*
	 * If the "0-lag time" already passed, decrease the active
	 * utilization now, instead of starting a timer.
	 */
	if (zerolag_time < 0) {
		if (dl_task(p))
			sub_running_bw(dl_se->dl_bw, dl_rq);
		if (!dl_task(p) || p->state == TASK_DEAD) {
			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

			if (p->state == TASK_DEAD)
				sub_rq_bw(p->dl.dl_bw, &rq->dl);
			raw_spin_lock(&dl_b->lock);
			__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
			__dl_clear_params(p);
			raw_spin_unlock(&dl_b->lock);
		}

		return;
	}

	dl_se->dl_non_contending = 1;
	get_task_struct(p);
	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
}

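/*
 * task_contending() is the counterpart of task_non_contending(): it is
 * called when the task wakes up or is enqueued again. If the inactive
 * timer is still pending, the task is simply marked as contending again;
 * otherwise its utilization has to be added back to running_bw.
 */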
static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing.
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (flags & ENQUEUE_MIGRATED)
		add_rq_bw(dl_se->dl_bw, dl_rq);

	if (dl_se->dl_non_contending) {
		dl_se->dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
			put_task_struct(dl_task_of(dl_se));
	} else {
		/*
		 * Since "dl_non_contending" is not set, the task's
		 * utilization is not accounted in running_bw; add it
		 * back here.
		 */
		add_running_bw(dl_se->dl_bw, dl_rq);
	}
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
{
	dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif

	dl_rq->running_bw = 0;
	dl_rq->this_bw = 0;
	init_dl_rq_bw_ratio(dl_rq);
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

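/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree, ordered by tasks' deadlines, with caching
 * of the leftmost (earliest deadline) element.
 */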
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost) {
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
		dl_rq->earliest_dl.next = p->dl.deadline;
	}

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
		if (next_node) {
			dl_rq->earliest_dl.next = rb_entry(next_node,
				struct task_struct, pushable_dl_tasks)->dl.deadline;
		}
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

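/*
 * The dl_timer can fire on a runqueue whose CPU went offline in the
 * meantime; in that case the throttled task has to be moved to an
 * online CPU before it can be enqueued again.
 */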
static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;
	bool fallback = false;

	later_rq = find_lock_later_rq(p, rq);

	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online cpu.
		 */
		fallback = true;
		cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable cpu.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	/*
	 * By now the task is replenished and enqueued; migrate it.
	 */
	deactivate_task(rq, p, 0);
	set_task_cpu(p, later_rq->cpu);
	activate_task(later_rq, p, 0);

	if (!fallback)
		resched_curr(later_rq);

	double_unlock_balance(later_rq, rq);

	return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);

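/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * "standard" periodic tasks behaviour.
 */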
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(dl_se->dl_boosted);
	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline.
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
	dl_se->runtime = dl_se->dl_runtime;
}

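/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities. Therefore, a budgeting strategy called Constant
 * Bandwidth Server (CBS) is used, in order to confine each entity within
 * its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That way
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines.
 */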
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrary large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep the things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}

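/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the task's bandwidth, then we can
 * keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks.
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't keep the deadline.
 */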
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Therefore, overflowing the u64
	 * type is very unlikely to occur in both cases.
	 *
	 * Both operands are shifted right by DL_SCALE to keep the
	 * products within 64 bits.
	 */
	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}

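/*
 * Revised wakeup rule: for self-suspending constrained deadline tasks,
 * instead of re-initializing the task's runtime and deadline, the
 * remaining runtime is adjusted so that the task cannot exceed its
 * density (dl_runtime / dl_deadline):
 *
 *   runtime = (dl_runtime / dl_deadline) * (deadline - t)
 */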
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 laxity = dl_se->deadline - rq_clock(rq);

	/*
	 * If the task has deadline < period, and the deadline is in the
	 * past, it should already be throttled before this check.
	 *
	 * See update_dl_entity() comments for further details.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	/* dl_density is scaled by BW_SHIFT (i.e., 20) bits. */
	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}

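/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some
 * restrictions applied only for tasks which do not have an implicit
 * deadline. See update_dl_entity() to know more about such restrictions.
 *
 * The dl_is_implicit() returns true if the task has an implicit deadline.
 */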
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline == dl_se->dl_period;
}

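/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the Original CBS is used: the
 * runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow (see
 * dl_entity_overflow()). In that case the parameters are updated: implicit
 * deadline tasks get the Original CBS treatment, while constrained deadline
 * tasks that are not being boosted get the Revised CBS treatment
 * (see update_dl_revised_wakeup()).
 */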
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {

		if (unlikely(!dl_is_implicit(dl_se) &&
			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
			     !dl_se->dl_boosted)) {
			update_dl_revised_wakeup(dl_se, rq);
			return;
		}

		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}

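/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */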
static int start_dl_timer(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->dl_timer;
	struct rq *rq = task_rq(p);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_held(&rq->lock);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_next_period(dl_se));
	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		get_task_struct(p);
		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
	}

	return 1;
}

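/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on the fact the task is active,
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * replenish it here, and the queueing back to the dl_rq will be done by
 * the next call to enqueue_task_dl().
 */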
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
	if (!dl_task(p))
		goto unlock;

	/*
	 * The task might have been boosted by someone else and might be in
	 * the boosting/deboosting path, its not throttled.
	 */
	if (dl_se->dl_boosted)
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; nothing to do.
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out, the task can be both
	 * throttled and !queued. Replenish the counter now; the next wakeup
	 * will pick up where we left off.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se, dl_se);
		goto unlock;
	}

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);

#ifdef CONFIG_SMP
	/*
	 * Perform balancing operations here; after the replenishments. If
	 * the rq the task was on has gone offline in the meantime, a new
	 * rq has to be found.
	 */
	if (unlikely(!rq->online)) {
		rq = dl_task_offline_migration(rq, p);
		update_rq_clock(rq);
	}

	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq))
		push_dl_task(rq);
#endif

unlock:
	task_rq_unlock(rq, p, &flags);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

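/*
 * During activation the CBS checks if it can reuse the task's current
 * runtime and deadline. This works fine for implicit deadline tasks
 * (deadline == period), but a constrained deadline task (deadline <
 * period) woken after its deadline and before its next period could,
 * if simply replenished, run for runtime / deadline, exceeding its
 * reserved runtime / period bandwidth. To avoid that, such a task is
 * throttled here and the replenishment timer is set to the beginning
 * of the next period, unless it is boosted.
 */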
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
			return;
		dl_se->dl_throttled = 1;
		if (dl_se->runtime > 0)
			dl_se->runtime = 0;
	}
}

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

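/*
 * This function implements the GRUB accounting rule: according to the
 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
 * but as
 *
 *   dq = -max{ u / Umax, (1 - Uinact - Uextra) } dt
 *
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization (the
 * difference between this_bw and running_bw), and Uextra is the
 * (per-runqueue) extra reclaimable utilization. The utilizations are
 * fixed point values scaled by 2^BW_SHIFT, while bw_ratio holds
 * 1 / Umax scaled by 2^RATIO_SHIFT.
 */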
u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
	u64 u_act;
	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

	/*
	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
	 * we compare u_inact + rq->dl.extra_bw with
	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
	 * u_inact + rq->dl.extra_bw can be larger than
	 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
	 * leading to wrong results).
	 */
	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
		u_act = u_act_min;
	else
		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

	return (delta * u_act) >> BW_SHIFT;
}

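/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */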
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	if (cpu_of(rq) == smp_processor_id())
		cpufreq_trigger_update(rq_clock(rq));

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0)) {
		if (unlikely(dl_se->dl_yielded))
			goto throttle;
		return;
	}

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
		delta_exec = grub_reclaim(delta_exec, rq, &curr->dl);
	dl_se->runtime -= delta_exec;

throttle:
	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
		dl_se->dl_throttled = 1;
		__dequeue_task_dl(rq, curr, 0);
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

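/*
 * The "inactive timer" fires at the 0-lag time of a blocked task: at that
 * point the task's bandwidth can finally be removed from the runqueue's
 * active utilization (and from the global bandwidth if the task died or
 * left SCHED_DEADLINE in the meantime).
 */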
static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     inactive_timer);
	struct task_struct *p = dl_task_of(dl_se);
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);

	if (!dl_task(p) || p->state == TASK_DEAD) {
		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

		if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
			sub_running_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
			sub_rq_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
			dl_se->dl_non_contending = 0;
		}

		raw_spin_lock(&dl_b->lock);
		__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&dl_b->lock);
		__dl_clear_params(p);

		goto unlock;
	}
	if (dl_se->dl_non_contending == 0)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	sub_running_bw(dl_se->dl_bw, &rq->dl);
	dl_se->dl_non_contending = 0;
unlock:
	task_rq_unlock(rq, p, &flags);
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->inactive_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = inactive_task_timer;
}

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	inc_nr_running(rq_of_dl_rq(dl_rq));

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	dec_nr_running(rq_of_dl_rq(dl_rq));

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (flags & ENQUEUE_WAKEUP) {
		task_contending(dl_se, flags);
		update_dl_entity(dl_se, pi_se);
	} else if (flags & ENQUEUE_REPLENISH) {
		replenish_dl_entity(dl_se, pi_se);
	}

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if we have one and its (absolute) deadline is
	 * smaller than our one... OTW we keep our runtime and
	 * deadline.
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
		pi_se = &pi_task->dl;
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task
		 * that is going to be deboosted, but exceeds its
		 * runtime while doing so. No point in replenishing
		 * it, as it's going to return back to its original
		 * scheduling class after this.
		 */
		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
		return;
	}

	/*
	 * Check if a constrained deadline task was activated
	 * after the deadline but before the next period.
	 * If that is the case, the task will be throttled and
	 * the replenishment timer will be set to the next period.
	 */
	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
		dl_check_constrained_dl(&p->dl);

	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
		add_rq_bw(p->dl.dl_bw, &rq->dl);
		add_running_bw(p->dl.dl_bw, &rq->dl);
	}

	/*
	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 * However, the active utilization does not depend on the fact
	 * that the task is on the runqueue or not (but only on the
	 * task's state - in GRUB parlance, "active contending" vs others),
	 * so its bandwidth must be accounted anyway.
	 */
	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
		if (flags & ENQUEUE_WAKEUP)
			task_contending(&p->dl, flags);

		return;
	}

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);

	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
		sub_running_bw(p->dl.dl_bw, &rq->dl);
		sub_rq_bw(p->dl.dl_bw, &rq->dl);
	}

	/*
	 * This check allows to start the inactive timer (or to immediately
	 * decrease the active utilization, if needed) in two cases:
	 * when the task blocks and when it is terminating
	 * (p->state == TASK_DEAD). We can handle the two cases in the same
	 * way, because from GRUB's point of view the same thing is happening
	 * (the task moves from "active contending" to "active non contending"
	 * or "inactive").
	 */
	if (flags & DEQUEUE_SLEEP)
		task_non_contending(p);
}

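/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime.
 */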
static void yield_task_dl(struct rq *rq)
{
	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	rq->curr->dl.dl_yielded = 1;

	update_rq_clock(rq);
	update_curr_dl(rq);
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
	rq->skip_clock_update = 1;
}

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1 &&
		    (dl_time_before(p->dl.deadline,
				    cpu_rq(target)->dl.earliest_dl.curr) ||
		     (cpu_rq(target)->dl.dl_nr_running == 0)))
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void migrate_task_rq_dl(struct task_struct *p, int next_cpu)
{
	struct rq *rq;

	if (p->state != TASK_WAKING)
		return;

	rq = task_rq(p);
	/*
	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
	 * rq->lock is not... So, lock it.
	 */
	raw_spin_lock(&rq->lock);
	if (p->dl.dl_non_contending) {
		sub_running_bw(p->dl.dl_bw, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	sub_rq_bw(p->dl.dl_bw, &rq->dl);
	raw_spin_unlock(&rq->lock);
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_curr(rq);
}

#endif /* CONFIG_SMP */

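/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */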
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif
}

#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}

struct task_struct *pick_next_task_dl(struct rq *rq)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);

#ifdef CONFIG_SMP
	rq->post_schedule = has_pushable_dl_tasks(rq);
#endif

	return p;
}

static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

	/*
	 * Even when we have runtime, update_curr_dl() might have resulted in
	 * us not being the leftmost task anymore. In that case NEED_RESCHED
	 * will be set and schedule() will start a new hrtick for the next
	 * task.
	 */
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
	    is_leftmost(p, &rq->dl))
		start_hrtick_dl(rq, p);
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
		return 1;
	return 0;
}

/*
 * Return the earliest pushable rq's task, which is suitable to be executed
 * on the cpu, NULL otherwise:
 */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
	struct task_struct *p = NULL;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

next_node:
	if (next_node) {
		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);

		if (pick_dl_task(rq, p, cpu))
			return p;

		next_node = rb_next(next_node);
		goto next_node;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

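/*
 * Find a CPU where @task could run with a later (or no) earliest deadline,
 * using the cpudl heap first and then the scheduling domains as a
 * topology hint; returns -1 if no suitable CPU is found.
 */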
static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int best_cpu, cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	/*
	 * We have to consider system topology and task affinity
	 * first, then we can look for a suitable cpu.
	 */
	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
			task, later_mask);
	if (best_cpu == -1)
		return -1;

	/*
	 * If we are here, some target has been found,
	 * the most suitable of which is cached in best_cpu.
	 * This is, among the runqueues where the current tasks
	 * have later deadlines than the task's one, the rq
	 * with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task run is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;

	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			/*
			 * Last chance: if best_cpu is valid and is
			 * in the current domain, that becomes our choice.
			 */
			if (best_cpu < nr_cpu_ids &&
			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}

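/* Locks the rq it finds */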
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		if (later_rq->dl.dl_nr_running &&
		    !dl_time_before(task->dl.deadline,
					later_rq->dl.earliest_dl.curr)) {
			/*
			 * Target rq has tasks of equal or earlier deadline,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			later_rq = NULL;
			break;
		}

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!dl_task(p));

	return p;
}

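/*
 * See if the non running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */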
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;
	int ret = 0;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	sub_running_bw(next_task->dl.dl_bw, &rq->dl);
	sub_rq_bw(next_task->dl.dl_bw, &rq->dl);
	set_task_cpu(next_task, later_rq->cpu);
	add_rq_bw(next_task->dl.dl_bw, &later_rq->dl);
	add_running_bw(next_task->dl.dl_bw, &later_rq->dl);
	activate_task(later_rq, next_task, 0);
	ret = 1;

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_dl_tasks(struct rq *rq)
{
	/* push_dl_task() will return true if it moved a -deadline task */
	while (push_dl_task(rq))
		;
}

static int pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return 0;

	/*
	 * Match the barrier from dl_set_overload; this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * We can pull only a task, which is pushable
		 * on its rq, and no others.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			sub_running_bw(p->dl.dl_bw, &src_rq->dl);
			sub_rq_bw(p->dl.dl_bw, &src_rq->dl);
			set_task_cpu(p, this_cpu);
			add_rq_bw(p->dl.dl_bw, &this_rq->dl);
			add_running_bw(p->dl.dl_bw, &this_rq->dl);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull other tasks here */
	if (dl_task(prev))
		pull_dl_task(rq);
}

static void post_schedule_dl(struct rq *rq)
{
	push_dl_tasks(rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
		push_dl_tasks(rq);
	}
}

static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	struct root_domain *src_rd;
	int weight;

	BUG_ON(!dl_task(p));

	rq = task_rq(p);
	src_rd = rq->rd;
	/*
	 * Migrating a SCHED_DEADLINE task between exclusive
	 * cpusets (different root_domains) entails a bandwidth
	 * update. We already made space for us in the destination
	 * domain (see cpuset_can_attach()).
	 */
	if (!cpumask_intersects(src_rd->span, new_mask)) {
		struct dl_bw *src_dl_b;

		src_dl_b = dl_bw_of(cpu_of(rq));
		/*
		 * We now free resources of the root_domain we are migrating
		 * off from.
		 */
		raw_spin_lock(&src_dl_b->lock);
		__dl_clear(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&src_dl_b->lock);
	}

	/*
	 * Update only if the task is actually running (i.e.,
	 * it is on the rq AND it is not throttled).
	 */
	if (!on_dl_rq(&p->dl))
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the process changes its state from whether it
	 * can migrate or not.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	/*
	 * The process used to be able to migrate OR it can now migrate.
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_dl_task(rq, p);
		BUG_ON(!rq->dl.dl_nr_migratory);
		rq->dl.dl_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_dl_task(rq, p);
		rq->dl.dl_nr_migratory++;
	}

	update_dl_migration(&rq->dl);
}

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}

void __init init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * task_non_contending() can start the "inactive timer" (if the 0-lag
	 * time is in the future). If the task switches back to dl before
	 * the "inactive timer" fires, it can continue to consume its current
	 * runtime using its current deadline; otherwise, inactive_task_timer()
	 * will remove its utilization from running_bw at the 0-lag time.
	 */
	if (task_on_rq_queued(p) && p->dl.dl_runtime)
		task_non_contending(p);

	if (!task_on_rq_queued(p))
		sub_rq_bw(p->dl.dl_bw, &rq->dl);

	/*
	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
	 * at the 0-lag time, because the task could have been migrated
	 * while SCHED_OTHER in the meanwhile.
	 */
	if (p->dl.dl_non_contending)
		p->dl.dl_non_contending = 0;

	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
		return;

	if (pull_dl_task(rq))
		resched_curr(rq);
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
		put_task_struct(p);

	/* If p is not queued we will update its parameters at next wakeup. */
	if (!task_on_rq_queued(p)) {
		add_rq_bw(p->dl.dl_bw, &rq->dl);

		return;
	}

	/*
	 * If p is boosted we already updated its params in
	 * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
	 * p's deadline being now already after rq_clock(rq).
	 */
	if (dl_time_before(p->dl.deadline, rq_clock(rq)))
		setup_new_dl_entity(&p->dl);

	if (rq->curr != p) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded &&
		    push_dl_task(rq) && rq != task_rq(p))
			/* Only reschedule if pushing failed */
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched) {
			if (dl_task(rq->curr))
				check_preempt_curr_dl(rq, p, 0);
			else
				resched_curr(rq);
		}
	}
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			pull_dl_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
		    rq->curr == p)
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_curr(rq);
#endif /* CONFIG_SMP */
	}
}

const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.migrate_task_rq	= migrate_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.pre_schedule		= pre_schedule_dl,
	.post_schedule		= post_schedule_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,
	.update_curr		= update_curr_dl,
};