// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't leave any bandwidth available
 * for other tasks not playing with the CPU scheduling knobs.
 */
#include "sched.h"
#include "pelt.h"

struct dl_bandwidth def_dl_bandwidth;
22
23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24{
25 return container_of(dl_se, struct task_struct, dl);
26}
27
28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29{
30 return container_of(dl_rq, struct rq, dl);
31}
32
33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34{
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
37
38 return &rq->dl;
39}
40
41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42{
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
44}
45
46#ifdef CONFIG_SMP
47static inline struct dl_bw *dl_bw_of(int i)
48{
49 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
50 "sched RCU must be held");
51 return &cpu_rq(i)->rd->dl_bw;
52}
53
54static inline int dl_bw_cpus(int i)
55{
56 struct root_domain *rd = cpu_rq(i)->rd;
57 int cpus = 0;
58
59 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
60 "sched RCU must be held");
61 for_each_cpu_and(i, rd->span, cpu_active_mask)
62 cpus++;
63
64 return cpus;
65}
66#else
67static inline struct dl_bw *dl_bw_of(int i)
68{
69 return &cpu_rq(i)->dl.dl_bw;
70}
71
72static inline int dl_bw_cpus(int i)
73{
74 return 1;
75}
76#endif
77
78static inline
79void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
80{
81 u64 old = dl_rq->running_bw;
82
83 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
84 dl_rq->running_bw += dl_bw;
85 SCHED_WARN_ON(dl_rq->running_bw < old);
86 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
87
88 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
89}
90
91static inline
92void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
93{
94 u64 old = dl_rq->running_bw;
95
96 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
97 dl_rq->running_bw -= dl_bw;
98 SCHED_WARN_ON(dl_rq->running_bw > old);
99 if (dl_rq->running_bw > old)
100 dl_rq->running_bw = 0;
101
102 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
103}
104
105static inline
106void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
107{
108 u64 old = dl_rq->this_bw;
109
110 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
111 dl_rq->this_bw += dl_bw;
112 SCHED_WARN_ON(dl_rq->this_bw < old);
113}
114
115static inline
116void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
117{
118 u64 old = dl_rq->this_bw;
119
120 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
121 dl_rq->this_bw -= dl_bw;
122 SCHED_WARN_ON(dl_rq->this_bw > old);
123 if (dl_rq->this_bw > old)
124 dl_rq->this_bw = 0;
125 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
126}
127
128static inline
129void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
130{
131 if (!dl_entity_is_special(dl_se))
132 __add_rq_bw(dl_se->dl_bw, dl_rq);
133}
134
135static inline
136void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
137{
138 if (!dl_entity_is_special(dl_se))
139 __sub_rq_bw(dl_se->dl_bw, dl_rq);
140}
141
142static inline
143void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144{
145 if (!dl_entity_is_special(dl_se))
146 __add_running_bw(dl_se->dl_bw, dl_rq);
147}
148
149static inline
150void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
151{
152 if (!dl_entity_is_special(dl_se))
153 __sub_running_bw(dl_se->dl_bw, dl_rq);
154}
155
156void dl_change_utilization(struct task_struct *p, u64 new_bw)
157{
158 struct rq *rq;
159
160 BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
161
162 if (task_on_rq_queued(p))
163 return;
164
165 rq = task_rq(p);
166 if (p->dl.dl_non_contending) {
167 sub_running_bw(&p->dl, &rq->dl);
168 p->dl.dl_non_contending = 0;
 /*
  * If the timer handler is currently running and the
  * timer cannot be cancelled, inactive_task_timer()
  * will see that dl_non_contending is not set, and
  * will not touch the rq's active utilization,
  * so we are still safe.
  */
176 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
177 put_task_struct(p);
178 }
179 __sub_rq_bw(p->dl.dl_bw, &rq->dl);
180 __add_rq_bw(new_bw, &rq->dl);
181}

/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is cancelled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * A task is "ACTIVE contending" while it is runnable, "ACTIVE
 * non contending" while it is blocked but its "0-lag time" has not
 * passed yet, and "INACTIVE" once the "0-lag time" has passed.
 * Only ACTIVE tasks (contending or not) contribute to running_bw.
 *
 * task_non_contending() is invoked when a task blocks, and checks
 * whether the "0-lag time" already passed or not (in the first case,
 * it directly updates running_bw; in the second case, it arms the
 * inactive timer).
 *
 * task_contending() is invoked when a task wakes up, and checks
 * whether the task is still in the "ACTIVE non contending" state
 * (cancelling the inactive timer) or not (adding its utilization
 * back to running_bw).
 */
237static void task_non_contending(struct task_struct *p)
238{
239 struct sched_dl_entity *dl_se = &p->dl;
240 struct hrtimer *timer = &dl_se->inactive_timer;
241 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
242 struct rq *rq = rq_of_dl_rq(dl_rq);
243 s64 zerolag_time;

 /*
  * If this is a non-deadline task that has been boosted,
  * do nothing
  */
249 if (dl_se->dl_runtime == 0)
250 return;
251
252 if (dl_entity_is_special(dl_se))
253 return;
254
255 WARN_ON(dl_se->dl_non_contending);
256
257 zerolag_time = dl_se->deadline -
258 div64_long((dl_se->runtime * dl_se->dl_period),
259 dl_se->dl_runtime);

 /*
  * Using relative times instead of the absolute "0-lag time"
  * allows to simplify the code
  */
265 zerolag_time -= rq_clock(rq);

 /*
  * If the "0-lag time" already passed, decrease the active
  * utilization now, instead of starting a timer
  */
271 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
272 if (dl_task(p))
273 sub_running_bw(dl_se, dl_rq);
274 if (!dl_task(p) || p->state == TASK_DEAD) {
275 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
276
277 if (p->state == TASK_DEAD)
278 sub_rq_bw(&p->dl, &rq->dl);
279 raw_spin_lock(&dl_b->lock);
280 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
281 __dl_clear_params(p);
282 raw_spin_unlock(&dl_b->lock);
283 }
284
285 return;
286 }
287
288 dl_se->dl_non_contending = 1;
289 get_task_struct(p);
290 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
291}
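
/*
 * Worked example (made-up numbers) of the "0-lag time" computed in
 * task_non_contending() above: a task with dl_runtime = 10 ms and
 * dl_period = 100 ms (a 10% reservation) blocks with 5 ms of runtime
 * left and its absolute deadline 60 ms in the future.  Consuming 5 ms
 * at a 10% rate takes 50 ms, so the 0-lag time is 60 - 50 = 10 ms from
 * now and the inactive timer is armed for that instant; had the
 * deadline been only 40 ms away, the 0-lag time would already have
 * passed and running_bw would be decreased immediately.
 */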
292
293static void task_contending(struct sched_dl_entity *dl_se, int flags)
294{
295 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

 /*
  * If this is a non-deadline task that has been boosted,
  * do nothing
  */
301 if (dl_se->dl_runtime == 0)
302 return;
303
304 if (flags & ENQUEUE_MIGRATED)
305 add_rq_bw(dl_se, dl_rq);
306
307 if (dl_se->dl_non_contending) {
308 dl_se->dl_non_contending = 0;
 /*
  * If the timer handler is currently running and the
  * timer cannot be cancelled, inactive_task_timer()
  * will see that dl_non_contending is not set, and
  * will not touch the rq's active utilization,
  * so we are still safe.
  */
316 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
317 put_task_struct(dl_task_of(dl_se));
318 } else {
 /*
  * Since "dl_non_contending" is not set, the
  * task's utilization has already been removed from
  * active utilization (either when the task blocked,
  * or when the "inactive timer" fired).
  * So, add it back.
  */
326 add_running_bw(dl_se, dl_rq);
327 }
328}
329
330static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
331{
332 struct sched_dl_entity *dl_se = &p->dl;
333
334 return dl_rq->root.rb_leftmost == &dl_se->rb_node;
335}
336
337void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
338{
339 raw_spin_lock_init(&dl_b->dl_runtime_lock);
340 dl_b->dl_period = period;
341 dl_b->dl_runtime = runtime;
342}
343
344void init_dl_bw(struct dl_bw *dl_b)
345{
346 raw_spin_lock_init(&dl_b->lock);
347 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
348 if (global_rt_runtime() == RUNTIME_INF)
349 dl_b->bw = -1;
350 else
351 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
352 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
353 dl_b->total_bw = 0;
354}
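
/*
 * Note on the fixed-point format used above: to_ratio(period, runtime)
 * returns (runtime << BW_SHIFT) / period, i.e. the bandwidth scaled by
 * 2^20.  For example (made-up numbers), a 10 ms runtime every 100 ms is
 * stored as roughly 0.1 * 2^20 = 104857, and the default 95% global RT
 * limit gives dl_b->bw ~= 996147.
 */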
355
356void init_dl_rq(struct dl_rq *dl_rq)
357{
358 dl_rq->root = RB_ROOT_CACHED;
359
360#ifdef CONFIG_SMP
361
362 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
363
364 dl_rq->dl_nr_migratory = 0;
365 dl_rq->overloaded = 0;
366 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
367#else
368 init_dl_bw(&dl_rq->dl_bw);
369#endif
370
371 dl_rq->running_bw = 0;
372 dl_rq->this_bw = 0;
373 init_dl_rq_bw_ratio(dl_rq);
374}
375
376#ifdef CONFIG_SMP
377
378static inline int dl_overloaded(struct rq *rq)
379{
380 return atomic_read(&rq->rd->dlo_count);
381}
382
383static inline void dl_set_overload(struct rq *rq)
384{
385 if (!rq->online)
386 return;
387
388 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
389
390
391
392
393
394
395 smp_wmb();
396 atomic_inc(&rq->rd->dlo_count);
397}
398
399static inline void dl_clear_overload(struct rq *rq)
400{
401 if (!rq->online)
402 return;
403
404 atomic_dec(&rq->rd->dlo_count);
405 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
406}
407
408static void update_dl_migration(struct dl_rq *dl_rq)
409{
410 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
411 if (!dl_rq->overloaded) {
412 dl_set_overload(rq_of_dl_rq(dl_rq));
413 dl_rq->overloaded = 1;
414 }
415 } else if (dl_rq->overloaded) {
416 dl_clear_overload(rq_of_dl_rq(dl_rq));
417 dl_rq->overloaded = 0;
418 }
419}
420
421static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
422{
423 struct task_struct *p = dl_task_of(dl_se);
424
425 if (p->nr_cpus_allowed > 1)
426 dl_rq->dl_nr_migratory++;
427
428 update_dl_migration(dl_rq);
429}
430
431static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
432{
433 struct task_struct *p = dl_task_of(dl_se);
434
435 if (p->nr_cpus_allowed > 1)
436 dl_rq->dl_nr_migratory--;
437
438 update_dl_migration(dl_rq);
439}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
445static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
446{
447 struct dl_rq *dl_rq = &rq->dl;
448 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
449 struct rb_node *parent = NULL;
450 struct task_struct *entry;
451 bool leftmost = true;
452
453 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
454
455 while (*link) {
456 parent = *link;
457 entry = rb_entry(parent, struct task_struct,
458 pushable_dl_tasks);
459 if (dl_entity_preempt(&p->dl, &entry->dl))
460 link = &parent->rb_left;
461 else {
462 link = &parent->rb_right;
463 leftmost = false;
464 }
465 }
466
467 if (leftmost)
468 dl_rq->earliest_dl.next = p->dl.deadline;
469
470 rb_link_node(&p->pushable_dl_tasks, parent, link);
471 rb_insert_color_cached(&p->pushable_dl_tasks,
472 &dl_rq->pushable_dl_tasks_root, leftmost);
473}
474
475static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
476{
477 struct dl_rq *dl_rq = &rq->dl;
478
479 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
480 return;
481
482 if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
483 struct rb_node *next_node;
484
485 next_node = rb_next(&p->pushable_dl_tasks);
486 if (next_node) {
487 dl_rq->earliest_dl.next = rb_entry(next_node,
488 struct task_struct, pushable_dl_tasks)->dl.deadline;
489 }
490 }
491
492 rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
493 RB_CLEAR_NODE(&p->pushable_dl_tasks);
494}
495
496static inline int has_pushable_dl_tasks(struct rq *rq)
497{
498 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
499}
500
501static int push_dl_task(struct rq *rq);
502
503static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
504{
505 return dl_task(prev);
506}
507
508static DEFINE_PER_CPU(struct callback_head, dl_push_head);
509static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
510
511static void push_dl_tasks(struct rq *);
512static void pull_dl_task(struct rq *);
513
514static inline void deadline_queue_push_tasks(struct rq *rq)
515{
516 if (!has_pushable_dl_tasks(rq))
517 return;
518
519 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
520}
521
522static inline void deadline_queue_pull_task(struct rq *rq)
523{
524 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
525}
526
527static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
528
529static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
530{
531 struct rq *later_rq = NULL;
532 struct dl_bw *dl_b;
533
534 later_rq = find_lock_later_rq(p, rq);
535 if (!later_rq) {
536 int cpu;
537
538
539
540
541
542 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
543 if (cpu >= nr_cpu_ids) {
544
545
546
547
548 BUG_ON(dl_bandwidth_enabled());
549
550
551
552
553
554
555 cpu = cpumask_any(cpu_active_mask);
556 }
557 later_rq = cpu_rq(cpu);
558 double_lock_balance(rq, later_rq);
559 }
560
561 if (p->dl.dl_non_contending || p->dl.dl_throttled) {
562
563
564
565
566
567
568 sub_running_bw(&p->dl, &rq->dl);
569 sub_rq_bw(&p->dl, &rq->dl);
570
571 add_rq_bw(&p->dl, &later_rq->dl);
572 add_running_bw(&p->dl, &later_rq->dl);
573 } else {
574 sub_rq_bw(&p->dl, &rq->dl);
575 add_rq_bw(&p->dl, &later_rq->dl);
576 }
577
578
579
580
581
582
583 dl_b = &rq->rd->dl_bw;
584 raw_spin_lock(&dl_b->lock);
585 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
586 raw_spin_unlock(&dl_b->lock);
587
588 dl_b = &later_rq->rd->dl_bw;
589 raw_spin_lock(&dl_b->lock);
590 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
591 raw_spin_unlock(&dl_b->lock);
592
593 set_task_cpu(p, later_rq->cpu);
594 double_unlock_balance(later_rq, rq);
595
596 return later_rq;
597}
598
599#else
600
601static inline
602void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
603{
604}
605
606static inline
607void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
608{
609}
610
611static inline
612void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
613{
614}
615
616static inline
617void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
618{
619}
620
621static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
622{
623 return false;
624}
625
626static inline void pull_dl_task(struct rq *rq)
627{
628}
629
630static inline void deadline_queue_push_tasks(struct rq *rq)
631{
632}
633
634static inline void deadline_queue_pull_task(struct rq *rq)
635{
636}
637#endif
638
639static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
640static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
641static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
655static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
656{
657 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
658 struct rq *rq = rq_of_dl_rq(dl_rq);
659
660 WARN_ON(dl_se->dl_boosted);
661 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
662
663
664
665
666
667
668 if (dl_se->dl_throttled)
669 return;
670
671
672
673
674
675
676 dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
677 dl_se->runtime = dl_se->dl_runtime;
678}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to overcome its
 * runtime, or it just underestimating it during sched_setattr().
 */
698static void replenish_dl_entity(struct sched_dl_entity *dl_se,
699 struct sched_dl_entity *pi_se)
700{
701 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
702 struct rq *rq = rq_of_dl_rq(dl_rq);
703
704 BUG_ON(pi_se->dl_runtime <= 0);
705
706
707
708
709
710 if (dl_se->dl_deadline == 0) {
711 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
712 dl_se->runtime = pi_se->dl_runtime;
713 }
714
715 if (dl_se->dl_yielded && dl_se->runtime > 0)
716 dl_se->runtime = 0;

 /*
  * We keep moving the deadline away until we get some
  * available runtime for the entity. This ensures correct
  * handling of situations where the runtime overrun is
  * arbitrarily large.
  */
724 while (dl_se->runtime <= 0) {
725 dl_se->deadline += pi_se->dl_period;
726 dl_se->runtime += pi_se->dl_runtime;
727 }

 /*
  * At this point, the deadline really should be "in
  * the future" with respect to rq->clock. If it's
  * not, we are, for some reason, lagging too much!
  * Anyway, after having warned userspace about that,
  * we still try to keep things running by
  * resetting the deadline and the budget of the
  * entity.
  */
738 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
739 printk_deferred_once("sched: DL replenish lagged too much\n");
740 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
741 dl_se->runtime = pi_se->dl_runtime;
742 }
743
744 if (dl_se->dl_yielded)
745 dl_se->dl_yielded = 0;
746 if (dl_se->dl_throttled)
747 dl_se->dl_throttled = 0;
748}
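
/*
 * Example of the replenishment loop above (made-up numbers): an entity
 * with dl_runtime = 10 ms that got throttled at runtime = -3 ms needs a
 * single iteration, leaving runtime = 7 ms and a deadline postponed by
 * one dl_period; an overrun down to -25 ms takes three iterations,
 * ending with runtime = 5 ms and the deadline pushed three periods
 * ahead.
 */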

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.rst for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * tasks with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
774static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
775 struct sched_dl_entity *pi_se, u64 t)
776{
777 u64 left, right;

 /*
  * left and right are the two sides of the equation above,
  * after a bit of shuffling to use multiplications instead
  * of divisions.
  *
  * Note that none of the time values involved in the two
  * multiplications are absolute: dl_deadline and dl_runtime
  * are the relative deadline and the maximum runtime of each
  * instance, runtime is the runtime left for the last instance
  * and (deadline - t), since t is rq->clock, is the time left
  * to the (absolute) deadline. Shifting everything down by
  * DL_SCALE both avoids any risk of overflowing the u64
  * multiplications and keeps a resolution (2^10 ns, about 1 us)
  * that is more than enough for our purposes.
  */
797 left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
798 right = ((dl_se->deadline - t) >> DL_SCALE) *
799 (pi_se->dl_runtime >> DL_SCALE);
800
801 return dl_time_before(right, left);
802}
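
/*
 * Numerical illustration of the check above (made-up numbers): take
 * dl_runtime = 10 ms and dl_deadline = 100 ms, i.e. an allowed density
 * of 0.1.  If the entity wakes with runtime = 5 ms left and 20 ms to
 * its old deadline, then left = 100 * 5 and right = 20 * 10 (ignoring
 * the common DL_SCALE shift), so right < left and the function returns
 * true: the residual 5 ms / 20 ms = 0.25 density would exceed the
 * reservation, and update_dl_entity() must pick new parameters.
 */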

/*
 * Revised wakeup rule [1]: for self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to avoid the task overrunning its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *    runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In this way the runtime equals the maximum amount the task can use
 * without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
822static void
823update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
824{
825 u64 laxity = dl_se->deadline - rq_clock(rq);
826
827
828
829
830
831
832
833 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
834
835 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
836}
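
/*
 * Example with made-up numbers: a constrained task with
 * dl_runtime = 10 ms, dl_deadline = 60 ms and dl_period = 100 ms has
 * dl_density ~= (10/60) * 2^BW_SHIFT.  Waking up with 12 ms of laxity
 * to its unchanged deadline, its runtime is clipped to about
 * (10/60) * 12 ms = 2 ms, so it cannot exceed its density within the
 * current period.
 */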

/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some
 * restrictions applied only for tasks which do not have an implicit
 * deadline. See update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
849static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
850{
851 return dl_se->dl_deadline == dl_se->dl_period;
852}
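
/*
 * For example, a task with sched_deadline == sched_period == 100 ms is
 * implicit, while one with sched_deadline = 60 ms and
 * sched_period = 100 ms is constrained and is subject to the extra
 * handling in dl_check_constrained_dl() and update_dl_revised_wakeup().
 */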

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow; see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied: the runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehaviour, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of update_dl_revised_wakeup() to find more
 * about the Revised CBS rule.
 */
884static void update_dl_entity(struct sched_dl_entity *dl_se,
885 struct sched_dl_entity *pi_se)
886{
887 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
888 struct rq *rq = rq_of_dl_rq(dl_rq);
889
890 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
891 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
892
893 if (unlikely(!dl_is_implicit(dl_se) &&
894 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
895 !dl_se->dl_boosted)){
896 update_dl_revised_wakeup(dl_se, rq);
897 return;
898 }
899
900 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
901 dl_se->runtime = pi_se->dl_runtime;
902 }
903}
904
905static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
906{
907 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
908}
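
/*
 * dl_next_period() returns the start of the period following the
 * current one: e.g. (made-up numbers) with dl_period = 100 ms and
 * dl_deadline = 60 ms, a job whose absolute deadline is at t = 160 ms
 * was released at t = 100 ms, so its next period starts at t = 200 ms.
 */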

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
920static int start_dl_timer(struct task_struct *p)
921{
922 struct sched_dl_entity *dl_se = &p->dl;
923 struct hrtimer *timer = &dl_se->dl_timer;
924 struct rq *rq = task_rq(p);
925 ktime_t now, act;
926 s64 delta;
927
928 lockdep_assert_held(&rq->lock);

 /*
  * We want the timer to fire at the deadline, but considering
  * that it is actually coming from rq->clock and not from
  * hrtimer's time base reading.
  */
935 act = ns_to_ktime(dl_next_period(dl_se));
936 now = hrtimer_cb_get_time(timer);
937 delta = ktime_to_ns(now) - rq_clock(rq);
938 act = ktime_add_ns(act, delta);

 /*
  * If the expiry time is already in the past, don't bother
  * arming the timer: returning 0 lets the caller handle the
  * replenishment right away instead of waiting for the timer.
  */
945 if (ktime_us_delta(act, now) < 0)
946 return 0;

 /*
  * !enqueued will guarantee another callback; even if one is already in
  * progress. This ensures a balanced {get,put}_task_struct(), with the
  * get below paired with the put in dl_task_timer().
  */
957 if (!hrtimer_is_queued(timer)) {
958 get_task_struct(p);
959 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
960 }
961
962 return 1;
963}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * replenish it in place and let the next enqueue_task_dl() take care of
 * queueing it back.
 */
978static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
979{
980 struct sched_dl_entity *dl_se = container_of(timer,
981 struct sched_dl_entity,
982 dl_timer);
983 struct task_struct *p = dl_task_of(dl_se);
984 struct rq_flags rf;
985 struct rq *rq;
986
987 rq = task_rq_lock(p, &rf);
988
989
990
991
992
993 if (!dl_task(p))
994 goto unlock;
995
996
997
998
999
1000 if (dl_se->dl_boosted)
1001 goto unlock;
1002
1003
1004
1005
1006
1007 if (!dl_se->dl_throttled)
1008 goto unlock;
1009
1010 sched_clock_tick();
1011 update_rq_clock(rq);
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027 if (!task_on_rq_queued(p)) {
1028 replenish_dl_entity(dl_se, dl_se);
1029 goto unlock;
1030 }
1031
1032#ifdef CONFIG_SMP
1033 if (unlikely(!rq->online)) {
1034
1035
1036
1037
1038 lockdep_unpin_lock(&rq->lock, rf.cookie);
1039 rq = dl_task_offline_migration(rq, p);
1040 rf.cookie = lockdep_pin_lock(&rq->lock);
1041 update_rq_clock(rq);
1042
1043
1044
1045
1046
1047
1048 }
1049#endif
1050
1051 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1052 if (dl_task(rq->curr))
1053 check_preempt_curr_dl(rq, p, 0);
1054 else
1055 resched_curr(rq);
1056
1057#ifdef CONFIG_SMP
1058
1059
1060
1061
1062 if (has_pushable_dl_tasks(rq)) {
1063
1064
1065
1066
1067 rq_unpin_lock(rq, &rf);
1068 push_dl_task(rq);
1069 rq_repin_lock(rq, &rf);
1070 }
1071#endif
1072
1073unlock:
1074 task_rq_unlock(rq, p, &rf);
1075
1076
1077
1078
1079
1080 put_task_struct(p);
1081
1082 return HRTIMER_NORESTART;
1083}
1084
1085void init_dl_task_timer(struct sched_dl_entity *dl_se)
1086{
1087 struct hrtimer *timer = &dl_se->dl_timer;
1088
1089 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1090 timer->function = dl_task_timer;
1091}
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1112{
1113 struct task_struct *p = dl_task_of(dl_se);
1114 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1115
1116 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1117 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1118 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1119 return;
1120 dl_se->dl_throttled = 1;
1121 if (dl_se->runtime > 0)
1122 dl_se->runtime = 0;
1123 }
1124}
1125
1126static
1127int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1128{
1129 return (dl_se->runtime <= 0);
1130}
1131
1132extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * This function implements the GRUB accounting rule:
 * according to the GRUB reclaiming algorithm, the runtime is
 * not decreased as "dq = -dt", but as
 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * runqueue active utilization, and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
 * BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value
 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
 * So, overflow is not an issue here.
 */
1153static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1154{
1155 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw;
1156 u64 u_act;
1157 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

 /*
  * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
  * we compare u_inact + rq->dl.extra_bw with
  * BW_UNIT - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
  * u_inact + rq->dl.extra_bw can be larger than BW_UNIT
  * (so, BW_UNIT - u_inact - rq->dl.extra_bw would be negative,
  * leading to wrong results).
  */
1167 if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1168 u_act = u_act_min;
1169 else
1170 u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1171
1172 return (delta * u_act) >> BW_SHIFT;
1173}
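
/*
 * Example of the scaling above (made-up numbers): if u_inact plus
 * extra_bw amounts to half of BW_UNIT, and the task's minimum
 * reclaimable share u_act_min is below the other half, then
 * u_act = 0.5 * 2^BW_SHIFT and 4 ms of wall-clock execution (delta)
 * deplete only 2 ms of the entity's runtime; the rest is bandwidth
 * reclaimed from inactive entities.
 */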
1174
1175
1176
1177
1178
1179static void update_curr_dl(struct rq *rq)
1180{
1181 struct task_struct *curr = rq->curr;
1182 struct sched_dl_entity *dl_se = &curr->dl;
1183 u64 delta_exec, scaled_delta_exec;
1184 int cpu = cpu_of(rq);
1185 u64 now;
1186
1187 if (!dl_task(curr) || !on_dl_rq(dl_se))
1188 return;
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198 now = rq_clock_task(rq);
1199 delta_exec = now - curr->se.exec_start;
1200 if (unlikely((s64)delta_exec <= 0)) {
1201 if (unlikely(dl_se->dl_yielded))
1202 goto throttle;
1203 return;
1204 }
1205
1206 schedstat_set(curr->se.statistics.exec_max,
1207 max(curr->se.statistics.exec_max, delta_exec));
1208
1209 curr->se.sum_exec_runtime += delta_exec;
1210 account_group_exec_runtime(curr, delta_exec);
1211
1212 curr->se.exec_start = now;
1213 cgroup_account_cputime(curr, delta_exec);
1214
1215 if (dl_entity_is_special(dl_se))
1216 return;
1217
1218
1219
1220
1221
1222
1223
1224
1225 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1226 scaled_delta_exec = grub_reclaim(delta_exec,
1227 rq,
1228 &curr->dl);
1229 } else {
1230 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1231 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1232
1233 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1234 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1235 }
1236
1237 dl_se->runtime -= scaled_delta_exec;
1238
1239throttle:
1240 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1241 dl_se->dl_throttled = 1;
1242
1243
1244 if (dl_runtime_exceeded(dl_se) &&
1245 (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1246 dl_se->dl_overrun = 1;
1247
1248 __dequeue_task_dl(rq, curr, 0);
1249 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1250 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1251
1252 if (!is_leftmost(curr, &rq->dl))
1253 resched_curr(rq);
1254 }
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267 if (rt_bandwidth_enabled()) {
1268 struct rt_rq *rt_rq = &rq->rt;
1269
1270 raw_spin_lock(&rt_rq->rt_runtime_lock);
1271
1272
1273
1274
1275
1276 if (sched_rt_bandwidth_account(rt_rq))
1277 rt_rq->rt_time += delta_exec;
1278 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1279 }
1280}
1281
1282static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1283{
1284 struct sched_dl_entity *dl_se = container_of(timer,
1285 struct sched_dl_entity,
1286 inactive_timer);
1287 struct task_struct *p = dl_task_of(dl_se);
1288 struct rq_flags rf;
1289 struct rq *rq;
1290
1291 rq = task_rq_lock(p, &rf);
1292
1293 sched_clock_tick();
1294 update_rq_clock(rq);
1295
1296 if (!dl_task(p) || p->state == TASK_DEAD) {
1297 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1298
1299 if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1300 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1301 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1302 dl_se->dl_non_contending = 0;
1303 }
1304
1305 raw_spin_lock(&dl_b->lock);
1306 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1307 raw_spin_unlock(&dl_b->lock);
1308 __dl_clear_params(p);
1309
1310 goto unlock;
1311 }
1312 if (dl_se->dl_non_contending == 0)
1313 goto unlock;
1314
1315 sub_running_bw(dl_se, &rq->dl);
1316 dl_se->dl_non_contending = 0;
1317unlock:
1318 task_rq_unlock(rq, p, &rf);
1319 put_task_struct(p);
1320
1321 return HRTIMER_NORESTART;
1322}
1323
1324void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1325{
1326 struct hrtimer *timer = &dl_se->inactive_timer;
1327
1328 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1329 timer->function = inactive_task_timer;
1330}
1331
1332#ifdef CONFIG_SMP
1333
1334static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1335{
1336 struct rq *rq = rq_of_dl_rq(dl_rq);
1337
1338 if (dl_rq->earliest_dl.curr == 0 ||
1339 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1340 dl_rq->earliest_dl.curr = deadline;
1341 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1342 }
1343}
1344
1345static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1346{
1347 struct rq *rq = rq_of_dl_rq(dl_rq);
1348
1349
1350
1351
1352
1353 if (!dl_rq->dl_nr_running) {
1354 dl_rq->earliest_dl.curr = 0;
1355 dl_rq->earliest_dl.next = 0;
1356 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1357 } else {
1358 struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1359 struct sched_dl_entity *entry;
1360
1361 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1362 dl_rq->earliest_dl.curr = entry->deadline;
1363 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1364 }
1365}
1366
1367#else
1368
1369static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1370static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1371
1372#endif
1373
1374static inline
1375void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1376{
1377 int prio = dl_task_of(dl_se)->prio;
1378 u64 deadline = dl_se->deadline;
1379
1380 WARN_ON(!dl_prio(prio));
1381 dl_rq->dl_nr_running++;
1382 add_nr_running(rq_of_dl_rq(dl_rq), 1);
1383
1384 inc_dl_deadline(dl_rq, deadline);
1385 inc_dl_migration(dl_se, dl_rq);
1386}
1387
1388static inline
1389void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1390{
1391 int prio = dl_task_of(dl_se)->prio;
1392
1393 WARN_ON(!dl_prio(prio));
1394 WARN_ON(!dl_rq->dl_nr_running);
1395 dl_rq->dl_nr_running--;
1396 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1397
1398 dec_dl_deadline(dl_rq, dl_se->deadline);
1399 dec_dl_migration(dl_se, dl_rq);
1400}
1401
1402static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1403{
1404 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1405 struct rb_node **link = &dl_rq->root.rb_root.rb_node;
1406 struct rb_node *parent = NULL;
1407 struct sched_dl_entity *entry;
1408 int leftmost = 1;
1409
1410 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1411
1412 while (*link) {
1413 parent = *link;
1414 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1415 if (dl_time_before(dl_se->deadline, entry->deadline))
1416 link = &parent->rb_left;
1417 else {
1418 link = &parent->rb_right;
1419 leftmost = 0;
1420 }
1421 }
1422
1423 rb_link_node(&dl_se->rb_node, parent, link);
1424 rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
1425
1426 inc_dl_tasks(dl_se, dl_rq);
1427}
1428
1429static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1430{
1431 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1432
1433 if (RB_EMPTY_NODE(&dl_se->rb_node))
1434 return;
1435
1436 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1437 RB_CLEAR_NODE(&dl_se->rb_node);
1438
1439 dec_dl_tasks(dl_se, dl_rq);
1440}
1441
1442static void
1443enqueue_dl_entity(struct sched_dl_entity *dl_se,
1444 struct sched_dl_entity *pi_se, int flags)
1445{
1446 BUG_ON(on_dl_rq(dl_se));
1447
1448
1449
1450
1451
1452
1453 if (flags & ENQUEUE_WAKEUP) {
1454 task_contending(dl_se, flags);
1455 update_dl_entity(dl_se, pi_se);
1456 } else if (flags & ENQUEUE_REPLENISH) {
1457 replenish_dl_entity(dl_se, pi_se);
1458 } else if ((flags & ENQUEUE_RESTORE) &&
1459 dl_time_before(dl_se->deadline,
1460 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1461 setup_new_dl_entity(dl_se);
1462 }
1463
1464 __enqueue_dl_entity(dl_se);
1465}
1466
1467static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1468{
1469 __dequeue_dl_entity(dl_se);
1470}
1471
1472static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1473{
1474 struct task_struct *pi_task = rt_mutex_get_top_task(p);
1475 struct sched_dl_entity *pi_se = &p->dl;
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485 if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1486 pi_se = &pi_task->dl;
1487 } else if (!dl_prio(p->normal_prio)) {
1488
1489
1490
1491
1492
1493
1494
1495 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1496 return;
1497 }
1498
1499
1500
1501
1502
1503
1504
1505 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1506 dl_check_constrained_dl(&p->dl);
1507
1508 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1509 add_rq_bw(&p->dl, &rq->dl);
1510 add_running_bw(&p->dl, &rq->dl);
1511 }
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1526 if (flags & ENQUEUE_WAKEUP)
1527 task_contending(&p->dl, flags);
1528
1529 return;
1530 }
1531
1532 enqueue_dl_entity(&p->dl, pi_se, flags);
1533
1534 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1535 enqueue_pushable_dl_task(rq, p);
1536}
1537
1538static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1539{
1540 dequeue_dl_entity(&p->dl);
1541 dequeue_pushable_dl_task(rq, p);
1542}
1543
1544static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1545{
1546 update_curr_dl(rq);
1547 __dequeue_task_dl(rq, p, flags);
1548
1549 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1550 sub_running_bw(&p->dl, &rq->dl);
1551 sub_rq_bw(&p->dl, &rq->dl);
1552 }
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563 if (flags & DEQUEUE_SLEEP)
1564 task_non_contending(p);
1565}
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577static void yield_task_dl(struct rq *rq)
1578{
1579
1580
1581
1582
1583
1584
1585 rq->curr->dl.dl_yielded = 1;
1586
1587 update_rq_clock(rq);
1588 update_curr_dl(rq);
1589
1590
1591
1592
1593
1594 rq_clock_skip_update(rq);
1595}
1596
1597#ifdef CONFIG_SMP
1598
1599static int find_later_rq(struct task_struct *task);
1600
1601static int
1602select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1603{
1604 struct task_struct *curr;
1605 struct rq *rq;
1606
1607 if (sd_flag != SD_BALANCE_WAKE)
1608 goto out;
1609
1610 rq = cpu_rq(cpu);
1611
1612 rcu_read_lock();
1613 curr = READ_ONCE(rq->curr);
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624 if (unlikely(dl_task(curr)) &&
1625 (curr->nr_cpus_allowed < 2 ||
1626 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1627 (p->nr_cpus_allowed > 1)) {
1628 int target = find_later_rq(p);
1629
1630 if (target != -1 &&
1631 (dl_time_before(p->dl.deadline,
1632 cpu_rq(target)->dl.earliest_dl.curr) ||
1633 (cpu_rq(target)->dl.dl_nr_running == 0)))
1634 cpu = target;
1635 }
1636 rcu_read_unlock();
1637
1638out:
1639 return cpu;
1640}
1641
1642static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1643{
1644 struct rq *rq;
1645
1646 if (p->state != TASK_WAKING)
1647 return;
1648
1649 rq = task_rq(p);
1650
1651
1652
1653
1654
1655 raw_spin_lock(&rq->lock);
1656 if (p->dl.dl_non_contending) {
1657 sub_running_bw(&p->dl, &rq->dl);
1658 p->dl.dl_non_contending = 0;
1659
1660
1661
1662
1663
1664
1665
1666 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1667 put_task_struct(p);
1668 }
1669 sub_rq_bw(&p->dl, &rq->dl);
1670 raw_spin_unlock(&rq->lock);
1671}
1672
1673static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1674{
1675
1676
1677
1678
1679 if (rq->curr->nr_cpus_allowed == 1 ||
1680 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1681 return;
1682
1683
1684
1685
1686
1687 if (p->nr_cpus_allowed != 1 &&
1688 cpudl_find(&rq->rd->cpudl, p, NULL))
1689 return;
1690
1691 resched_curr(rq);
1692}
1693
1694static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1695{
1696 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1697
1698
1699
1700
1701
1702
1703 rq_unpin_lock(rq, rf);
1704 pull_dl_task(rq);
1705 rq_repin_lock(rq, rf);
1706 }
1707
1708 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1709}
1710#endif
1711
1712
1713
1714
1715
1716static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1717 int flags)
1718{
1719 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1720 resched_curr(rq);
1721 return;
1722 }
1723
1724#ifdef CONFIG_SMP
1725
1726
1727
1728
1729 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1730 !test_tsk_need_resched(rq->curr))
1731 check_preempt_equal_dl(rq, p);
1732#endif
1733}
1734
1735#ifdef CONFIG_SCHED_HRTICK
1736static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1737{
1738 hrtick_start(rq, p->dl.runtime);
1739}
1740#else
1741static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1742{
1743}
1744#endif
1745
1746static void set_next_task_dl(struct rq *rq, struct task_struct *p)
1747{
1748 p->se.exec_start = rq_clock_task(rq);
1749
1750
1751 dequeue_pushable_dl_task(rq, p);
1752
1753 if (hrtick_enabled(rq))
1754 start_hrtick_dl(rq, p);
1755
1756 if (rq->curr->sched_class != &dl_sched_class)
1757 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1758
1759 deadline_queue_push_tasks(rq);
1760}
1761
1762static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1763 struct dl_rq *dl_rq)
1764{
1765 struct rb_node *left = rb_first_cached(&dl_rq->root);
1766
1767 if (!left)
1768 return NULL;
1769
1770 return rb_entry(left, struct sched_dl_entity, rb_node);
1771}
1772
1773static struct task_struct *
1774pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1775{
1776 struct sched_dl_entity *dl_se;
1777 struct dl_rq *dl_rq = &rq->dl;
1778 struct task_struct *p;
1779
1780 WARN_ON_ONCE(prev || rf);
1781
1782 if (!sched_dl_runnable(rq))
1783 return NULL;
1784
1785 dl_se = pick_next_dl_entity(rq, dl_rq);
1786 BUG_ON(!dl_se);
1787 p = dl_task_of(dl_se);
1788 set_next_task_dl(rq, p);
1789 return p;
1790}
1791
1792static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1793{
1794 update_curr_dl(rq);
1795
1796 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1797 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1798 enqueue_pushable_dl_task(rq, p);
1799}
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1810{
1811 update_curr_dl(rq);
1812
1813 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1814
1815
1816
1817
1818
1819 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1820 is_leftmost(p, &rq->dl))
1821 start_hrtick_dl(rq, p);
1822}
1823
1824static void task_fork_dl(struct task_struct *p)
1825{
1826
1827
1828
1829
1830}
1831
1832#ifdef CONFIG_SMP
1833
1834
1835#define DL_MAX_TRIES 3
1836
1837static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1838{
1839 if (!task_running(rq, p) &&
1840 cpumask_test_cpu(cpu, p->cpus_ptr))
1841 return 1;
1842 return 0;
1843}
1844
1845
1846
1847
1848
1849static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1850{
1851 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1852 struct task_struct *p = NULL;
1853
1854 if (!has_pushable_dl_tasks(rq))
1855 return NULL;
1856
1857next_node:
1858 if (next_node) {
1859 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1860
1861 if (pick_dl_task(rq, p, cpu))
1862 return p;
1863
1864 next_node = rb_next(next_node);
1865 goto next_node;
1866 }
1867
1868 return NULL;
1869}
1870
1871static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1872
1873static int find_later_rq(struct task_struct *task)
1874{
1875 struct sched_domain *sd;
1876 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1877 int this_cpu = smp_processor_id();
1878 int cpu = task_cpu(task);
1879
1880
1881 if (unlikely(!later_mask))
1882 return -1;
1883
1884 if (task->nr_cpus_allowed == 1)
1885 return -1;
1886
1887
1888
1889
1890
1891 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1892 return -1;
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906 if (cpumask_test_cpu(cpu, later_mask))
1907 return cpu;
1908
1909
1910
1911
1912 if (!cpumask_test_cpu(this_cpu, later_mask))
1913 this_cpu = -1;
1914
1915 rcu_read_lock();
1916 for_each_domain(cpu, sd) {
1917 if (sd->flags & SD_WAKE_AFFINE) {
1918 int best_cpu;
1919
1920
1921
1922
1923
1924 if (this_cpu != -1 &&
1925 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1926 rcu_read_unlock();
1927 return this_cpu;
1928 }
1929
1930 best_cpu = cpumask_first_and(later_mask,
1931 sched_domain_span(sd));
1932
1933
1934
1935
1936
1937
1938 if (best_cpu < nr_cpu_ids) {
1939 rcu_read_unlock();
1940 return best_cpu;
1941 }
1942 }
1943 }
1944 rcu_read_unlock();
1945
1946
1947
1948
1949
1950 if (this_cpu != -1)
1951 return this_cpu;
1952
1953 cpu = cpumask_any(later_mask);
1954 if (cpu < nr_cpu_ids)
1955 return cpu;
1956
1957 return -1;
1958}
1959
1960
1961static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1962{
1963 struct rq *later_rq = NULL;
1964 int tries;
1965 int cpu;
1966
1967 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1968 cpu = find_later_rq(task);
1969
1970 if ((cpu == -1) || (cpu == rq->cpu))
1971 break;
1972
1973 later_rq = cpu_rq(cpu);
1974
1975 if (later_rq->dl.dl_nr_running &&
1976 !dl_time_before(task->dl.deadline,
1977 later_rq->dl.earliest_dl.curr)) {
1978
1979
1980
1981
1982
1983 later_rq = NULL;
1984 break;
1985 }
1986
1987
1988 if (double_lock_balance(rq, later_rq)) {
1989 if (unlikely(task_rq(task) != rq ||
1990 !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
1991 task_running(rq, task) ||
1992 !dl_task(task) ||
1993 !task_on_rq_queued(task))) {
1994 double_unlock_balance(rq, later_rq);
1995 later_rq = NULL;
1996 break;
1997 }
1998 }
1999
2000
2001
2002
2003
2004
2005 if (!later_rq->dl.dl_nr_running ||
2006 dl_time_before(task->dl.deadline,
2007 later_rq->dl.earliest_dl.curr))
2008 break;
2009
2010
2011 double_unlock_balance(rq, later_rq);
2012 later_rq = NULL;
2013 }
2014
2015 return later_rq;
2016}
2017
2018static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2019{
2020 struct task_struct *p;
2021
2022 if (!has_pushable_dl_tasks(rq))
2023 return NULL;
2024
2025 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2026 struct task_struct, pushable_dl_tasks);
2027
2028 BUG_ON(rq->cpu != task_cpu(p));
2029 BUG_ON(task_current(rq, p));
2030 BUG_ON(p->nr_cpus_allowed <= 1);
2031
2032 BUG_ON(!task_on_rq_queued(p));
2033 BUG_ON(!dl_task(p));
2034
2035 return p;
2036}
2037
2038
2039
2040
2041
2042
2043static int push_dl_task(struct rq *rq)
2044{
2045 struct task_struct *next_task;
2046 struct rq *later_rq;
2047 int ret = 0;
2048
2049 if (!rq->dl.overloaded)
2050 return 0;
2051
2052 next_task = pick_next_pushable_dl_task(rq);
2053 if (!next_task)
2054 return 0;
2055
2056retry:
2057 if (WARN_ON(next_task == rq->curr))
2058 return 0;
2059
2060
2061
2062
2063
2064
2065 if (dl_task(rq->curr) &&
2066 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2067 rq->curr->nr_cpus_allowed > 1) {
2068 resched_curr(rq);
2069 return 0;
2070 }
2071
2072
2073 get_task_struct(next_task);
2074
2075
2076 later_rq = find_lock_later_rq(next_task, rq);
2077 if (!later_rq) {
2078 struct task_struct *task;
2079
2080
2081
2082
2083
2084
2085 task = pick_next_pushable_dl_task(rq);
2086 if (task == next_task) {
2087
2088
2089
2090
2091 goto out;
2092 }
2093
2094 if (!task)
2095
2096 goto out;
2097
2098 put_task_struct(next_task);
2099 next_task = task;
2100 goto retry;
2101 }
2102
2103 deactivate_task(rq, next_task, 0);
2104 set_task_cpu(next_task, later_rq->cpu);
2105
2106
2107
2108
2109
2110 update_rq_clock(later_rq);
2111 activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2112 ret = 1;
2113
2114 resched_curr(later_rq);
2115
2116 double_unlock_balance(rq, later_rq);
2117
2118out:
2119 put_task_struct(next_task);
2120
2121 return ret;
2122}
2123
2124static void push_dl_tasks(struct rq *rq)
2125{
2126
2127 while (push_dl_task(rq))
2128 ;
2129}
2130
2131static void pull_dl_task(struct rq *this_rq)
2132{
2133 int this_cpu = this_rq->cpu, cpu;
2134 struct task_struct *p;
2135 bool resched = false;
2136 struct rq *src_rq;
2137 u64 dmin = LONG_MAX;
2138
2139 if (likely(!dl_overloaded(this_rq)))
2140 return;
2141
2142
2143
2144
2145
2146 smp_rmb();
2147
2148 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2149 if (this_cpu == cpu)
2150 continue;
2151
2152 src_rq = cpu_rq(cpu);
2153
2154
2155
2156
2157
2158 if (this_rq->dl.dl_nr_running &&
2159 dl_time_before(this_rq->dl.earliest_dl.curr,
2160 src_rq->dl.earliest_dl.next))
2161 continue;
2162
2163
2164 double_lock_balance(this_rq, src_rq);
2165
2166
2167
2168
2169
2170 if (src_rq->dl.dl_nr_running <= 1)
2171 goto skip;
2172
2173 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2174
2175
2176
2177
2178
2179
2180 if (p && dl_time_before(p->dl.deadline, dmin) &&
2181 (!this_rq->dl.dl_nr_running ||
2182 dl_time_before(p->dl.deadline,
2183 this_rq->dl.earliest_dl.curr))) {
2184 WARN_ON(p == src_rq->curr);
2185 WARN_ON(!task_on_rq_queued(p));
2186
2187
2188
2189
2190
2191 if (dl_time_before(p->dl.deadline,
2192 src_rq->curr->dl.deadline))
2193 goto skip;
2194
2195 resched = true;
2196
2197 deactivate_task(src_rq, p, 0);
2198 set_task_cpu(p, this_cpu);
2199 activate_task(this_rq, p, 0);
2200 dmin = p->dl.deadline;
2201
2202
2203 }
2204skip:
2205 double_unlock_balance(this_rq, src_rq);
2206 }
2207
2208 if (resched)
2209 resched_curr(this_rq);
2210}
2211
2212
2213
2214
2215
2216static void task_woken_dl(struct rq *rq, struct task_struct *p)
2217{
2218 if (!task_running(rq, p) &&
2219 !test_tsk_need_resched(rq->curr) &&
2220 p->nr_cpus_allowed > 1 &&
2221 dl_task(rq->curr) &&
2222 (rq->curr->nr_cpus_allowed < 2 ||
2223 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2224 push_dl_tasks(rq);
2225 }
2226}
2227
2228static void set_cpus_allowed_dl(struct task_struct *p,
2229 const struct cpumask *new_mask)
2230{
2231 struct root_domain *src_rd;
2232 struct rq *rq;
2233
2234 BUG_ON(!dl_task(p));
2235
2236 rq = task_rq(p);
2237 src_rd = rq->rd;
2238
2239
2240
2241
2242
2243
2244 if (!cpumask_intersects(src_rd->span, new_mask)) {
2245 struct dl_bw *src_dl_b;
2246
2247 src_dl_b = dl_bw_of(cpu_of(rq));
2248
2249
2250
2251
2252
2253 raw_spin_lock(&src_dl_b->lock);
2254 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2255 raw_spin_unlock(&src_dl_b->lock);
2256 }
2257
2258 set_cpus_allowed_common(p, new_mask);
2259}
2260
2261
2262static void rq_online_dl(struct rq *rq)
2263{
2264 if (rq->dl.overloaded)
2265 dl_set_overload(rq);
2266
2267 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2268 if (rq->dl.dl_nr_running > 0)
2269 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2270}
2271
2272
2273static void rq_offline_dl(struct rq *rq)
2274{
2275 if (rq->dl.overloaded)
2276 dl_clear_overload(rq);
2277
2278 cpudl_clear(&rq->rd->cpudl, rq->cpu);
2279 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2280}
2281
2282void __init init_sched_dl_class(void)
2283{
2284 unsigned int i;
2285
2286 for_each_possible_cpu(i)
2287 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2288 GFP_KERNEL, cpu_to_node(i));
2289}
2290
2291void dl_add_task_root_domain(struct task_struct *p)
2292{
2293 struct rq_flags rf;
2294 struct rq *rq;
2295 struct dl_bw *dl_b;
2296
2297 rq = task_rq_lock(p, &rf);
2298 if (!dl_task(p))
2299 goto unlock;
2300
2301 dl_b = &rq->rd->dl_bw;
2302 raw_spin_lock(&dl_b->lock);
2303
2304 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2305
2306 raw_spin_unlock(&dl_b->lock);
2307
2308unlock:
2309 task_rq_unlock(rq, p, &rf);
2310}
2311
2312void dl_clear_root_domain(struct root_domain *rd)
2313{
2314 unsigned long flags;
2315
2316 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2317 rd->dl_bw.total_bw = 0;
2318 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2319}
2320
2321#endif
2322
2323static void switched_from_dl(struct rq *rq, struct task_struct *p)
2324{
2325
2326
2327
2328
2329
2330
2331
2332
2333 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2334 task_non_contending(p);
2335
2336 if (!task_on_rq_queued(p)) {
2337
2338
2339
2340
2341
2342
2343 if (p->dl.dl_non_contending)
2344 sub_running_bw(&p->dl, &rq->dl);
2345 sub_rq_bw(&p->dl, &rq->dl);
2346 }
2347
2348
2349
2350
2351
2352
2353 if (p->dl.dl_non_contending)
2354 p->dl.dl_non_contending = 0;
2355
2356
2357
2358
2359
2360
2361 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2362 return;
2363
2364 deadline_queue_pull_task(rq);
2365}
2366
2367
2368
2369
2370
2371static void switched_to_dl(struct rq *rq, struct task_struct *p)
2372{
2373 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2374 put_task_struct(p);
2375
2376
2377 if (!task_on_rq_queued(p)) {
2378 add_rq_bw(&p->dl, &rq->dl);
2379
2380 return;
2381 }
2382
2383 if (rq->curr != p) {
2384#ifdef CONFIG_SMP
2385 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2386 deadline_queue_push_tasks(rq);
2387#endif
2388 if (dl_task(rq->curr))
2389 check_preempt_curr_dl(rq, p, 0);
2390 else
2391 resched_curr(rq);
2392 }
2393}
2394
2395
2396
2397
2398
2399static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2400 int oldprio)
2401{
2402 if (task_on_rq_queued(p) || rq->curr == p) {
2403#ifdef CONFIG_SMP
2404
2405
2406
2407
2408
2409
2410 if (!rq->dl.overloaded)
2411 deadline_queue_pull_task(rq);
2412
2413
2414
2415
2416
2417
2418 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2419 resched_curr(rq);
2420#else
2421
2422
2423
2424
2425
2426 resched_curr(rq);
2427#endif
2428 }
2429}
2430
2431const struct sched_class dl_sched_class = {
2432 .next = &rt_sched_class,
2433 .enqueue_task = enqueue_task_dl,
2434 .dequeue_task = dequeue_task_dl,
2435 .yield_task = yield_task_dl,
2436
2437 .check_preempt_curr = check_preempt_curr_dl,
2438
2439 .pick_next_task = pick_next_task_dl,
2440 .put_prev_task = put_prev_task_dl,
2441 .set_next_task = set_next_task_dl,
2442
2443#ifdef CONFIG_SMP
2444 .balance = balance_dl,
2445 .select_task_rq = select_task_rq_dl,
2446 .migrate_task_rq = migrate_task_rq_dl,
2447 .set_cpus_allowed = set_cpus_allowed_dl,
2448 .rq_online = rq_online_dl,
2449 .rq_offline = rq_offline_dl,
2450 .task_woken = task_woken_dl,
2451#endif
2452
2453 .task_tick = task_tick_dl,
2454 .task_fork = task_fork_dl,
2455
2456 .prio_changed = prio_changed_dl,
2457 .switched_from = switched_from_dl,
2458 .switched_to = switched_to_dl,
2459
2460 .update_curr = update_curr_dl,
2461};
2462
2463int sched_dl_global_validate(void)
2464{
2465 u64 runtime = global_rt_runtime();
2466 u64 period = global_rt_period();
2467 u64 new_bw = to_ratio(period, runtime);
2468 struct dl_bw *dl_b;
2469 int cpu, ret = 0;
2470 unsigned long flags;
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481 for_each_possible_cpu(cpu) {
2482 rcu_read_lock_sched();
2483 dl_b = dl_bw_of(cpu);
2484
2485 raw_spin_lock_irqsave(&dl_b->lock, flags);
2486 if (new_bw < dl_b->total_bw)
2487 ret = -EBUSY;
2488 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2489
2490 rcu_read_unlock_sched();
2491
2492 if (ret)
2493 break;
2494 }
2495
2496 return ret;
2497}
2498
2499void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2500{
2501 if (global_rt_runtime() == RUNTIME_INF) {
2502 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2503 dl_rq->extra_bw = 1 << BW_SHIFT;
2504 } else {
2505 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2506 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2507 dl_rq->extra_bw = to_ratio(global_rt_period(),
2508 global_rt_runtime());
2509 }
2510}
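
/*
 * With the default global RT settings (950000 us of runtime every
 * 1000000 us), the code above yields bw_ratio ~= 269 (about 1/0.95 in
 * RATIO_SHIFT fixed point) and extra_bw ~= 996147 (about 0.95 in
 * BW_SHIFT fixed point); with RUNTIME_INF both are simply 1.0 in their
 * respective fixed-point formats.
 */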
2511
2512void sched_dl_do_global(void)
2513{
2514 u64 new_bw = -1;
2515 struct dl_bw *dl_b;
2516 int cpu;
2517 unsigned long flags;
2518
2519 def_dl_bandwidth.dl_period = global_rt_period();
2520 def_dl_bandwidth.dl_runtime = global_rt_runtime();
2521
2522 if (global_rt_runtime() != RUNTIME_INF)
2523 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2524
2525
2526
2527
2528 for_each_possible_cpu(cpu) {
2529 rcu_read_lock_sched();
2530 dl_b = dl_bw_of(cpu);
2531
2532 raw_spin_lock_irqsave(&dl_b->lock, flags);
2533 dl_b->bw = new_bw;
2534 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2535
2536 rcu_read_unlock_sched();
2537 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2538 }
2539}

/*
 * We must be sure that accepting a new task (or allowing changing the
 * parameters of an existing one) is consistent with the bandwidth
 * constraints. If yes, this function also accordingly updates the currently
 * allocated bandwidth to reflect the new situation.
 *
 * This function is called while holding p's rq->lock.
 */
2549int sched_dl_overflow(struct task_struct *p, int policy,
2550 const struct sched_attr *attr)
2551{
2552 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2553 u64 period = attr->sched_period ?: attr->sched_deadline;
2554 u64 runtime = attr->sched_runtime;
2555 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2556 int cpus, err = -1;
2557
2558 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2559 return 0;
2560
2561
2562 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2563 return 0;
2564
2565
2566
2567
2568
2569
2570 raw_spin_lock(&dl_b->lock);
2571 cpus = dl_bw_cpus(task_cpu(p));
2572 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2573 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2574 if (hrtimer_active(&p->dl.inactive_timer))
2575 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2576 __dl_add(dl_b, new_bw, cpus);
2577 err = 0;
2578 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2579 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2580
2581
2582
2583
2584
2585
2586
2587 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2588 __dl_add(dl_b, new_bw, cpus);
2589 dl_change_utilization(p, new_bw);
2590 err = 0;
2591 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2592
2593
2594
2595
2596
2597 err = 0;
2598 }
2599 raw_spin_unlock(&dl_b->lock);
2600
2601 return err;
2602}
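
/*
 * Example (made-up numbers): on a 4-CPU root domain with the default
 * 95% limit, the admitted bandwidth may not exceed roughly
 * 4 * 0.95 = 3.8 CPUs' worth; a new reservation of 10 ms every 100 ms
 * adds 0.1 to total_bw, and the change is refused once that cap would
 * be exceeded.
 */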

/*
 * This function initializes the sched_dl_entity of a newly becoming
 * SCHED_DEADLINE task.
 *
 * Only the static values are considered here, the actual runtime and the
 * absolute deadline will be properly calculated when the task is enqueued
 * for the first time with its new policy.
 */
2612void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2613{
2614 struct sched_dl_entity *dl_se = &p->dl;
2615
2616 dl_se->dl_runtime = attr->sched_runtime;
2617 dl_se->dl_deadline = attr->sched_deadline;
2618 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2619 dl_se->flags = attr->sched_flags;
2620 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2621 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2622}
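
/*
 * For instance (made-up numbers), sched_runtime = 10 ms,
 * sched_deadline = 60 ms and sched_period = 100 ms give
 * dl_bw ~= 0.1 * 2^BW_SHIFT (checked by admission control against the
 * root domain's dl_bw) and dl_density ~= (10/60) * 2^BW_SHIFT (used by
 * the revised wakeup rule in update_dl_revised_wakeup()).
 */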
2623
2624void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2625{
2626 struct sched_dl_entity *dl_se = &p->dl;
2627
2628 attr->sched_priority = p->rt_priority;
2629 attr->sched_runtime = dl_se->dl_runtime;
2630 attr->sched_deadline = dl_se->dl_deadline;
2631 attr->sched_period = dl_se->dl_period;
2632 attr->sched_flags = dl_se->flags;
2633}

/*
 * This function validates the new parameters of a -deadline task.
 * We ask for the deadline not being zero, and greater or equal
 * than the runtime, as well as the period being zero or
 * greater than the deadline. Furthermore, we have to be sure that
 * user parameters are above the internal resolution of 1 us (we
 * check sched_runtime only since it is always the smaller one) and
 * below 2^63 ns (we have to check both sched_deadline and
 * sched_period, as the latter can be zero).
 */
2645bool __checkparam_dl(const struct sched_attr *attr)
2646{
2647
2648 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2649 return true;
2650
2651
2652 if (attr->sched_deadline == 0)
2653 return false;
2654
2655
2656
2657
2658
2659 if (attr->sched_runtime < (1ULL << DL_SCALE))
2660 return false;
2661
2662
2663
2664
2665
2666 if (attr->sched_deadline & (1ULL << 63) ||
2667 attr->sched_period & (1ULL << 63))
2668 return false;
2669
2670
2671 if ((attr->sched_period != 0 &&
2672 attr->sched_period < attr->sched_deadline) ||
2673 attr->sched_deadline < attr->sched_runtime)
2674 return false;
2675
2676 return true;
2677}
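
/*
 * For example, sched_runtime = 10 ms, sched_deadline = 60 ms and
 * sched_period = 100 ms pass the checks above, while a runtime of
 * 500 ns (below the 1 << DL_SCALE resolution) or a deadline larger
 * than its period do not.
 */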

/*
 * This function clears the sched_dl_entity static params.
 */
2682void __dl_clear_params(struct task_struct *p)
2683{
2684 struct sched_dl_entity *dl_se = &p->dl;
2685
2686 dl_se->dl_runtime = 0;
2687 dl_se->dl_deadline = 0;
2688 dl_se->dl_period = 0;
2689 dl_se->flags = 0;
2690 dl_se->dl_bw = 0;
2691 dl_se->dl_density = 0;
2692
2693 dl_se->dl_throttled = 0;
2694 dl_se->dl_yielded = 0;
2695 dl_se->dl_non_contending = 0;
2696 dl_se->dl_overrun = 0;
2697}
2698
2699bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2700{
2701 struct sched_dl_entity *dl_se = &p->dl;
2702
2703 if (dl_se->dl_runtime != attr->sched_runtime ||
2704 dl_se->dl_deadline != attr->sched_deadline ||
2705 dl_se->dl_period != attr->sched_period ||
2706 dl_se->flags != attr->sched_flags)
2707 return true;
2708
2709 return false;
2710}
2711
2712#ifdef CONFIG_SMP
2713int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2714{
2715 unsigned int dest_cpu;
2716 struct dl_bw *dl_b;
2717 bool overflow;
2718 int cpus, ret;
2719 unsigned long flags;
2720
2721 dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2722
2723 rcu_read_lock_sched();
2724 dl_b = dl_bw_of(dest_cpu);
2725 raw_spin_lock_irqsave(&dl_b->lock, flags);
2726 cpus = dl_bw_cpus(dest_cpu);
2727 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2728 if (overflow) {
2729 ret = -EBUSY;
2730 } else {
2731
2732
2733
2734
2735
2736
2737 __dl_add(dl_b, p->dl.dl_bw, cpus);
2738 ret = 0;
2739 }
2740 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2741 rcu_read_unlock_sched();
2742
2743 return ret;
2744}
2745
2746int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2747 const struct cpumask *trial)
2748{
2749 int ret = 1, trial_cpus;
2750 struct dl_bw *cur_dl_b;
2751 unsigned long flags;
2752
2753 rcu_read_lock_sched();
2754 cur_dl_b = dl_bw_of(cpumask_any(cur));
2755 trial_cpus = cpumask_weight(trial);
2756
2757 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2758 if (cur_dl_b->bw != -1 &&
2759 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2760 ret = 0;
2761 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2762 rcu_read_unlock_sched();
2763
2764 return ret;
2765}
2766
2767bool dl_cpu_busy(unsigned int cpu)
2768{
2769 unsigned long flags;
2770 struct dl_bw *dl_b;
2771 bool overflow;
2772 int cpus;
2773
2774 rcu_read_lock_sched();
2775 dl_b = dl_bw_of(cpu);
2776 raw_spin_lock_irqsave(&dl_b->lock, flags);
2777 cpus = dl_bw_cpus(cpu);
2778 overflow = __dl_overflow(dl_b, cpus, 0, 0);
2779 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2780 rcu_read_unlock_sched();
2781
2782 return overflow;
2783}
2784#endif
2785
2786#ifdef CONFIG_SCHED_DEBUG
2787void print_dl_stats(struct seq_file *m, int cpu)
2788{
2789 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2790}
2791#endif
2792