1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "sched.h"
19#include "pelt.h"
20
21struct dl_bandwidth def_dl_bandwidth;
22
23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24{
25 return container_of(dl_se, struct task_struct, dl);
26}
27
28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29{
30 return container_of(dl_rq, struct rq, dl);
31}
32
33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34{
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
37
38 return &rq->dl;
39}
40
41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42{
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
44}
45
46#ifdef CONFIG_RT_MUTEXES
47static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
48{
49 return dl_se->pi_se;
50}
51
52static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
53{
54 return pi_of(dl_se) != dl_se;
55}
56#else
/* Without rtmutex PI support an entity is always its own donor. */
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
	return dl_se;
}
61
/* Without rtmutex PI support boosting can never happen. */
static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
	return false;
}
66#endif
67
68#ifdef CONFIG_SMP
69static inline struct dl_bw *dl_bw_of(int i)
70{
71 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
72 "sched RCU must be held");
73 return &cpu_rq(i)->rd->dl_bw;
74}
75
76static inline int dl_bw_cpus(int i)
77{
78 struct root_domain *rd = cpu_rq(i)->rd;
79 int cpus;
80
81 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
82 "sched RCU must be held");
83
84 if (cpumask_subset(rd->span, cpu_active_mask))
85 return cpumask_weight(rd->span);
86
87 cpus = 0;
88
89 for_each_cpu_and(i, rd->span, cpu_active_mask)
90 cpus++;
91
92 return cpus;
93}
94
95static inline unsigned long __dl_bw_capacity(int i)
96{
97 struct root_domain *rd = cpu_rq(i)->rd;
98 unsigned long cap = 0;
99
100 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
101 "sched RCU must be held");
102
103 for_each_cpu_and(i, rd->span, cpu_active_mask)
104 cap += capacity_orig_of(i);
105
106 return cap;
107}
108
109
110
111
112
113static inline unsigned long dl_bw_capacity(int i)
114{
115 if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
116 capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
117 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
118 } else {
119 return __dl_bw_capacity(i);
120 }
121}
122
123static inline bool dl_bw_visited(int cpu, u64 gen)
124{
125 struct root_domain *rd = cpu_rq(cpu)->rd;
126
127 if (rd->visit_gen == gen)
128 return true;
129
130 rd->visit_gen = gen;
131 return false;
132}
133#else
/* !CONFIG_SMP: bandwidth accounting is per-runqueue, not per root domain. */
static inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

/* !CONFIG_SMP: there is exactly one CPU. */
static inline int dl_bw_cpus(int i)
{
	return 1;
}

/* !CONFIG_SMP: a single CPU at full capacity. */
static inline unsigned long dl_bw_capacity(int i)
{
	return SCHED_CAPACITY_SCALE;
}

/* !CONFIG_SMP: no root domains, nothing can have been visited. */
static inline bool dl_bw_visited(int cpu, u64 gen)
{
	return false;
}
153#endif
154
155static inline
156void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
157{
158 u64 old = dl_rq->running_bw;
159
160 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
161 dl_rq->running_bw += dl_bw;
162 SCHED_WARN_ON(dl_rq->running_bw < old);
163 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
164
165 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
166}
167
168static inline
169void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
170{
171 u64 old = dl_rq->running_bw;
172
173 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
174 dl_rq->running_bw -= dl_bw;
175 SCHED_WARN_ON(dl_rq->running_bw > old);
176 if (dl_rq->running_bw > old)
177 dl_rq->running_bw = 0;
178
179 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
180}
181
182static inline
183void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
184{
185 u64 old = dl_rq->this_bw;
186
187 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
188 dl_rq->this_bw += dl_bw;
189 SCHED_WARN_ON(dl_rq->this_bw < old);
190}
191
192static inline
193void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
194{
195 u64 old = dl_rq->this_bw;
196
197 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
198 dl_rq->this_bw -= dl_bw;
199 SCHED_WARN_ON(dl_rq->this_bw > old);
200 if (dl_rq->this_bw > old)
201 dl_rq->this_bw = 0;
202 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
203}
204
205static inline
206void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
207{
208 if (!dl_entity_is_special(dl_se))
209 __add_rq_bw(dl_se->dl_bw, dl_rq);
210}
211
212static inline
213void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
214{
215 if (!dl_entity_is_special(dl_se))
216 __sub_rq_bw(dl_se->dl_bw, dl_rq);
217}
218
219static inline
220void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
221{
222 if (!dl_entity_is_special(dl_se))
223 __add_running_bw(dl_se->dl_bw, dl_rq);
224}
225
226static inline
227void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
228{
229 if (!dl_entity_is_special(dl_se))
230 __sub_running_bw(dl_se->dl_bw, dl_rq);
231}
232
/*
 * Switch p's contribution to rq utilization from its old bandwidth to
 * @new_bw. Only called for tasks that are not queued (a queued task's
 * bandwidth is handled by the enqueue/dequeue paths).
 */
static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	struct rq *rq;

	/* SUGOV "special" entities carry no real bandwidth. */
	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);

	if (task_on_rq_queued(p))
		return;

	rq = task_rq(p);
	if (p->dl.dl_non_contending) {
		sub_running_bw(&p->dl, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_not_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
	__add_rq_bw(new_bw, &rq->dl);
}
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
/*
 * A blocking task's utilization cannot be removed from running_bw
 * immediately: per the GRUB rules it stays until the "0-lag time".
 * This function either arms the inactive timer to do the removal at
 * that point, or — when the 0-lag time already passed — removes the
 * bandwidth right away. The dl_non_contending flag serializes against
 * the timer handler and wakeups.
 */
static void task_non_contending(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->inactive_timer;
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	s64 zerolag_time;

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing.
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (dl_entity_is_special(dl_se))
		return;

	WARN_ON(dl_se->dl_non_contending);

	/* 0-lag time: deadline - runtime * (period / runtime). */
	zerolag_time = dl_se->deadline -
		 div64_long((dl_se->runtime * dl_se->dl_period),
			dl_se->dl_runtime);

	/*
	 * Using relative times instead of the absolute "0-lag time"
	 * allows to simplify the code.
	 */
	zerolag_time -= rq_clock(rq);

	/*
	 * If the "0-lag time" already passed, decrease the active
	 * utilization now, instead of starting a timer.
	 */
	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
		if (dl_task(p))
			sub_running_bw(dl_se, dl_rq);
		if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

			if (READ_ONCE(p->__state) == TASK_DEAD)
				sub_rq_bw(&p->dl, &rq->dl);
			raw_spin_lock(&dl_b->lock);
			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
			__dl_clear_params(p);
			raw_spin_unlock(&dl_b->lock);
		}

		return;
	}

	dl_se->dl_non_contending = 1;
	get_task_struct(p);	/* reference dropped by the timer handler */
	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}
369
/*
 * The task is becoming active (contending) again: either cancel a
 * pending inactive timer (its bandwidth was never removed), or add the
 * bandwidth back into running_bw.
 */
static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing.
	 */
	if (dl_se->dl_runtime == 0)
		return;

	/* Migrated in: this rq has not accounted our total bw yet. */
	if (flags & ENQUEUE_MIGRATED)
		add_rq_bw(dl_se, dl_rq);

	if (dl_se->dl_non_contending) {
		dl_se->dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_not_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
			put_task_struct(dl_task_of(dl_se));
	} else {
		/*
		 * Since "dl_non_contending" is not set, the
		 * task's utilization has already been removed from
		 * active utilization (either when the task blocked,
		 * when the "inactive timer" fired).
		 * So, add it back.
		 */
		add_running_bw(dl_se, dl_rq);
	}
}
406
407static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
408{
409 struct sched_dl_entity *dl_se = &p->dl;
410
411 return dl_rq->root.rb_leftmost == &dl_se->rb_node;
412}
413
414static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
415
416void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
417{
418 raw_spin_lock_init(&dl_b->dl_runtime_lock);
419 dl_b->dl_period = period;
420 dl_b->dl_runtime = runtime;
421}
422
423void init_dl_bw(struct dl_bw *dl_b)
424{
425 raw_spin_lock_init(&dl_b->lock);
426 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
427 if (global_rt_runtime() == RUNTIME_INF)
428 dl_b->bw = -1;
429 else
430 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
431 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
432 dl_b->total_bw = 0;
433}
434
/* One-time initialization of a per-CPU deadline runqueue. */
void init_dl_rq(struct dl_rq *dl_rq)
{
	dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif

	dl_rq->running_bw = 0;
	dl_rq->this_bw = 0;
	init_dl_rq_bw_ratio(dl_rq);
}
454
455#ifdef CONFIG_SMP
456
457static inline int dl_overloaded(struct rq *rq)
458{
459 return atomic_read(&rq->rd->dlo_count);
460}
461
/* Mark this rq as deadline-overloaded in its root domain. */
static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}
477
/* Clear this rq's deadline-overload state (reverse order of set). */
static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}
486
487static void update_dl_migration(struct dl_rq *dl_rq)
488{
489 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
490 if (!dl_rq->overloaded) {
491 dl_set_overload(rq_of_dl_rq(dl_rq));
492 dl_rq->overloaded = 1;
493 }
494 } else if (dl_rq->overloaded) {
495 dl_clear_overload(rq_of_dl_rq(dl_rq));
496 dl_rq->overloaded = 0;
497 }
498}
499
500static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
501{
502 struct task_struct *p = dl_task_of(dl_se);
503
504 if (p->nr_cpus_allowed > 1)
505 dl_rq->dl_nr_migratory++;
506
507 update_dl_migration(dl_rq);
508}
509
510static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
511{
512 struct task_struct *p = dl_task_of(dl_se);
513
514 if (p->nr_cpus_allowed > 1)
515 dl_rq->dl_nr_migratory--;
516
517 update_dl_migration(dl_rq);
518}
519
/* Convert a pushable-tasks rb_node back to its task_struct. */
#define __node_2_pdl(node) \
	rb_entry((node), struct task_struct, pushable_dl_tasks)
522
523static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
524{
525 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
526}
527
528
529
530
531
532static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
533{
534 struct rb_node *leftmost;
535
536 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
537
538 leftmost = rb_add_cached(&p->pushable_dl_tasks,
539 &rq->dl.pushable_dl_tasks_root,
540 __pushable_less);
541 if (leftmost)
542 rq->dl.earliest_dl.next = p->dl.deadline;
543}
544
545static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
546{
547 struct dl_rq *dl_rq = &rq->dl;
548 struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
549 struct rb_node *leftmost;
550
551 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
552 return;
553
554 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
555 if (leftmost)
556 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
557
558 RB_CLEAR_NODE(&p->pushable_dl_tasks);
559}
560
561static inline int has_pushable_dl_tasks(struct rq *rq)
562{
563 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
564}
565
566static int push_dl_task(struct rq *rq);
567
568static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
569{
570 return rq->online && dl_task(prev);
571}
572
573static DEFINE_PER_CPU(struct callback_head, dl_push_head);
574static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
575
576static void push_dl_tasks(struct rq *);
577static void pull_dl_task(struct rq *);
578
579static inline void deadline_queue_push_tasks(struct rq *rq)
580{
581 if (!has_pushable_dl_tasks(rq))
582 return;
583
584 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
585}
586
587static inline void deadline_queue_pull_task(struct rq *rq)
588{
589 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
590}
591
592static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
593
/*
 * Migrate a throttled deadline task away from an offline rq, moving its
 * bandwidth accounting (rq-level and root-domain-level) along with it.
 * Called from the bandwidth timer with @rq locked; returns the (locked)
 * destination rq.
 */
static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;
	struct dl_bw *dl_b;

	later_rq = find_lock_later_rq(p, rq);
	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online CPU:
		 */
		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable CPU.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
		/*
		 * Inactive timer is armed (or callback is running, but
		 * waiting for us to release rq locks). In any case, when it
		 * will fire (or continue), it will see running_bw of this
		 * task migrated to later_rq (and correctly handle it).
		 */
		sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);

		add_rq_bw(&p->dl, &later_rq->dl);
		add_running_bw(&p->dl, &later_rq->dl);
	} else {
		sub_rq_bw(&p->dl, &rq->dl);
		add_rq_bw(&p->dl, &later_rq->dl);
	}

	/*
	 * And we finally need to fixup root_domain(s) bandwidth accounting,
	 * since p is still hanging out in the old (now offline) root domain.
	 */
	dl_b = &rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	dl_b = &later_rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	set_task_cpu(p, later_rq->cpu);
	double_unlock_balance(later_rq, rq);

	return later_rq;
}
663
664#else
665
/* !CONFIG_SMP stubs: with one CPU there is no pushing, pulling or
 * migration bookkeeping to do, so these are all empty. */
static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}
702#endif
703
704static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
705static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
706static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
707
708
709
710
711
712
713
714
715
716
717
718
719
/*
 * Give a (non-throttled, non-boosted) entity a brand-new scheduling
 * period: absolute deadline = now + relative deadline, and a full
 * runtime budget.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(is_dl_boosted(dl_se));
	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline.
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
	dl_se->runtime = dl_se->dl_runtime;
}
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
/*
 * Pure Constant Bandwidth Server (CBS) replenishment: postpone the
 * deadline and refill the runtime, possibly over several periods if
 * the entity overran by more than one budget. Uses the donor (pi_of)
 * parameters so PI boosting is honoured.
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_of(dl_se)->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
		dl_se->runtime = pi_of(dl_se)->dl_runtime;
	}

	/* A yielded entity forfeits whatever runtime it had left. */
	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some runtime
	 * available to run the task with. In extreme cases this may need
	 * multiple periods.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_of(dl_se)->dl_period;
		dl_se->runtime += pi_of(dl_se)->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in the future".
	 * If it is not, we are lagging so badly that we are only left
	 * with the option of resetting from "now", losing our reservation
	 * guarantees in the process.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
		dl_se->runtime = pi_of(dl_se)->dl_runtime;
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
/*
 * CBS wakeup admission test: decide whether keeping the current
 * (runtime, deadline) pair would exceed the reserved bandwidth, i.e.
 * whether
 *
 *	runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * holds at time @t. Cross-multiplied to avoid division; both sides are
 * pre-shifted by DL_SCALE so the u64 multiplications cannot overflow
 * (at the cost of some precision, which the comparison tolerates).
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
{
	u64 left, right;

	/*
	 * left  = dl_deadline * runtime       (scaled)
	 * right = (deadline - t) * dl_runtime (scaled)
	 *
	 * Donor (pi_of) parameters are used so PI boosting is honoured.
	 */
	left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_of(dl_se)->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
/*
 * Revised wakeup rule for constrained (deadline < period) tasks waking
 * before their deadline: instead of replenishing, scale the remaining
 * runtime to the remaining time until the deadline (laxity) using the
 * task's density (dl_runtime / dl_deadline, fixed-point BW_SHIFT).
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 laxity = dl_se->deadline - rq_clock(rq);

	/*
	 * If the task has deadline < period, and the deadline is in the
	 * past, it should already be throttled before this check.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
900
901
902
903
904
905
906
907
908
909
910
911
912static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
913{
914 return dl_se->dl_deadline == dl_se->dl_period;
915}
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
/*
 * CBS wakeup rule: keep the current (runtime, deadline) pair unless the
 * deadline is in the past or keeping it would overflow the reserved
 * bandwidth; in that case start a fresh period. Constrained,
 * non-boosted tasks waking before their deadline get the revised
 * (density-scaled) treatment instead.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, rq_clock(rq))) {

		if (unlikely(!dl_is_implicit(dl_se) &&
			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
			     !is_dl_boosted(dl_se))) {
			update_dl_revised_wakeup(dl_se, rq);
			return;
		}

		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
		dl_se->runtime = pi_of(dl_se)->dl_runtime;
	}
}
966
967static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
968{
969 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
970}
971
972
973
974
975
976
977
978
979
980
981
/*
 * Arm the bandwidth-enforcement (dl_timer) to fire at the start of the
 * task's next period. Returns 1 when the timer was (or already is)
 * armed, 0 when the target time is already in the past and the caller
 * should replenish immediately.
 */
static int start_dl_timer(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->dl_timer;
	struct rq *rq = task_rq(p);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_rq_held(rq);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_next_period(dl_se));
	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		get_task_struct(p);	/* reference dropped by the handler */
		hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
	}

	return 1;
}
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
/*
 * Bandwidth enforcement timer callback. If we get here, the task is
 * throttled and off its dl_rq waiting for a replenishment. If the task
 * is still queued we replenish and re-enqueue it; if it was dequeued in
 * the meantime we only replenish, and enqueue_task_dl() will finish the
 * job on the next activation.
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE while the timer was pending.
	 */
	if (!dl_task(p))
		goto unlock;

	/*
	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path, its not throttled.
	 */
	if (is_dl_boosted(dl_se))
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; or we already received
	 * a replenishment from rt_mutex_setprio().
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out, the task is currently
	 * dequeued: just replenish here and let the next enqueue (wakeup)
	 * take the normal ENQUEUE_WAKEUP path, since dl_throttled is now
	 * clear.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se);
		goto unlock;
	}

#ifdef CONFIG_SMP
	if (unlikely(!rq->online)) {
		/*
		 * If the runqueue is no longer available, migrate the
		 * task elsewhere. This necessarily changes rq.
		 */
		lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
		rq = dl_task_offline_migration(rq, p);
		rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
		update_rq_clock(rq);

		/*
		 * Now that the task has been migrated to the new RQ and we
		 * have that locked, proceed as normal and enqueue the task
		 * there.
		 */
	}
#endif

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);

#ifdef CONFIG_SMP
	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's fine to
		 * drop it.
		 */
		rq_unpin_lock(rq, &rf);
		push_dl_task(rq);
		rq_repin_lock(rq, &rf);
	}
#endif

unlock:
	task_rq_unlock(rq, p, &rf);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}
1146
1147void init_dl_task_timer(struct sched_dl_entity *dl_se)
1148{
1149 struct hrtimer *timer = &dl_se->dl_timer;
1150
1151 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1152 timer->function = dl_task_timer;
1153}
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
/*
 * Constrained (deadline < period) tasks activating after their deadline
 * but still inside the current period must not get fresh runtime
 * immediately: throttle them (dropping any leftover runtime) and let
 * the bandwidth timer release them at the next period boundary.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
		/* Boosted tasks, or a timer we failed to arm, run anyway. */
		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
			return;
		dl_se->dl_throttled = 1;
		if (dl_se->runtime > 0)
			dl_se->runtime = 0;
	}
}
1187
1188static
1189int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1190{
1191 return (dl_se->runtime <= 0);
1192}
1193
1194extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
/*
 * This function implements the GRUB accounting rule: the runtime is
 * not decreased as "dq = -dt", but as
 *	"dq = -max{u, (1 - Uinact - Uextra)} dt",
 * where u is the task utilization, Uinact is the (per-runqueue)
 * inactive utilization (this_bw - running_bw) and Uextra is the
 * (per-runqueue) extra reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
 * BW_SHIFT. Similarly, dl_bw is pre-multiplied by rq->dl.bw_ratio
 * (shifted by RATIO_SHIFT) to account for the maximum reclaimable
 * utilization.
 */
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw;
	u64 u_act;
	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

	/*
	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
	 * we compare u_inact + rq->dl.extra_bw with
	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because u_inact +
	 * rq->dl.extra_bw can be larger than 1 (so, 1 - u_inact -
	 * rq->dl.extra_bw would be negative leading to wrong results).
	 */
	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
		u_act = u_act_min;
	else
		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

	return (delta * u_act) >> BW_SHIFT;
}
1236
1237
1238
1239
1240
/*
 * Update the current task's runtime statistics (there is a race with
 * the tick, so be careful): charge the elapsed execution time against
 * the entity's budget (GRUB- or capacity/frequency-scaled), throttle on
 * depletion or yield, and account the time against the RT bandwidth
 * pool (dl tasks share it).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec, scaled_delta_exec;
	int cpu = cpu_of(rq);
	u64 now;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	now = rq_clock_task(rq);
	delta_exec = now - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0)) {
		if (unlikely(dl_se->dl_yielded))
			goto throttle;
		return;
	}

	schedstat_set(curr->stats.exec_max,
		      max(curr->stats.exec_max, delta_exec));

	trace_sched_stat_runtime(curr, delta_exec, 0);

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = now;
	cgroup_account_cputime(curr, delta_exec);

	/* SUGOV "special" entities are not budget-enforced. */
	if (dl_entity_is_special(dl_se))
		return;

	/*
	 * For tasks that participate in GRUB, we implement GRUB-PA: the
	 * spare reclaimed bandwidth is used to clock down the CPU if the
	 * CPU is running at the maximum frequency. For the others, we
	 * still need to scale reservation parameters according to current
	 * frequency and CPU maximum capacity.
	 */
	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
		scaled_delta_exec = grub_reclaim(delta_exec,
						 rq,
						 &curr->dl);
	} else {
		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);

		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
	}

	dl_se->runtime -= scaled_delta_exec;

throttle:
	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
		dl_se->dl_throttled = 1;

		/* If requested, inform the user about runtime overruns. */
		if (dl_runtime_exceeded(dl_se) &&
		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
			dl_se->dl_overrun = 1;

		__dequeue_task_dl(rq, curr, 0);
		/* No timer (boosted or arming failed): replenish now. */
		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}
1345
/*
 * "Inactive timer" handler: fires at the 0-lag time of a blocked task
 * and removes its utilization from the runqueue (and, for dead or
 * de-scheduled tasks, from the root-domain bandwidth too).
 */
static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     inactive_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	sched_clock_tick();
	update_rq_clock(rq);

	if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

		if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
			dl_se->dl_non_contending = 0;
		}

		raw_spin_lock(&dl_b->lock);
		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&dl_b->lock);
		__dl_clear_params(p);

		goto unlock;
	}
	/* A wakeup raced with us and already cancelled the accounting. */
	if (dl_se->dl_non_contending == 0)
		goto unlock;

	sub_running_bw(dl_se, &rq->dl);
	dl_se->dl_non_contending = 0;
unlock:
	task_rq_unlock(rq, p, &rf);
	put_task_struct(p);	/* drop the ref taken when the timer was armed */

	return HRTIMER_NORESTART;
}
1387
1388void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1389{
1390 struct hrtimer *timer = &dl_se->inactive_timer;
1391
1392 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1393 timer->function = inactive_task_timer;
1394}
1395
1396#ifdef CONFIG_SMP
1397
1398static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1399{
1400 struct rq *rq = rq_of_dl_rq(dl_rq);
1401
1402 if (dl_rq->earliest_dl.curr == 0 ||
1403 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1404 if (dl_rq->earliest_dl.curr == 0)
1405 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1406 dl_rq->earliest_dl.curr = deadline;
1407 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1408 }
1409}
1410
/*
 * A deadline left the rq: refresh the cached earliest deadline, or
 * clear the rq's deadline state entirely when no dl tasks remain.
 */
static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_clear(&rq->rd->cpudl, rq->cpu);
		cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
	} else {
		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
	}
}
1433
1434#else
1435
1436static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1437static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1438
1439#endif
1440
1441static inline
1442void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1443{
1444 int prio = dl_task_of(dl_se)->prio;
1445 u64 deadline = dl_se->deadline;
1446
1447 WARN_ON(!dl_prio(prio));
1448 dl_rq->dl_nr_running++;
1449 add_nr_running(rq_of_dl_rq(dl_rq), 1);
1450
1451 inc_dl_deadline(dl_rq, deadline);
1452 inc_dl_migration(dl_se, dl_rq);
1453}
1454
1455static inline
1456void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1457{
1458 int prio = dl_task_of(dl_se)->prio;
1459
1460 WARN_ON(!dl_prio(prio));
1461 WARN_ON(!dl_rq->dl_nr_running);
1462 dl_rq->dl_nr_running--;
1463 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1464
1465 dec_dl_deadline(dl_rq, dl_se->deadline);
1466 dec_dl_migration(dl_se, dl_rq);
1467}
1468
/* Convert a dl-runqueue rb_node back to its scheduling entity. */
#define __node_2_dle(node) \
	rb_entry((node), struct sched_dl_entity, rb_node)
1471
1472static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1473{
1474 return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1475}
1476
1477static inline struct sched_statistics *
1478__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
1479{
1480 return &dl_task_of(dl_se)->stats;
1481}
1482
/* Schedstats: record the start of a wait period for @dl_se. */
static inline void
update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
	if (!schedstat_enabled())
		return;

	__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se),
				  __schedstats_from_dl_se(dl_se));
}
1494
/* Schedstats: record the end of a wait period for @dl_se. */
static inline void
update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
	if (!schedstat_enabled())
		return;

	__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se),
				__schedstats_from_dl_se(dl_se));
}
1506
/* Schedstats: account sleep/block time ending at this enqueue. */
static inline void
update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
	if (!schedstat_enabled())
		return;

	__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se),
				       __schedstats_from_dl_se(dl_se));
}
1518
1519static inline void
1520update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1521 int flags)
1522{
1523 if (!schedstat_enabled())
1524 return;
1525
1526 if (flags & ENQUEUE_WAKEUP)
1527 update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
1528}
1529
1530static inline void
1531update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1532 int flags)
1533{
1534 struct task_struct *p = dl_task_of(dl_se);
1535
1536 if (!schedstat_enabled())
1537 return;
1538
1539 if ((flags & DEQUEUE_SLEEP)) {
1540 unsigned int state;
1541
1542 state = READ_ONCE(p->__state);
1543 if (state & TASK_INTERRUPTIBLE)
1544 __schedstat_set(p->stats.sleep_start,
1545 rq_clock(rq_of_dl_rq(dl_rq)));
1546
1547 if (state & TASK_UNINTERRUPTIBLE)
1548 __schedstat_set(p->stats.block_start,
1549 rq_clock(rq_of_dl_rq(dl_rq)));
1550 }
1551}
1552
1553static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1554{
1555 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1556
1557 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1558
1559 rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
1560
1561 inc_dl_tasks(dl_se, dl_rq);
1562}
1563
1564static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1565{
1566 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1567
1568 if (RB_EMPTY_NODE(&dl_se->rb_node))
1569 return;
1570
1571 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1572
1573 RB_CLEAR_NODE(&dl_se->rb_node);
1574
1575 dec_dl_tasks(dl_se, dl_rq);
1576}
1577
/*
 * Enqueue @dl_se, refreshing its scheduling parameters depending on why
 * it is being enqueued (wakeup, replenishment, or restore).
 */
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (flags & ENQUEUE_WAKEUP) {
		task_contending(dl_se, flags);
		update_dl_entity(dl_se);
	} else if (flags & ENQUEUE_REPLENISH) {
		replenish_dl_entity(dl_se);
	} else if ((flags & ENQUEUE_RESTORE) &&
		  dl_time_before(dl_se->deadline,
				 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
		/* Restored with a stale deadline: start a fresh period. */
		setup_new_dl_entity(dl_se);
	}

	__enqueue_dl_entity(dl_se);
}
1603
/* Remove @dl_se from its runqueue's deadline-ordered tree. */
static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}
1608
/*
 * Main enqueue path for a SCHED_DEADLINE task: handle PI boosting,
 * constrained-task activation throttling and bandwidth accounting
 * before (possibly) inserting the entity into the rq tree.
 */
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	if (is_dl_boosted(&p->dl)) {
		/*
		 * Because of delays in the detection of the overrun of a
		 * thread's runtime, it might be the case that a thread
		 * goes to sleep in a rt mutex with negative runtime. As
		 * a consequence, the thread will be throttled.
		 *
		 * While waiting for the mutex, this thread can also be
		 * boosted via PI, resulting in a thread that is throttled
		 * and boosted at the same time.
		 *
		 * In this case, the boost overrides the throttle.
		 */
		if (p->dl.dl_throttled) {
			/*
			 * The replenish timer needs to be canceled. No
			 * problem if it fires concurrently: boosted threads
			 * are ignored in dl_task_timer().
			 */
			hrtimer_try_to_cancel(&p->dl.dl_timer);
			p->dl.dl_throttled = 0;
		}
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task that is going
		 * to be deboosted, but exceeds its runtime while doing so. No point in
		 * replenishing it, as it's going to return back to its original
		 * scheduling class after this. If it has been throttled, we need to
		 * clear the flag, otherwise the task may wake up as throttled after
		 * being boosted again with no means to replenish the runtime and clear
		 * the throttle.
		 */
		p->dl.dl_throttled = 0;
		BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
		return;
	}

	/*
	 * Check if a constrained deadline task was activated
	 * after the deadline but before the next period.
	 * If that is the case, the task will be throttled and
	 * the replenishment timer will be set to the next period.
	 */
	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
		dl_check_constrained_dl(&p->dl);

	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
		add_rq_bw(&p->dl, &rq->dl);
		add_running_bw(&p->dl, &rq->dl);
	}

	/*
	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on its rq,
	 * the bandwidth timer will replenish it and add it back to the dl_rq.
	 *
	 * The exception is a migrating/restored throttled task: it still has
	 * to contend (task_contending()) so its bandwidth accounting stays
	 * correct across the move.
	 */
	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
		if (flags & ENQUEUE_WAKEUP)
			task_contending(&p->dl, flags);

		return;
	}

	check_schedstat_required();
	update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);

	enqueue_dl_entity(&p->dl, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}
1689
/*
 * Dequeue helper: update dequeue statistics, remove @p's entity from
 * the deadline rbtree and drop it from the pushable tasks tree.
 */
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}
1696
static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	/* Account runtime consumed up to now before dequeueing. */
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);

	/* Mirror image of the add_*_bw calls in enqueue_task_dl(). */
	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
		sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);
	}

	/*
	 * This check allows to start the inactive timer (or to immediately
	 * decrease the active utilization, if needed) in two cases:
	 * when the task blocks and when it is terminating
	 * (p->state == TASK_DEAD). We can handle the two cases in the same
	 * way, because from GRUB's point of view the same thing is happening
	 * (the task moves from "active contending" to "active non contending"
	 * or "inactive").
	 */
	if (flags & DEQUEUE_SLEEP)
		task_non_contending(p);
}
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
/*
 * Yield semantics for -deadline tasks: give up the CPU until the next
 * instance, with a fresh runtime.
 */
static void yield_task_dl(struct rq *rq)
{
	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	rq->curr->dl.dl_yielded = 1;

	update_rq_clock(rq);
	update_curr_dl(rq);
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
	rq_clock_skip_update(rq);
}
1749
1750#ifdef CONFIG_SMP
1751
1752static int find_later_rq(struct task_struct *task);
1753
static int
select_task_rq_dl(struct task_struct *p, int cpu, int flags)
{
	struct task_struct *curr;
	bool select_rq;
	struct rq *rq;

	/* Only regular wakeups get a say in CPU placement. */
	if (!(flags & WF_TTWU))
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	select_rq = unlikely(dl_task(curr)) &&
		    (curr->nr_cpus_allowed < 2 ||
		     !dl_entity_preempt(&p->dl, &curr->dl)) &&
		    p->nr_cpus_allowed > 1;

	/*
	 * Take the capacity of the CPU into account to
	 * ensure it fits the requirement of the task.
	 */
	if (static_branch_unlikely(&sched_asym_cpucapacity))
		select_rq |= !dl_task_fits_capacity(p, cpu);

	if (select_rq) {
		int target = find_later_rq(p);

		/* Move only if the target is idle or runs a later deadline. */
		if (target != -1 &&
		    (dl_time_before(p->dl.deadline,
				    cpu_rq(target)->dl.earliest_dl.curr) ||
		     (cpu_rq(target)->dl.dl_nr_running == 0)))
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}
1804
static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
{
	struct rq *rq;

	if (READ_ONCE(p->__state) != TASK_WAKING)
		return;

	rq = task_rq(p);
	/*
	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
	 * rq->lock is not... So, lock it.
	 */
	raw_spin_rq_lock(rq);
	if (p->dl.dl_non_contending) {
		update_rq_clock(rq);
		sub_running_bw(&p->dl, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_not_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	/* The rq_bw contribution moves with the task to the new rq. */
	sub_rq_bw(&p->dl, &rq->dl);
	raw_spin_rq_unlock(rq);
}
1836
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL))
		return;

	resched_curr(rq);
}
1857
static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
	if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we've
		 * not yet started the picking loop.
		 */
		rq_unpin_lock(rq, rf);
		pull_dl_task(rq);
		rq_repin_lock(rq, rf);
	}

	/* Tell the pick loop whether this class (or stop) has runnable work. */
	return sched_stop_runnable(rq) || sched_dl_runnable(rq);
}
1874#endif
1875
1876
1877
1878
1879
/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
1898
#ifdef CONFIG_SCHED_HRTICK
/*
 * Arm the high-resolution tick to fire when the remaining runtime of
 * @p is exhausted.
 */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif
1909
static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct dl_rq *dl_rq = &rq->dl;

	p->se.exec_start = rq_clock_task(rq);
	if (on_dl_rq(&p->dl))
		update_stats_wait_end_dl(dl_rq, dl_se);

	/* You can't push away the running task. */
	dequeue_pushable_dl_task(rq, p);

	/* The rest only applies when this is a genuine pick (@first). */
	if (!first)
		return;

	if (hrtick_enabled_dl(rq))
		start_hrtick_dl(rq, p);

	if (rq->curr->sched_class != &dl_sched_class)
		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);

	deadline_queue_push_tasks(rq);
}
1933
1934static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1935 struct dl_rq *dl_rq)
1936{
1937 struct rb_node *left = rb_first_cached(&dl_rq->root);
1938
1939 if (!left)
1940 return NULL;
1941
1942 return rb_entry(left, struct sched_dl_entity, rb_node);
1943}
1944
1945static struct task_struct *pick_task_dl(struct rq *rq)
1946{
1947 struct sched_dl_entity *dl_se;
1948 struct dl_rq *dl_rq = &rq->dl;
1949 struct task_struct *p;
1950
1951 if (!sched_dl_runnable(rq))
1952 return NULL;
1953
1954 dl_se = pick_next_dl_entity(rq, dl_rq);
1955 BUG_ON(!dl_se);
1956 p = dl_task_of(dl_se);
1957
1958 return p;
1959}
1960
1961static struct task_struct *pick_next_task_dl(struct rq *rq)
1962{
1963 struct task_struct *p;
1964
1965 p = pick_task_dl(rq);
1966 if (p)
1967 set_next_task_dl(rq, p, true);
1968
1969 return p;
1970}
1971
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct dl_rq *dl_rq = &rq->dl;

	if (on_dl_rq(&p->dl))
		update_stats_wait_start_dl(dl_rq, dl_se);

	/* Charge the runtime consumed while @p was running. */
	update_curr_dl(rq);

	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
	/* Once it stops running, @p becomes pushable again (if migratable). */
	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}
1986
1987
1988
1989
1990
1991
1992
1993
1994
/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
	/*
	 * Even when we have runtime, update_curr_dl() might have resulted in
	 * us not being the leftmost task anymore. In that case NEED_RESCHED
	 * will be set and schedule() will start a new hrtick for the next
	 * task.
	 */
	if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
	    is_leftmost(p, &rq->dl))
		start_hrtick_dl(rq, p);
}
2009
static void task_fork_dl(struct task_struct *p)
{
	/*
	 * Intentionally empty: SCHED_DEADLINE is not inherited across
	 * fork — presumably the child is demoted before it ever reaches
	 * this hook (NOTE(review): handled in the core fork path; confirm
	 * against sched_fork()).
	 */
}
2017
2018#ifdef CONFIG_SMP
2019
2020
2021#define DL_MAX_TRIES 3
2022
2023static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2024{
2025 if (!task_running(rq, p) &&
2026 cpumask_test_cpu(cpu, &p->cpus_mask))
2027 return 1;
2028 return 0;
2029}
2030
2031
2032
2033
2034
2035static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2036{
2037 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
2038 struct task_struct *p = NULL;
2039
2040 if (!has_pushable_dl_tasks(rq))
2041 return NULL;
2042
2043next_node:
2044 if (next_node) {
2045 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
2046
2047 if (pick_dl_task(rq, p, cpu))
2048 return p;
2049
2050 next_node = rb_next(next_node);
2051 goto next_node;
2052 }
2053
2054 return NULL;
2055}
2056
2057static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2058
static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	/* Make sure the mask is initialized first. */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	/*
	 * We have to consider system topology and task affinity
	 * first, then we can look for a suitable CPU.
	 */
	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
		return -1;

	/*
	 * If we are here, some targets have been found, i.e. later_mask
	 * holds CPUs whose current deadlines are later than task's.
	 * Now check how well this matches the task's affinity and the
	 * system topology.
	 *
	 * The last CPU where the task ran is our first guess, since it
	 * is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_any_and_distribute(later_mask,
							      sched_domain_span(sd));
			/*
			 * Last chance: if a CPU being in both later_mask
			 * and current sd span is valid, that becomes our
			 * choice.
			 */
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any_distribute(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}
2145
2146
/* Locks the rq it finds; returns it with both rq locks held, or NULL. */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		/* Give up if the target no longer runs a later deadline. */
		if (later_rq->dl.dl_nr_running &&
		    !dl_time_before(task->dl.deadline,
					later_rq->dl.earliest_dl.curr)) {
			/*
			 * Target rq has tasks of equal or earlier deadline,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			later_rq = NULL;
			break;
		}

		/*
		 * double_lock_balance() may have dropped rq->lock; re-check
		 * that @task is still the same pushable candidate.
		 */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
				     task_running(rq, task) ||
				     !dl_task(task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}
2203
2204static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2205{
2206 struct task_struct *p;
2207
2208 if (!has_pushable_dl_tasks(rq))
2209 return NULL;
2210
2211 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2212 struct task_struct, pushable_dl_tasks);
2213
2214 BUG_ON(rq->cpu != task_cpu(p));
2215 BUG_ON(task_current(rq, p));
2216 BUG_ON(p->nr_cpus_allowed <= 1);
2217
2218 BUG_ON(!task_on_rq_queued(p));
2219 BUG_ON(!dl_task(p));
2220
2221 return p;
2222}
2223
2224
2225
2226
2227
2228
/*
 * See if the non running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;
	int ret = 0;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (is_migration_disabled(next_task))
		return 0;

	if (WARN_ON(next_task == rq->curr))
		return 0;

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock below; pin the task. */
	get_task_struct(next_task);

	/* Will lock the rq it'll find. */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other CPU will pull it when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks. */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);

	/*
	 * Update the later_rq clock here, because the clock is used
	 * by the cpufreq_update_util() inside __add_running_bw().
	 */
	update_rq_clock(later_rq);
	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
	ret = 1;

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return ret;
}
2312
static void push_dl_tasks(struct rq *rq)
{
	/* push_dl_task() will return 1 if it moved a -deadline task. */
	while (push_dl_task(rq))
		;
}
2319
static void pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	struct task_struct *p, *push_task;
	bool resched = false;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return;

	/*
	 * Match the barrier from dl_set_overloaded; this guarantees that if
	 * we see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock. */
		push_task = NULL;
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			if (is_migration_disabled(p)) {
				/* Defer to the stopper to push it out. */
				push_task = get_push_task(src_rq);
			} else {
				deactivate_task(src_rq, p, 0);
				set_task_cpu(p, this_cpu);
				activate_task(this_rq, p, 0);
				dmin = p->dl.deadline;
				resched = true;
			}

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);

		if (push_task) {
			raw_spin_rq_unlock(this_rq);
			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
					    push_task, &src_rq->push_work);
			raw_spin_rq_lock(this_rq);
		}
	}

	if (resched)
		resched_curr(this_rq);
}
2411
2412
2413
2414
2415
/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
		push_dl_tasks(rq);
	}
}
2427
static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask,
				u32 flags)
{
	struct root_domain *src_rd;
	struct rq *rq;

	BUG_ON(!dl_task(p));

	rq = task_rq(p);
	src_rd = rq->rd;
	/*
	 * Migrating a SCHED_DEADLINE task between exclusive
	 * cpusets (different root_domains) entails a bandwidth
	 * update. We already made space for us in the destination
	 * domain (see dl_task_can_attach()).
	 */
	if (!cpumask_intersects(src_rd->span, new_mask)) {
		struct dl_bw *src_dl_b;

		src_dl_b = dl_bw_of(cpu_of(rq));
		/*
		 * We now free resources of the root_domain we are migrating
		 * off. In the worst case, sched_setattr() may temporarily
		 * fail until we complete the update.
		 */
		raw_spin_lock(&src_dl_b->lock);
		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&src_dl_b->lock);
	}

	set_cpus_allowed_common(p, new_mask, flags);
}
2461
2462
/* Assumes rq->lock is held. */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	/* Re-publish this CPU's state in the cpudl heap. */
	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
}
2472
2473
/* Assumes rq->lock is held. */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	/* Withdraw this CPU from the cpudl heap. */
	cpudl_clear(&rq->rd->cpudl, rq->cpu);
	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}
2482
void __init init_sched_dl_class(void)
{
	unsigned int i;

	/* Per-CPU scratch mask used by find_later_rq(). */
	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}
2491
/*
 * If @p is a -deadline task, add its bandwidth to the dl_bw accounting
 * of the root domain of its runqueue (NOTE(review): presumably called
 * when root domains are rebuilt — confirm against the callers).
 */
void dl_add_task_root_domain(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;
	struct dl_bw *dl_b;

	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
	if (!dl_task(p)) {
		raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
		return;
	}

	rq = __task_rq_lock(p, &rf);

	dl_b = &rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);

	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));

	raw_spin_unlock(&dl_b->lock);

	task_rq_unlock(rq, p, &rf);
}
2515
/* Reset the total allocated -deadline bandwidth of @rd to zero. */
void dl_clear_root_domain(struct root_domain *rd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
	rd->dl_bw.total_bw = 0;
	raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
}
2524
2525#endif
2526
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * task_non_contending() can start the "inactive timer" (if the 0-lag
	 * time is in the future). If the task switches back to dl before
	 * the "inactive timer" fires, it can continue to consume its current
	 * runtime using its current deadline. If it stays outside of
	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
	 * will reset the task parameters.
	 */
	if (task_on_rq_queued(p) && p->dl.dl_runtime)
		task_non_contending(p);

	if (!task_on_rq_queued(p)) {
		/*
		 * Inactive timer is armed. However, p is leaving DEADLINE and
		 * might migrate away from this rq while continuing to run on
		 * some other class. We need to remove its contribution from
		 * this rq's running_bw now, or sub_rq_bw (below) will complain.
		 */
		if (p->dl.dl_non_contending)
			sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);
	}

	/*
	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
	 * at the 0-lag time, because the task could have been migrated
	 * while SCHED_OTHER in the meanwhile.
	 */
	if (p->dl.dl_non_contending)
		p->dl.dl_non_contending = 0;

	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded CPU, if any.
	 */
	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
		return;

	deadline_queue_pull_task(rq);
}
2570
2571
2572
2573
2574
/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	/* Stop the inactive timer armed while p was leaving -deadline. */
	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
		put_task_struct(p);

	/* If p is not queued we will update its parameters at next wakeup. */
	if (!task_on_rq_queued(p)) {
		add_rq_bw(&p->dl, &rq->dl);

		return;
	}

	if (rq->curr != p) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
			deadline_queue_push_tasks(rq);
#endif
		if (dl_task(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_curr(rq);
	} else {
		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
	}
}
2600
2601
2602
2603
2604
/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (task_on_rq_queued(p) || task_current(rq, p)) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			deadline_queue_pull_task(rq);

		/*
		 * If we now have a earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has a earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_curr(rq);
#endif
	}
}
2636
/* SCHED_DEADLINE scheduling-class method table. */
DEFINE_SCHED_CLASS(dl) = {

	.enqueue_task = enqueue_task_dl,
	.dequeue_task = dequeue_task_dl,
	.yield_task = yield_task_dl,

	.check_preempt_curr = check_preempt_curr_dl,

	.pick_next_task = pick_next_task_dl,
	.put_prev_task = put_prev_task_dl,
	.set_next_task = set_next_task_dl,

#ifdef CONFIG_SMP
	.balance = balance_dl,
	.pick_task = pick_task_dl,
	.select_task_rq = select_task_rq_dl,
	.migrate_task_rq = migrate_task_rq_dl,
	.set_cpus_allowed = set_cpus_allowed_dl,
	.rq_online = rq_online_dl,
	.rq_offline = rq_offline_dl,
	.task_woken = task_woken_dl,
	.find_lock_rq = find_lock_later_rq,
#endif

	.task_tick = task_tick_dl,
	.task_fork = task_fork_dl,

	.prio_changed = prio_changed_dl,
	.switched_from = switched_from_dl,
	.switched_to = switched_to_dl,

	.update_curr = update_curr_dl,
};
2670
2671
/* Bumped on every global bandwidth change; used with dl_bw_visited(). */
static u64 dl_generation;
2673
int sched_dl_global_validate(void)
{
	u64 runtime = global_rt_runtime();
	u64 period = global_rt_period();
	u64 new_bw = to_ratio(period, runtime);
	u64 gen = ++dl_generation;
	struct dl_bw *dl_b;
	int cpu, cpus, ret = 0;
	unsigned long flags;

	/*
	 * Here we want to check the bandwidth not being set to some
	 * value smaller than the currently allocated bandwidth in
	 * any of the root_domains. Each root domain is visited only
	 * once per generation (see dl_bw_visited()).
	 */
	for_each_possible_cpu(cpu) {
		rcu_read_lock_sched();

		if (dl_bw_visited(cpu, gen))
			goto next;

		dl_b = dl_bw_of(cpu);
		cpus = dl_bw_cpus(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		if (new_bw * cpus < dl_b->total_bw)
			ret = -EBUSY;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

next:
		rcu_read_unlock_sched();

		if (ret)
			break;
	}

	return ret;
}
2712
/*
 * Cache the fixed-point ratios derived from the global RT bandwidth
 * (bw_ratio in RATIO_SHIFT, extra_bw in BW_SHIFT fixed point); with
 * RUNTIME_INF both default to 1.0 in their respective representations.
 */
static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
{
	if (global_rt_runtime() == RUNTIME_INF) {
		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
		dl_rq->extra_bw = 1 << BW_SHIFT;
	} else {
		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
		dl_rq->extra_bw = to_ratio(global_rt_period(),
						    global_rt_runtime());
	}
}
2725
void sched_dl_do_global(void)
{
	u64 new_bw = -1;
	u64 gen = ++dl_generation;
	struct dl_bw *dl_b;
	int cpu;
	unsigned long flags;

	def_dl_bandwidth.dl_period = global_rt_period();
	def_dl_bandwidth.dl_runtime = global_rt_runtime();

	/* -1 (i.e. all ones) means "no limit". */
	if (global_rt_runtime() != RUNTIME_INF)
		new_bw = to_ratio(global_rt_period(), global_rt_runtime());

	/* Propagate the new bandwidth, visiting each root domain once. */
	for_each_possible_cpu(cpu) {
		rcu_read_lock_sched();

		if (dl_bw_visited(cpu, gen)) {
			rcu_read_unlock_sched();
			continue;
		}

		dl_b = dl_bw_of(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		dl_b->bw = new_bw;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

		rcu_read_unlock_sched();
		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
	}
}
2758
2759
2760
2761
2762
2763
2764
2765
2766
/*
 * We must be sure that accepting a new task (or allowing changing the
 * parameters of an existing one) is consistent with the bandwidth
 * constraints. If yes, this function also accordingly updates the currently
 * allocated bandwidth to reflect the new situation.
 *
 * This function is called while holding p's rq->lock.
 * Returns 0 on success, -1 when the change would overflow the bandwidth.
 */
int sched_dl_overflow(struct task_struct *p, int policy,
		      const struct sched_attr *attr)
{
	u64 period = attr->sched_period ?: attr->sched_deadline;
	u64 runtime = attr->sched_runtime;
	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
	int cpus, err = -1, cpu = task_cpu(p);
	struct dl_bw *dl_b = dl_bw_of(cpu);
	unsigned long cap;

	/* Special SUGOV tasks are exempt from admission control. */
	if (attr->sched_flags & SCHED_FLAG_SUGOV)
		return 0;

	/* Unchanged parameters of an existing -deadline task: nothing to do. */
	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
		return 0;

	/*
	 * Either if a task, enters, leaves, or stays -deadline but changes
	 * its parameters, we may need to update accordingly the total
	 * allocated bandwidth of the container.
	 */
	raw_spin_lock(&dl_b->lock);
	cpus = dl_bw_cpus(cpu);
	cap = dl_bw_capacity(cpu);

	if (dl_policy(policy) && !task_has_dl_policy(p) &&
	    !__dl_overflow(dl_b, cap, 0, new_bw)) {
		/* Entering -deadline: reserve the new bandwidth. */
		if (hrtimer_active(&p->dl.inactive_timer))
			__dl_sub(dl_b, p->dl.dl_bw, cpus);
		__dl_add(dl_b, new_bw, cpus);
		err = 0;
	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
		   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
		/*
		 * XXX this is slightly incorrect: when the task
		 * utilization decreases, we should delay the total
		 * utilization change until the task's 0-lag point.
		 * But this would require to set the task's "inactive
		 * timer" when the task is not inactive.
		 */
		__dl_sub(dl_b, p->dl.dl_bw, cpus);
		__dl_add(dl_b, new_bw, cpus);
		dl_change_utilization(p, new_bw);
		err = 0;
	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
		/*
		 * Do not decrease the total deadline utilization here,
		 * switched_from_dl() will take care to do it at the correct
		 * (0-lag) time.
		 */
		err = 0;
	}
	raw_spin_unlock(&dl_b->lock);

	return err;
}
2824
2825
2826
2827
2828
2829
2830
2831
2832
/*
 * This function initializes the sched_dl_entity of a newly becoming
 * SCHED_DEADLINE task.
 *
 * Only the static values are considered here, the actual runtime and the
 * absolute deadline will be properly calculated when the task is enqueued
 * for the first time with its new policy.
 */
void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime = attr->sched_runtime;
	dl_se->dl_deadline = attr->sched_deadline;
	/* An omitted (zero) period defaults to the relative deadline. */
	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
	dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
}
2844
/*
 * Inverse of __setparam_dl(): fill @attr with the static deadline
 * parameters of @p's scheduling entity.
 */
void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	attr->sched_priority = p->rt_priority;
	attr->sched_runtime = dl_se->dl_runtime;
	attr->sched_deadline = dl_se->dl_deadline;
	attr->sched_period = dl_se->dl_period;
	/* Replace only the DL-owned flag bits, preserve the others. */
	attr->sched_flags &= ~SCHED_DL_FLAGS;
	attr->sched_flags |= dl_se->flags;
}
2856
2857
2858
2859
2860
2861
/*
 * Admissible DL period range, in microseconds (scaled by NSEC_PER_USEC
 * in __checkparam_dl()): the upper bound caps effective runtimes, the
 * lower bound guards against timer DoS from tiny periods.
 */
unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875bool __checkparam_dl(const struct sched_attr *attr)
2876{
2877 u64 period, max, min;
2878
2879
2880 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2881 return true;
2882
2883
2884 if (attr->sched_deadline == 0)
2885 return false;
2886
2887
2888
2889
2890
2891 if (attr->sched_runtime < (1ULL << DL_SCALE))
2892 return false;
2893
2894
2895
2896
2897
2898 if (attr->sched_deadline & (1ULL << 63) ||
2899 attr->sched_period & (1ULL << 63))
2900 return false;
2901
2902 period = attr->sched_period;
2903 if (!period)
2904 period = attr->sched_deadline;
2905
2906
2907 if (period < attr->sched_deadline ||
2908 attr->sched_deadline < attr->sched_runtime)
2909 return false;
2910
2911 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
2912 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
2913
2914 if (period < min || period > max)
2915 return false;
2916
2917 return true;
2918}
2919
2920
2921
2922
/*
 * This function clears the sched_dl_entity static params and state flags.
 */
void __dl_clear_params(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;

	/* Static parameters. */
	dl_se->dl_runtime = 0;
	dl_se->dl_deadline = 0;
	dl_se->dl_period = 0;
	dl_se->flags = 0;
	dl_se->dl_bw = 0;
	dl_se->dl_density = 0;

	/* Dynamic state flags. */
	dl_se->dl_throttled = 0;
	dl_se->dl_yielded = 0;
	dl_se->dl_non_contending = 0;
	dl_se->dl_overrun = 0;

#ifdef CONFIG_RT_MUTEXES
	/* Not boosted: the entity is its own PI donor (see pi_of()). */
	dl_se->pi_se = dl_se;
#endif
}
2943
2944bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2945{
2946 struct sched_dl_entity *dl_se = &p->dl;
2947
2948 if (dl_se->dl_runtime != attr->sched_runtime ||
2949 dl_se->dl_deadline != attr->sched_deadline ||
2950 dl_se->dl_period != attr->sched_period ||
2951 dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
2952 return true;
2953
2954 return false;
2955}
2956
2957#ifdef CONFIG_SMP
/*
 * Check whether moving -deadline task @p into a cpuset restricted to
 * @cs_cpus_allowed fits the destination root domain's bandwidth;
 * reserves the bandwidth there on success. Returns 0 or -EBUSY.
 */
int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
{
	unsigned long flags, cap;
	unsigned int dest_cpu;
	struct dl_bw *dl_b;
	bool overflow;
	int ret;

	dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);

	rcu_read_lock_sched();
	dl_b = dl_bw_of(dest_cpu);
	raw_spin_lock_irqsave(&dl_b->lock, flags);
	cap = dl_bw_capacity(dest_cpu);
	overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
	if (overflow) {
		ret = -EBUSY;
	} else {
		/*
		 * We reserve space for this task in the destination
		 * root_domain, as we can't fail after this point.
		 * We will free resources in the source root_domain
		 * later on (see set_cpus_allowed_dl()).
		 */
		int cpus = dl_bw_cpus(dest_cpu);

		__dl_add(dl_b, p->dl.dl_bw, cpus);
		ret = 0;
	}
	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	rcu_read_unlock_sched();

	return ret;
}
2992
/*
 * Return 1 if shrinking a cpuset from @cur to @trial CPUs still leaves
 * enough bandwidth for the currently allocated -deadline tasks, 0 otherwise.
 */
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
				 const struct cpumask *trial)
{
	int ret = 1, trial_cpus;
	struct dl_bw *cur_dl_b;
	unsigned long flags;

	rcu_read_lock_sched();
	cur_dl_b = dl_bw_of(cpumask_any(cur));
	trial_cpus = cpumask_weight(trial);

	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
	/* bw == -1 means "no limit" (see sched_dl_do_global()). */
	if (cur_dl_b->bw != -1 &&
	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
		ret = 0;
	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
	rcu_read_unlock_sched();

	return ret;
}
3013
/*
 * Return true if the root domain of @cpu already overflows its bandwidth
 * capacity (checked with a zero-sized request). NOTE(review): presumably
 * used by CPU hotplug to veto taking a CPU down — confirm at the callers.
 */
bool dl_cpu_busy(unsigned int cpu)
{
	unsigned long flags, cap;
	struct dl_bw *dl_b;
	bool overflow;

	rcu_read_lock_sched();
	dl_b = dl_bw_of(cpu);
	raw_spin_lock_irqsave(&dl_b->lock, flags);
	cap = dl_bw_capacity(cpu);
	overflow = __dl_overflow(dl_b, cap, 0, 0);
	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	rcu_read_unlock_sched();

	return overflow;
}
3030#endif
3031
#ifdef CONFIG_SCHED_DEBUG
/* Dump the dl_rq statistics of @cpu into the seq_file @m. */
void print_dl_stats(struct seq_file *m, int cpu)
{
	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif
3038