/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR policies)
 */

#include "sched.h"

#include <linux/slab.h>

int sched_rr_timeslice = RR_TIMESLICE;
11
12static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
13
14struct rt_bandwidth def_rt_bandwidth;
15
16static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
17{
18 struct rt_bandwidth *rt_b =
19 container_of(timer, struct rt_bandwidth, rt_period_timer);
20 ktime_t now;
21 int overrun;
22 int idle = 0;
23
24 for (;;) {
25 now = hrtimer_cb_get_time(timer);
26 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
27
28 if (!overrun)
29 break;
30
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 }
33
34 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
35}
36
37void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
38{
39 rt_b->rt_period = ns_to_ktime(period);
40 rt_b->rt_runtime = runtime;
41
42 raw_spin_lock_init(&rt_b->rt_runtime_lock);
43
44 hrtimer_init(&rt_b->rt_period_timer,
45 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
46 rt_b->rt_period_timer.function = sched_rt_period_timer;
47}
48
49static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
50{
51 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
52 return;
53
54 if (hrtimer_active(&rt_b->rt_period_timer))
55 return;
56
57 raw_spin_lock(&rt_b->rt_runtime_lock);
58 start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
59 raw_spin_unlock(&rt_b->rt_runtime_lock);
60}
61
62void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
63{
64 struct rt_prio_array *array;
65 int i;
66
67 array = &rt_rq->active;
68 for (i = 0; i < MAX_RT_PRIO; i++) {
69 INIT_LIST_HEAD(array->queue + i);
70 __clear_bit(i, array->bitmap);
71 }
72
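	/* Delimiter for the bitsearch: this bit is always set. */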
73 __set_bit(MAX_RT_PRIO, array->bitmap);
74
75#if defined CONFIG_SMP
76 rt_rq->highest_prio.curr = MAX_RT_PRIO;
77 rt_rq->highest_prio.next = MAX_RT_PRIO;
78 rt_rq->rt_nr_migratory = 0;
79 rt_rq->overloaded = 0;
80 plist_head_init(&rt_rq->pushable_tasks);
81#endif
82
83 rt_rq->rt_queued = 0;
84
85 rt_rq->rt_time = 0;
86 rt_rq->rt_throttled = 0;
87 rt_rq->rt_runtime = 0;
88 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
89}
90
91#ifdef CONFIG_RT_GROUP_SCHED
92static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
93{
94 hrtimer_cancel(&rt_b->rt_period_timer);
95}
96
97#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
98
99static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
100{
101#ifdef CONFIG_SCHED_DEBUG
102 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
103#endif
104 return container_of(rt_se, struct task_struct, rt);
105}
106
107static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
108{
109 return rt_rq->rq;
110}
111
112static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
113{
114 return rt_se->rt_rq;
115}
116
117static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
118{
119 struct rt_rq *rt_rq = rt_se->rt_rq;
120
121 return rt_rq->rq;
122}
123
124void free_rt_sched_group(struct task_group *tg)
125{
126 int i;
127
128 if (tg->rt_se)
129 destroy_rt_bandwidth(&tg->rt_bandwidth);
130
131 for_each_possible_cpu(i) {
132 if (tg->rt_rq)
133 kfree(tg->rt_rq[i]);
134 if (tg->rt_se)
135 kfree(tg->rt_se[i]);
136 }
137
138 kfree(tg->rt_rq);
139 kfree(tg->rt_se);
140}
141
142void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
143 struct sched_rt_entity *rt_se, int cpu,
144 struct sched_rt_entity *parent)
145{
146 struct rq *rq = cpu_rq(cpu);
147
148 rt_rq->highest_prio.curr = MAX_RT_PRIO;
149 rt_rq->rt_nr_boosted = 0;
150 rt_rq->rq = rq;
151 rt_rq->tg = tg;
152
153 tg->rt_rq[cpu] = rt_rq;
154 tg->rt_se[cpu] = rt_se;
155
156 if (!rt_se)
157 return;
158
159 if (!parent)
160 rt_se->rt_rq = &rq->rt;
161 else
162 rt_se->rt_rq = parent->my_q;
163
164 rt_se->my_q = rt_rq;
165 rt_se->parent = parent;
166 INIT_LIST_HEAD(&rt_se->run_list);
167}
168
169int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
170{
171 struct rt_rq *rt_rq;
172 struct sched_rt_entity *rt_se;
173 int i;
174
175 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
176 if (!tg->rt_rq)
177 goto err;
178 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
179 if (!tg->rt_se)
180 goto err;
181
182 init_rt_bandwidth(&tg->rt_bandwidth,
183 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
184
185 for_each_possible_cpu(i) {
186 rt_rq = kzalloc_node(sizeof(struct rt_rq),
187 GFP_KERNEL, cpu_to_node(i));
188 if (!rt_rq)
189 goto err;
190
191 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
192 GFP_KERNEL, cpu_to_node(i));
193 if (!rt_se)
194 goto err_free_rq;
195
196 init_rt_rq(rt_rq, cpu_rq(i));
197 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
198 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
199 }
200
201 return 1;
202
203err_free_rq:
204 kfree(rt_rq);
205err:
206 return 0;
207}
208
209#else
210
211#define rt_entity_is_task(rt_se) (1)
212
213static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
214{
215 return container_of(rt_se, struct task_struct, rt);
216}
217
218static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
219{
220 return container_of(rt_rq, struct rq, rt);
221}
222
223static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
224{
225 struct task_struct *p = rt_task_of(rt_se);
226
227 return task_rq(p);
228}
229
230static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
231{
232 struct rq *rq = rq_of_rt_se(rt_se);
233
234 return &rq->rt;
235}
236
237void free_rt_sched_group(struct task_group *tg) { }
238
239int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
240{
241 return 1;
242}
243#endif
244
245#ifdef CONFIG_SMP
246
247static int pull_rt_task(struct rq *this_rq);
248
249static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
250{
251
252 return rq->rt.highest_prio.curr > prev->prio;
253}
254
255static inline int rt_overloaded(struct rq *rq)
256{
257 return atomic_read(&rq->rd->rto_count);
258}
259
260static inline void rt_set_overload(struct rq *rq)
261{
262 if (!rq->online)
263 return;
264
265 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
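
	/*
	 * Make sure the rto_mask update is visible before we bump rto_count;
	 * this pairs with the smp_rmb() in pull_rt_task().
	 */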
275 smp_wmb();
276 atomic_inc(&rq->rd->rto_count);
277}
278
279static inline void rt_clear_overload(struct rq *rq)
280{
281 if (!rq->online)
282 return;
283
284
285 atomic_dec(&rq->rd->rto_count);
286 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
287}
288
289static void update_rt_migration(struct rt_rq *rt_rq)
290{
291 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
292 if (!rt_rq->overloaded) {
293 rt_set_overload(rq_of_rt_rq(rt_rq));
294 rt_rq->overloaded = 1;
295 }
296 } else if (rt_rq->overloaded) {
297 rt_clear_overload(rq_of_rt_rq(rt_rq));
298 rt_rq->overloaded = 0;
299 }
300}
301
302static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
303{
304 struct task_struct *p;
305
306 if (!rt_entity_is_task(rt_se))
307 return;
308
309 p = rt_task_of(rt_se);
310 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
311
312 rt_rq->rt_nr_total++;
313 if (p->nr_cpus_allowed > 1)
314 rt_rq->rt_nr_migratory++;
315
316 update_rt_migration(rt_rq);
317}
318
319static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
320{
321 struct task_struct *p;
322
323 if (!rt_entity_is_task(rt_se))
324 return;
325
326 p = rt_task_of(rt_se);
327 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
328
329 rt_rq->rt_nr_total--;
330 if (p->nr_cpus_allowed > 1)
331 rt_rq->rt_nr_migratory--;
332
333 update_rt_migration(rt_rq);
334}
335
336static inline int has_pushable_tasks(struct rq *rq)
337{
338 return !plist_head_empty(&rq->rt.pushable_tasks);
339}
340
341static inline void set_post_schedule(struct rq *rq)
342{
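	/*
	 * Record whether there is anything to push now, so we can avoid
	 * retaking the rq lock after the context switch if there is not.
	 */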
347 rq->post_schedule = has_pushable_tasks(rq);
348}
349
350static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
351{
352 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
353 plist_node_init(&p->pushable_tasks, p->prio);
354 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
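
	/* Update the highest-prio pushable task, if necessary. */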
357 if (p->prio < rq->rt.highest_prio.next)
358 rq->rt.highest_prio.next = p->prio;
359}
360
361static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
362{
363 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
364
365
366 if (has_pushable_tasks(rq)) {
367 p = plist_first_entry(&rq->rt.pushable_tasks,
368 struct task_struct, pushable_tasks);
369 rq->rt.highest_prio.next = p->prio;
370 } else
371 rq->rt.highest_prio.next = MAX_RT_PRIO;
372}
373
374#else
375
376static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
377{
378}
379
380static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
381{
382}
383
384static inline
385void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
386{
387}
388
389static inline
390void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
391{
392}
393
394static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
395{
396 return false;
397}
398
399static inline int pull_rt_task(struct rq *this_rq)
400{
401 return 0;
402}
403
404static inline void set_post_schedule(struct rq *rq)
405{
406}
407#endif
408
409static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
410static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
411
412static inline int on_rt_rq(struct sched_rt_entity *rt_se)
413{
414 return !list_empty(&rt_se->run_list);
415}
416
417#ifdef CONFIG_RT_GROUP_SCHED
418
419static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
420{
421 if (!rt_rq->tg)
422 return RUNTIME_INF;
423
424 return rt_rq->rt_runtime;
425}
426
427static inline u64 sched_rt_period(struct rt_rq *rt_rq)
428{
429 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
430}
431
432typedef struct task_group *rt_rq_iter_t;
433
434static inline struct task_group *next_task_group(struct task_group *tg)
435{
436 do {
437 tg = list_entry_rcu(tg->list.next,
438 typeof(struct task_group), list);
439 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
440
441 if (&tg->list == &task_groups)
442 tg = NULL;
443
444 return tg;
445}
446
447#define for_each_rt_rq(rt_rq, iter, rq) \
448 for (iter = container_of(&task_groups, typeof(*iter), list); \
449 (iter = next_task_group(iter)) && \
450 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
451
452#define for_each_sched_rt_entity(rt_se) \
453 for (; rt_se; rt_se = rt_se->parent)
454
455static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
456{
457 return rt_se->my_q;
458}
459
460static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
461static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
462
463static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
464{
465 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
466 struct rq *rq = rq_of_rt_rq(rt_rq);
467 struct sched_rt_entity *rt_se;
468
469 int cpu = cpu_of(rq);
470
471 rt_se = rt_rq->tg->rt_se[cpu];
472
473 if (rt_rq->rt_nr_running) {
474 if (!rt_se)
475 enqueue_top_rt_rq(rt_rq);
476 else if (!on_rt_rq(rt_se))
477 enqueue_rt_entity(rt_se, false);
478
479 if (rt_rq->highest_prio.curr < curr->prio)
480 resched_curr(rq);
481 }
482}
483
484static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
485{
486 struct sched_rt_entity *rt_se;
487 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
488
489 rt_se = rt_rq->tg->rt_se[cpu];
490
491 if (!rt_se)
492 dequeue_top_rt_rq(rt_rq);
493 else if (on_rt_rq(rt_se))
494 dequeue_rt_entity(rt_se);
495}
496
497static inline int rt_rq_throttled(struct rt_rq *rt_rq)
498{
499 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
500}
501
502static int rt_se_boosted(struct sched_rt_entity *rt_se)
503{
504 struct rt_rq *rt_rq = group_rt_rq(rt_se);
505 struct task_struct *p;
506
507 if (rt_rq)
508 return !!rt_rq->rt_nr_boosted;
509
510 p = rt_task_of(rt_se);
511 return p->prio != p->normal_prio;
512}
513
514#ifdef CONFIG_SMP
515static inline const struct cpumask *sched_rt_period_mask(void)
516{
517 return this_rq()->rd->span;
518}
519#else
520static inline const struct cpumask *sched_rt_period_mask(void)
521{
522 return cpu_online_mask;
523}
524#endif
525
526static inline
527struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
528{
529 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
530}
531
532static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
533{
534 return &rt_rq->tg->rt_bandwidth;
535}
536
537#else
538
539static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
540{
541 return rt_rq->rt_runtime;
542}
543
544static inline u64 sched_rt_period(struct rt_rq *rt_rq)
545{
546 return ktime_to_ns(def_rt_bandwidth.rt_period);
547}
548
549typedef struct rt_rq *rt_rq_iter_t;
550
551#define for_each_rt_rq(rt_rq, iter, rq) \
552 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
553
554#define for_each_sched_rt_entity(rt_se) \
555 for (; rt_se; rt_se = NULL)
556
557static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
558{
559 return NULL;
560}
561
562static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
563{
564 struct rq *rq = rq_of_rt_rq(rt_rq);
565
566 if (!rt_rq->rt_nr_running)
567 return;
568
569 enqueue_top_rt_rq(rt_rq);
570 resched_curr(rq);
571}
572
573static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
574{
575 dequeue_top_rt_rq(rt_rq);
576}
577
578static inline int rt_rq_throttled(struct rt_rq *rt_rq)
579{
580 return rt_rq->rt_throttled;
581}
582
583static inline const struct cpumask *sched_rt_period_mask(void)
584{
585 return cpu_online_mask;
586}
587
588static inline
589struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
590{
591 return &cpu_rq(cpu)->rt;
592}
593
594static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
595{
596 return &def_rt_bandwidth;
597}
598
599#endif
600
601bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
602{
603 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
604
605 return (hrtimer_active(&rt_b->rt_period_timer) ||
606 rt_rq->rt_time < rt_b->rt_runtime);
607}
608
609#ifdef CONFIG_SMP
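
/*
 * We ran out of runtime; see if we can borrow some from our neighbours.
 */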
613static int do_balance_runtime(struct rt_rq *rt_rq)
614{
615 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
616 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
617 int i, weight, more = 0;
618 u64 rt_period;
619
620 weight = cpumask_weight(rd->span);
621
622 raw_spin_lock(&rt_b->rt_runtime_lock);
623 rt_period = ktime_to_ns(rt_b->rt_period);
624 for_each_cpu(i, rd->span) {
625 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
626 s64 diff;
627
628 if (iter == rt_rq)
629 continue;
630
631 raw_spin_lock(&iter->rt_runtime_lock);
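		/*
		 * Either all rqs have infinite runtime and there is nothing
		 * to steal, or __disable_runtime() set this one to
		 * RUNTIME_INF to disallow stealing from it.
		 */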
637 if (iter->rt_runtime == RUNTIME_INF)
638 goto next;
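
		/*
		 * From runqueues with spare time, take 1/n of it, but
		 * never more than our own period.
		 */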
644 diff = iter->rt_runtime - iter->rt_time;
645 if (diff > 0) {
646 diff = div_u64((u64)diff, weight);
647 if (rt_rq->rt_runtime + diff > rt_period)
648 diff = rt_period - rt_rq->rt_runtime;
649 iter->rt_runtime -= diff;
650 rt_rq->rt_runtime += diff;
651 more = 1;
652 if (rt_rq->rt_runtime == rt_period) {
653 raw_spin_unlock(&iter->rt_runtime_lock);
654 break;
655 }
656 }
657next:
658 raw_spin_unlock(&iter->rt_runtime_lock);
659 }
660 raw_spin_unlock(&rt_b->rt_runtime_lock);
661
662 return more;
663}
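
/*
 * Ensure this rq takes back all the runtime it lent to its neighbours.
 */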
668static void __disable_runtime(struct rq *rq)
669{
670 struct root_domain *rd = rq->rd;
671 rt_rq_iter_t iter;
672 struct rt_rq *rt_rq;
673
674 if (unlikely(!scheduler_running))
675 return;
676
677 for_each_rt_rq(rt_rq, iter, rq) {
678 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
679 s64 want;
680 int i;
681
682 raw_spin_lock(&rt_b->rt_runtime_lock);
683 raw_spin_lock(&rt_rq->rt_runtime_lock);
684
685
686
687
688
689 if (rt_rq->rt_runtime == RUNTIME_INF ||
690 rt_rq->rt_runtime == rt_b->rt_runtime)
691 goto balanced;
692 raw_spin_unlock(&rt_rq->rt_runtime_lock);
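
		/*
		 * The difference between what we started out with and what
		 * we currently have is the amount of runtime we lent out
		 * and now have to reclaim.
		 */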
699 want = rt_b->rt_runtime - rt_rq->rt_runtime;
700
701
702
703
704 for_each_cpu(i, rd->span) {
705 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
706 s64 diff;
707
708
709
710
711 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
712 continue;
713
714 raw_spin_lock(&iter->rt_runtime_lock);
715 if (want > 0) {
716 diff = min_t(s64, iter->rt_runtime, want);
717 iter->rt_runtime -= diff;
718 want -= diff;
719 } else {
720 iter->rt_runtime -= want;
721 want -= want;
722 }
723 raw_spin_unlock(&iter->rt_runtime_lock);
724
725 if (!want)
726 break;
727 }
728
729 raw_spin_lock(&rt_rq->rt_runtime_lock);
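		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */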
734 BUG_ON(want);
735balanced:
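		/*
		 * Disable all further borrowing by pretending we have
		 * infinite runtime.
		 */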
740 rt_rq->rt_runtime = RUNTIME_INF;
741 rt_rq->rt_throttled = 0;
742 raw_spin_unlock(&rt_rq->rt_runtime_lock);
743 raw_spin_unlock(&rt_b->rt_runtime_lock);
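
		/* Make rt_rq available again for pick_next_task(). */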
746 sched_rt_rq_enqueue(rt_rq);
747 }
748}
749
750static void __enable_runtime(struct rq *rq)
751{
752 rt_rq_iter_t iter;
753 struct rt_rq *rt_rq;
754
755 if (unlikely(!scheduler_running))
756 return;
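
	/*
	 * Reset each runqueue's bandwidth settings.
	 */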
761 for_each_rt_rq(rt_rq, iter, rq) {
762 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
763
764 raw_spin_lock(&rt_b->rt_runtime_lock);
765 raw_spin_lock(&rt_rq->rt_runtime_lock);
766 rt_rq->rt_runtime = rt_b->rt_runtime;
767 rt_rq->rt_time = 0;
768 rt_rq->rt_throttled = 0;
769 raw_spin_unlock(&rt_rq->rt_runtime_lock);
770 raw_spin_unlock(&rt_b->rt_runtime_lock);
771 }
772}
773
774static int balance_runtime(struct rt_rq *rt_rq)
775{
776 int more = 0;
777
778 if (!sched_feat(RT_RUNTIME_SHARE))
779 return more;
780
781 if (rt_rq->rt_time > rt_rq->rt_runtime) {
782 raw_spin_unlock(&rt_rq->rt_runtime_lock);
783 more = do_balance_runtime(rt_rq);
784 raw_spin_lock(&rt_rq->rt_runtime_lock);
785 }
786
787 return more;
788}
789#else
790static inline int balance_runtime(struct rt_rq *rt_rq)
791{
792 return 0;
793}
794#endif
795
796static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
797{
798 int i, idle = 1, throttled = 0;
799 const struct cpumask *span;
800
801 span = sched_rt_period_mask();
802#ifdef CONFIG_RT_GROUP_SCHED
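	/*
	 * The root group's bandwidth applies to every online CPU, so make
	 * the timer service all of them rather than just this root domain's
	 * span; otherwise CPUs outside the span (e.g. isolated ones) could
	 * stay throttled indefinitely.
	 */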
812 if (rt_b == &root_task_group.rt_bandwidth)
813 span = cpu_online_mask;
814#endif
815 for_each_cpu(i, span) {
816 int enqueue = 0;
817 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
818 struct rq *rq = rq_of_rt_rq(rt_rq);
819
820 raw_spin_lock(&rq->lock);
821 if (rt_rq->rt_time) {
822 u64 runtime;
823
824 raw_spin_lock(&rt_rq->rt_runtime_lock);
825 if (rt_rq->rt_throttled)
826 balance_runtime(rt_rq);
827 runtime = rt_rq->rt_runtime;
828 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
829 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
830 rt_rq->rt_throttled = 0;
831 enqueue = 1;
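
				/*
				 * When we're idle and a woken (rt) task is
				 * throttled, check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle would be accounted as
				 * idle time; force the clock update instead.
				 */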
840 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
841 rq_clock_skip_update(rq, false);
842 }
843 if (rt_rq->rt_time || rt_rq->rt_nr_running)
844 idle = 0;
845 raw_spin_unlock(&rt_rq->rt_runtime_lock);
846 } else if (rt_rq->rt_nr_running) {
847 idle = 0;
848 if (!rt_rq_throttled(rt_rq))
849 enqueue = 1;
850 }
851 if (rt_rq->rt_throttled)
852 throttled = 1;
853
854 if (enqueue)
855 sched_rt_rq_enqueue(rt_rq);
856 raw_spin_unlock(&rq->lock);
857 }
858
859 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
860 return 1;
861
862 return idle;
863}
864
865static inline int rt_se_prio(struct sched_rt_entity *rt_se)
866{
867#ifdef CONFIG_RT_GROUP_SCHED
868 struct rt_rq *rt_rq = group_rt_rq(rt_se);
869
870 if (rt_rq)
871 return rt_rq->highest_prio.curr;
872#endif
873
874 return rt_task_of(rt_se)->prio;
875}
876
877static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
878{
879 u64 runtime = sched_rt_runtime(rt_rq);
880
881 if (rt_rq->rt_throttled)
882 return rt_rq_throttled(rt_rq);
883
884 if (runtime >= sched_rt_period(rt_rq))
885 return 0;
886
887 balance_runtime(rt_rq);
888 runtime = sched_rt_runtime(rt_rq);
889 if (runtime == RUNTIME_INF)
890 return 0;
891
892 if (rt_rq->rt_time > runtime) {
893 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
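
		/*
		 * Don't actually throttle groups that have no runtime
		 * assigned but accrued some time due to PI boosting.
		 */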
899 if (likely(rt_b->rt_runtime)) {
900 rt_rq->rt_throttled = 1;
901 printk_deferred_once("sched: RT throttling activated\n");
902 } else {
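			/*
			 * In case we did anyway, make it go away;
			 * replenishing a zero-runtime group would give
			 * back exactly 0 ns.
			 */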
908 rt_rq->rt_time = 0;
909 }
910
911 if (rt_rq_throttled(rt_rq)) {
912 sched_rt_rq_dequeue(rt_rq);
913 return 1;
914 }
915 }
916
917 return 0;
918}
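
/*
 * Update the current task's runtime statistics.  Skip current tasks that
 * are not in our scheduling class.
 */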
924static void update_curr_rt(struct rq *rq)
925{
926 struct task_struct *curr = rq->curr;
927 struct sched_rt_entity *rt_se = &curr->rt;
928 u64 delta_exec;
929
930 if (curr->sched_class != &rt_sched_class)
931 return;
932
933 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
934 if (unlikely((s64)delta_exec <= 0))
935 return;
936
937 schedstat_set(curr->se.statistics.exec_max,
938 max(curr->se.statistics.exec_max, delta_exec));
939
940 curr->se.sum_exec_runtime += delta_exec;
941 account_group_exec_runtime(curr, delta_exec);
942
943 curr->se.exec_start = rq_clock_task(rq);
944 cpuacct_charge(curr, delta_exec);
945
946 sched_rt_avg_update(rq, delta_exec);
947
948 if (!rt_bandwidth_enabled())
949 return;
950
951 for_each_sched_rt_entity(rt_se) {
952 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
953
954 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
955 raw_spin_lock(&rt_rq->rt_runtime_lock);
956 rt_rq->rt_time += delta_exec;
957 if (sched_rt_runtime_exceeded(rt_rq))
958 resched_curr(rq);
959 raw_spin_unlock(&rt_rq->rt_runtime_lock);
960 }
961 }
962}
963
964static void
965dequeue_top_rt_rq(struct rt_rq *rt_rq)
966{
967 struct rq *rq = rq_of_rt_rq(rt_rq);
968
969 BUG_ON(&rq->rt != rt_rq);
970
971 if (!rt_rq->rt_queued)
972 return;
973
974 BUG_ON(!rq->nr_running);
975
976 sub_nr_running(rq, rt_rq->rt_nr_running);
977 rt_rq->rt_queued = 0;
978}
979
980static void
981enqueue_top_rt_rq(struct rt_rq *rt_rq)
982{
983 struct rq *rq = rq_of_rt_rq(rt_rq);
984
985 BUG_ON(&rq->rt != rt_rq);
986
987 if (rt_rq->rt_queued)
988 return;
989 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
990 return;
991
992 add_nr_running(rq, rt_rq->rt_nr_running);
993 rt_rq->rt_queued = 1;
994}
995
996#if defined CONFIG_SMP
997
998static void
999inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1000{
1001 struct rq *rq = rq_of_rt_rq(rt_rq);
1002
1003#ifdef CONFIG_RT_GROUP_SCHED
1004
1005
1006
1007 if (&rq->rt != rt_rq)
1008 return;
1009#endif
1010 if (rq->online && prio < prev_prio)
1011 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1012}
1013
1014static void
1015dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1016{
1017 struct rq *rq = rq_of_rt_rq(rt_rq);
1018
1019#ifdef CONFIG_RT_GROUP_SCHED
1020
1021
1022
1023 if (&rq->rt != rt_rq)
1024 return;
1025#endif
1026 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1027 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1028}
1029
1030#else
1031
1032static inline
1033void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1034static inline
1035void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1036
1037#endif
1038
1039#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1040static void
1041inc_rt_prio(struct rt_rq *rt_rq, int prio)
1042{
1043 int prev_prio = rt_rq->highest_prio.curr;
1044
1045 if (prio < prev_prio)
1046 rt_rq->highest_prio.curr = prio;
1047
1048 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1049}
1050
1051static void
1052dec_rt_prio(struct rt_rq *rt_rq, int prio)
1053{
1054 int prev_prio = rt_rq->highest_prio.curr;
1055
1056 if (rt_rq->rt_nr_running) {
1057
1058 WARN_ON(prio < prev_prio);
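
		/*
		 * This may have been our highest-priority task, so we may
		 * have to recompute the highest prio from the bitmap.
		 */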
1064 if (prio == prev_prio) {
1065 struct rt_prio_array *array = &rt_rq->active;
1066
1067 rt_rq->highest_prio.curr =
1068 sched_find_first_bit(array->bitmap);
1069 }
1070
1071 } else
1072 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1073
1074 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1075}
1076
1077#else
1078
1079static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1080static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1081
1082#endif
1083
1084#ifdef CONFIG_RT_GROUP_SCHED
1085
1086static void
1087inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1088{
1089 if (rt_se_boosted(rt_se))
1090 rt_rq->rt_nr_boosted++;
1091
1092 if (rt_rq->tg)
1093 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1094}
1095
1096static void
1097dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1098{
1099 if (rt_se_boosted(rt_se))
1100 rt_rq->rt_nr_boosted--;
1101
1102 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1103}
1104
1105#else
1106
1107static void
1108inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1109{
1110 start_rt_bandwidth(&def_rt_bandwidth);
1111}
1112
1113static inline
1114void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1115
1116#endif
1117
1118static inline
1119unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1120{
1121 struct rt_rq *group_rq = group_rt_rq(rt_se);
1122
1123 if (group_rq)
1124 return group_rq->rt_nr_running;
1125 else
1126 return 1;
1127}
1128
1129static inline
1130void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1131{
1132 int prio = rt_se_prio(rt_se);
1133
1134 WARN_ON(!rt_prio(prio));
1135 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1136
1137 inc_rt_prio(rt_rq, prio);
1138 inc_rt_migration(rt_se, rt_rq);
1139 inc_rt_group(rt_se, rt_rq);
1140}
1141
1142static inline
1143void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1144{
1145 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1146 WARN_ON(!rt_rq->rt_nr_running);
1147 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1148
1149 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1150 dec_rt_migration(rt_se, rt_rq);
1151 dec_rt_group(rt_se, rt_rq);
1152}
1153
1154static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1155{
1156 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1157 struct rt_prio_array *array = &rt_rq->active;
1158 struct rt_rq *group_rq = group_rt_rq(rt_se);
1159 struct list_head *queue = array->queue + rt_se_prio(rt_se);
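
	/*
	 * Don't enqueue the group if it is throttled, or when it is empty.
	 * The latter is a consequence of the former when a child group gets
	 * throttled and the parent group has no other active members.
	 */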
1167 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1168 return;
1169
1170 if (head)
1171 list_add(&rt_se->run_list, queue);
1172 else
1173 list_add_tail(&rt_se->run_list, queue);
1174 __set_bit(rt_se_prio(rt_se), array->bitmap);
1175
1176 inc_rt_tasks(rt_se, rt_rq);
1177}
1178
1179static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1180{
1181 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1182 struct rt_prio_array *array = &rt_rq->active;
1183
1184 list_del_init(&rt_se->run_list);
1185 if (list_empty(array->queue + rt_se_prio(rt_se)))
1186 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1187
1188 dec_rt_tasks(rt_se, rt_rq);
1189}
1190
1195static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1196{
1197 struct sched_rt_entity *back = NULL;
1198
1199 for_each_sched_rt_entity(rt_se) {
1200 rt_se->back = back;
1201 back = rt_se;
1202 }
1203
1204 dequeue_top_rt_rq(rt_rq_of_se(back));
1205
1206 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1207 if (on_rt_rq(rt_se))
1208 __dequeue_rt_entity(rt_se);
1209 }
1210}
1211
1212static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1213{
1214 struct rq *rq = rq_of_rt_se(rt_se);
1215
1216 dequeue_rt_stack(rt_se);
1217 for_each_sched_rt_entity(rt_se)
1218 __enqueue_rt_entity(rt_se, head);
1219 enqueue_top_rt_rq(&rq->rt);
1220}
1221
1222static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1223{
1224 struct rq *rq = rq_of_rt_se(rt_se);
1225
1226 dequeue_rt_stack(rt_se);
1227
1228 for_each_sched_rt_entity(rt_se) {
1229 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1230
1231 if (rt_rq && rt_rq->rt_nr_running)
1232 __enqueue_rt_entity(rt_se, false);
1233 }
1234 enqueue_top_rt_rq(&rq->rt);
1235}
1236
1237
1238
1239
1240static void
1241enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1242{
1243 struct sched_rt_entity *rt_se = &p->rt;
1244
1245 if (flags & ENQUEUE_WAKEUP)
1246 rt_se->timeout = 0;
1247
1248 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1249
1250 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1251 enqueue_pushable_task(rq, p);
1252}
1253
1254static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1255{
1256 struct sched_rt_entity *rt_se = &p->rt;
1257
1258 update_curr_rt(rq);
1259 dequeue_rt_entity(rt_se);
1260
1261 dequeue_pushable_task(rq, p);
1262}
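
/*
 * Put a task at the head or tail of its run list without the overhead
 * of a dequeue followed by an enqueue.
 */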
1268static void
1269requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1270{
1271 if (on_rt_rq(rt_se)) {
1272 struct rt_prio_array *array = &rt_rq->active;
1273 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1274
1275 if (head)
1276 list_move(&rt_se->run_list, queue);
1277 else
1278 list_move_tail(&rt_se->run_list, queue);
1279 }
1280}
1281
1282static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1283{
1284 struct sched_rt_entity *rt_se = &p->rt;
1285 struct rt_rq *rt_rq;
1286
1287 for_each_sched_rt_entity(rt_se) {
1288 rt_rq = rt_rq_of_se(rt_se);
1289 requeue_rt_entity(rt_rq, rt_se, head);
1290 }
1291}
1292
1293static void yield_task_rt(struct rq *rq)
1294{
1295 requeue_task_rt(rq, rq->curr, 0);
1296}
1297
1298#ifdef CONFIG_SMP
1299static int find_lowest_rq(struct task_struct *task);
1300
1301static int
1302select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1303{
1304 struct task_struct *curr;
1305 struct rq *rq;
1306
1307
1308 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1309 goto out;
1310
1311 rq = cpu_rq(cpu);
1312
1313 rcu_read_lock();
1314 curr = ACCESS_ONCE(rq->curr);
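
	/*
	 * If the current task on @p's runqueue is an RT task that is either
	 * pinned or of equal/higher priority, try to place @p on a CPU that
	 * is running a lower-priority task instead of overloading this
	 * runqueue.  Otherwise just start @p on its current runqueue.
	 */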
1338 if (curr && unlikely(rt_task(curr)) &&
1339 (curr->nr_cpus_allowed < 2 ||
1340 curr->prio <= p->prio)) {
1341 int target = find_lowest_rq(p);
1342
1347 if (target != -1 &&
1348 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1349 cpu = target;
1350 }
1351 rcu_read_unlock();
1352
1353out:
1354 return cpu;
1355}
1356
1357static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1358{
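	/*
	 * Current can't be migrated; rescheduling would be pointless,
	 * so let's hope p can move out instead.
	 */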
1363 if (rq->curr->nr_cpus_allowed == 1 ||
1364 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1365 return;
1366
1367
1368
1369
1370
1371 if (p->nr_cpus_allowed != 1
1372 && cpupri_find(&rq->rd->cpupri, p, NULL))
1373 return;
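
	/*
	 * There appear to be other CPUs that can accept current, but none
	 * that can run p, so reschedule and try to push current away.
	 */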
1380 requeue_task_rt(rq, p, 1);
1381 resched_curr(rq);
1382}
1383
1384#endif
1385
1386
1387
1388
1389static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1390{
1391 if (p->prio < rq->curr->prio) {
1392 resched_curr(rq);
1393 return;
1394 }
1395
1396#ifdef CONFIG_SMP
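	/*
	 * If the newly woken task has the same priority as current, check
	 * whether current can readily move to a different CPU.  If so,
	 * reschedule so the push logic can try to move current elsewhere,
	 * making room for the (possibly non-migratable) woken task.
	 */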
1409 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1410 check_preempt_equal_prio(rq, p);
1411#endif
1412}
1413
1414static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1415 struct rt_rq *rt_rq)
1416{
1417 struct rt_prio_array *array = &rt_rq->active;
1418 struct sched_rt_entity *next = NULL;
1419 struct list_head *queue;
1420 int idx;
1421
1422 idx = sched_find_first_bit(array->bitmap);
1423 BUG_ON(idx >= MAX_RT_PRIO);
1424
1425 queue = array->queue + idx;
1426 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1427
1428 return next;
1429}
1430
1431static struct task_struct *_pick_next_task_rt(struct rq *rq)
1432{
1433 struct sched_rt_entity *rt_se;
1434 struct task_struct *p;
1435 struct rt_rq *rt_rq = &rq->rt;
1436
1437 do {
1438 rt_se = pick_next_rt_entity(rq, rt_rq);
1439 BUG_ON(!rt_se);
1440 rt_rq = group_rt_rq(rt_se);
1441 } while (rt_rq);
1442
1443 p = rt_task_of(rt_se);
1444 p->se.exec_start = rq_clock_task(rq);
1445
1446 return p;
1447}
1448
1449static struct task_struct *
1450pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1451{
1452 struct task_struct *p;
1453 struct rt_rq *rt_rq = &rq->rt;
1454
1455 if (need_pull_rt_task(rq, prev)) {
1456 pull_rt_task(rq);
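
		/*
		 * pull_rt_task() can drop (and re-acquire) rq->lock, so a
		 * stop or deadline task may have slipped in; in that case
		 * restart task selection.
		 */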
1462 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1463 rq->dl.dl_nr_running))
1464 return RETRY_TASK;
1465 }
1466
1467
1468
1469
1470
1471 if (prev->sched_class == &rt_sched_class)
1472 update_curr_rt(rq);
1473
1474 if (!rt_rq->rt_queued)
1475 return NULL;
1476
1477 put_prev_task(rq, prev);
1478
1479 p = _pick_next_task_rt(rq);
1480
1481
1482 dequeue_pushable_task(rq, p);
1483
1484 set_post_schedule(rq);
1485
1486 return p;
1487}
1488
1489static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1490{
1491 update_curr_rt(rq);
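
	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active.
	 */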
1497 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1498 enqueue_pushable_task(rq, p);
1499}
1500
1501#ifdef CONFIG_SMP
1502
1503
1504#define RT_MAX_TRIES 3
1505
1506static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1507{
1508 if (!task_running(rq, p) &&
1509 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1510 return 1;
1511 return 0;
1512}
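
/*
 * Return the highest pushable task on @rq that can run on @cpu,
 * or NULL if there is none.
 */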
1518static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1519{
1520 struct plist_head *head = &rq->rt.pushable_tasks;
1521 struct task_struct *p;
1522
1523 if (!has_pushable_tasks(rq))
1524 return NULL;
1525
1526 plist_for_each_entry(p, head, pushable_tasks) {
1527 if (pick_rt_task(rq, p, cpu))
1528 return p;
1529 }
1530
1531 return NULL;
1532}
1533
1534static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1535
1536static int find_lowest_rq(struct task_struct *task)
1537{
1538 struct sched_domain *sd;
1539 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1540 int this_cpu = smp_processor_id();
1541 int cpu = task_cpu(task);
1542
1543
1544 if (unlikely(!lowest_mask))
1545 return -1;
1546
1547 if (task->nr_cpus_allowed == 1)
1548 return -1;
1549
1550 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1551 return -1;
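
	/*
	 * lowest_mask now holds the CPUs running the lowest-priority tasks.
	 * Pick the best one based on affinity and topology, preferring the
	 * CPU the task last ran on since its cache is most likely hot there.
	 */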
1561 if (cpumask_test_cpu(cpu, lowest_mask))
1562 return cpu;
1563
1564
1565
1566
1567
1568 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1569 this_cpu = -1;
1570
1571 rcu_read_lock();
1572 for_each_domain(cpu, sd) {
1573 if (sd->flags & SD_WAKE_AFFINE) {
1574 int best_cpu;
1575
1576
1577
1578
1579
1580 if (this_cpu != -1 &&
1581 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1582 rcu_read_unlock();
1583 return this_cpu;
1584 }
1585
1586 best_cpu = cpumask_first_and(lowest_mask,
1587 sched_domain_span(sd));
1588 if (best_cpu < nr_cpu_ids) {
1589 rcu_read_unlock();
1590 return best_cpu;
1591 }
1592 }
1593 }
1594 rcu_read_unlock();
1595
1596
1597
1598
1599
1600
1601 if (this_cpu != -1)
1602 return this_cpu;
1603
1604 cpu = cpumask_any(lowest_mask);
1605 if (cpu < nr_cpu_ids)
1606 return cpu;
1607 return -1;
1608}
1609
1610
1611static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1612{
1613 struct rq *lowest_rq = NULL;
1614 int tries;
1615 int cpu;
1616
1617 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1618 cpu = find_lowest_rq(task);
1619
1620 if ((cpu == -1) || (cpu == rq->cpu))
1621 break;
1622
1623 lowest_rq = cpu_rq(cpu);
1624
1625 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
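			/*
			 * The target rq has tasks of equal or higher
			 * priority; retrying does not release any lock and
			 * is unlikely to yield a different result.
			 */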
1631 lowest_rq = NULL;
1632 break;
1633 }
1634
1635
1636 if (double_lock_balance(rq, lowest_rq)) {
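			/*
			 * We had to drop the runqueue lock; in the meantime
			 * the task may have migrated, changed affinity or
			 * started running, so re-check before moving it.
			 */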
1643 if (unlikely(task_rq(task) != rq ||
1644 !cpumask_test_cpu(lowest_rq->cpu,
1645 tsk_cpus_allowed(task)) ||
1646 task_running(rq, task) ||
1647 !task_on_rq_queued(task))) {
1648
1649 double_unlock_balance(rq, lowest_rq);
1650 lowest_rq = NULL;
1651 break;
1652 }
1653 }
1654
1655
1656 if (lowest_rq->rt.highest_prio.curr > task->prio)
1657 break;
1658
1659
1660 double_unlock_balance(rq, lowest_rq);
1661 lowest_rq = NULL;
1662 }
1663
1664 return lowest_rq;
1665}
1666
1667static struct task_struct *pick_next_pushable_task(struct rq *rq)
1668{
1669 struct task_struct *p;
1670
1671 if (!has_pushable_tasks(rq))
1672 return NULL;
1673
1674 p = plist_first_entry(&rq->rt.pushable_tasks,
1675 struct task_struct, pushable_tasks);
1676
1677 BUG_ON(rq->cpu != task_cpu(p));
1678 BUG_ON(task_current(rq, p));
1679 BUG_ON(p->nr_cpus_allowed <= 1);
1680
1681 BUG_ON(!task_on_rq_queued(p));
1682 BUG_ON(!rt_task(p));
1683
1684 return p;
1685}
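
/*
 * If this CPU has more than one RT task, see if the non-running one can
 * migrate to a CPU that is running a task of lesser priority.
 */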
1692static int push_rt_task(struct rq *rq)
1693{
1694 struct task_struct *next_task;
1695 struct rq *lowest_rq;
1696 int ret = 0;
1697
1698 if (!rq->rt.overloaded)
1699 return 0;
1700
1701 next_task = pick_next_pushable_task(rq);
1702 if (!next_task)
1703 return 0;
1704
1705retry:
1706 if (unlikely(next_task == rq->curr)) {
1707 WARN_ON(1);
1708 return 0;
1709 }
1710
1711
1712
1713
1714
1715
1716 if (unlikely(next_task->prio < rq->curr->prio)) {
1717 resched_curr(rq);
1718 return 0;
1719 }
1720
1721
1722 get_task_struct(next_task);
1723
1724
1725 lowest_rq = find_lock_lowest_rq(next_task, rq);
1726 if (!lowest_rq) {
1727 struct task_struct *task;
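
		/*
		 * find_lock_lowest_rq() releases rq->lock, so next_task may
		 * have migrated.  Make sure it is still on this runqueue and
		 * still the next task eligible for pushing.
		 */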
1736 task = pick_next_pushable_task(rq);
1737 if (task_cpu(next_task) == rq->cpu && task == next_task) {
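			/*
			 * The task hasn't migrated and is still the next
			 * eligible task, but we failed to find a runqueue
			 * to push it to.  Don't retry; other CPUs will pull
			 * from us when they are ready.
			 */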
1744 goto out;
1745 }
1746
1747 if (!task)
1748
1749 goto out;
1750
1751
1752
1753
1754 put_task_struct(next_task);
1755 next_task = task;
1756 goto retry;
1757 }
1758
1759 deactivate_task(rq, next_task, 0);
1760 set_task_cpu(next_task, lowest_rq->cpu);
1761 activate_task(lowest_rq, next_task, 0);
1762 ret = 1;
1763
1764 resched_curr(lowest_rq);
1765
1766 double_unlock_balance(rq, lowest_rq);
1767
1768out:
1769 put_task_struct(next_task);
1770
1771 return ret;
1772}
1773
1774static void push_rt_tasks(struct rq *rq)
1775{
1776
1777 while (push_rt_task(rq))
1778 ;
1779}
1780
1781static int pull_rt_task(struct rq *this_rq)
1782{
1783 int this_cpu = this_rq->cpu, ret = 0, cpu;
1784 struct task_struct *p;
1785 struct rq *src_rq;
1786
1787 if (likely(!rt_overloaded(this_rq)))
1788 return 0;
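
	/*
	 * Match the smp_wmb() in rt_set_overload(): if we observe rto_count,
	 * we must also observe the updated rto_mask.
	 */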
1794 smp_rmb();
1795
1796 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1797 if (this_cpu == cpu)
1798 continue;
1799
1800 src_rq = cpu_rq(cpu);
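
		/*
		 * Don't bother taking src_rq->lock if its next-highest task
		 * is known to be lower priority than our current task.  This
		 * may look racy, but if the value is about to become higher
		 * the src_rq will push the task away itself, and if it drops
		 * lower we do not care.
		 */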
1809 if (src_rq->rt.highest_prio.next >=
1810 this_rq->rt.highest_prio.curr)
1811 continue;
1812
1813
1814
1815
1816
1817
1818 double_lock_balance(this_rq, src_rq);
1819
1820
1821
1822
1823
1824 p = pick_highest_pushable_task(src_rq, this_cpu);
1825
1826
1827
1828
1829
1830 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1831 WARN_ON(p == src_rq->curr);
1832 WARN_ON(!task_on_rq_queued(p));
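
			/*
			 * p may be higher in priority than what is currently
			 * running on its CPU; that just means it is waking up
			 * and has not run there yet.  Only pull it if it is
			 * lower in priority than the current task on its rq.
			 */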
1842 if (p->prio < src_rq->curr->prio)
1843 goto skip;
1844
1845 ret = 1;
1846
1847 deactivate_task(src_rq, p, 0);
1848 set_task_cpu(p, this_cpu);
1849 activate_task(this_rq, p, 0);
1850
1851
1852
1853
1854
1855
1856 }
1857skip:
1858 double_unlock_balance(this_rq, src_rq);
1859 }
1860
1861 return ret;
1862}
1863
1864static void post_schedule_rt(struct rq *rq)
1865{
1866 push_rt_tasks(rq);
1867}
1868
1869
1870
1871
1872
1873static void task_woken_rt(struct rq *rq, struct task_struct *p)
1874{
1875 if (!task_running(rq, p) &&
1876 !test_tsk_need_resched(rq->curr) &&
1877 has_pushable_tasks(rq) &&
1878 p->nr_cpus_allowed > 1 &&
1879 (dl_task(rq->curr) || rt_task(rq->curr)) &&
1880 (rq->curr->nr_cpus_allowed < 2 ||
1881 rq->curr->prio <= p->prio))
1882 push_rt_tasks(rq);
1883}
1884
1885static void set_cpus_allowed_rt(struct task_struct *p,
1886 const struct cpumask *new_mask)
1887{
1888 struct rq *rq;
1889 int weight;
1890
1891 BUG_ON(!rt_task(p));
1892
1893 if (!task_on_rq_queued(p))
1894 return;
1895
1896 weight = cpumask_weight(new_mask);
1897
1898
1899
1900
1901
1902 if ((p->nr_cpus_allowed > 1) == (weight > 1))
1903 return;
1904
1905 rq = task_rq(p);
1906
1907
1908
1909
1910 if (weight <= 1) {
1911 if (!task_current(rq, p))
1912 dequeue_pushable_task(rq, p);
1913 BUG_ON(!rq->rt.rt_nr_migratory);
1914 rq->rt.rt_nr_migratory--;
1915 } else {
1916 if (!task_current(rq, p))
1917 enqueue_pushable_task(rq, p);
1918 rq->rt.rt_nr_migratory++;
1919 }
1920
1921 update_rt_migration(&rq->rt);
1922}
1923
1924
1925static void rq_online_rt(struct rq *rq)
1926{
1927 if (rq->rt.overloaded)
1928 rt_set_overload(rq);
1929
1930 __enable_runtime(rq);
1931
1932 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1933}
1934
1935
1936static void rq_offline_rt(struct rq *rq)
1937{
1938 if (rq->rt.overloaded)
1939 rt_clear_overload(rq);
1940
1941 __disable_runtime(rq);
1942
1943 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1944}
1945
1946
1947
1948
1949
1950static void switched_from_rt(struct rq *rq, struct task_struct *p)
1951{
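	/*
	 * If there are other RT tasks left on this runqueue, rescheduling
	 * among them will handle the balancing.  Only when the last RT task
	 * leaves do we need to try pulling RT tasks from other runqueues.
	 */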
1959 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
1960 return;
1961
1962 if (pull_rt_task(rq))
1963 resched_curr(rq);
1964}
1965
1966void __init init_sched_rt_class(void)
1967{
1968 unsigned int i;
1969
1970 for_each_possible_cpu(i) {
1971 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1972 GFP_KERNEL, cpu_to_node(i));
1973 }
1974}
1975#endif
1976
1977
1978
1979
1980
1981
1982static void switched_to_rt(struct rq *rq, struct task_struct *p)
1983{
1984 int check_resched = 1;
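
	/*
	 * If p is already running there is nothing to do.  Otherwise we may
	 * need to preempt the current task, and if current is also an RT
	 * task, see whether we can move p off to another runqueue instead.
	 */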
1993 if (task_on_rq_queued(p) && rq->curr != p) {
1994#ifdef CONFIG_SMP
1995 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
1996
1997 push_rt_task(rq) && rq != task_rq(p))
1998 check_resched = 0;
1999#endif
2000 if (check_resched && p->prio < rq->curr->prio)
2001 resched_curr(rq);
2002 }
2003}
2004
2005
2006
2007
2008
2009static void
2010prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2011{
2012 if (!task_on_rq_queued(p))
2013 return;
2014
2015 if (rq->curr == p) {
2016#ifdef CONFIG_SMP
2017
2018
2019
2020
2021 if (oldprio < p->prio)
2022 pull_rt_task(rq);
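
		/*
		 * If there is a higher-priority task waiting to run, then
		 * reschedule.  Note that pull_rt_task() above can release
		 * the rq lock, so p may have migrated; only reschedule if
		 * p is still on this runqueue.
		 */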
2029 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
2030 resched_curr(rq);
2031#else
2032
2033 if (oldprio < p->prio)
2034 resched_curr(rq);
2035#endif
2036 } else {
2037
2038
2039
2040
2041
2042 if (p->prio < rq->curr->prio)
2043 resched_curr(rq);
2044 }
2045}
2046
2047static void watchdog(struct rq *rq, struct task_struct *p)
2048{
2049 unsigned long soft, hard;
2050
2051
2052 soft = task_rlimit(p, RLIMIT_RTTIME);
2053 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2054
2055 if (soft != RLIM_INFINITY) {
2056 unsigned long next;
2057
2058 if (p->rt.watchdog_stamp != jiffies) {
2059 p->rt.timeout++;
2060 p->rt.watchdog_stamp = jiffies;
2061 }
2062
2063 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2064 if (p->rt.timeout > next)
2065 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2066 }
2067}
2068
2069static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2070{
2071 struct sched_rt_entity *rt_se = &p->rt;
2072
2073 update_curr_rt(rq);
2074
2075 watchdog(rq, p);
2076
2077
2078
2079
2080
2081 if (p->policy != SCHED_RR)
2082 return;
2083
2084 if (--p->rt.time_slice)
2085 return;
2086
2087 p->rt.time_slice = sched_rr_timeslice;
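
	/*
	 * Requeue to the end of the queue if we (and all of our ancestors)
	 * are not the only entity on our run list.
	 */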
2093 for_each_sched_rt_entity(rt_se) {
2094 if (rt_se->run_list.prev != rt_se->run_list.next) {
2095 requeue_task_rt(rq, p, 0);
2096 resched_curr(rq);
2097 return;
2098 }
2099 }
2100}
2101
2102static void set_curr_task_rt(struct rq *rq)
2103{
2104 struct task_struct *p = rq->curr;
2105
2106 p->se.exec_start = rq_clock_task(rq);
2107
2108
2109 dequeue_pushable_task(rq, p);
2110}
2111
2112static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2113{
2114
2115
2116
2117 if (task->policy == SCHED_RR)
2118 return sched_rr_timeslice;
2119 else
2120 return 0;
2121}
2122
2123const struct sched_class rt_sched_class = {
2124 .next = &fair_sched_class,
2125 .enqueue_task = enqueue_task_rt,
2126 .dequeue_task = dequeue_task_rt,
2127 .yield_task = yield_task_rt,
2128
2129 .check_preempt_curr = check_preempt_curr_rt,
2130
2131 .pick_next_task = pick_next_task_rt,
2132 .put_prev_task = put_prev_task_rt,
2133
2134#ifdef CONFIG_SMP
2135 .select_task_rq = select_task_rq_rt,
2136
2137 .set_cpus_allowed = set_cpus_allowed_rt,
2138 .rq_online = rq_online_rt,
2139 .rq_offline = rq_offline_rt,
2140 .post_schedule = post_schedule_rt,
2141 .task_woken = task_woken_rt,
2142 .switched_from = switched_from_rt,
2143#endif
2144
2145 .set_curr_task = set_curr_task_rt,
2146 .task_tick = task_tick_rt,
2147
2148 .get_rr_interval = get_rr_interval_rt,
2149
2150 .prio_changed = prio_changed_rt,
2151 .switched_to = switched_to_rt,
2152
2153 .update_curr = update_curr_rt,
2154};
2155
2156#ifdef CONFIG_SCHED_DEBUG
2157extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2158
2159void print_rt_stats(struct seq_file *m, int cpu)
2160{
2161 rt_rq_iter_t iter;
2162 struct rt_rq *rt_rq;
2163
2164 rcu_read_lock();
2165 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2166 print_rt_rq(m, cpu, rt_rq);
2167 rcu_read_unlock();
2168}
2169#endif
2170