/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
6#include "sched.h"
7
8#include <linux/slab.h>
9#include <linux/irq_work.h>
10
11int sched_rr_timeslice = RR_TIMESLICE;
12
13static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
14
15struct rt_bandwidth def_rt_bandwidth;
16
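/*
 * Periodic replenishment timer for an rt_bandwidth: each time the period
 * elapses, do_sched_rt_period_timer() refills runtime and unthrottles
 * runqueues; the timer stops once every covered runqueue has gone idle.
 */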
17static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
18{
19 struct rt_bandwidth *rt_b =
20 container_of(timer, struct rt_bandwidth, rt_period_timer);
21 int idle = 0;
22 int overrun;
23
24 raw_spin_lock(&rt_b->rt_runtime_lock);
25 for (;;) {
26 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
27 if (!overrun)
28 break;
29
30 raw_spin_unlock(&rt_b->rt_runtime_lock);
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 raw_spin_lock(&rt_b->rt_runtime_lock);
33 }
34 if (idle)
35 rt_b->rt_period_active = 0;
36 raw_spin_unlock(&rt_b->rt_runtime_lock);
37
38 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
39}
40
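/* Set up period/runtime and the replenishment timer for an rt_bandwidth. */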
41void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
42{
43 rt_b->rt_period = ns_to_ktime(period);
44 rt_b->rt_runtime = runtime;
45
46 raw_spin_lock_init(&rt_b->rt_runtime_lock);
47
48 hrtimer_init(&rt_b->rt_period_timer,
49 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
50 rt_b->rt_period_timer.function = sched_rt_period_timer;
51}
52
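/*
 * Arm the replenishment timer (if RT bandwidth control is enabled and the
 * timer is not already active) so that consumed runtime gets refilled.
 */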
53static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
54{
55 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
56 return;
57
58 raw_spin_lock(&rt_b->rt_runtime_lock);
59 if (!rt_b->rt_period_active) {
60 rt_b->rt_period_active = 1;
61 hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
62 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
63 }
64 raw_spin_unlock(&rt_b->rt_runtime_lock);
65}
66
67#ifdef CONFIG_SMP
68static void push_irq_work_func(struct irq_work *work);
69#endif
70
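/* Initialize an RT runqueue: priority array, push/pull state and bandwidth. */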
71void init_rt_rq(struct rt_rq *rt_rq)
72{
73 struct rt_prio_array *array;
74 int i;
75
76 array = &rt_rq->active;
77 for (i = 0; i < MAX_RT_PRIO; i++) {
78 INIT_LIST_HEAD(array->queue + i);
79 __clear_bit(i, array->bitmap);
80 }
81
82 __set_bit(MAX_RT_PRIO, array->bitmap);
83
84#if defined CONFIG_SMP
85 rt_rq->highest_prio.curr = MAX_RT_PRIO;
86 rt_rq->highest_prio.next = MAX_RT_PRIO;
87 rt_rq->rt_nr_migratory = 0;
88 rt_rq->overloaded = 0;
89 plist_head_init(&rt_rq->pushable_tasks);
90
91#ifdef HAVE_RT_PUSH_IPI
92 rt_rq->push_flags = 0;
93 rt_rq->push_cpu = nr_cpu_ids;
94 raw_spin_lock_init(&rt_rq->push_lock);
95 init_irq_work(&rt_rq->push_work, push_irq_work_func);
96#endif
97#endif
98
99 rt_rq->rt_queued = 0;
100
101 rt_rq->rt_time = 0;
102 rt_rq->rt_throttled = 0;
103 rt_rq->rt_runtime = 0;
104 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
105}
106
107#ifdef CONFIG_RT_GROUP_SCHED
108static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
109{
110 hrtimer_cancel(&rt_b->rt_period_timer);
111}
112
113#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
114
115static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
116{
117#ifdef CONFIG_SCHED_DEBUG
118 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
119#endif
120 return container_of(rt_se, struct task_struct, rt);
121}
122
123static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
124{
125 return rt_rq->rq;
126}
127
128static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
129{
130 return rt_se->rt_rq;
131}
132
133static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
134{
135 struct rt_rq *rt_rq = rt_se->rt_rq;
136
137 return rt_rq->rq;
138}
139
140void free_rt_sched_group(struct task_group *tg)
141{
142 int i;
143
144 if (tg->rt_se)
145 destroy_rt_bandwidth(&tg->rt_bandwidth);
146
147 for_each_possible_cpu(i) {
148 if (tg->rt_rq)
149 kfree(tg->rt_rq[i]);
150 if (tg->rt_se)
151 kfree(tg->rt_se[i]);
152 }
153
154 kfree(tg->rt_rq);
155 kfree(tg->rt_se);
156}
157
158void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
159 struct sched_rt_entity *rt_se, int cpu,
160 struct sched_rt_entity *parent)
161{
162 struct rq *rq = cpu_rq(cpu);
163
164 rt_rq->highest_prio.curr = MAX_RT_PRIO;
165 rt_rq->rt_nr_boosted = 0;
166 rt_rq->rq = rq;
167 rt_rq->tg = tg;
168
169 tg->rt_rq[cpu] = rt_rq;
170 tg->rt_se[cpu] = rt_se;
171
172 if (!rt_se)
173 return;
174
175 if (!parent)
176 rt_se->rt_rq = &rq->rt;
177 else
178 rt_se->rt_rq = parent->my_q;
179
180 rt_se->my_q = rt_rq;
181 rt_se->parent = parent;
182 INIT_LIST_HEAD(&rt_se->run_list);
183}
184
185int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
186{
187 struct rt_rq *rt_rq;
188 struct sched_rt_entity *rt_se;
189 int i;
190
191 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
192 if (!tg->rt_rq)
193 goto err;
194 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
195 if (!tg->rt_se)
196 goto err;
197
198 init_rt_bandwidth(&tg->rt_bandwidth,
199 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
200
201 for_each_possible_cpu(i) {
202 rt_rq = kzalloc_node(sizeof(struct rt_rq),
203 GFP_KERNEL, cpu_to_node(i));
204 if (!rt_rq)
205 goto err;
206
207 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
208 GFP_KERNEL, cpu_to_node(i));
209 if (!rt_se)
210 goto err_free_rq;
211
212 init_rt_rq(rt_rq);
213 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
214 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
215 }
216
217 return 1;
218
219err_free_rq:
220 kfree(rt_rq);
221err:
222 return 0;
223}
224
225#else
226
227#define rt_entity_is_task(rt_se) (1)
228
229static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
230{
231 return container_of(rt_se, struct task_struct, rt);
232}
233
234static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
235{
236 return container_of(rt_rq, struct rq, rt);
237}
238
239static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
240{
241 struct task_struct *p = rt_task_of(rt_se);
242
243 return task_rq(p);
244}
245
246static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
247{
248 struct rq *rq = rq_of_rt_se(rt_se);
249
250 return &rq->rt;
251}
252
253void free_rt_sched_group(struct task_group *tg) { }
254
255int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
256{
257 return 1;
258}
259#endif
260
261#ifdef CONFIG_SMP
262
263static void pull_rt_task(struct rq *this_rq);
264
265static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
266{
	/* Try to pull RT tasks here if we lower this rq's prio */
268 return rq->rt.highest_prio.curr > prev->prio;
269}
270
271static inline int rt_overloaded(struct rq *rq)
272{
273 return atomic_read(&rq->rd->rto_count);
274}
275
276static inline void rt_set_overload(struct rq *rq)
277{
278 if (!rq->online)
279 return;
280
281 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
291 smp_wmb();
292 atomic_inc(&rq->rd->rto_count);
293}
294
295static inline void rt_clear_overload(struct rq *rq)
296{
297 if (!rq->online)
298 return;
299
	/* the order here really doesn't matter */
301 atomic_dec(&rq->rd->rto_count);
302 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
303}
304
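/*
 * An rq is "overloaded" when it has more than one RT task queued and at
 * least one of them can migrate; keep the root domain's rto state in sync.
 */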
305static void update_rt_migration(struct rt_rq *rt_rq)
306{
307 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
308 if (!rt_rq->overloaded) {
309 rt_set_overload(rq_of_rt_rq(rt_rq));
310 rt_rq->overloaded = 1;
311 }
312 } else if (rt_rq->overloaded) {
313 rt_clear_overload(rq_of_rt_rq(rt_rq));
314 rt_rq->overloaded = 0;
315 }
316}
317
318static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
319{
320 struct task_struct *p;
321
322 if (!rt_entity_is_task(rt_se))
323 return;
324
325 p = rt_task_of(rt_se);
326 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
327
328 rt_rq->rt_nr_total++;
329 if (p->nr_cpus_allowed > 1)
330 rt_rq->rt_nr_migratory++;
331
332 update_rt_migration(rt_rq);
333}
334
335static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
336{
337 struct task_struct *p;
338
339 if (!rt_entity_is_task(rt_se))
340 return;
341
342 p = rt_task_of(rt_se);
343 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
344
345 rt_rq->rt_nr_total--;
346 if (p->nr_cpus_allowed > 1)
347 rt_rq->rt_nr_migratory--;
348
349 update_rt_migration(rt_rq);
350}
351
352static inline int has_pushable_tasks(struct rq *rq)
353{
354 return !plist_head_empty(&rq->rt.pushable_tasks);
355}
356
357static DEFINE_PER_CPU(struct callback_head, rt_push_head);
358static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
359
360static void push_rt_tasks(struct rq *);
361static void pull_rt_task(struct rq *);
362
363static inline void queue_push_tasks(struct rq *rq)
364{
365 if (!has_pushable_tasks(rq))
366 return;
367
368 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
369}
370
371static inline void queue_pull_task(struct rq *rq)
372{
373 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
374}
375
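/* Track tasks that could be pushed to another CPU, ordered by priority. */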
376static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
377{
378 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
379 plist_node_init(&p->pushable_tasks, p->prio);
380 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
381
	/* Update the highest prio pushable task */
383 if (p->prio < rq->rt.highest_prio.next)
384 rq->rt.highest_prio.next = p->prio;
385}
386
387static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
388{
389 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
390
	/* Update the new highest prio pushable task */
392 if (has_pushable_tasks(rq)) {
393 p = plist_first_entry(&rq->rt.pushable_tasks,
394 struct task_struct, pushable_tasks);
395 rq->rt.highest_prio.next = p->prio;
396 } else
397 rq->rt.highest_prio.next = MAX_RT_PRIO;
398}
399
400#else
401
402static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
403{
404}
405
406static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
407{
408}
409
410static inline
411void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
412{
413}
414
415static inline
416void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
417{
418}
419
420static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
421{
422 return false;
423}
424
425static inline void pull_rt_task(struct rq *this_rq)
426{
427}
428
429static inline void queue_push_tasks(struct rq *rq)
430{
431}
432#endif
433
434static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
435static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
436
437static inline int on_rt_rq(struct sched_rt_entity *rt_se)
438{
439 return !list_empty(&rt_se->run_list);
440}
441
442#ifdef CONFIG_RT_GROUP_SCHED
443
444static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
445{
446 if (!rt_rq->tg)
447 return RUNTIME_INF;
448
449 return rt_rq->rt_runtime;
450}
451
452static inline u64 sched_rt_period(struct rt_rq *rt_rq)
453{
454 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
455}
456
457typedef struct task_group *rt_rq_iter_t;
458
459static inline struct task_group *next_task_group(struct task_group *tg)
460{
461 do {
462 tg = list_entry_rcu(tg->list.next,
463 typeof(struct task_group), list);
464 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
465
466 if (&tg->list == &task_groups)
467 tg = NULL;
468
469 return tg;
470}
471
472#define for_each_rt_rq(rt_rq, iter, rq) \
473 for (iter = container_of(&task_groups, typeof(*iter), list); \
474 (iter = next_task_group(iter)) && \
475 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
476
477#define for_each_sched_rt_entity(rt_se) \
478 for (; rt_se; rt_se = rt_se->parent)
479
480static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
481{
482 return rt_se->my_q;
483}
484
485static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
486static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
487
488static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
489{
490 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
491 struct rq *rq = rq_of_rt_rq(rt_rq);
492 struct sched_rt_entity *rt_se;
493
494 int cpu = cpu_of(rq);
495
496 rt_se = rt_rq->tg->rt_se[cpu];
497
498 if (rt_rq->rt_nr_running) {
499 if (!rt_se)
500 enqueue_top_rt_rq(rt_rq);
501 else if (!on_rt_rq(rt_se))
502 enqueue_rt_entity(rt_se, false);
503
504 if (rt_rq->highest_prio.curr < curr->prio)
505 resched_curr(rq);
506 }
507}
508
509static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
510{
511 struct sched_rt_entity *rt_se;
512 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
513
514 rt_se = rt_rq->tg->rt_se[cpu];
515
516 if (!rt_se)
517 dequeue_top_rt_rq(rt_rq);
518 else if (on_rt_rq(rt_se))
519 dequeue_rt_entity(rt_se);
520}
521
522static inline int rt_rq_throttled(struct rt_rq *rt_rq)
523{
524 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
525}
526
527static int rt_se_boosted(struct sched_rt_entity *rt_se)
528{
529 struct rt_rq *rt_rq = group_rt_rq(rt_se);
530 struct task_struct *p;
531
532 if (rt_rq)
533 return !!rt_rq->rt_nr_boosted;
534
535 p = rt_task_of(rt_se);
536 return p->prio != p->normal_prio;
537}
538
539#ifdef CONFIG_SMP
540static inline const struct cpumask *sched_rt_period_mask(void)
541{
542 return this_rq()->rd->span;
543}
544#else
545static inline const struct cpumask *sched_rt_period_mask(void)
546{
547 return cpu_online_mask;
548}
549#endif
550
551static inline
552struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
553{
554 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
555}
556
557static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
558{
559 return &rt_rq->tg->rt_bandwidth;
560}
561
562#else
563
564static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
565{
566 return rt_rq->rt_runtime;
567}
568
569static inline u64 sched_rt_period(struct rt_rq *rt_rq)
570{
571 return ktime_to_ns(def_rt_bandwidth.rt_period);
572}
573
574typedef struct rt_rq *rt_rq_iter_t;
575
576#define for_each_rt_rq(rt_rq, iter, rq) \
577 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
578
579#define for_each_sched_rt_entity(rt_se) \
580 for (; rt_se; rt_se = NULL)
581
582static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
583{
584 return NULL;
585}
586
587static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
588{
589 struct rq *rq = rq_of_rt_rq(rt_rq);
590
591 if (!rt_rq->rt_nr_running)
592 return;
593
594 enqueue_top_rt_rq(rt_rq);
595 resched_curr(rq);
596}
597
598static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
599{
600 dequeue_top_rt_rq(rt_rq);
601}
602
603static inline int rt_rq_throttled(struct rt_rq *rt_rq)
604{
605 return rt_rq->rt_throttled;
606}
607
608static inline const struct cpumask *sched_rt_period_mask(void)
609{
610 return cpu_online_mask;
611}
612
613static inline
614struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
615{
616 return &cpu_rq(cpu)->rt;
617}
618
619static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
620{
621 return &def_rt_bandwidth;
622}
623
624#endif
625
626bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
627{
628 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
629
630 return (hrtimer_active(&rt_b->rt_period_timer) ||
631 rt_rq->rt_time < rt_b->rt_runtime);
632}
633
634#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
638static int do_balance_runtime(struct rt_rq *rt_rq)
639{
640 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
641 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
642 int i, weight, more = 0;
643 u64 rt_period;
644
645 weight = cpumask_weight(rd->span);
646
647 raw_spin_lock(&rt_b->rt_runtime_lock);
648 rt_period = ktime_to_ns(rt_b->rt_period);
649 for_each_cpu(i, rd->span) {
650 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
651 s64 diff;
652
653 if (iter == rt_rq)
654 continue;
655
656 raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate its been disabled and disallow stealing.
		 */
662 if (iter->rt_runtime == RUNTIME_INF)
663 goto next;
664
		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
669 diff = iter->rt_runtime - iter->rt_time;
670 if (diff > 0) {
671 diff = div_u64((u64)diff, weight);
672 if (rt_rq->rt_runtime + diff > rt_period)
673 diff = rt_period - rt_rq->rt_runtime;
674 iter->rt_runtime -= diff;
675 rt_rq->rt_runtime += diff;
676 more = 1;
677 if (rt_rq->rt_runtime == rt_period) {
678 raw_spin_unlock(&iter->rt_runtime_lock);
679 break;
680 }
681 }
682next:
683 raw_spin_unlock(&iter->rt_runtime_lock);
684 }
685 raw_spin_unlock(&rt_b->rt_runtime_lock);
686
687 return more;
688}
689
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
693static void __disable_runtime(struct rq *rq)
694{
695 struct root_domain *rd = rq->rd;
696 rt_rq_iter_t iter;
697 struct rt_rq *rt_rq;
698
699 if (unlikely(!scheduler_running))
700 return;
701
702 for_each_rt_rq(rt_rq, iter, rq) {
703 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
704 s64 want;
705 int i;
706
707 raw_spin_lock(&rt_b->rt_runtime_lock);
708 raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
714 if (rt_rq->rt_runtime == RUNTIME_INF ||
715 rt_rq->rt_runtime == rt_b->rt_runtime)
716 goto balanced;
717 raw_spin_unlock(&rt_rq->rt_runtime_lock);
718
		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
724 want = rt_b->rt_runtime - rt_rq->rt_runtime;
725
		/*
		 * Greedy reclaim, take back as much as we can.
		 */
729 for_each_cpu(i, rd->span) {
730 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
731 s64 diff;
732
			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
736 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
737 continue;
738
739 raw_spin_lock(&iter->rt_runtime_lock);
740 if (want > 0) {
741 diff = min_t(s64, iter->rt_runtime, want);
742 iter->rt_runtime -= diff;
743 want -= diff;
744 } else {
745 iter->rt_runtime -= want;
746 want -= want;
747 }
748 raw_spin_unlock(&iter->rt_runtime_lock);
749
750 if (!want)
751 break;
752 }
753
754 raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
759 BUG_ON(want);
760balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
765 rt_rq->rt_runtime = RUNTIME_INF;
766 rt_rq->rt_throttled = 0;
767 raw_spin_unlock(&rt_rq->rt_runtime_lock);
768 raw_spin_unlock(&rt_b->rt_runtime_lock);
769
		/* Make rt_rq available for pick_next_task() */
771 sched_rt_rq_enqueue(rt_rq);
772 }
773}
774
775static void __enable_runtime(struct rq *rq)
776{
777 rt_rq_iter_t iter;
778 struct rt_rq *rt_rq;
779
780 if (unlikely(!scheduler_running))
781 return;
782
	/*
	 * Reset each runqueue's bandwidth settings.
	 */
786 for_each_rt_rq(rt_rq, iter, rq) {
787 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
788
789 raw_spin_lock(&rt_b->rt_runtime_lock);
790 raw_spin_lock(&rt_rq->rt_runtime_lock);
791 rt_rq->rt_runtime = rt_b->rt_runtime;
792 rt_rq->rt_time = 0;
793 rt_rq->rt_throttled = 0;
794 raw_spin_unlock(&rt_rq->rt_runtime_lock);
795 raw_spin_unlock(&rt_b->rt_runtime_lock);
796 }
797}
798
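/* Borrow runtime from other CPUs if this rt_rq has exhausted its own. */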
799static int balance_runtime(struct rt_rq *rt_rq)
800{
801 int more = 0;
802
803 if (!sched_feat(RT_RUNTIME_SHARE))
804 return more;
805
806 if (rt_rq->rt_time > rt_rq->rt_runtime) {
807 raw_spin_unlock(&rt_rq->rt_runtime_lock);
808 more = do_balance_runtime(rt_rq);
809 raw_spin_lock(&rt_rq->rt_runtime_lock);
810 }
811
812 return more;
813}
814#else
815static inline int balance_runtime(struct rt_rq *rt_rq)
816{
817 return 0;
818}
819#endif
820
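/*
 * Called when the bandwidth period expires: refill each rt_rq's runtime,
 * unthrottle and re-enqueue where possible.  Returns 1 when everything is
 * idle and the period timer can be stopped.
 */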
821static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
822{
823 int i, idle = 1, throttled = 0;
824 const struct cpumask *span;
825
826 span = sched_rt_period_mask();
827#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled.  If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway.  Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
837 if (rt_b == &root_task_group.rt_bandwidth)
838 span = cpu_online_mask;
839#endif
840 for_each_cpu(i, span) {
841 int enqueue = 0;
842 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
843 struct rq *rq = rq_of_rt_rq(rt_rq);
844
845 raw_spin_lock(&rq->lock);
846 if (rt_rq->rt_time) {
847 u64 runtime;
848
849 raw_spin_lock(&rt_rq->rt_runtime_lock);
850 if (rt_rq->rt_throttled)
851 balance_runtime(rt_rq);
852 runtime = rt_rq->rt_runtime;
853 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
854 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
855 rt_rq->rt_throttled = 0;
856 enqueue = 1;
857
				/*
				 * When we're idle and a woken (rt) task is
				 * throttled, check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
865 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
866 rq_clock_skip_update(rq, false);
867 }
868 if (rt_rq->rt_time || rt_rq->rt_nr_running)
869 idle = 0;
870 raw_spin_unlock(&rt_rq->rt_runtime_lock);
871 } else if (rt_rq->rt_nr_running) {
872 idle = 0;
873 if (!rt_rq_throttled(rt_rq))
874 enqueue = 1;
875 }
876 if (rt_rq->rt_throttled)
877 throttled = 1;
878
879 if (enqueue)
880 sched_rt_rq_enqueue(rt_rq);
881 raw_spin_unlock(&rq->lock);
882 }
883
884 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
885 return 1;
886
887 return idle;
888}
889
890static inline int rt_se_prio(struct sched_rt_entity *rt_se)
891{
892#ifdef CONFIG_RT_GROUP_SCHED
893 struct rt_rq *rt_rq = group_rt_rq(rt_se);
894
895 if (rt_rq)
896 return rt_rq->highest_prio.curr;
897#endif
898
899 return rt_task_of(rt_se)->prio;
900}
901
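/*
 * Check whether this rt_rq has used up its runtime for the current period;
 * if so, throttle it (dequeue it from its parent) until replenishment.
 */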
902static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
903{
904 u64 runtime = sched_rt_runtime(rt_rq);
905
906 if (rt_rq->rt_throttled)
907 return rt_rq_throttled(rt_rq);
908
909 if (runtime >= sched_rt_period(rt_rq))
910 return 0;
911
912 balance_runtime(rt_rq);
913 runtime = sched_rt_runtime(rt_rq);
914 if (runtime == RUNTIME_INF)
915 return 0;
916
917 if (rt_rq->rt_time > runtime) {
918 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
919
		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
924 if (likely(rt_b->rt_runtime)) {
925 rt_rq->rt_throttled = 1;
926 printk_deferred_once("sched: RT throttling activated\n");
927 } else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
933 rt_rq->rt_time = 0;
934 }
935
936 if (rt_rq_throttled(rt_rq)) {
937 sched_rt_rq_dequeue(rt_rq);
938 return 1;
939 }
940 }
941
942 return 0;
943}
944
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
949static void update_curr_rt(struct rq *rq)
950{
951 struct task_struct *curr = rq->curr;
952 struct sched_rt_entity *rt_se = &curr->rt;
953 u64 delta_exec;
954
955 if (curr->sched_class != &rt_sched_class)
956 return;
957
958 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
959 if (unlikely((s64)delta_exec <= 0))
960 return;
961
962 schedstat_set(curr->se.statistics.exec_max,
963 max(curr->se.statistics.exec_max, delta_exec));
964
965 curr->se.sum_exec_runtime += delta_exec;
966 account_group_exec_runtime(curr, delta_exec);
967
968 curr->se.exec_start = rq_clock_task(rq);
969 cpuacct_charge(curr, delta_exec);
970
971 sched_rt_avg_update(rq, delta_exec);
972
973 if (!rt_bandwidth_enabled())
974 return;
975
976 for_each_sched_rt_entity(rt_se) {
977 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
978
979 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
980 raw_spin_lock(&rt_rq->rt_runtime_lock);
981 rt_rq->rt_time += delta_exec;
982 if (sched_rt_runtime_exceeded(rt_rq))
983 resched_curr(rq);
984 raw_spin_unlock(&rt_rq->rt_runtime_lock);
985 }
986 }
987}
988
989static void
990dequeue_top_rt_rq(struct rt_rq *rt_rq)
991{
992 struct rq *rq = rq_of_rt_rq(rt_rq);
993
994 BUG_ON(&rq->rt != rt_rq);
995
996 if (!rt_rq->rt_queued)
997 return;
998
999 BUG_ON(!rq->nr_running);
1000
1001 sub_nr_running(rq, rt_rq->rt_nr_running);
1002 rt_rq->rt_queued = 0;
1003}
1004
1005static void
1006enqueue_top_rt_rq(struct rt_rq *rt_rq)
1007{
1008 struct rq *rq = rq_of_rt_rq(rt_rq);
1009
1010 BUG_ON(&rq->rt != rt_rq);
1011
1012 if (rt_rq->rt_queued)
1013 return;
1014 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1015 return;
1016
1017 add_nr_running(rq, rt_rq->rt_nr_running);
1018 rt_rq->rt_queued = 1;
1019}
1020
1021#if defined CONFIG_SMP
1022
1023static void
1024inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1025{
1026 struct rq *rq = rq_of_rt_rq(rt_rq);
1027
1028#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
1032 if (&rq->rt != rt_rq)
1033 return;
1034#endif
1035 if (rq->online && prio < prev_prio)
1036 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1037}
1038
1039static void
1040dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1041{
1042 struct rq *rq = rq_of_rt_rq(rt_rq);
1043
1044#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
1048 if (&rq->rt != rt_rq)
1049 return;
1050#endif
1051 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1052 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1053}
1054
1055#else
1056
1057static inline
1058void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1059static inline
1060void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1061
1062#endif
1063
1064#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1065static void
1066inc_rt_prio(struct rt_rq *rt_rq, int prio)
1067{
1068 int prev_prio = rt_rq->highest_prio.curr;
1069
1070 if (prio < prev_prio)
1071 rt_rq->highest_prio.curr = prio;
1072
1073 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1074}
1075
1076static void
1077dec_rt_prio(struct rt_rq *rt_rq, int prio)
1078{
1079 int prev_prio = rt_rq->highest_prio.curr;
1080
1081 if (rt_rq->rt_nr_running) {
1082
1083 WARN_ON(prio < prev_prio);
1084
		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
1089 if (prio == prev_prio) {
1090 struct rt_prio_array *array = &rt_rq->active;
1091
1092 rt_rq->highest_prio.curr =
1093 sched_find_first_bit(array->bitmap);
1094 }
1095
1096 } else
1097 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1098
1099 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1100}
1101
1102#else
1103
1104static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1105static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1106
1107#endif
1108
1109#ifdef CONFIG_RT_GROUP_SCHED
1110
1111static void
1112inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1113{
1114 if (rt_se_boosted(rt_se))
1115 rt_rq->rt_nr_boosted++;
1116
1117 if (rt_rq->tg)
1118 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1119}
1120
1121static void
1122dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1123{
1124 if (rt_se_boosted(rt_se))
1125 rt_rq->rt_nr_boosted--;
1126
1127 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1128}
1129
1130#else
1131
1132static void
1133inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1134{
1135 start_rt_bandwidth(&def_rt_bandwidth);
1136}
1137
1138static inline
1139void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1140
1141#endif
1142
1143static inline
1144unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1145{
1146 struct rt_rq *group_rq = group_rt_rq(rt_se);
1147
1148 if (group_rq)
1149 return group_rq->rt_nr_running;
1150 else
1151 return 1;
1152}
1153
1154static inline
1155void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1156{
1157 int prio = rt_se_prio(rt_se);
1158
1159 WARN_ON(!rt_prio(prio));
1160 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1161
1162 inc_rt_prio(rt_rq, prio);
1163 inc_rt_migration(rt_se, rt_rq);
1164 inc_rt_group(rt_se, rt_rq);
1165}
1166
1167static inline
1168void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1169{
1170 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1171 WARN_ON(!rt_rq->rt_nr_running);
1172 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1173
1174 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1175 dec_rt_migration(rt_se, rt_rq);
1176 dec_rt_group(rt_se, rt_rq);
1177}
1178
1179static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1180{
1181 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1182 struct rt_prio_array *array = &rt_rq->active;
1183 struct rt_rq *group_rq = group_rt_rq(rt_se);
1184 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1185
	/*
	 * Don't enqueue the group if its throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
1192 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1193 return;
1194
1195 if (head)
1196 list_add(&rt_se->run_list, queue);
1197 else
1198 list_add_tail(&rt_se->run_list, queue);
1199 __set_bit(rt_se_prio(rt_se), array->bitmap);
1200
1201 inc_rt_tasks(rt_se, rt_rq);
1202}
1203
1204static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1205{
1206 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1207 struct rt_prio_array *array = &rt_rq->active;
1208
1209 list_del_init(&rt_se->run_list);
1210 if (list_empty(array->queue + rt_se_prio(rt_se)))
1211 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1212
1213 dec_rt_tasks(rt_se, rt_rq);
1214}
1215
/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
1220static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1221{
1222 struct sched_rt_entity *back = NULL;
1223
1224 for_each_sched_rt_entity(rt_se) {
1225 rt_se->back = back;
1226 back = rt_se;
1227 }
1228
1229 dequeue_top_rt_rq(rt_rq_of_se(back));
1230
1231 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1232 if (on_rt_rq(rt_se))
1233 __dequeue_rt_entity(rt_se);
1234 }
1235}
1236
1237static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1238{
1239 struct rq *rq = rq_of_rt_se(rt_se);
1240
1241 dequeue_rt_stack(rt_se);
1242 for_each_sched_rt_entity(rt_se)
1243 __enqueue_rt_entity(rt_se, head);
1244 enqueue_top_rt_rq(&rq->rt);
1245}
1246
1247static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1248{
1249 struct rq *rq = rq_of_rt_se(rt_se);
1250
1251 dequeue_rt_stack(rt_se);
1252
1253 for_each_sched_rt_entity(rt_se) {
1254 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1255
1256 if (rt_rq && rt_rq->rt_nr_running)
1257 __enqueue_rt_entity(rt_se, false);
1258 }
1259 enqueue_top_rt_rq(&rq->rt);
1260}
1261
/*
 * Adding/removing a task to/from a priority array:
 */
1265static void
1266enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1267{
1268 struct sched_rt_entity *rt_se = &p->rt;
1269
1270 if (flags & ENQUEUE_WAKEUP)
1271 rt_se->timeout = 0;
1272
1273 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1274
1275 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1276 enqueue_pushable_task(rq, p);
1277}
1278
1279static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1280{
1281 struct sched_rt_entity *rt_se = &p->rt;
1282
1283 update_curr_rt(rq);
1284 dequeue_rt_entity(rt_se);
1285
1286 dequeue_pushable_task(rq, p);
1287}
1288
/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
1293static void
1294requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1295{
1296 if (on_rt_rq(rt_se)) {
1297 struct rt_prio_array *array = &rt_rq->active;
1298 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1299
1300 if (head)
1301 list_move(&rt_se->run_list, queue);
1302 else
1303 list_move_tail(&rt_se->run_list, queue);
1304 }
1305}
1306
1307static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1308{
1309 struct sched_rt_entity *rt_se = &p->rt;
1310 struct rt_rq *rt_rq;
1311
1312 for_each_sched_rt_entity(rt_se) {
1313 rt_rq = rt_rq_of_se(rt_se);
1314 requeue_rt_entity(rt_rq, rt_se, head);
1315 }
1316}
1317
1318static void yield_task_rt(struct rq *rq)
1319{
1320 requeue_task_rt(rq, rq->curr, 0);
1321}
1322
1323#ifdef CONFIG_SMP
1324static int find_lowest_rq(struct task_struct *task);
1325
1326static int
1327select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1328{
1329 struct task_struct *curr;
1330 struct rq *rq;
1331
	/* For anything but wake ups, just return the task_cpu */
1333 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1334 goto out;
1335
1336 rq = cpu_rq(cpu);
1337
1338 rcu_read_lock();
1339 curr = READ_ONCE(rq->curr);

	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps.
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 */
1363 if (curr && unlikely(rt_task(curr)) &&
1364 (curr->nr_cpus_allowed < 2 ||
1365 curr->prio <= p->prio)) {
1366 int target = find_lowest_rq(p);
1367
		/*
		 * Don't bother moving it if the destination cpu is
		 * not running a lower priority task.
		 */
1372 if (target != -1 &&
1373 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1374 cpu = target;
1375 }
1376 rcu_read_unlock();
1377
1378out:
1379 return cpu;
1380}
1381
1382static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1383{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
1388 if (rq->curr->nr_cpus_allowed == 1 ||
1389 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1390 return;
1391
	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
1396 if (p->nr_cpus_allowed != 1
1397 && cpupri_find(&rq->rd->cpupri, p, NULL))
1398 return;
1399
	/*
	 * There appears to be other cpus that can accept
	 * current and none to run 'p', so lets reschedule
	 * to try and push current away:
	 */
1405 requeue_task_rt(rq, p, 1);
1406 resched_curr(rq);
1407}
1408
1409#endif
1410
/*
 * Preempt the current task with a newly woken task if needed:
 */
1414static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1415{
1416 if (p->prio < rq->curr->prio) {
1417 resched_curr(rq);
1418 return;
1419 }
1420
1421#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu.  If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
1434 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1435 check_preempt_equal_prio(rq, p);
1436#endif
1437}
1438
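/* Pick the first entity on the highest-priority non-empty queue. */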
1439static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1440 struct rt_rq *rt_rq)
1441{
1442 struct rt_prio_array *array = &rt_rq->active;
1443 struct sched_rt_entity *next = NULL;
1444 struct list_head *queue;
1445 int idx;
1446
1447 idx = sched_find_first_bit(array->bitmap);
1448 BUG_ON(idx >= MAX_RT_PRIO);
1449
1450 queue = array->queue + idx;
1451 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1452
1453 return next;
1454}
1455
1456static struct task_struct *_pick_next_task_rt(struct rq *rq)
1457{
1458 struct sched_rt_entity *rt_se;
1459 struct task_struct *p;
1460 struct rt_rq *rt_rq = &rq->rt;
1461
1462 do {
1463 rt_se = pick_next_rt_entity(rq, rt_rq);
1464 BUG_ON(!rt_se);
1465 rt_rq = group_rt_rq(rt_se);
1466 } while (rt_rq);
1467
1468 p = rt_task_of(rt_se);
1469 p->se.exec_start = rq_clock_task(rq);
1470
1471 return p;
1472}
1473
1474static struct task_struct *
1475pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1476{
1477 struct task_struct *p;
1478 struct rt_rq *rt_rq = &rq->rt;
1479
1480 if (need_pull_rt_task(rq, prev)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we're
		 * being very careful to re-start the picking loop.
		 */
1487 lockdep_unpin_lock(&rq->lock);
1488 pull_rt_task(rq);
1489 lockdep_pin_lock(&rq->lock);
1490
		/*
		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
		 * means a dl or stop task can slip in, in which case we
		 * need to re-start task selection.
		 */
1495 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1496 rq->dl.dl_nr_running))
1497 return RETRY_TASK;
1498 }
1499
	/*
	 * We may dequeue prev's rt_rq in put_prev_task().
	 * So, we update time before rt_nr_running check.
	 */
1504 if (prev->sched_class == &rt_sched_class)
1505 update_curr_rt(rq);
1506
1507 if (!rt_rq->rt_queued)
1508 return NULL;
1509
1510 put_prev_task(rq, prev);
1511
1512 p = _pick_next_task_rt(rq);
1513
	/* The running task is never eligible for pushing */
1515 dequeue_pushable_task(rq, p);
1516
1517 queue_push_tasks(rq);
1518
1519 return p;
1520}
1521
1522static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1523{
1524 update_curr_rt(rq);
1525
	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
1530 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1531 enqueue_pushable_task(rq, p);
1532}
1533
1534#ifdef CONFIG_SMP
1535
/* Only try algorithms three times */
1537#define RT_MAX_TRIES 3
1538
1539static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1540{
1541 if (!task_running(rq, p) &&
1542 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1543 return 1;
1544 return 0;
1545}
1546
/*
 * Return the highest pushable rq's task, which is suitable to be executed
 * on the cpu, NULL otherwise
 */
1551static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1552{
1553 struct plist_head *head = &rq->rt.pushable_tasks;
1554 struct task_struct *p;
1555
1556 if (!has_pushable_tasks(rq))
1557 return NULL;
1558
1559 plist_for_each_entry(p, head, pushable_tasks) {
1560 if (pick_rt_task(rq, p, cpu))
1561 return p;
1562 }
1563
1564 return NULL;
1565}
1566
1567static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1568
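/*
 * Find a CPU whose highest RT priority is lower than @task's, preferring
 * the task's previous CPU and CPUs that are topologically close to it.
 */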
1569static int find_lowest_rq(struct task_struct *task)
1570{
1571 struct sched_domain *sd;
1572 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1573 int this_cpu = smp_processor_id();
1574 int cpu = task_cpu(task);
1575
	/* Make sure the mask is initialized first */
1577 if (unlikely(!lowest_mask))
1578 return -1;
1579
1580 if (task->nr_cpus_allowed == 1)
1581 return -1;
1582
1583 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1584 return -1;
1585
	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
1594 if (cpumask_test_cpu(cpu, lowest_mask))
1595 return cpu;
1596
	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
1601 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1602 this_cpu = -1;
1603
1604 rcu_read_lock();
1605 for_each_domain(cpu, sd) {
1606 if (sd->flags & SD_WAKE_AFFINE) {
1607 int best_cpu;
1608
			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
1613 if (this_cpu != -1 &&
1614 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1615 rcu_read_unlock();
1616 return this_cpu;
1617 }
1618
1619 best_cpu = cpumask_first_and(lowest_mask,
1620 sched_domain_span(sd));
1621 if (best_cpu < nr_cpu_ids) {
1622 rcu_read_unlock();
1623 return best_cpu;
1624 }
1625 }
1626 }
1627 rcu_read_unlock();
1628
	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
1634 if (this_cpu != -1)
1635 return this_cpu;
1636
1637 cpu = cpumask_any(lowest_mask);
1638 if (cpu < nr_cpu_ids)
1639 return cpu;
1640 return -1;
1641}
1642
/* Will lock the rq it finds */
1644static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1645{
1646 struct rq *lowest_rq = NULL;
1647 int tries;
1648 int cpu;
1649
1650 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1651 cpu = find_lowest_rq(task);
1652
1653 if ((cpu == -1) || (cpu == rq->cpu))
1654 break;
1655
1656 lowest_rq = cpu_rq(cpu);
1657
1658 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
			/*
			 * Target rq has tasks of equal or higher priority,
			 * retrying does not release any lock and is unlikely
			 * to yield a better result.
			 */
1664 lowest_rq = NULL;
1665 break;
1666 }
1667
		/* if the prio of this runqueue changed, try again */
1669 if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
1676 if (unlikely(task_rq(task) != rq ||
1677 !cpumask_test_cpu(lowest_rq->cpu,
1678 tsk_cpus_allowed(task)) ||
1679 task_running(rq, task) ||
1680 !task_on_rq_queued(task))) {
1681
1682 double_unlock_balance(rq, lowest_rq);
1683 lowest_rq = NULL;
1684 break;
1685 }
1686 }
1687
		/* If this rq is still suitable use it. */
1689 if (lowest_rq->rt.highest_prio.curr > task->prio)
1690 break;
1691
		/* try again */
1693 double_unlock_balance(rq, lowest_rq);
1694 lowest_rq = NULL;
1695 }
1696
1697 return lowest_rq;
1698}
1699
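/* Return the highest-priority queued task that is eligible for pushing. */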
1700static struct task_struct *pick_next_pushable_task(struct rq *rq)
1701{
1702 struct task_struct *p;
1703
1704 if (!has_pushable_tasks(rq))
1705 return NULL;
1706
1707 p = plist_first_entry(&rq->rt.pushable_tasks,
1708 struct task_struct, pushable_tasks);
1709
1710 BUG_ON(rq->cpu != task_cpu(p));
1711 BUG_ON(task_current(rq, p));
1712 BUG_ON(p->nr_cpus_allowed <= 1);
1713
1714 BUG_ON(!task_on_rq_queued(p));
1715 BUG_ON(!rt_task(p));
1716
1717 return p;
1718}
1719
/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
1725static int push_rt_task(struct rq *rq)
1726{
1727 struct task_struct *next_task;
1728 struct rq *lowest_rq;
1729 int ret = 0;
1730
1731 if (!rq->rt.overloaded)
1732 return 0;
1733
1734 next_task = pick_next_pushable_task(rq);
1735 if (!next_task)
1736 return 0;
1737
1738retry:
1739 if (unlikely(next_task == rq->curr)) {
1740 WARN_ON(1);
1741 return 0;
1742 }
1743
	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
1749 if (unlikely(next_task->prio < rq->curr->prio)) {
1750 resched_curr(rq);
1751 return 0;
1752 }
1753
1754
1755 get_task_struct(next_task);
1756
1757
1758 lowest_rq = find_lock_lowest_rq(next_task, rq);
1759 if (!lowest_rq) {
1760 struct task_struct *task;
1761
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
1769 task = pick_next_pushable_task(rq);
1770 if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task hasn't migrated, and is still the next
			 * eligible task, but we failed to find a run queue
			 * to push it to.  Do not retry in this case, since
			 * other cpus will pull from us when ready.
			 */
1777 goto out;
1778 }
1779
1780 if (!task)
			/* No more tasks, just exit */
1782 goto out;
1783
		/*
		 * Something has shifted, try again.
		 */
1787 put_task_struct(next_task);
1788 next_task = task;
1789 goto retry;
1790 }
1791
1792 deactivate_task(rq, next_task, 0);
1793 set_task_cpu(next_task, lowest_rq->cpu);
1794 activate_task(lowest_rq, next_task, 0);
1795 ret = 1;
1796
1797 resched_curr(lowest_rq);
1798
1799 double_unlock_balance(rq, lowest_rq);
1800
1801out:
1802 put_task_struct(next_task);
1803
1804 return ret;
1805}
1806
1807static void push_rt_tasks(struct rq *rq)
1808{
	/* push_rt_task() will return true if it moved an RT task */
1810 while (push_rt_task(rq))
1811 ;
1812}
1813
1814#ifdef HAVE_RT_PUSH_IPI

/*
 * The search for the next cpu always starts at rq->cpu and ends
 * when we reach rq->cpu again. It will never return rq->cpu.
 * This returns the next cpu to check, or nr_cpu_ids if the loop
 * is complete.
 *
 * rq->rt.push_cpu holds the last cpu returned by this function,
 * or if this is the first instance, it must hold rq->cpu.
 */
1824static int rto_next_cpu(struct rq *rq)
1825{
1826 int prev_cpu = rq->rt.push_cpu;
1827 int cpu;
1828
1829 cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1830
	/*
	 * If the previous cpu is less than the rq's CPU, then it already
	 * passed the end of the mask, and has started from the beginning.
	 * We end if the next cpu is greater or equal to rq's CPU.
	 */
1836 if (prev_cpu < rq->cpu) {
1837 if (cpu >= rq->cpu)
1838 return nr_cpu_ids;
1839
1840 } else if (cpu >= nr_cpu_ids) {
		/*
		 * We passed the end of the mask, start at the beginning.
		 * If the result is greater or equal to the rq's CPU, then
		 * the loop is finished.
		 */
1846 cpu = cpumask_first(rq->rd->rto_mask);
1847 if (cpu >= rq->cpu)
1848 return nr_cpu_ids;
1849 }
1850 rq->rt.push_cpu = cpu;
1851
1852
1853 return cpu;
1854}
1855
1856static int find_next_push_cpu(struct rq *rq)
1857{
1858 struct rq *next_rq;
1859 int cpu;
1860
1861 while (1) {
1862 cpu = rto_next_cpu(rq);
1863 if (cpu >= nr_cpu_ids)
1864 break;
1865 next_rq = cpu_rq(cpu);
1866
1867
1868 if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1869 break;
1870 }
1871
1872 return cpu;
1873}
1874
1875#define RT_PUSH_IPI_EXECUTING 1
1876#define RT_PUSH_IPI_RESTART 2
1877
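/*
 * Instead of pulling (and taking remote rq locks) under RT_PUSH_IPI, queue an
 * irq_work on the next overloaded CPU and let it push tasks towards us.
 */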
1878static void tell_cpu_to_push(struct rq *rq)
1879{
1880 int cpu;
1881
1882 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1883 raw_spin_lock(&rq->rt.push_lock);
		/* Make sure it's still executing */
1885 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
			/*
			 * Tell the IPI to restart the loop as things have
			 * changed since it started.
			 */
1890 rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1891 raw_spin_unlock(&rq->rt.push_lock);
1892 return;
1893 }
1894 raw_spin_unlock(&rq->rt.push_lock);
1895 }
1896
1897
1898
1899 rq->rt.push_cpu = rq->cpu;
1900 cpu = find_next_push_cpu(rq);
1901 if (cpu >= nr_cpu_ids)
1902 return;
1903
1904 rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1905
1906 irq_work_queue_on(&rq->rt.push_work, cpu);
1907}
1908
1909
1910static void try_to_push_tasks(void *arg)
1911{
1912 struct rt_rq *rt_rq = arg;
1913 struct rq *rq, *src_rq;
1914 int this_cpu;
1915 int cpu;
1916
1917 this_cpu = rt_rq->push_cpu;
1918
1919
1920 BUG_ON(this_cpu != smp_processor_id());
1921
1922 rq = cpu_rq(this_cpu);
1923 src_rq = rq_of_rt_rq(rt_rq);
1924
1925again:
1926 if (has_pushable_tasks(rq)) {
1927 raw_spin_lock(&rq->lock);
1928 push_rt_task(rq);
1929 raw_spin_unlock(&rq->lock);
1930 }
1931
	/* Pass the IPI to the next rt overloaded queue */
1933 raw_spin_lock(&rt_rq->push_lock);
	/*
	 * If the source queue changed since the IPI went out,
	 * we need to restart the search from that CPU again.
	 */
1938 if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
1939 rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
1940 rt_rq->push_cpu = src_rq->cpu;
1941 }
1942
1943 cpu = find_next_push_cpu(src_rq);
1944
1945 if (cpu >= nr_cpu_ids)
1946 rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
1947 raw_spin_unlock(&rt_rq->push_lock);
1948
1949 if (cpu >= nr_cpu_ids)
1950 return;
1951
	/*
	 * It is possible that a restart caused this CPU to be
	 * chosen again. Don't bother with an IPI, just see if we
	 * have more to push.
	 */
1957 if (unlikely(cpu == rq->cpu))
1958 goto again;
1959
1960
1961 irq_work_queue_on(&rt_rq->push_work, cpu);
1962}
1963
1964static void push_irq_work_func(struct irq_work *work)
1965{
1966 struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
1967
1968 try_to_push_tasks(rt_rq);
1969}
1970#endif
1971
1972static void pull_rt_task(struct rq *this_rq)
1973{
1974 int this_cpu = this_rq->cpu, cpu;
1975 bool resched = false;
1976 struct task_struct *p;
1977 struct rq *src_rq;
1978
1979 if (likely(!rt_overloaded(this_rq)))
1980 return;
1981
	/*
	 * Match the barrier in rt_set_overload(); this guarantees that if we
	 * see overloaded we must also see the rto_mask bit.
	 */
1986 smp_rmb();
1987
1988#ifdef HAVE_RT_PUSH_IPI
1989 if (sched_feat(RT_PUSH_IPI)) {
1990 tell_cpu_to_push(this_rq);
1991 return;
1992 }
1993#endif
1994
1995 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1996 if (this_cpu == cpu)
1997 continue;
1998
1999 src_rq = cpu_rq(cpu);
2000
		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if its going logically lower, we do not care
		 */
2008 if (src_rq->rt.highest_prio.next >=
2009 this_rq->rt.highest_prio.curr)
2010 continue;
2011
		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
2017 double_lock_balance(this_rq, src_rq);
2018
		/*
		 * We can pull only a task, which is pushable
		 * on its rq, and no others.
		 */
2023 p = pick_highest_pushable_task(src_rq, this_cpu);
2024
		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
2029 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2030 WARN_ON(p == src_rq->curr);
2031 WARN_ON(!task_on_rq_queued(p));
2032
			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue
			 */
2041 if (p->prio < src_rq->curr->prio)
2042 goto skip;
2043
2044 resched = true;
2045
2046 deactivate_task(src_rq, p, 0);
2047 set_task_cpu(p, this_cpu);
2048 activate_task(this_rq, p, 0);
2049
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
2055 }
2056skip:
2057 double_unlock_balance(this_rq, src_rq);
2058 }
2059
2060 if (resched)
2061 resched_curr(this_rq);
2062}
2063
/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
2068static void task_woken_rt(struct rq *rq, struct task_struct *p)
2069{
2070 if (!task_running(rq, p) &&
2071 !test_tsk_need_resched(rq->curr) &&
2072 p->nr_cpus_allowed > 1 &&
2073 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2074 (rq->curr->nr_cpus_allowed < 2 ||
2075 rq->curr->prio <= p->prio))
2076 push_rt_tasks(rq);
2077}
2078
/* Assumes rq->lock is held */
2080static void rq_online_rt(struct rq *rq)
2081{
2082 if (rq->rt.overloaded)
2083 rt_set_overload(rq);
2084
2085 __enable_runtime(rq);
2086
2087 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2088}
2089
/* Assumes rq->lock is held */
2091static void rq_offline_rt(struct rq *rq)
2092{
2093 if (rq->rt.overloaded)
2094 rt_clear_overload(rq);
2095
2096 __disable_runtime(rq);
2097
2098 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2099}
2100
/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
2105static void switched_from_rt(struct rq *rq, struct task_struct *p)
2106{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we don't need to do anything.
	 */
2114 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2115 return;
2116
2117 queue_pull_task(rq);
2118}
2119
2120void __init init_sched_rt_class(void)
2121{
2122 unsigned int i;
2123
2124 for_each_possible_cpu(i) {
2125 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2126 GFP_KERNEL, cpu_to_node(i));
2127 }
2128}
2129#endif
2130
/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
2136static void switched_to_rt(struct rq *rq, struct task_struct *p)
2137{
	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
2145 if (task_on_rq_queued(p) && rq->curr != p) {
2146#ifdef CONFIG_SMP
2147 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2148 queue_push_tasks(rq);
2149#else
2150 if (p->prio < rq->curr->prio)
2151 resched_curr(rq);
2152#endif
2153 }
2154}
2155
/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
2160static void
2161prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2162{
2163 if (!task_on_rq_queued(p))
2164 return;
2165
2166 if (rq->curr == p) {
2167#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
2172 if (oldprio < p->prio)
2173 queue_pull_task(rq);
2174
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule.
		 */
2179 if (p->prio > rq->rt.highest_prio.curr)
2180 resched_curr(rq);
2181#else
		/* For UP simply resched on drop of prio */
2183 if (oldprio < p->prio)
2184 resched_curr(rq);
2185#endif
2186 } else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
2192 if (p->prio < rq->curr->prio)
2193 resched_curr(rq);
2194 }
2195}
2196
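/* Enforce RLIMIT_RTTIME: account RT runtime and arm the cputime expiry. */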
2197static void watchdog(struct rq *rq, struct task_struct *p)
2198{
2199 unsigned long soft, hard;
2200
	/* max may change after cur was read, this will be fixed next tick */
2202 soft = task_rlimit(p, RLIMIT_RTTIME);
2203 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2204
2205 if (soft != RLIM_INFINITY) {
2206 unsigned long next;
2207
2208 if (p->rt.watchdog_stamp != jiffies) {
2209 p->rt.timeout++;
2210 p->rt.watchdog_stamp = jiffies;
2211 }
2212
2213 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2214 if (p->rt.timeout > next)
2215 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2216 }
2217}
2218
2219static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2220{
2221 struct sched_rt_entity *rt_se = &p->rt;
2222
2223 update_curr_rt(rq);
2224
2225 watchdog(rq, p);
2226
	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
2231 if (p->policy != SCHED_RR)
2232 return;
2233
2234 if (--p->rt.time_slice)
2235 return;
2236
2237 p->rt.time_slice = sched_rr_timeslice;
2238
	/*
	 * Requeue to the end of queue if we (and all of our ancestors) are not
	 * the only element on the queue
	 */
2243 for_each_sched_rt_entity(rt_se) {
2244 if (rt_se->run_list.prev != rt_se->run_list.next) {
2245 requeue_task_rt(rq, p, 0);
2246 resched_curr(rq);
2247 return;
2248 }
2249 }
2250}
2251
2252static void set_curr_task_rt(struct rq *rq)
2253{
2254 struct task_struct *p = rq->curr;
2255
2256 p->se.exec_start = rq_clock_task(rq);
2257
	/* The running task is never eligible for pushing */
2259 dequeue_pushable_task(rq, p);
2260}
2261
2262static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2263{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
2267 if (task->policy == SCHED_RR)
2268 return sched_rr_timeslice;
2269 else
2270 return 0;
2271}
2272
2273const struct sched_class rt_sched_class = {
2274 .next = &fair_sched_class,
2275 .enqueue_task = enqueue_task_rt,
2276 .dequeue_task = dequeue_task_rt,
2277 .yield_task = yield_task_rt,
2278
2279 .check_preempt_curr = check_preempt_curr_rt,
2280
2281 .pick_next_task = pick_next_task_rt,
2282 .put_prev_task = put_prev_task_rt,
2283
2284#ifdef CONFIG_SMP
2285 .select_task_rq = select_task_rq_rt,
2286
2287 .set_cpus_allowed = set_cpus_allowed_common,
2288 .rq_online = rq_online_rt,
2289 .rq_offline = rq_offline_rt,
2290 .task_woken = task_woken_rt,
2291 .switched_from = switched_from_rt,
2292#endif
2293
2294 .set_curr_task = set_curr_task_rt,
2295 .task_tick = task_tick_rt,
2296
2297 .get_rr_interval = get_rr_interval_rt,
2298
2299 .prio_changed = prio_changed_rt,
2300 .switched_to = switched_to_rt,
2301
2302 .update_curr = update_curr_rt,
2303};
2304
2305#ifdef CONFIG_SCHED_DEBUG
2306extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2307
2308void print_rt_stats(struct seq_file *m, int cpu)
2309{
2310 rt_rq_iter_t iter;
2311 struct rt_rq *rt_rq;
2312
2313 rcu_read_lock();
2314 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2315 print_rt_rq(m, cpu, rt_rq);
2316 rcu_read_unlock();
2317}
2318#endif
2319