/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

6#include "sched.h"
7
8#include <linux/slab.h>
9#include <linux/irq_work.h>
10
11int sched_rr_timeslice = RR_TIMESLICE;
12
13static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
14
15struct rt_bandwidth def_rt_bandwidth;
16
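/*
 * Periodic replenishment timer for an rt_bandwidth: each rt_period it
 * refreshes the runtime of the served rt_rqs via do_sched_rt_period_timer()
 * and stops itself (HRTIMER_NORESTART) once all of them have gone idle.
 */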
17static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
18{
19 struct rt_bandwidth *rt_b =
20 container_of(timer, struct rt_bandwidth, rt_period_timer);
21 int idle = 0;
22 int overrun;
23
24 raw_spin_lock(&rt_b->rt_runtime_lock);
25 for (;;) {
26 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
27 if (!overrun)
28 break;
29
30 raw_spin_unlock(&rt_b->rt_runtime_lock);
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 raw_spin_lock(&rt_b->rt_runtime_lock);
33 }
34 if (idle)
35 rt_b->rt_period_active = 0;
36 raw_spin_unlock(&rt_b->rt_runtime_lock);
37
38 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
39}
40
41void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
42{
43 rt_b->rt_period = ns_to_ktime(period);
44 rt_b->rt_runtime = runtime;
45
46 raw_spin_lock_init(&rt_b->rt_runtime_lock);
47
48 hrtimer_init(&rt_b->rt_period_timer,
49 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
50 rt_b->rt_period_timer.function = sched_rt_period_timer;
51}
52
53static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
54{
55 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
56 return;
57
58 raw_spin_lock(&rt_b->rt_runtime_lock);
59 if (!rt_b->rt_period_active) {
60 rt_b->rt_period_active = 1;
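 /*
  * Forward the timer to "now" before arming it: the period may have
  * gone stale while no RT task was running, and an old expiry could
  * throttle RT tasks right as they start up.
  */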
69 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
70 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
71 }
72 raw_spin_unlock(&rt_b->rt_runtime_lock);
73}
74
75#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
76static void push_irq_work_func(struct irq_work *work);
77#endif
78
79void init_rt_rq(struct rt_rq *rt_rq)
80{
81 struct rt_prio_array *array;
82 int i;
83
84 array = &rt_rq->active;
85 for (i = 0; i < MAX_RT_PRIO; i++) {
86 INIT_LIST_HEAD(array->queue + i);
87 __clear_bit(i, array->bitmap);
88 }
89
90 __set_bit(MAX_RT_PRIO, array->bitmap);
91
92#if defined CONFIG_SMP
93 rt_rq->highest_prio.curr = MAX_RT_PRIO;
94 rt_rq->highest_prio.next = MAX_RT_PRIO;
95 rt_rq->rt_nr_migratory = 0;
96 rt_rq->overloaded = 0;
97 plist_head_init(&rt_rq->pushable_tasks);
98
99#ifdef HAVE_RT_PUSH_IPI
100 rt_rq->push_flags = 0;
101 rt_rq->push_cpu = nr_cpu_ids;
102 raw_spin_lock_init(&rt_rq->push_lock);
103 init_irq_work(&rt_rq->push_work, push_irq_work_func);
104#endif
105#endif
106
107 rt_rq->rt_queued = 0;
108
109 rt_rq->rt_time = 0;
110 rt_rq->rt_throttled = 0;
111 rt_rq->rt_runtime = 0;
112 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
113}
114
115#ifdef CONFIG_RT_GROUP_SCHED
116static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
117{
118 hrtimer_cancel(&rt_b->rt_period_timer);
119}
120
121#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
122
123static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
124{
125#ifdef CONFIG_SCHED_DEBUG
126 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
127#endif
128 return container_of(rt_se, struct task_struct, rt);
129}
130
131static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
132{
133 return rt_rq->rq;
134}
135
136static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
137{
138 return rt_se->rt_rq;
139}
140
141static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
142{
143 struct rt_rq *rt_rq = rt_se->rt_rq;
144
145 return rt_rq->rq;
146}
147
148void free_rt_sched_group(struct task_group *tg)
149{
150 int i;
151
152 if (tg->rt_se)
153 destroy_rt_bandwidth(&tg->rt_bandwidth);
154
155 for_each_possible_cpu(i) {
156 if (tg->rt_rq)
157 kfree(tg->rt_rq[i]);
158 if (tg->rt_se)
159 kfree(tg->rt_se[i]);
160 }
161
162 kfree(tg->rt_rq);
163 kfree(tg->rt_se);
164}
165
166void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
167 struct sched_rt_entity *rt_se, int cpu,
168 struct sched_rt_entity *parent)
169{
170 struct rq *rq = cpu_rq(cpu);
171
172 rt_rq->highest_prio.curr = MAX_RT_PRIO;
173 rt_rq->rt_nr_boosted = 0;
174 rt_rq->rq = rq;
175 rt_rq->tg = tg;
176
177 tg->rt_rq[cpu] = rt_rq;
178 tg->rt_se[cpu] = rt_se;
179
180 if (!rt_se)
181 return;
182
183 if (!parent)
184 rt_se->rt_rq = &rq->rt;
185 else
186 rt_se->rt_rq = parent->my_q;
187
188 rt_se->my_q = rt_rq;
189 rt_se->parent = parent;
190 INIT_LIST_HEAD(&rt_se->run_list);
191}
192
193int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
194{
195 struct rt_rq *rt_rq;
196 struct sched_rt_entity *rt_se;
197 int i;
198
199 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
200 if (!tg->rt_rq)
201 goto err;
202 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
203 if (!tg->rt_se)
204 goto err;
205
206 init_rt_bandwidth(&tg->rt_bandwidth,
207 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
208
209 for_each_possible_cpu(i) {
210 rt_rq = kzalloc_node(sizeof(struct rt_rq),
211 GFP_KERNEL, cpu_to_node(i));
212 if (!rt_rq)
213 goto err;
214
215 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
216 GFP_KERNEL, cpu_to_node(i));
217 if (!rt_se)
218 goto err_free_rq;
219
220 init_rt_rq(rt_rq);
221 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
222 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
223 }
224
225 return 1;
226
227err_free_rq:
228 kfree(rt_rq);
229err:
230 return 0;
231}
232
233#else
234
235#define rt_entity_is_task(rt_se) (1)
236
237static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
238{
239 return container_of(rt_se, struct task_struct, rt);
240}
241
242static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
243{
244 return container_of(rt_rq, struct rq, rt);
245}
246
247static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
248{
249 struct task_struct *p = rt_task_of(rt_se);
250
251 return task_rq(p);
252}
253
254static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
255{
256 struct rq *rq = rq_of_rt_se(rt_se);
257
258 return &rq->rt;
259}
260
261void free_rt_sched_group(struct task_group *tg) { }
262
263int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
264{
265 return 1;
266}
267#endif
268
269#ifdef CONFIG_SMP
270
271static void pull_rt_task(struct rq *this_rq);
272
273static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
274{
275
276 return rq->rt.highest_prio.curr > prev->prio;
277}
278
279static inline int rt_overloaded(struct rq *rq)
280{
281 return atomic_read(&rq->rd->rto_count);
282}
283
284static inline void rt_set_overload(struct rq *rq)
285{
286 if (!rq->online)
287 return;
288
289 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
290
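 /*
  * Make sure the rto_mask update is visible before we bump the
  * overload count; rt_overloaded() checks the count to decide
  * whether to look at the mask at all.
  *
  * Matched by the smp_rmb() in pull_rt_task().
  */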
299 smp_wmb();
300 atomic_inc(&rq->rd->rto_count);
301}
302
303static inline void rt_clear_overload(struct rq *rq)
304{
305 if (!rq->online)
306 return;
307
308
309 atomic_dec(&rq->rd->rto_count);
310 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
311}
312
313static void update_rt_migration(struct rt_rq *rt_rq)
314{
315 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
316 if (!rt_rq->overloaded) {
317 rt_set_overload(rq_of_rt_rq(rt_rq));
318 rt_rq->overloaded = 1;
319 }
320 } else if (rt_rq->overloaded) {
321 rt_clear_overload(rq_of_rt_rq(rt_rq));
322 rt_rq->overloaded = 0;
323 }
324}
325
326static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
327{
328 struct task_struct *p;
329
330 if (!rt_entity_is_task(rt_se))
331 return;
332
333 p = rt_task_of(rt_se);
334 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
335
336 rt_rq->rt_nr_total++;
337 if (tsk_nr_cpus_allowed(p) > 1)
338 rt_rq->rt_nr_migratory++;
339
340 update_rt_migration(rt_rq);
341}
342
343static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
344{
345 struct task_struct *p;
346
347 if (!rt_entity_is_task(rt_se))
348 return;
349
350 p = rt_task_of(rt_se);
351 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
352
353 rt_rq->rt_nr_total--;
354 if (tsk_nr_cpus_allowed(p) > 1)
355 rt_rq->rt_nr_migratory--;
356
357 update_rt_migration(rt_rq);
358}
359
360static inline int has_pushable_tasks(struct rq *rq)
361{
362 return !plist_head_empty(&rq->rt.pushable_tasks);
363}
364
365static DEFINE_PER_CPU(struct callback_head, rt_push_head);
366static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
367
368static void push_rt_tasks(struct rq *);
369static void pull_rt_task(struct rq *);
370
371static inline void queue_push_tasks(struct rq *rq)
372{
373 if (!has_pushable_tasks(rq))
374 return;
375
376 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
377}
378
379static inline void queue_pull_task(struct rq *rq)
380{
381 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
382}
383
384static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
385{
386 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
387 plist_node_init(&p->pushable_tasks, p->prio);
388 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
389
390
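 /* Update the highest-prio pushable task, if necessary. */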
391 if (p->prio < rq->rt.highest_prio.next)
392 rq->rt.highest_prio.next = p->prio;
393}
394
395static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
396{
397 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
398
399
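 /* Recompute the next-highest pushable priority now that p is gone. */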
400 if (has_pushable_tasks(rq)) {
401 p = plist_first_entry(&rq->rt.pushable_tasks,
402 struct task_struct, pushable_tasks);
403 rq->rt.highest_prio.next = p->prio;
404 } else
405 rq->rt.highest_prio.next = MAX_RT_PRIO;
406}
407
408#else
409
410static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
411{
412}
413
414static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
415{
416}
417
418static inline
419void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
420{
421}
422
423static inline
424void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
425{
426}
427
428static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
429{
430 return false;
431}
432
433static inline void pull_rt_task(struct rq *this_rq)
434{
435}
436
437static inline void queue_push_tasks(struct rq *rq)
438{
439}
440#endif
441
442static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
443static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
444
445static inline int on_rt_rq(struct sched_rt_entity *rt_se)
446{
447 return rt_se->on_rq;
448}
449
450#ifdef CONFIG_RT_GROUP_SCHED
451
452static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
453{
454 if (!rt_rq->tg)
455 return RUNTIME_INF;
456
457 return rt_rq->rt_runtime;
458}
459
460static inline u64 sched_rt_period(struct rt_rq *rt_rq)
461{
462 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
463}
464
465typedef struct task_group *rt_rq_iter_t;
466
467static inline struct task_group *next_task_group(struct task_group *tg)
468{
469 do {
470 tg = list_entry_rcu(tg->list.next,
471 typeof(struct task_group), list);
472 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
473
474 if (&tg->list == &task_groups)
475 tg = NULL;
476
477 return tg;
478}
479
480#define for_each_rt_rq(rt_rq, iter, rq) \
481 for (iter = container_of(&task_groups, typeof(*iter), list); \
482 (iter = next_task_group(iter)) && \
483 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
484
485#define for_each_sched_rt_entity(rt_se) \
486 for (; rt_se; rt_se = rt_se->parent)
487
488static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
489{
490 return rt_se->my_q;
491}
492
493static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
494static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
495
496static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
497{
498 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
499 struct rq *rq = rq_of_rt_rq(rt_rq);
500 struct sched_rt_entity *rt_se;
501
502 int cpu = cpu_of(rq);
503
504 rt_se = rt_rq->tg->rt_se[cpu];
505
506 if (rt_rq->rt_nr_running) {
507 if (!rt_se)
508 enqueue_top_rt_rq(rt_rq);
509 else if (!on_rt_rq(rt_se))
510 enqueue_rt_entity(rt_se, 0);
511
512 if (rt_rq->highest_prio.curr < curr->prio)
513 resched_curr(rq);
514 }
515}
516
517static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
518{
519 struct sched_rt_entity *rt_se;
520 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
521
522 rt_se = rt_rq->tg->rt_se[cpu];
523
524 if (!rt_se)
525 dequeue_top_rt_rq(rt_rq);
526 else if (on_rt_rq(rt_se))
527 dequeue_rt_entity(rt_se, 0);
528}
529
530static inline int rt_rq_throttled(struct rt_rq *rt_rq)
531{
532 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
533}
534
535static int rt_se_boosted(struct sched_rt_entity *rt_se)
536{
537 struct rt_rq *rt_rq = group_rt_rq(rt_se);
538 struct task_struct *p;
539
540 if (rt_rq)
541 return !!rt_rq->rt_nr_boosted;
542
543 p = rt_task_of(rt_se);
544 return p->prio != p->normal_prio;
545}
546
547#ifdef CONFIG_SMP
548static inline const struct cpumask *sched_rt_period_mask(void)
549{
550 return this_rq()->rd->span;
551}
552#else
553static inline const struct cpumask *sched_rt_period_mask(void)
554{
555 return cpu_online_mask;
556}
557#endif
558
559static inline
560struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
561{
562 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
563}
564
565static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
566{
567 return &rt_rq->tg->rt_bandwidth;
568}
569
570#else
571
572static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
573{
574 return rt_rq->rt_runtime;
575}
576
577static inline u64 sched_rt_period(struct rt_rq *rt_rq)
578{
579 return ktime_to_ns(def_rt_bandwidth.rt_period);
580}
581
582typedef struct rt_rq *rt_rq_iter_t;
583
584#define for_each_rt_rq(rt_rq, iter, rq) \
585 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
586
587#define for_each_sched_rt_entity(rt_se) \
588 for (; rt_se; rt_se = NULL)
589
590static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
591{
592 return NULL;
593}
594
595static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
596{
597 struct rq *rq = rq_of_rt_rq(rt_rq);
598
599 if (!rt_rq->rt_nr_running)
600 return;
601
602 enqueue_top_rt_rq(rt_rq);
603 resched_curr(rq);
604}
605
606static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
607{
608 dequeue_top_rt_rq(rt_rq);
609}
610
611static inline int rt_rq_throttled(struct rt_rq *rt_rq)
612{
613 return rt_rq->rt_throttled;
614}
615
616static inline const struct cpumask *sched_rt_period_mask(void)
617{
618 return cpu_online_mask;
619}
620
621static inline
622struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
623{
624 return &cpu_rq(cpu)->rt;
625}
626
627static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
628{
629 return &def_rt_bandwidth;
630}
631
632#endif
633
634bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
635{
636 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
637
638 return (hrtimer_active(&rt_b->rt_period_timer) ||
639 rt_rq->rt_time < rt_b->rt_runtime);
640}
641
642#ifdef CONFIG_SMP
643
644
645
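/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */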
646static void do_balance_runtime(struct rt_rq *rt_rq)
647{
648 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
649 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
650 int i, weight;
651 u64 rt_period;
652
653 weight = cpumask_weight(rd->span);
654
655 raw_spin_lock(&rt_b->rt_runtime_lock);
656 rt_period = ktime_to_ns(rt_b->rt_period);
657 for_each_cpu(i, rd->span) {
658 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
659 s64 diff;
660
661 if (iter == rt_rq)
662 continue;
663
664 raw_spin_lock(&iter->rt_runtime_lock);
665
666
667
668
669
670 if (iter->rt_runtime == RUNTIME_INF)
671 goto next;
672
673
674
675
676
677 diff = iter->rt_runtime - iter->rt_time;
678 if (diff > 0) {
679 diff = div_u64((u64)diff, weight);
680 if (rt_rq->rt_runtime + diff > rt_period)
681 diff = rt_period - rt_rq->rt_runtime;
682 iter->rt_runtime -= diff;
683 rt_rq->rt_runtime += diff;
684 if (rt_rq->rt_runtime == rt_period) {
685 raw_spin_unlock(&iter->rt_runtime_lock);
686 break;
687 }
688 }
689next:
690 raw_spin_unlock(&iter->rt_runtime_lock);
691 }
692 raw_spin_unlock(&rt_b->rt_runtime_lock);
693}
694
695
696
697
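/*
 * Ensure this rq takes back all the runtime it lent to its neighbours.
 */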
698static void __disable_runtime(struct rq *rq)
699{
700 struct root_domain *rd = rq->rd;
701 rt_rq_iter_t iter;
702 struct rt_rq *rt_rq;
703
704 if (unlikely(!scheduler_running))
705 return;
706
707 for_each_rt_rq(rt_rq, iter, rq) {
708 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
709 s64 want;
710 int i;
711
712 raw_spin_lock(&rt_b->rt_runtime_lock);
713 raw_spin_lock(&rt_rq->rt_runtime_lock);
714
715
716
717
718
719 if (rt_rq->rt_runtime == RUNTIME_INF ||
720 rt_rq->rt_runtime == rt_b->rt_runtime)
721 goto balanced;
722 raw_spin_unlock(&rt_rq->rt_runtime_lock);
723
724
725
726
727
728
729 want = rt_b->rt_runtime - rt_rq->rt_runtime;
730
731
732
733
734 for_each_cpu(i, rd->span) {
735 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
736 s64 diff;
737
738
739
740
741 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
742 continue;
743
744 raw_spin_lock(&iter->rt_runtime_lock);
745 if (want > 0) {
746 diff = min_t(s64, iter->rt_runtime, want);
747 iter->rt_runtime -= diff;
748 want -= diff;
749 } else {
750 iter->rt_runtime -= want;
751 want -= want;
752 }
753 raw_spin_unlock(&iter->rt_runtime_lock);
754
755 if (!want)
756 break;
757 }
758
759 raw_spin_lock(&rt_rq->rt_runtime_lock);
760
761
762
763
764 BUG_ON(want);
765balanced:
766
767
768
769
770 rt_rq->rt_runtime = RUNTIME_INF;
771 rt_rq->rt_throttled = 0;
772 raw_spin_unlock(&rt_rq->rt_runtime_lock);
773 raw_spin_unlock(&rt_b->rt_runtime_lock);
774
775
776 sched_rt_rq_enqueue(rt_rq);
777 }
778}
779
780static void __enable_runtime(struct rq *rq)
781{
782 rt_rq_iter_t iter;
783 struct rt_rq *rt_rq;
784
785 if (unlikely(!scheduler_running))
786 return;
787
788
789
790
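 /* Reset each rt_rq back to its full bandwidth. */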
791 for_each_rt_rq(rt_rq, iter, rq) {
792 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
793
794 raw_spin_lock(&rt_b->rt_runtime_lock);
795 raw_spin_lock(&rt_rq->rt_runtime_lock);
796 rt_rq->rt_runtime = rt_b->rt_runtime;
797 rt_rq->rt_time = 0;
798 rt_rq->rt_throttled = 0;
799 raw_spin_unlock(&rt_rq->rt_runtime_lock);
800 raw_spin_unlock(&rt_b->rt_runtime_lock);
801 }
802}
803
804static void balance_runtime(struct rt_rq *rt_rq)
805{
806 if (!sched_feat(RT_RUNTIME_SHARE))
807 return;
808
809 if (rt_rq->rt_time > rt_rq->rt_runtime) {
810 raw_spin_unlock(&rt_rq->rt_runtime_lock);
811 do_balance_runtime(rt_rq);
812 raw_spin_lock(&rt_rq->rt_runtime_lock);
813 }
814}
815#else
816static inline void balance_runtime(struct rt_rq *rt_rq) {}
817#endif
818
819static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
820{
821 int i, idle = 1, throttled = 0;
822 const struct cpumask *span;
823
824 span = sched_rt_period_mask();
825#ifdef CONFIG_RT_GROUP_SCHED
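 /*
  * For the root group, service every online CPU rather than just this
  * root domain's span, so runqueues on isolated CPUs still get their
  * runtime replenished and are not left throttled indefinitely.
  */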
835 if (rt_b == &root_task_group.rt_bandwidth)
836 span = cpu_online_mask;
837#endif
838 for_each_cpu(i, span) {
839 int enqueue = 0;
840 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
841 struct rq *rq = rq_of_rt_rq(rt_rq);
842
843 raw_spin_lock(&rq->lock);
844 if (rt_rq->rt_time) {
845 u64 runtime;
846
847 raw_spin_lock(&rt_rq->rt_runtime_lock);
848 if (rt_rq->rt_throttled)
849 balance_runtime(rt_rq);
850 runtime = rt_rq->rt_runtime;
851 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
852 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
853 rt_rq->rt_throttled = 0;
854 enqueue = 1;
855
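 /*
  * When we're idle and a woken RT task is throttled,
  * check_preempt_curr() will have set the skip-update flag; force
  * a clock update so the wakeup-to-unthrottle time is not accounted
  * as the task's runtime.
  */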
863 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
864 rq_clock_skip_update(rq, false);
865 }
866 if (rt_rq->rt_time || rt_rq->rt_nr_running)
867 idle = 0;
868 raw_spin_unlock(&rt_rq->rt_runtime_lock);
869 } else if (rt_rq->rt_nr_running) {
870 idle = 0;
871 if (!rt_rq_throttled(rt_rq))
872 enqueue = 1;
873 }
874 if (rt_rq->rt_throttled)
875 throttled = 1;
876
877 if (enqueue)
878 sched_rt_rq_enqueue(rt_rq);
879 raw_spin_unlock(&rq->lock);
880 }
881
882 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
883 return 1;
884
885 return idle;
886}
887
888static inline int rt_se_prio(struct sched_rt_entity *rt_se)
889{
890#ifdef CONFIG_RT_GROUP_SCHED
891 struct rt_rq *rt_rq = group_rt_rq(rt_se);
892
893 if (rt_rq)
894 return rt_rq->highest_prio.curr;
895#endif
896
897 return rt_task_of(rt_se)->prio;
898}
899
900static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
901{
902 u64 runtime = sched_rt_runtime(rt_rq);
903
904 if (rt_rq->rt_throttled)
905 return rt_rq_throttled(rt_rq);
906
907 if (runtime >= sched_rt_period(rt_rq))
908 return 0;
909
910 balance_runtime(rt_rq);
911 runtime = sched_rt_runtime(rt_rq);
912 if (runtime == RUNTIME_INF)
913 return 0;
914
915 if (rt_rq->rt_time > runtime) {
916 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
917
918
919
920
921
922 if (likely(rt_b->rt_runtime)) {
923 rt_rq->rt_throttled = 1;
924 printk_deferred_once("sched: RT throttling activated\n");
925 } else {
926
927
928
929
930
931 rt_rq->rt_time = 0;
932 }
933
934 if (rt_rq_throttled(rt_rq)) {
935 sched_rt_rq_dequeue(rt_rq);
936 return 1;
937 }
938 }
939
940 return 0;
941}
942
943
944
945
946
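/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */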
947static void update_curr_rt(struct rq *rq)
948{
949 struct task_struct *curr = rq->curr;
950 struct sched_rt_entity *rt_se = &curr->rt;
951 u64 delta_exec;
952
953 if (curr->sched_class != &rt_sched_class)
954 return;
955
956 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
957 if (unlikely((s64)delta_exec <= 0))
958 return;
959
960
961 cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
962
963 schedstat_set(curr->se.statistics.exec_max,
964 max(curr->se.statistics.exec_max, delta_exec));
965
966 curr->se.sum_exec_runtime += delta_exec;
967 account_group_exec_runtime(curr, delta_exec);
968
969 curr->se.exec_start = rq_clock_task(rq);
970 cpuacct_charge(curr, delta_exec);
971
972 sched_rt_avg_update(rq, delta_exec);
973
974 if (!rt_bandwidth_enabled())
975 return;
976
977 for_each_sched_rt_entity(rt_se) {
978 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
979
980 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
981 raw_spin_lock(&rt_rq->rt_runtime_lock);
982 rt_rq->rt_time += delta_exec;
983 if (sched_rt_runtime_exceeded(rt_rq))
984 resched_curr(rq);
985 raw_spin_unlock(&rt_rq->rt_runtime_lock);
986 }
987 }
988}
989
990static void
991dequeue_top_rt_rq(struct rt_rq *rt_rq)
992{
993 struct rq *rq = rq_of_rt_rq(rt_rq);
994
995 BUG_ON(&rq->rt != rt_rq);
996
997 if (!rt_rq->rt_queued)
998 return;
999
1000 BUG_ON(!rq->nr_running);
1001
1002 sub_nr_running(rq, rt_rq->rt_nr_running);
1003 rt_rq->rt_queued = 0;
1004}
1005
1006static void
1007enqueue_top_rt_rq(struct rt_rq *rt_rq)
1008{
1009 struct rq *rq = rq_of_rt_rq(rt_rq);
1010
1011 BUG_ON(&rq->rt != rt_rq);
1012
1013 if (rt_rq->rt_queued)
1014 return;
1015 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1016 return;
1017
1018 add_nr_running(rq, rt_rq->rt_nr_running);
1019 rt_rq->rt_queued = 1;
1020}
1021
1022#if defined CONFIG_SMP
1023
1024static void
1025inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1026{
1027 struct rq *rq = rq_of_rt_rq(rt_rq);
1028
1029#ifdef CONFIG_RT_GROUP_SCHED
1030
1031
1032
1033 if (&rq->rt != rt_rq)
1034 return;
1035#endif
1036 if (rq->online && prio < prev_prio)
1037 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1038}
1039
1040static void
1041dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1042{
1043 struct rq *rq = rq_of_rt_rq(rt_rq);
1044
1045#ifdef CONFIG_RT_GROUP_SCHED
1046
1047
1048
1049 if (&rq->rt != rt_rq)
1050 return;
1051#endif
1052 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1053 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1054}
1055
1056#else
1057
1058static inline
1059void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1060static inline
1061void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1062
1063#endif
1064
1065#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1066static void
1067inc_rt_prio(struct rt_rq *rt_rq, int prio)
1068{
1069 int prev_prio = rt_rq->highest_prio.curr;
1070
1071 if (prio < prev_prio)
1072 rt_rq->highest_prio.curr = prio;
1073
1074 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1075}
1076
1077static void
1078dec_rt_prio(struct rt_rq *rt_rq, int prio)
1079{
1080 int prev_prio = rt_rq->highest_prio.curr;
1081
1082 if (rt_rq->rt_nr_running) {
1083
1084 WARN_ON(prio < prev_prio);
1085
1086
1087
1088
1089
1090 if (prio == prev_prio) {
1091 struct rt_prio_array *array = &rt_rq->active;
1092
1093 rt_rq->highest_prio.curr =
1094 sched_find_first_bit(array->bitmap);
1095 }
1096
1097 } else
1098 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1099
1100 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1101}
1102
1103#else
1104
1105static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1106static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1107
1108#endif
1109
1110#ifdef CONFIG_RT_GROUP_SCHED
1111
1112static void
1113inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1114{
1115 if (rt_se_boosted(rt_se))
1116 rt_rq->rt_nr_boosted++;
1117
1118 if (rt_rq->tg)
1119 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1120}
1121
1122static void
1123dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1124{
1125 if (rt_se_boosted(rt_se))
1126 rt_rq->rt_nr_boosted--;
1127
1128 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1129}
1130
1131#else
1132
1133static void
1134inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1135{
1136 start_rt_bandwidth(&def_rt_bandwidth);
1137}
1138
1139static inline
1140void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1141
1142#endif
1143
1144static inline
1145unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1146{
1147 struct rt_rq *group_rq = group_rt_rq(rt_se);
1148
1149 if (group_rq)
1150 return group_rq->rt_nr_running;
1151 else
1152 return 1;
1153}
1154
1155static inline
1156unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1157{
1158 struct rt_rq *group_rq = group_rt_rq(rt_se);
1159 struct task_struct *tsk;
1160
1161 if (group_rq)
1162 return group_rq->rr_nr_running;
1163
1164 tsk = rt_task_of(rt_se);
1165
1166 return (tsk->policy == SCHED_RR) ? 1 : 0;
1167}
1168
1169static inline
1170void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1171{
1172 int prio = rt_se_prio(rt_se);
1173
1174 WARN_ON(!rt_prio(prio));
1175 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1176 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1177
1178 inc_rt_prio(rt_rq, prio);
1179 inc_rt_migration(rt_se, rt_rq);
1180 inc_rt_group(rt_se, rt_rq);
1181}
1182
1183static inline
1184void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1185{
1186 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1187 WARN_ON(!rt_rq->rt_nr_running);
1188 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1189 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1190
1191 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1192 dec_rt_migration(rt_se, rt_rq);
1193 dec_rt_group(rt_se, rt_rq);
1194}
1195
1196
1197
1198
1199
1200
1201static inline bool move_entity(unsigned int flags)
1202{
1203 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1204 return false;
1205
1206 return true;
1207}
1208
1209static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1210{
1211 list_del_init(&rt_se->run_list);
1212
1213 if (list_empty(array->queue + rt_se_prio(rt_se)))
1214 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1215
1216 rt_se->on_list = 0;
1217}
1218
1219static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1220{
1221 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1222 struct rt_prio_array *array = &rt_rq->active;
1223 struct rt_rq *group_rq = group_rt_rq(rt_se);
1224 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1225
1226
1227
1228
1229
1230
1231
1232 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1233 if (rt_se->on_list)
1234 __delist_rt_entity(rt_se, array);
1235 return;
1236 }
1237
1238 if (move_entity(flags)) {
1239 WARN_ON_ONCE(rt_se->on_list);
1240 if (flags & ENQUEUE_HEAD)
1241 list_add(&rt_se->run_list, queue);
1242 else
1243 list_add_tail(&rt_se->run_list, queue);
1244
1245 __set_bit(rt_se_prio(rt_se), array->bitmap);
1246 rt_se->on_list = 1;
1247 }
1248 rt_se->on_rq = 1;
1249
1250 inc_rt_tasks(rt_se, rt_rq);
1251}
1252
1253static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1254{
1255 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1256 struct rt_prio_array *array = &rt_rq->active;
1257
1258 if (move_entity(flags)) {
1259 WARN_ON_ONCE(!rt_se->on_list);
1260 __delist_rt_entity(rt_se, array);
1261 }
1262 rt_se->on_rq = 0;
1263
1264 dec_rt_tasks(rt_se, rt_rq);
1265}
1266
1267
1268
1269
1270
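/*
 * Because the priority of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */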
1271static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1272{
1273 struct sched_rt_entity *back = NULL;
1274
1275 for_each_sched_rt_entity(rt_se) {
1276 rt_se->back = back;
1277 back = rt_se;
1278 }
1279
1280 dequeue_top_rt_rq(rt_rq_of_se(back));
1281
1282 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1283 if (on_rt_rq(rt_se))
1284 __dequeue_rt_entity(rt_se, flags);
1285 }
1286}
1287
1288static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1289{
1290 struct rq *rq = rq_of_rt_se(rt_se);
1291
1292 dequeue_rt_stack(rt_se, flags);
1293 for_each_sched_rt_entity(rt_se)
1294 __enqueue_rt_entity(rt_se, flags);
1295 enqueue_top_rt_rq(&rq->rt);
1296}
1297
1298static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1299{
1300 struct rq *rq = rq_of_rt_se(rt_se);
1301
1302 dequeue_rt_stack(rt_se, flags);
1303
1304 for_each_sched_rt_entity(rt_se) {
1305 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1306
1307 if (rt_rq && rt_rq->rt_nr_running)
1308 __enqueue_rt_entity(rt_se, flags);
1309 }
1310 enqueue_top_rt_rq(&rq->rt);
1311}
1312
1313
1314
1315
1316static void
1317enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1318{
1319 struct sched_rt_entity *rt_se = &p->rt;
1320
1321 if (flags & ENQUEUE_WAKEUP)
1322 rt_se->timeout = 0;
1323
1324 enqueue_rt_entity(rt_se, flags);
1325
1326 if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
1327 enqueue_pushable_task(rq, p);
1328}
1329
1330static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1331{
1332 struct sched_rt_entity *rt_se = &p->rt;
1333
1334 update_curr_rt(rq);
1335 dequeue_rt_entity(rt_se, flags);
1336
1337 dequeue_pushable_task(rq, p);
1338}
1339
1340
1341
1342
1343
1344static void
1345requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1346{
1347 if (on_rt_rq(rt_se)) {
1348 struct rt_prio_array *array = &rt_rq->active;
1349 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1350
1351 if (head)
1352 list_move(&rt_se->run_list, queue);
1353 else
1354 list_move_tail(&rt_se->run_list, queue);
1355 }
1356}
1357
1358static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1359{
1360 struct sched_rt_entity *rt_se = &p->rt;
1361 struct rt_rq *rt_rq;
1362
1363 for_each_sched_rt_entity(rt_se) {
1364 rt_rq = rt_rq_of_se(rt_se);
1365 requeue_rt_entity(rt_rq, rt_se, head);
1366 }
1367}
1368
1369static void yield_task_rt(struct rq *rq)
1370{
1371 requeue_task_rt(rq, rq->curr, 0);
1372}
1373
1374#ifdef CONFIG_SMP
1375static int find_lowest_rq(struct task_struct *task);
1376
1377static int
1378select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1379{
1380 struct task_struct *curr;
1381 struct rq *rq;
1382
1383
1384 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1385 goto out;
1386
1387 rq = cpu_rq(cpu);
1388
1389 rcu_read_lock();
1390 curr = READ_ONCE(rq->curr);
1391
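 /*
  * If the current task on @p's runqueue is an RT task that either
  * cannot migrate or is of equal or higher priority than @p, try to
  * find a runqueue whose top priority is lower than @p's and wake @p
  * there instead, to avoid overloading this runqueue. The test is
  * optimistic; if we get it wrong the load balancer will sort it out.
  */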
1414 if (curr && unlikely(rt_task(curr)) &&
1415 (tsk_nr_cpus_allowed(curr) < 2 ||
1416 curr->prio <= p->prio)) {
1417 int target = find_lowest_rq(p);
1418
1419
1420
1421
1422
1423 if (target != -1 &&
1424 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1425 cpu = target;
1426 }
1427 rcu_read_unlock();
1428
1429out:
1430 return cpu;
1431}
1432
1433static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1434{
1435
1436
1437
1438
1439 if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
1440 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1441 return;
1442
1443
1444
1445
1446
1447 if (tsk_nr_cpus_allowed(p) != 1
1448 && cpupri_find(&rq->rd->cpupri, p, NULL))
1449 return;
1450
1451
1452
1453
1454
1455
1456 requeue_task_rt(rq, p, 1);
1457 resched_curr(rq);
1458}
1459
1460#endif
1461
1462
1463
1464
1465static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1466{
1467 if (p->prio < rq->curr->prio) {
1468 resched_curr(rq);
1469 return;
1470 }
1471
1472#ifdef CONFIG_SMP
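 /*
  * If the woken task has the same priority as current and current has
  * not already been marked to reschedule, check whether current can
  * readily run elsewhere; if so, reschedule so the push logic can move
  * it and make room for the (possibly non-migratable) woken task.
  */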
1485 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1486 check_preempt_equal_prio(rq, p);
1487#endif
1488}
1489
1490static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1491 struct rt_rq *rt_rq)
1492{
1493 struct rt_prio_array *array = &rt_rq->active;
1494 struct sched_rt_entity *next = NULL;
1495 struct list_head *queue;
1496 int idx;
1497
1498 idx = sched_find_first_bit(array->bitmap);
1499 BUG_ON(idx >= MAX_RT_PRIO);
1500
1501 queue = array->queue + idx;
1502 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1503
1504 return next;
1505}
1506
1507static struct task_struct *_pick_next_task_rt(struct rq *rq)
1508{
1509 struct sched_rt_entity *rt_se;
1510 struct task_struct *p;
1511 struct rt_rq *rt_rq = &rq->rt;
1512
1513 do {
1514 rt_se = pick_next_rt_entity(rq, rt_rq);
1515 BUG_ON(!rt_se);
1516 rt_rq = group_rt_rq(rt_se);
1517 } while (rt_rq);
1518
1519 p = rt_task_of(rt_se);
1520 p->se.exec_start = rq_clock_task(rq);
1521
1522 return p;
1523}
1524
1525static struct task_struct *
1526pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
1527{
1528 struct task_struct *p;
1529 struct rt_rq *rt_rq = &rq->rt;
1530
1531 if (need_pull_rt_task(rq, prev)) {
1532
1533
1534
1535
1536
1537
1538 lockdep_unpin_lock(&rq->lock, cookie);
1539 pull_rt_task(rq);
1540 lockdep_repin_lock(&rq->lock, cookie);
1541
1542
1543
1544
1545
1546 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1547 rq->dl.dl_nr_running))
1548 return RETRY_TASK;
1549 }
1550
1551
1552
1553
1554
1555 if (prev->sched_class == &rt_sched_class)
1556 update_curr_rt(rq);
1557
1558 if (!rt_rq->rt_queued)
1559 return NULL;
1560
1561 put_prev_task(rq, prev);
1562
1563 p = _pick_next_task_rt(rq);
1564
1565
1566 dequeue_pushable_task(rq, p);
1567
1568 queue_push_tasks(rq);
1569
1570 return p;
1571}
1572
1573static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1574{
1575 update_curr_rt(rq);
1576
1577
1578
1579
1580
1581 if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
1582 enqueue_pushable_task(rq, p);
1583}
1584
1585#ifdef CONFIG_SMP
1586
1587
1588#define RT_MAX_TRIES 3
1589
1590static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1591{
1592 if (!task_running(rq, p) &&
1593 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1594 return 1;
1595 return 0;
1596}
1597
1598
1599
1600
1601
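/*
 * Return the highest-priority pushable task on @rq that can run on @cpu,
 * or NULL if there is none.
 */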
1602static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1603{
1604 struct plist_head *head = &rq->rt.pushable_tasks;
1605 struct task_struct *p;
1606
1607 if (!has_pushable_tasks(rq))
1608 return NULL;
1609
1610 plist_for_each_entry(p, head, pushable_tasks) {
1611 if (pick_rt_task(rq, p, cpu))
1612 return p;
1613 }
1614
1615 return NULL;
1616}
1617
1618static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1619
1620static int find_lowest_rq(struct task_struct *task)
1621{
1622 struct sched_domain *sd;
1623 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1624 int this_cpu = smp_processor_id();
1625 int cpu = task_cpu(task);
1626
1627
1628 if (unlikely(!lowest_mask))
1629 return -1;
1630
1631 if (tsk_nr_cpus_allowed(task) == 1)
1632 return -1;
1633
1634 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1635 return -1;
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645 if (cpumask_test_cpu(cpu, lowest_mask))
1646 return cpu;
1647
1648
1649
1650
1651
1652 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1653 this_cpu = -1;
1654
1655 rcu_read_lock();
1656 for_each_domain(cpu, sd) {
1657 if (sd->flags & SD_WAKE_AFFINE) {
1658 int best_cpu;
1659
1660
1661
1662
1663
1664 if (this_cpu != -1 &&
1665 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1666 rcu_read_unlock();
1667 return this_cpu;
1668 }
1669
1670 best_cpu = cpumask_first_and(lowest_mask,
1671 sched_domain_span(sd));
1672 if (best_cpu < nr_cpu_ids) {
1673 rcu_read_unlock();
1674 return best_cpu;
1675 }
1676 }
1677 }
1678 rcu_read_unlock();
1679
1680
1681
1682
1683
1684
1685 if (this_cpu != -1)
1686 return this_cpu;
1687
1688 cpu = cpumask_any(lowest_mask);
1689 if (cpu < nr_cpu_ids)
1690 return cpu;
1691 return -1;
1692}
1693
1694
1695static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1696{
1697 struct rq *lowest_rq = NULL;
1698 int tries;
1699 int cpu;
1700
1701 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1702 cpu = find_lowest_rq(task);
1703
1704 if ((cpu == -1) || (cpu == rq->cpu))
1705 break;
1706
1707 lowest_rq = cpu_rq(cpu);
1708
1709 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1710
1711
1712
1713
1714
1715 lowest_rq = NULL;
1716 break;
1717 }
1718
1719
1720 if (double_lock_balance(rq, lowest_rq)) {
1721
1722
1723
1724
1725
1726
1727 if (unlikely(task_rq(task) != rq ||
1728 !cpumask_test_cpu(lowest_rq->cpu,
1729 tsk_cpus_allowed(task)) ||
1730 task_running(rq, task) ||
1731 !rt_task(task) ||
1732 !task_on_rq_queued(task))) {
1733
1734 double_unlock_balance(rq, lowest_rq);
1735 lowest_rq = NULL;
1736 break;
1737 }
1738 }
1739
1740
1741 if (lowest_rq->rt.highest_prio.curr > task->prio)
1742 break;
1743
1744
1745 double_unlock_balance(rq, lowest_rq);
1746 lowest_rq = NULL;
1747 }
1748
1749 return lowest_rq;
1750}
1751
1752static struct task_struct *pick_next_pushable_task(struct rq *rq)
1753{
1754 struct task_struct *p;
1755
1756 if (!has_pushable_tasks(rq))
1757 return NULL;
1758
1759 p = plist_first_entry(&rq->rt.pushable_tasks,
1760 struct task_struct, pushable_tasks);
1761
1762 BUG_ON(rq->cpu != task_cpu(p));
1763 BUG_ON(task_current(rq, p));
1764 BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
1765
1766 BUG_ON(!task_on_rq_queued(p));
1767 BUG_ON(!rt_task(p));
1768
1769 return p;
1770}
1771
1772
1773
1774
1775
1776
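/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser priority.
 */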
1777static int push_rt_task(struct rq *rq)
1778{
1779 struct task_struct *next_task;
1780 struct rq *lowest_rq;
1781 int ret = 0;
1782
1783 if (!rq->rt.overloaded)
1784 return 0;
1785
1786 next_task = pick_next_pushable_task(rq);
1787 if (!next_task)
1788 return 0;
1789
1790retry:
1791 if (unlikely(next_task == rq->curr)) {
1792 WARN_ON(1);
1793 return 0;
1794 }
1795
1796
1797
1798
1799
1800
1801 if (unlikely(next_task->prio < rq->curr->prio)) {
1802 resched_curr(rq);
1803 return 0;
1804 }
1805
1806
1807 get_task_struct(next_task);
1808
1809
1810 lowest_rq = find_lock_lowest_rq(next_task, rq);
1811 if (!lowest_rq) {
1812 struct task_struct *task;
1813
1814
1815
1816
1817
1818
1819
1820
1821 task = pick_next_pushable_task(rq);
1822 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1823
1824
1825
1826
1827
1828
1829 goto out;
1830 }
1831
1832 if (!task)
1833
1834 goto out;
1835
1836
1837
1838
1839 put_task_struct(next_task);
1840 next_task = task;
1841 goto retry;
1842 }
1843
1844 deactivate_task(rq, next_task, 0);
1845 set_task_cpu(next_task, lowest_rq->cpu);
1846 activate_task(lowest_rq, next_task, 0);
1847 ret = 1;
1848
1849 resched_curr(lowest_rq);
1850
1851 double_unlock_balance(rq, lowest_rq);
1852
1853out:
1854 put_task_struct(next_task);
1855
1856 return ret;
1857}
1858
1859static void push_rt_tasks(struct rq *rq)
1860{
1861
1862 while (push_rt_task(rq))
1863 ;
1864}
1865
1866#ifdef HAVE_RT_PUSH_IPI
1867
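/*
 * Walk rd->rto_mask starting just after rq->rt.push_cpu, wrapping around
 * and stopping before we get back to rq->cpu; returns the next CPU to
 * try, or nr_cpu_ids once the walk is complete. The source CPU itself
 * is never returned.
 */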
1876static int rto_next_cpu(struct rq *rq)
1877{
1878 int prev_cpu = rq->rt.push_cpu;
1879 int cpu;
1880
1881 cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1882
1883
1884
1885
1886
1887
1888 if (prev_cpu < rq->cpu) {
1889 if (cpu >= rq->cpu)
1890 return nr_cpu_ids;
1891
1892 } else if (cpu >= nr_cpu_ids) {
1893
1894
1895
1896
1897
1898 cpu = cpumask_first(rq->rd->rto_mask);
1899 if (cpu >= rq->cpu)
1900 return nr_cpu_ids;
1901 }
1902 rq->rt.push_cpu = cpu;
1903
1904
1905 return cpu;
1906}
1907
1908static int find_next_push_cpu(struct rq *rq)
1909{
1910 struct rq *next_rq;
1911 int cpu;
1912
1913 while (1) {
1914 cpu = rto_next_cpu(rq);
1915 if (cpu >= nr_cpu_ids)
1916 break;
1917 next_rq = cpu_rq(cpu);
1918
1919
1920 if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1921 break;
1922 }
1923
1924 return cpu;
1925}
1926
1927#define RT_PUSH_IPI_EXECUTING 1
1928#define RT_PUSH_IPI_RESTART 2
1929
1930static void tell_cpu_to_push(struct rq *rq)
1931{
1932 int cpu;
1933
1934 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1935 raw_spin_lock(&rq->rt.push_lock);
1936
1937 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1938
1939
1940
1941
1942 rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1943 raw_spin_unlock(&rq->rt.push_lock);
1944 return;
1945 }
1946 raw_spin_unlock(&rq->rt.push_lock);
1947 }
1948
1949
1950
1951 rq->rt.push_cpu = rq->cpu;
1952 cpu = find_next_push_cpu(rq);
1953 if (cpu >= nr_cpu_ids)
1954 return;
1955
1956 rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1957
1958 irq_work_queue_on(&rq->rt.push_work, cpu);
1959}
1960
1961
1962static void try_to_push_tasks(void *arg)
1963{
1964 struct rt_rq *rt_rq = arg;
1965 struct rq *rq, *src_rq;
1966 int this_cpu;
1967 int cpu;
1968
1969 this_cpu = rt_rq->push_cpu;
1970
1971
1972 BUG_ON(this_cpu != smp_processor_id());
1973
1974 rq = cpu_rq(this_cpu);
1975 src_rq = rq_of_rt_rq(rt_rq);
1976
1977again:
1978 if (has_pushable_tasks(rq)) {
1979 raw_spin_lock(&rq->lock);
1980 push_rt_task(rq);
1981 raw_spin_unlock(&rq->lock);
1982 }
1983
1984
1985 raw_spin_lock(&rt_rq->push_lock);
1986
1987
1988
1989
1990 if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
1991 rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
1992 rt_rq->push_cpu = src_rq->cpu;
1993 }
1994
1995 cpu = find_next_push_cpu(src_rq);
1996
1997 if (cpu >= nr_cpu_ids)
1998 rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
1999 raw_spin_unlock(&rt_rq->push_lock);
2000
2001 if (cpu >= nr_cpu_ids)
2002 return;
2003
2004
2005
2006
2007
2008
2009 if (unlikely(cpu == rq->cpu))
2010 goto again;
2011
2012
2013 irq_work_queue_on(&rt_rq->push_work, cpu);
2014}
2015
2016static void push_irq_work_func(struct irq_work *work)
2017{
2018 struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
2019
2020 try_to_push_tasks(rt_rq);
2021}
2022#endif
2023
2024static void pull_rt_task(struct rq *this_rq)
2025{
2026 int this_cpu = this_rq->cpu, cpu;
2027 bool resched = false;
2028 struct task_struct *p;
2029 struct rq *src_rq;
2030
2031 if (likely(!rt_overloaded(this_rq)))
2032 return;
2033
2034
2035
2036
2037
2038 smp_rmb();
2039
2040#ifdef HAVE_RT_PUSH_IPI
2041 if (sched_feat(RT_PUSH_IPI)) {
2042 tell_cpu_to_push(this_rq);
2043 return;
2044 }
2045#endif
2046
2047 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2048 if (this_cpu == cpu)
2049 continue;
2050
2051 src_rq = cpu_rq(cpu);
2052
2053
2054
2055
2056
2057
2058
2059
2060 if (src_rq->rt.highest_prio.next >=
2061 this_rq->rt.highest_prio.curr)
2062 continue;
2063
2064
2065
2066
2067
2068
2069 double_lock_balance(this_rq, src_rq);
2070
2071
2072
2073
2074
2075 p = pick_highest_pushable_task(src_rq, this_cpu);
2076
2077
2078
2079
2080
2081 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2082 WARN_ON(p == src_rq->curr);
2083 WARN_ON(!task_on_rq_queued(p));
2084
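 /*
  * There's a chance that p is higher in priority than what's
  * currently running on its CPU: it is just waking up and hasn't
  * had a chance to schedule. Only pull p if it is lower in
  * priority than the current task on its runqueue.
  */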
2093 if (p->prio < src_rq->curr->prio)
2094 goto skip;
2095
2096 resched = true;
2097
2098 deactivate_task(src_rq, p, 0);
2099 set_task_cpu(p, this_cpu);
2100 activate_task(this_rq, p, 0);
2101
2102
2103
2104
2105
2106
2107 }
2108skip:
2109 double_unlock_balance(this_rq, src_rq);
2110 }
2111
2112 if (resched)
2113 resched_curr(this_rq);
2114}
2115
2116
2117
2118
2119
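/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now.
 */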
2120static void task_woken_rt(struct rq *rq, struct task_struct *p)
2121{
2122 if (!task_running(rq, p) &&
2123 !test_tsk_need_resched(rq->curr) &&
2124 tsk_nr_cpus_allowed(p) > 1 &&
2125 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2126 (tsk_nr_cpus_allowed(rq->curr) < 2 ||
2127 rq->curr->prio <= p->prio))
2128 push_rt_tasks(rq);
2129}
2130
2131
2132static void rq_online_rt(struct rq *rq)
2133{
2134 if (rq->rt.overloaded)
2135 rt_set_overload(rq);
2136
2137 __enable_runtime(rq);
2138
2139 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2140}
2141
2142
2143static void rq_offline_rt(struct rq *rq)
2144{
2145 if (rq->rt.overloaded)
2146 rt_clear_overload(rq);
2147
2148 __disable_runtime(rq);
2149
2150 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2151}
2152
2153
2154
2155
2156
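/*
 * When switching away from the RT class we put ourselves in a position
 * where we might want to pull RT tasks from other runqueues.
 */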
2157static void switched_from_rt(struct rq *rq, struct task_struct *p)
2158{
2159
2160
2161
2162
2163
2164
2165
2166 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2167 return;
2168
2169 queue_pull_task(rq);
2170}
2171
2172void __init init_sched_rt_class(void)
2173{
2174 unsigned int i;
2175
2176 for_each_possible_cpu(i) {
2177 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2178 GFP_KERNEL, cpu_to_node(i));
2179 }
2180}
2181#endif
2182
2183
2184
2185
2186
2187
2188static void switched_to_rt(struct rq *rq, struct task_struct *p)
2189{
2190
2191
2192
2193
2194
2195
2196
2197 if (task_on_rq_queued(p) && rq->curr != p) {
2198#ifdef CONFIG_SMP
2199 if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
2200 queue_push_tasks(rq);
2201#else
2202 if (p->prio < rq->curr->prio)
2203 resched_curr(rq);
2204#endif
2205 }
2206}
2207
2208
2209
2210
2211
2212static void
2213prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2214{
2215 if (!task_on_rq_queued(p))
2216 return;
2217
2218 if (rq->curr == p) {
2219#ifdef CONFIG_SMP
2220
2221
2222
2223
2224 if (oldprio < p->prio)
2225 queue_pull_task(rq);
2226
2227
2228
2229
2230
2231 if (p->prio > rq->rt.highest_prio.curr)
2232 resched_curr(rq);
2233#else
2234
2235 if (oldprio < p->prio)
2236 resched_curr(rq);
2237#endif
2238 } else {
2239
2240
2241
2242
2243
2244 if (p->prio < rq->curr->prio)
2245 resched_curr(rq);
2246 }
2247}
2248
2249static void watchdog(struct rq *rq, struct task_struct *p)
2250{
2251 unsigned long soft, hard;
2252
2253
2254 soft = task_rlimit(p, RLIMIT_RTTIME);
2255 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2256
2257 if (soft != RLIM_INFINITY) {
2258 unsigned long next;
2259
2260 if (p->rt.watchdog_stamp != jiffies) {
2261 p->rt.timeout++;
2262 p->rt.watchdog_stamp = jiffies;
2263 }
2264
2265 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2266 if (p->rt.timeout > next)
2267 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2268 }
2269}
2270
2271static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2272{
2273 struct sched_rt_entity *rt_se = &p->rt;
2274
2275 update_curr_rt(rq);
2276
2277 watchdog(rq, p);
2278
2279
2280
2281
2282
2283 if (p->policy != SCHED_RR)
2284 return;
2285
2286 if (--p->rt.time_slice)
2287 return;
2288
2289 p->rt.time_slice = sched_rr_timeslice;
2290
2291
2292
2293
2294
2295 for_each_sched_rt_entity(rt_se) {
2296 if (rt_se->run_list.prev != rt_se->run_list.next) {
2297 requeue_task_rt(rq, p, 0);
2298 resched_curr(rq);
2299 return;
2300 }
2301 }
2302}
2303
2304static void set_curr_task_rt(struct rq *rq)
2305{
2306 struct task_struct *p = rq->curr;
2307
2308 p->se.exec_start = rq_clock_task(rq);
2309
2310
2311 dequeue_pushable_task(rq, p);
2312}
2313
2314static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2315{
2316
2317
2318
2319 if (task->policy == SCHED_RR)
2320 return sched_rr_timeslice;
2321 else
2322 return 0;
2323}
2324
2325const struct sched_class rt_sched_class = {
2326 .next = &fair_sched_class,
2327 .enqueue_task = enqueue_task_rt,
2328 .dequeue_task = dequeue_task_rt,
2329 .yield_task = yield_task_rt,
2330
2331 .check_preempt_curr = check_preempt_curr_rt,
2332
2333 .pick_next_task = pick_next_task_rt,
2334 .put_prev_task = put_prev_task_rt,
2335
2336#ifdef CONFIG_SMP
2337 .select_task_rq = select_task_rq_rt,
2338
2339 .set_cpus_allowed = set_cpus_allowed_common,
2340 .rq_online = rq_online_rt,
2341 .rq_offline = rq_offline_rt,
2342 .task_woken = task_woken_rt,
2343 .switched_from = switched_from_rt,
2344#endif
2345
2346 .set_curr_task = set_curr_task_rt,
2347 .task_tick = task_tick_rt,
2348
2349 .get_rr_interval = get_rr_interval_rt,
2350
2351 .prio_changed = prio_changed_rt,
2352 .switched_to = switched_to_rt,
2353
2354 .update_curr = update_curr_rt,
2355};
2356
2357#ifdef CONFIG_SCHED_DEBUG
2358extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2359
2360void print_rt_stats(struct seq_file *m, int cpu)
2361{
2362 rt_rq_iter_t iter;
2363 struct rt_rq *rt_rq;
2364
2365 rcu_read_lock();
2366 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2367 print_rt_rq(m, cpu, rt_rq);
2368 rcu_read_unlock();
2369}
2370#endif
2371