/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_RT_GROUP_SCHED

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

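/*
 * An rq counts as "overloaded" when it has more than one runnable RT
 * task and at least one of them can migrate; only then is it worth
 * flagging in the root domain for the push/pull logic below.
 */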
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

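/*
 * Tasks that may run on more than one CPU are kept on the per-rq
 * "pushable" plist, ordered by priority, so push_rt_task() can find
 * the highest-priority migratable candidate quickly.
 */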
126
127static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
128{
129 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
130 plist_node_init(&p->pushable_tasks, p->prio);
131 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
132}
133
134static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
135{
136 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
137}
138
139static inline int has_pushable_tasks(struct rq *rq)
140{
141 return !plist_head_empty(&rq->rt.pushable_tasks);
142}
143
144#else
145
146static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
147{
148}
149
150static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
151{
152}
153
154static inline
155void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
156{
157}
158
159static inline
160void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
161{
162}
163
164#endif
165
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_add_rcu(&rt_rq->leaf_rt_rq_list,
			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_del_rcu(&rt_rq->leaf_rt_rq_list);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */

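/*
 * Called from the rt_bandwidth period timer: replenish rt_time on each
 * rt_rq in the period mask and unthrottle runqueues that have runtime
 * again.  Returns 1 when every rt_rq is idle, so the timer can stop.
 */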
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

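/*
 * Decide whether this rt_rq has used up its budget for the current
 * period; if so, mark it throttled, dequeue it and return 1 so the
 * caller can reschedule.
 */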
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq->clock_task - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock_task;
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

#if defined CONFIG_SMP

static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);

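/*
 * Priority of the second-highest runnable RT task on this rq, or
 * MAX_RT_PRIO when there is no such task.
 */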
static inline int next_prio(struct rq *rq)
{
	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);

	if (next && rt_prio(next->prio))
		return next->prio;
	else
		return MAX_RT_PRIO;
}

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (prio < prev_prio) {

		/*
		 * If the new task is higher in priority than anything on the
		 * run-queue, we know that the previous high becomes our
		 * next-highest.
		 */
		rt_rq->highest_prio.next = prev_prio;

		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

	} else if (prio == rt_rq->highest_prio.curr)
		/*
		 * If the next task is equal in priority to the highest on
		 * the run-queue, then we implicitly know that the next highest
		 * task cannot be any lower than current
		 */
		rt_rq->highest_prio.next = prio;
	else if (prio < rt_rq->highest_prio.next)
		/*
		 * Otherwise, we need to recompute next-highest
		 */
		rt_rq->highest_prio.next = next_prio(rq);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
		rt_rq->highest_prio.next = next_prio(rq);

	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (!rt_rq->rt_nr_running)
		list_add_leaf_rt_rq(rt_rq);

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
	if (!rt_rq->rt_nr_running)
		list_del_leaf_rt_rq(rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

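/*
 * Enqueueing an entity first dequeues the whole hierarchy it sits in,
 * then re-enqueues bottom-up, so the per-level priority bookkeeping
 * stays consistent.  Dequeueing re-enqueues any parent group that
 * still has runnable children of its own.
 */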
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
{
	if (sd_flag != SD_BALANCE_WAKE)
		return smp_processor_id();

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. Even if
	 * the RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. So we waste all the cache for the lower
	 * RT task in hopes of saving some of a RT task
	 * that is just being woken and probably will have
	 * cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (rq->curr->rt.nr_cpus_allowed < 2 ||
	     rq->curr->prio < p->prio) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 */
	return task_cpu(p);
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * There appears to be other cpus that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu.  If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, avoiding current getting preempted
	 * needlessly.
	 */
	if (p->prio == rq->curr->prio && !need_resched())
		check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock_task;

	return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
#endif

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p;

			if (!rt_entity_is_task(rt_se))
				continue;

			p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

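/*
 * Per-CPU scratch cpumask for find_lowest_rq(); allocated once at boot
 * by init_sched_rt_class() so the fast path never has to allocate.
 */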
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
				return this_cpu;

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;
	return -1;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, the task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				raw_spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

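/*
 * Highest-priority task on the pushable list, with sanity checks that
 * it really is a queued, migratable RT task belonging to this rq.
 */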
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->rt.nr_cpus_allowed <= 1);

	BUG_ON(!p->se.on_rq);
	BUG_ON(!rt_task(p));

	return p;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * If we get here, the task hasn't moved at all, but
			 * it has failed to push.  We will not try again,
			 * since the other cpus will pull from us when they
			 * are ready.
			 */
			dequeue_pushable_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return 1;
}

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT task */
	while (push_rt_task(rq))
		;
}

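/*
 * Scan the root domain's overloaded runqueues and pull over any ready
 * RT task that would preempt what is about to run here.
 */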
static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	push_rt_tasks(rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->rt.nr_cpus_allowed > 1 &&
	    rt_task(rq->curr) &&
	    (rq->curr->rt.nr_cpus_allowed < 2 ||
	     rq->curr->prio < p->prio))
		push_rt_tasks(rq);
}

static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	int weight = cpumask_weight(new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if (!task_current(rq, p)) {
			/*
			 * Make sure we dequeue this task from the pushable list
			 * before going further.  It will either remain off of
			 * the list because we are no longer pushable, or it
			 * will be requeued.
			 */
			if (p->rt.nr_cpus_allowed > 1)
				dequeue_pushable_task(rq, p);

			/*
			 * Requeue if our weight is changing and still > 1
			 */
			if (weight > 1)
				enqueue_pushable_task(rq, p);

		}

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(&rq->rt);
	}

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching away from the rt queue, we bring ourselves to a
 * position where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}

static inline void init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note some of this is checked in
		 * resched_task, but this here gives us more
		 * benefit, because it is the future prio value in
		 * p->prio, not the current value.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

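/*
 * RLIMIT_RTTIME watchdog: p->rt.timeout counts scheduler ticks since
 * the task last blocked; once it crosses the soft limit, sched_exp is
 * set so the posix-cpu-timers code can deliver the signal.
 */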
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p, 0);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock_task;

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return DEF_TIMESLICE;
	else
		return 0;
}

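/*
 * The RT scheduling class: the hooks the core scheduler invokes for
 * SCHED_FIFO and SCHED_RR tasks.
 */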
static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.set_cpus_allowed	= set_cpus_allowed_rt,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

static void print_rt_stats(struct seq_file *m, int cpu)
{
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */