/*
 * Completely Fair Scheduling (CFS) class:
 * fair scheduling policy (SCHED_NORMAL/SCHED_BATCH) implementation.
 */

#include <linux/latencytop.h>

/*
 * Targeted preemption latency for CPU-bound tasks, in nanoseconds:
 * the period in which every runnable task should get a chance to run once.
 */
unsigned int sysctl_sched_latency = 5000000ULL;

/*
 * Minimal preemption granularity for CPU-bound tasks, in nanoseconds.
 */
unsigned int sysctl_sched_min_granularity = 1000000ULL;

/*
 * Kept at sysctl_sched_latency / sysctl_sched_min_granularity.
 */
static unsigned int sched_nr_latency = 5;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * sys_sched_yield() compat mode:
 *
 * This option switches the aggressive yield behaviour of the old
 * scheduler back on. (Disabled by default.)
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_OTHER wake-up granularity, in nanoseconds:
 * this delays preemption of the running task by a newly woken task,
 * which reduces over-scheduling for bursty wakeup patterns.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */
#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/*
 * Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu').
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can only be made between sibling entities
	 * that are in the same cfs_rq, i.e. that have a common parent.
	 * Walk up the hierarchy of both entities until we find their
	 * ancestors that are siblings of a common parent.
	 */

	/* First walk up until both entities are at the same depth. */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */
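
/*
 * Note on the two helpers below: vruntime values are u64 and may wrap, so
 * comparisons are done on the signed difference. For example, if a vruntime
 * has wrapped around to a small value while min_vruntime still sits near
 * ULLONG_MAX, (s64)(vruntime - min_vruntime) is a small positive number and
 * the wrapped value is correctly treated as the larger one.
 */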
static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We dont care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}
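
/*
 * Illustrative numbers (taken from the kernel's prio_to_weight[] table, not
 * from this file): NICE_0_LOAD is 1024, a nice -5 task weighs 3121 and a
 * nice +5 task weighs 335. For a 1ms (1000000ns) wall-clock delta,
 * calc_delta_fair() above yields roughly 1000000 * 1024/3121 ~= 328us of
 * vruntime for the nice -5 task and 1000000 * 1024/335 ~= 3.06ms for the
 * nice +5 task, so heavier tasks accumulate vruntime more slowly and
 * therefore get to run longer.
 */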

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (> sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
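
/*
 * Worked example with the defaults above (latency 5ms, min granularity 1ms,
 * sched_nr_latency 5): with 3 runnable tasks the period stays at 5ms; with
 * 8 runnable tasks it is stretched to 8 * 1ms = 8ms so that no slice drops
 * below the minimum granularity.
 */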

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
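
/*
 * Example: two runnable nice-0 tasks (weight 1024 each) on one cfs_rq share
 * a 5ms period, so each gets 5ms * 1024/2048 = 2.5ms. A nice 0 task running
 * next to a nice +5 task (weight 335) would instead get 5ms * 1024/1359
 * ~= 3.77ms while the nice +5 task gets ~= 1.23ms. With group scheduling
 * the loop above repeats this weighting at every hierarchy level.
 */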

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_count, se->wait_count + 1);
	schedstat_set(se->wait_sum, se->wait_sum +
			rq_of(cfs_rq)->clock - se->wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_of(cfs_rq)->clock - se->wait_start);
	}
#endif
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	struct task_struct *tsk = NULL;

	if (entity_is_task(se))
		tsk = task_of(se);

	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		if (tsk) {
			if (tsk->in_iowait) {
				se->iowait_sum += delta;
				se->iowait_count++;
				trace_sched_stat_iowait(tsk, delta);
			}

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end of this period.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial && sched_feat(FAIR_SLEEPERS)) {
		unsigned long thresh = sysctl_sched_latency;

		/*
		 * Convert the sleeper threshold into virtual time.
		 * SCHED_IDLE is a special sub-class; we care about
		 * fairness only relative to other SCHED_IDLE tasks,
		 * all of which have the same weight.
		 */
		if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
				 task_of(se)->policy != SCHED_IDLE))
			thresh = calc_delta_fair(thresh, se);

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* ensure we never gain time by being placed backwards. */
	vruntime = max_vruntime(se->vruntime, vruntime);

	se->vruntime = vruntime;
}
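
/*
 * Example placement, assuming FAIR_SLEEPERS and GENTLE_FAIR_SLEEPERS are set
 * and NORMALIZED_SLEEPER is not, with the default 5ms latency: a task waking
 * from sleep is placed at min_vruntime - 2.5ms (unless its old vruntime is
 * already larger), so it gets a bounded head start rather than credit for
 * its full sleep time. A freshly forked task with START_DEBIT is instead
 * pushed one vslice past min_vruntime.
 */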

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
	account_entity_enqueue(cfs_rq, se);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
}

static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (!se || cfs_rq->last == se)
		cfs_rq->last = NULL;

	if (!se || cfs_rq->next == se)
		cfs_rq->next = NULL;
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	for_each_sched_entity(se)
		__clear_buddies(cfs_rq_of(se), se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	clear_buddies(cfs_rq, se);

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
	update_min_vruntime(cfs_rq);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	if (delta_exec < sysctl_sched_min_granularity)
		return;

	if (cfs_rq->nr_running > 1) {
		struct sched_entity *se = __pick_next_entity(cfs_rq);
		s64 delta = curr->vruntime - se->vruntime;

		if (delta > ideal_runtime)
			resched_task(rq_of(cfs_rq)->curr);
	}
}
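
/*
 * In other words, with the default 5ms latency and two equally weighted
 * tasks, the current task is rescheduled once it has run past its ~2.5ms
 * ideal slice, and additionally (after at least min_granularity of runtime)
 * whenever its vruntime leads the leftmost waiter by more than that ideal
 * slice.
 */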

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. dont track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);
	struct sched_entity *left = se;

	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

	/*
	 * Prefer last buddy, try to return the CPU to a preempted task.
	 */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

	clear_buddies(cfs_rq, se);

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued) {
		resched_task(rq_of(cfs_rq)->curr);
		return;
	}
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (rq->curr != p)
			delta = max_t(s64, 10000LL, delta);

		hrtick_start(rq, delta);
	}
}

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		wakeup = 1;
	}

	hrtick_update(rq);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
		sleep = 1;
	}

	hrtick_update(rq);
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *rightmost, *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	clear_buddies(cfs_rq, se);

	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
		update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	rightmost = __pick_last_entity(cfs_rq);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(!rightmost || entity_before(rightmost, se)))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 * Upon rescheduling, sched_class::put_prev_task() will place
	 * 'current' within the tree based on its new key value.
	 */
	se->vruntime = rightmost->vruntime + 1;
}

#ifdef CONFIG_SMP

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() estimates, as seen from the root task group, the load
 * change on @cpu when @wl units of task weight are added there while the
 * group as a whole gains @wg. The per-level re-weighting below propagates
 * that change up the group hierarchy using the group's shares (S), the
 * entity's share on this cpu (s) and the runqueue weight (rw). Used by
 * wake_affine().
 */
static long effective_load(struct task_group *tg, int cpu,
		long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)
		return wl;

	/*
	 * By not taking the decrease of shares on the other cpu into
	 * account our error leans towards reducing the affine wakeups.
	 */
	if (!wl && sched_feat(ASYM_EFF_LOAD))
		return wl;

	for_each_sched_entity(se) {
		long S, rw, s, a, b;
		long more_w;

		/*
		 * Also add the difference between the weight seen now and
		 * the weight the shares were last computed against.
		 */
		more_w = se->my_q->load.weight - se->my_q->rq_weight;
		wl += more_w;
		wg += more_w;

		S = se->my_q->tg->shares;
		s = se->my_q->shares;
		rw = se->my_q->rq_weight;

		a = S*(rw + wl);
		b = S*rw + s*wg;

		wl = s*(a-b);

		if (likely(b))
			wl /= b;

		/*
		 * Assume the group is already queued and will thus already
		 * be accounted for in the weight; moving shares between
		 * CPUs does not alter the total group weight.
		 */
		wg = 0;
	}

	return wl;
}

#else

static inline unsigned long effective_load(struct task_group *tg, int cpu,
		unsigned long wl, unsigned long wg)
{
	return wl;
}

#endif

static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
	struct task_struct *curr = current;
	unsigned long this_load, load;
	int idx, this_cpu, prev_cpu;
	unsigned long tl_per_task;
	unsigned int imbalance;
	struct task_group *tg;
	unsigned long weight;
	int balanced;

	idx = sd->wake_idx;
	this_cpu = smp_processor_id();
	prev_cpu = task_cpu(p);
	load = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);

	if (sync) {
		if (sched_feat(SYNC_LESS) &&
		    (curr->se.avg_overlap > sysctl_sched_migration_cost ||
		     p->se.avg_overlap > sysctl_sched_migration_cost))
			sync = 0;
	} else {
		if (sched_feat(SYNC_MORE) &&
		    (curr->se.avg_overlap < sysctl_sched_migration_cost &&
		     p->se.avg_overlap < sysctl_sched_migration_cost))
			sync = 1;
	}

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

		this_load += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	imbalance = 100 + (sd->imbalance_pct - 100) / 2;

	/*
	 * In low-load situations, where prev_cpu is idle and this_cpu is
	 * idle too because the sync subtraction above dropped this_load to
	 * zero, we'll always pass the test; otherwise check whether the two
	 * cpus are near enough in load to allow this task to be woken on
	 * this_cpu.
	 */
	balanced = !this_load ||
		100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));

	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
	 */
	if (sync && balanced)
		return 1;

	schedstat_inc(p, se.nr_wakeups_affine_attempts);
	tl_per_task = cpu_avg_load_per_task(this_cpu);

	if (balanced ||
	    (this_load <= load &&
	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
		schedstat_inc(sd, ttwu_move_affine);
		schedstat_inc(p, se.nr_wakeups_affine);

		return 1;
	}
	return 0;
}

/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
		  int this_cpu, int load_idx)
{
	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
	unsigned long min_load = ULONG_MAX, this_load = 0;
	int imbalance = 100 + (sd->imbalance_pct-100)/2;

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;

		/* Skip over this group if it has no CPUs allowed */
		if (!cpumask_intersects(sched_group_cpus(group),
					&p->cpus_allowed))
			continue;

		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_cpus(group));

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu(i, sched_group_cpus(group)) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;

		if (local_group) {
			this_load = avg_load;
			this = group;
		} else if (avg_load < min_load) {
			min_load = avg_load;
			idlest = group;
		}
	} while (group = group->next, group != sd->groups);

	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
	unsigned long load, min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	/* Traverse only the allowed CPUs */
	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
			min_load = load;
			idlest = i;
		}
	}

	return idlest;
}

/*
 * select_task_rq_fair: pick a runqueue for a waking, forking or exec'ing
 * task.
 *
 * Balance, i.e. select the least loaded group below the domain that has
 * the relevant SD_BALANCE_* flag set, or prefer an affine (cache-hot)
 * wakeup on the current CPU when wake_affine() says it is balanced.
 *
 * Returns the target CPU number.
 *
 * preempt must be disabled.
 */
static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
{
	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int new_cpu = cpu;
	int want_affine = 0;
	int want_sd = 1;
	int sync = wake_flags & WF_SYNC;

	if (sd_flag & SD_BALANCE_WAKE) {
		if (sched_feat(AFFINE_WAKEUPS) &&
		    cpumask_test_cpu(cpu, &p->cpus_allowed))
			want_affine = 1;
		new_cpu = prev_cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, tmp) {
		/*
		 * If power savings logic is enabled for a domain, see if we
		 * are not overloaded, if so, don't balance wider.
		 */
		if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
			unsigned long power = 0;
			unsigned long nr_running = 0;
			unsigned long capacity;
			int i;

			for_each_cpu(i, sched_domain_span(tmp)) {
				power += power_of(i);
				nr_running += cpu_rq(i)->cfs.nr_running;
			}

			capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);

			if (tmp->flags & SD_POWERSAVINGS_BALANCE)
				nr_running /= 2;

			if (nr_running < capacity)
				want_sd = 0;
		}

		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {

			affine_sd = tmp;
			want_affine = 0;
		}

		if (!want_sd && !want_affine)
			break;

		if (!(tmp->flags & sd_flag))
			continue;

		if (want_sd)
			sd = tmp;
	}

	if (sched_feat(LB_SHARES_UPDATE)) {
		/*
		 * Pick the largest domain to update shares over.
		 */
		tmp = sd;
		if (affine_sd && (!tmp ||
				  cpumask_weight(sched_domain_span(affine_sd)) >
				  cpumask_weight(sched_domain_span(sd))))
			tmp = affine_sd;

		if (tmp)
			update_shares(tmp);
	}

	if (affine_sd && wake_affine(affine_sd, p, sync)) {
		new_cpu = cpu;
		goto out;
	}

	while (sd) {
		int load_idx = sd->forkexec_idx;
		struct sched_group *group;
		int weight;

		if (!(sd->flags & sd_flag)) {
			sd = sd->child;
			continue;
		}

		if (sd_flag & SD_BALANCE_WAKE)
			load_idx = sd->wake_idx;

		group = find_idlest_group(sd, p, cpu, load_idx);
		if (!group) {
			sd = sd->child;
			continue;
		}

		new_cpu = find_idlest_cpu(group, p, cpu);
		if (new_cpu == -1 || new_cpu == cpu) {
			/* Now try balancing at a lower domain level of cpu */
			sd = sd->child;
			continue;
		}

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
		weight = cpumask_weight(sched_domain_span(sd));
		sd = NULL;
		for_each_domain(cpu, tmp) {
			if (weight <= cpumask_weight(sched_domain_span(tmp)))
				break;
			if (tmp->flags & sd_flag)
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
	}

out:
	rcu_read_unlock();
	return new_cpu;
}
#endif /* CONFIG_SMP */

/*
 * Adaptive wakeup granularity: se->avg_wakeup gives the average time a task
 * runs until it wakes somebody up. Grant the current task only the remainder
 * of that expected interval (scaled by the number of runnables) before it
 * may be preempted, capped at sysctl_sched_wakeup_granularity; if it has
 * already run longer than that, the granularity drops to zero.
 */
static unsigned long
adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
{
	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
	u64 gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;

	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
}

static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
		gran = adaptive_gran(curr, se);

	/*
	 * Since 'curr' is running now, convert the gran from real-time
	 * to virtual-time in its units.
	 */
	if (sched_feat(ASYM_GRAN)) {
		/*
		 * By using 'se' instead of 'curr' we penalize light tasks, so
		 * they get preempted easier. That is, if 'se' < 'curr' then
		 * the resulting gran will be larger, therefore penalizing the
		 * lighter; if otoh 'se' > 'curr' then the resulting gran will
		 * be smaller, again penalizing the lighter task.
		 */
		if (unlikely(se->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, se);
	} else {
		if (unlikely(curr->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, curr);
	}

	return gran;
}

/*
 * Should 'se' preempt 'curr'.
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 *  w(c, s1) = -1
 *  w(c, s2) =  0
 *  w(c, s3) =  1
 *
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}
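
/*
 * The tri-state result above is used by pick_next_entity() and
 * check_preempt_wakeup(): -1 means 'se' does not have a smaller vruntime
 * than 'curr', 0 means it does but only by less than the wakeup granularity,
 * and 1 means its vruntime deficit exceeds the granularity, so it should
 * preempt.
 */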

static void set_last_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->last = se;
	}
}

static void set_next_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->next = se;
	}
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	int sync = wake_flags & WF_SYNC;
	int scale = cfs_rq->nr_running >= sched_nr_latency;

	update_curr(cfs_rq);

	if (unlikely(rt_prio(p->prio))) {
		resched_task(curr);
		return;
	}

	if (unlikely(p->sched_class != &fair_sched_class))
		return;

	if (unlikely(se == pse))
		return;

	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
		set_next_buddy(pse);

	/*
	 * We can come here with TIF_NEED_RESCHED already set from new task
	 * wake up path.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/*
	 * Batch and idle tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy != SCHED_NORMAL))
		return;

	/* Idle tasks are by definition preempted by everybody. */
	if (unlikely(curr->policy == SCHED_IDLE)) {
		resched_task(curr);
		return;
	}

	if ((sched_feat(WAKEUP_SYNC) && sync) ||
	    (sched_feat(WAKEUP_OVERLAP) &&
	     (se->avg_overlap < sysctl_sched_migration_cost &&
	      pse->avg_overlap < sysctl_sched_migration_cost))) {
		resched_task(curr);
		return;
	}

	if (sched_feat(WAKEUP_RUNNING)) {
		if (pse->avg_running < se->avg_running) {
			set_next_buddy(pse);
			resched_task(curr);
			return;
		}
	}

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	find_matching_se(&se, &pse);

	BUG_ON(!pse);

	if (wakeup_preempt_entity(se, pse) == 1) {
		resched_task(curr);
		/*
		 * Only set the backward buddy when the current task is still
		 * on the rq. This can happen when a wakeup gets interleaved
		 * with schedule on the ->pre_schedule() or idle_balance()
		 * point, either of which can drop the rq lock.
		 *
		 * Also, during early boot the idle thread is in the fair
		 * class; for obvious reasons it's a bad idea to schedule
		 * back to it.
		 */
		if (unlikely(!se->on_rq || curr == rq->idle))
			return;
		if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
			set_last_buddy(se);
	}
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

#ifdef CONFIG_SMP
/**************************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{
	struct task_struct *p = NULL;
	struct sched_entity *se;

	if (next == &cfs_rq->tasks)
		return NULL;

	se = list_entry(next, struct sched_entity, group_node);
	p = task_of(se);
	cfs_rq->balance_iterator = next->next;

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}

static unsigned long
__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move, struct sched_domain *sd,
		enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
		struct cfs_rq *cfs_rq)
{
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;
	cfs_rq_iterator.arg = cfs_rq;

	return balance_tasks(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &cfs_rq_iterator);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	long rem_load_move = max_load_move;
	int busiest_cpu = cpu_of(busiest);
	struct task_group *tg;

	rcu_read_lock();
	update_h_load(busiest_cpu);

	list_for_each_entry_rcu(tg, &task_groups, list) {
		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
		u64 rem_load, moved_load;

		/*
		 * empty group
		 */
		if (!busiest_cfs_rq->task_weight)
			continue;

		rem_load = (u64)rem_load_move * busiest_weight;
		rem_load = div_u64(rem_load, busiest_h_load + 1);

		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
				rem_load, sd, idle, all_pinned, this_best_prio,
				tg->cfs_rq[busiest_cpu]);

		if (!moved_load)
			continue;

		moved_load *= busiest_h_load;
		moved_load = div_u64(moved_load, busiest_weight + 1);

		rem_load_move -= moved_load;
		if (rem_load_move < 0)
			break;
	}
	rcu_read_unlock();

	return max_load_move - rem_load_move;
}
#else
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return __load_balance_fair(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &busiest->cfs);
}
#endif

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}
#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();

	sched_info_queued(p);

	update_curr(cfs_rq);
	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	/* 'curr' will be NULL if the child belongs to a different group */
	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
			curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	enqueue_task_fair(rq, p, 0);
}

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_fair(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * Kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void moved_group_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, &p->se, 1);
}
#endif

unsigned int get_rr_interval_fair(struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned long flags;
	struct rq *rq;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	rq = task_rq_lock(task, &flags);
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
	task_rq_unlock(rq, &flags);

	return rr_interval;
}

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.load_balance		= load_balance_fair,
	.move_one_task		= move_one_task_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,

	.prio_changed		= prio_changed_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.moved_group		= moved_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif