/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  (C) 2007 Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scalability improvements by Suresh Siddha, Nick Piggin and Peter Williams
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (sysctl_sched_tunable_scaling)
 *
 * Options are:
 *  SCHED_TUNABLESCALING_NONE - unscaled, always *1
 *  SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 *  SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}
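
/*
 * Worked example (illustrative note, not part of the original source): with
 * the default SCHED_TUNABLESCALING_LOG policy on a machine with 8 or more
 * online CPUs, cpus = min(num_online_cpus(), 8) = 8 and factor =
 * 1 + ilog2(8) = 4. update_sysctl() below then scales the normalized
 * defaults by this factor: sched_latency becomes 4 * 6ms = 24ms and
 * sched_min_granularity becomes 4 * 0.75ms = 3ms.
 */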

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}

#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
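
/*
 * Worked example (illustrative numbers, not part of the original source):
 * calc_delta_mine() computes delta_exec * weight / lw->weight without a
 * 64-bit division on every call by caching the inverse 2^32 / lw->weight in
 * lw->inv_weight. Ignoring SCHED_LOAD_RESOLUTION scaling, with delta_exec =
 * 1000000 (1ms), weight = 1024 (nice 0) and lw->weight = 2048 (total queue
 * weight), inv_weight = 2^32 / 2048 = 2097152 and
 * SRR(1000000 * 1024 * 2097152, 32) = 500000, i.e. the entity is credited
 * half of the wall-clock delta, matching its half share of the queue weight.
 */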

const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}
264
265static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
266 int force_update);
267
268static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
269{
270 if (!cfs_rq->on_list) {
271
272
273
274
275
276
277 if (cfs_rq->tg->parent &&
278 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
279 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
280 &rq_of(cfs_rq)->leaf_cfs_rq_list);
281 } else {
282 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
283 &rq_of(cfs_rq)->leaf_cfs_rq_list);
284 }
285
286 cfs_rq->on_list = 1;
287
288 update_cfs_rq_blocked_load(cfs_rq, 0);
289 }
290}
291
292static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
293{
294 if (cfs_rq->on_list) {
295 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
296 cfs_rq->on_list = 0;
297 }
298}
299
300
301#define for_each_leaf_cfs_rq(rq, cfs_rq) \
302 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
303
304
305static inline int
306is_same_group(struct sched_entity *se, struct sched_entity *pse)
307{
308 if (se->cfs_rq == pse->cfs_rq)
309 return 1;
310
311 return 0;
312}
313
314static inline struct sched_entity *parent_entity(struct sched_entity *se)
315{
316 return se->parent;
317}
318
319
320static inline int depth_se(struct sched_entity *se)
321{
322 int depth = 0;
323
324 for_each_sched_entity(se)
325 depth++;
326
327 return depth;
328}
329
330static void
331find_matching_se(struct sched_entity **se, struct sched_entity **pse)
332{
333 int se_depth, pse_depth;
334
335
336
337
338
339
340
341
342
343 se_depth = depth_se(*se);
344 pse_depth = depth_se(*pse);
345
346 while (se_depth > pse_depth) {
347 se_depth--;
348 *se = parent_entity(*se);
349 }
350
351 while (pse_depth > se_depth) {
352 pse_depth--;
353 *pse = parent_entity(*pse);
354 }
355
356 while (!is_same_group(*se, *pse)) {
357 *se = parent_entity(*se);
358 *pse = parent_entity(*pse);
359 }
360}
361
362#else
363
364static inline struct task_struct *task_of(struct sched_entity *se)
365{
366 return container_of(se, struct task_struct, se);
367}
368
369static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
370{
371 return container_of(cfs_rq, struct rq, cfs);
372}
373
374#define entity_is_task(se) 1
375
376#define for_each_sched_entity(se) \
377 for (; se; se = NULL)
378
379static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
380{
381 return &task_rq(p)->cfs;
382}
383
384static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
385{
386 struct task_struct *p = task_of(se);
387 struct rq *rq = task_rq(p);
388
389 return &rq->cfs;
390}
391
392
393static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
394{
395 return NULL;
396}
397
398static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
399{
400}
401
402static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
403{
404}
405
406#define for_each_leaf_cfs_rq(rq, cfs_rq) \
407 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
408
409static inline int
410is_same_group(struct sched_entity *se, struct sched_entity *pse)
411{
412 return 1;
413}
414
415static inline struct sched_entity *parent_entity(struct sched_entity *se)
416{
417 return NULL;
418}
419
420static inline void
421find_matching_se(struct sched_entity **se, struct sched_entity **pse)
422{
423}
424
425#endif
426
static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}
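
/*
 * Note (added for clarity, not part of the original source): vruntime
 * comparisons are done on the signed difference so they stay correct across
 * u64 wraparound. Example: with a->vruntime = 10 and b->vruntime =
 * ULLONG_MAX - 5 (i.e. b is 16 units "behind" a across the wrap point),
 * (s64)(a->vruntime - b->vruntime) = 16 > 0, so entity_before(a, b) is false
 * and b is still ordered before a even though its raw value is numerically
 * larger.
 */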

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We dont care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
521
522static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
523{
524 if (cfs_rq->rb_leftmost == &se->run_node) {
525 struct rb_node *next_node;
526
527 next_node = rb_next(&se->run_node);
528 cfs_rq->rb_leftmost = next_node;
529 }
530
531 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
532}
533
534struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
535{
536 struct rb_node *left = cfs_rq->rb_leftmost;
537
538 if (!left)
539 return NULL;
540
541 return rb_entry(left, struct sched_entity, run_node);
542}
543
544static struct sched_entity *__pick_next_entity(struct sched_entity *se)
545{
546 struct rb_node *next = rb_next(&se->run_node);
547
548 if (!next)
549 return NULL;
550
551 return rb_entry(next, struct sched_entity, run_node);
552}
553
554#ifdef CONFIG_SCHED_DEBUG
555struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
556{
557 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
558
559 if (!last)
560 return NULL;
561
562 return rb_entry(last, struct sched_entity, run_node);
563}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
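
/*
 * Worked example (illustrative, not part of the original source): with the
 * defaults sysctl_sched_latency = 6ms, sysctl_sched_min_granularity = 0.75ms
 * and sched_nr_latency = 8, a runqueue with 4 tasks keeps the 6ms period
 * (each nice-0 task gets a ~1.5ms slice), while a runqueue with 16 tasks
 * stretches the period to 16 * 0.75ms = 12ms so that no slice drops below
 * the minimum granularity.
 */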

/*
 * We calculate the wall-clock slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock_task;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}
716
717static inline void
718update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
719{
720 schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
721}
722
723
724
725
726static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
727{
728
729
730
731
732 if (se != cfs_rq->curr)
733 update_stats_wait_start(cfs_rq, se);
734}
735
736static void
737update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
738{
739 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
740 rq_of(cfs_rq)->clock - se->statistics.wait_start));
741 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
742 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
743 rq_of(cfs_rq)->clock - se->statistics.wait_start);
744#ifdef CONFIG_SCHEDSTATS
745 if (entity_is_task(se)) {
746 trace_sched_stat_wait(task_of(se),
747 rq_of(cfs_rq)->clock - se->statistics.wait_start);
748 }
749#endif
750 schedstat_set(se->statistics.wait_start, 0);
751}
752
753static inline void
754update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
755{
756
757
758
759
760 if (se != cfs_rq->curr)
761 update_stats_wait_end(cfs_rq, se);
762}
763
764
765
766
767static inline void
768update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
769{
770
771
772
773 se->exec_start = rq_of(cfs_rq)->clock_task;
774}
775

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * numa task sample period in ms
 */
unsigned int sysctl_numa_balancing_scan_period_min = 100;
unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;
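
/*
 * Illustrative reading of the defaults (added note, not part of the original
 * source): a freshly started task waits scan_delay = 1000ms before its
 * address space is first scanned, then up to scan_size = 256MB of eligible
 * VMAs is marked for NUMA hinting faults every scan_period, which adapts
 * between 100ms and 5000ms (100*50) and is reset every 60s (100*600 ms).
 */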
793
794static void task_numa_placement(struct task_struct *p)
795{
796 int seq;
797
798 if (!p->mm)
799 return;
800 seq = ACCESS_ONCE(p->mm->numa_scan_seq);
801 if (p->numa_scan_seq == seq)
802 return;
803 p->numa_scan_seq = seq;
804
805
806}
807
808
809
810
811void task_numa_fault(int node, int pages, bool migrated)
812{
813 struct task_struct *p = current;
814
815 if (!sched_feat_numa(NUMA))
816 return;
817
818
819
820
821
822
823
824 if (!migrated)
825 p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
826 p->numa_scan_period + jiffies_to_msecs(10));
827
828 task_numa_placement(p);
829}
830
831static void reset_ptenuma_scan(struct task_struct *p)
832{
833 ACCESS_ONCE(p->mm->numa_scan_seq)++;
834 p->mm->numa_scan_offset = 0;
835}
836
837
838
839
840
841void task_numa_work(struct callback_head *work)
842{
843 unsigned long migrate, next_scan, now = jiffies;
844 struct task_struct *p = current;
845 struct mm_struct *mm = p->mm;
846 struct vm_area_struct *vma;
847 unsigned long start, end;
848 long pages;
849
850 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
851
852 work->next = work;
853
854
855
856
857
858
859
860
861 if (p->flags & PF_EXITING)
862 return;
863
864
865
866
867
868
869
870
871 if (mm->first_nid == NUMA_PTE_SCAN_INIT)
872 mm->first_nid = numa_node_id();
873 if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
874
875 if (numa_node_id() == mm->first_nid &&
876 !sched_feat_numa(NUMA_FORCE))
877 return;
878
879 mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
880 }
881
882
883
884
885
886
887
888 migrate = mm->numa_next_reset;
889 if (time_after(now, migrate)) {
890 p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
891 next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
892 xchg(&mm->numa_next_reset, next_scan);
893 }
894
895
896
897
898 migrate = mm->numa_next_scan;
899 if (time_before(now, migrate))
900 return;
901
902 if (p->numa_scan_period == 0)
903 p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
904
905 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
906 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
907 return;
908
909
910
911
912
913
914 if (migrate_ratelimited(numa_node_id()))
915 return;
916
917 start = mm->numa_scan_offset;
918 pages = sysctl_numa_balancing_scan_size;
919 pages <<= 20 - PAGE_SHIFT;
920 if (!pages)
921 return;
922
923 down_read(&mm->mmap_sem);
924 vma = find_vma(mm, start);
925 if (!vma) {
926 reset_ptenuma_scan(p);
927 start = 0;
928 vma = mm->mmap;
929 }
930 for (; vma; vma = vma->vm_next) {
931 if (!vma_migratable(vma))
932 continue;
933
934
935 if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
936 continue;
937
938 do {
939 start = max(start, vma->vm_start);
940 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
941 end = min(end, vma->vm_end);
942 pages -= change_prot_numa(vma, start, end);
943
944 start = end;
945 if (pages <= 0)
946 goto out;
947 } while (end != vma->vm_end);
948 }
949
950out:
951
952
953
954
955
956
957 if (vma)
958 mm->numa_scan_offset = start;
959 else
960 reset_ptenuma_scan(p);
961 up_read(&mm->mmap_sem);
962}
963
964
965
966
967void task_tick_numa(struct rq *rq, struct task_struct *curr)
968{
969 struct callback_head *work = &curr->numa_work;
970 u64 period, now;
971
972
973
974
975 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
976 return;
977
978
979
980
981
982
983
984 now = curr->se.sum_exec_runtime;
985 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
986
987 if (now - curr->node_stamp > period) {
988 if (!curr->node_stamp)
989 curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
990 curr->node_stamp = now;
991
992 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
993 init_task_work(work, task_numa_work);
994 task_work_add(curr, work, true);
995 }
996 }
997}
998#else
999static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1000{
1001}
1002#endif
1003
1004static void
1005account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1006{
1007 update_load_add(&cfs_rq->load, se->load.weight);
1008 if (!parent_entity(se))
1009 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
1010#ifdef CONFIG_SMP
1011 if (entity_is_task(se))
1012 list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
1013#endif
1014 cfs_rq->nr_running++;
1015}
1016
1017static void
1018account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1019{
1020 update_load_sub(&cfs_rq->load, se->load.weight);
1021 if (!parent_entity(se))
1022 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
1023 if (entity_is_task(se))
1024 list_del_init(&se->group_node);
1025 cfs_rq->nr_running--;
1026}
1027
1028#ifdef CONFIG_FAIR_GROUP_SCHED
1029# ifdef CONFIG_SMP
1030static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1031{
1032 long tg_weight;
1033
1034
1035
1036
1037
1038
1039 tg_weight = atomic64_read(&tg->load_avg);
1040 tg_weight -= cfs_rq->tg_load_contrib;
1041 tg_weight += cfs_rq->load.weight;
1042
1043 return tg_weight;
1044}
1045
1046static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
1047{
1048 long tg_weight, load, shares;
1049
1050 tg_weight = calc_tg_weight(tg, cfs_rq);
1051 load = cfs_rq->load.weight;
1052
1053 shares = (tg->shares * load);
1054 if (tg_weight)
1055 shares /= tg_weight;
1056
1057 if (shares < MIN_SHARES)
1058 shares = MIN_SHARES;
1059 if (shares > tg->shares)
1060 shares = tg->shares;
1061
1062 return shares;
1063}
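
/*
 * Worked example (illustrative, not part of the original source): if a task
 * group has tg->shares = 1024 and its cfs_rq on this CPU carries half of the
 * group's total load (load = 512 against tg_weight = 1024), the group entity
 * on this CPU is given shares = 1024 * 512 / 1024 = 512, clamped between
 * MIN_SHARES and tg->shares.
 */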
1064# else
1065static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
1066{
1067 return tg->shares;
1068}
1069# endif
1070static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1071 unsigned long weight)
1072{
1073 if (se->on_rq) {
1074
1075 if (cfs_rq->curr == se)
1076 update_curr(cfs_rq);
1077 account_entity_dequeue(cfs_rq, se);
1078 }
1079
1080 update_load_set(&se->load, weight);
1081
1082 if (se->on_rq)
1083 account_entity_enqueue(cfs_rq, se);
1084}
1085
1086static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1087
1088static void update_cfs_shares(struct cfs_rq *cfs_rq)
1089{
1090 struct task_group *tg;
1091 struct sched_entity *se;
1092 long shares;
1093
1094 tg = cfs_rq->tg;
1095 se = tg->se[cpu_of(rq_of(cfs_rq))];
1096 if (!se || throttled_hierarchy(cfs_rq))
1097 return;
1098#ifndef CONFIG_SMP
1099 if (likely(se->load.weight == tg->shares))
1100 return;
1101#endif
1102 shares = calc_cfs_shares(cfs_rq, tg);
1103
1104 reweight_entity(cfs_rq_of(se), se, shares);
1105}
1106#else
1107static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
1108{
1109}
1110#endif

/* Only depends on SMP, FAIR_GROUP_SCHED may be removed when useful in lb */
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
/*
 * We choose a half-life close to 1 scheduling period.
 * Note: the tables below must be kept in sync with this value.
 */
#define LOAD_AVG_PERIOD 32
#define LOAD_AVG_MAX 47742	/* maximum possible load avg */
#define LOAD_AVG_MAX_N 345	/* number of full periods to produce LOAD_AVG_MAX */

/* Precomputed fixed inverse multiplies for multiplication by y^n */
static const u32 runnable_avg_yN_inv[] = {
	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
	0x85aac367, 0x82cd8698,
};

/*
 * Precomputed \Sum y^k { 1<=k<=n }. The values are floored so that
 * runnable_avg_sum is never over-estimated when a period boundary passes.
 */
static const u32 runnable_avg_yN_sum[] = {
	    0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
	 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
	17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
};

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static __always_inline u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (!n)
		return val;
	else if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * with a look-up table which covers y^n (n < PERIOD)
	 *
	 * To achieve constant-time decay_load.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val *= runnable_avg_yN_inv[local_n];
	/* We don't use SRR here since we always want to round down. */
	return val >> 32;
}
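
/*
 * Worked example (illustrative, not part of the original source): with
 * y^32 = 1/2, decaying a contribution of 47742 (LOAD_AVG_MAX) by n = 64
 * periods first halves it twice via the shift (64/32 = 2, giving 11935) and
 * then multiplies by runnable_avg_yN_inv[0] (~1.0), so roughly a quarter of
 * the original value remains, as expected after two half-lives.
 */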

/*
 * For updates fully spanning n periods, the contribution to runnable
 * average will be: \Sum 1024*y^n
 *
 * We can compute this reasonably efficiently by combining:
 *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n { for n < PERIOD }
 */
static u32 __compute_runnable_contrib(u64 n)
{
	u32 contrib = 0;

	if (likely(n <= LOAD_AVG_PERIOD))
		return runnable_avg_yN_sum[n];
	else if (unlikely(n >= LOAD_AVG_MAX_N))
		return LOAD_AVG_MAX;

	/* Compute \Sum y^n combining precomputed values for y^i, \Sum y^j */
	do {
		contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
		contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];

		n -= LOAD_AVG_PERIOD;
	} while (n > LOAD_AVG_PERIOD);

	contrib = decay_load(contrib, n);
	return contrib + runnable_avg_yN_sum[n];
}

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have a new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int __update_entity_runnable_avg(u64 now,
							struct sched_avg *sa,
							int runnable)
{
	u64 delta, periods;
	u32 runnable_contrib;
	int delta_w, decayed = 0;
1238
1239 delta = now - sa->last_runnable_update;
1240
1241
1242
1243
1244 if ((s64)delta < 0) {
1245 sa->last_runnable_update = now;
1246 return 0;
1247 }
1248
1249
1250
1251
1252
1253 delta >>= 10;
1254 if (!delta)
1255 return 0;
1256 sa->last_runnable_update = now;
1257
1258
1259 delta_w = sa->runnable_avg_period % 1024;
1260 if (delta + delta_w >= 1024) {
1261
1262 decayed = 1;
1263
1264
1265
1266
1267
1268
1269 delta_w = 1024 - delta_w;
1270 if (runnable)
1271 sa->runnable_avg_sum += delta_w;
1272 sa->runnable_avg_period += delta_w;
1273
1274 delta -= delta_w;
1275
1276
1277 periods = delta / 1024;
1278 delta %= 1024;
1279
1280 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1281 periods + 1);
1282 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1283 periods + 1);
1284
1285
1286 runnable_contrib = __compute_runnable_contrib(periods);
1287 if (runnable)
1288 sa->runnable_avg_sum += runnable_contrib;
1289 sa->runnable_avg_period += runnable_contrib;
1290 }
1291
1292
1293 if (runnable)
1294 sa->runnable_avg_sum += delta;
1295 sa->runnable_avg_period += delta;
1296
1297 return decayed;
1298}
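
/*
 * Worked example (illustrative, not part of the original source): suppose the
 * last update left runnable_avg_period mid-window with delta_w = 300us and a
 * runnable entity then runs for delta = 2500us. The first 724us completes the
 * current 1024us window, one fully elapsed 1024us window is accumulated via
 * __compute_runnable_contrib(1), and the remaining 752us starts the new
 * current window; the previously accumulated sums are decayed by y^2 because
 * two window boundaries were crossed (periods + 1 = 2).
 */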
1299
1300
1301static inline u64 __synchronize_entity_decay(struct sched_entity *se)
1302{
1303 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1304 u64 decays = atomic64_read(&cfs_rq->decay_counter);
1305
1306 decays -= se->avg.decay_count;
1307 if (!decays)
1308 return 0;
1309
1310 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1311 se->avg.decay_count = 0;
1312
1313 return decays;
1314}
1315
1316#ifdef CONFIG_FAIR_GROUP_SCHED
1317static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1318 int force_update)
1319{
1320 struct task_group *tg = cfs_rq->tg;
1321 s64 tg_contrib;
1322
1323 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1324 tg_contrib -= cfs_rq->tg_load_contrib;
1325
1326 if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1327 atomic64_add(tg_contrib, &tg->load_avg);
1328 cfs_rq->tg_load_contrib += tg_contrib;
1329 }
1330}
1331
1332
1333
1334
1335
1336static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1337 struct cfs_rq *cfs_rq)
1338{
1339 struct task_group *tg = cfs_rq->tg;
1340 long contrib;
1341
1342
1343 contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1344 sa->runnable_avg_period + 1);
1345 contrib -= cfs_rq->tg_runnable_contrib;
1346
1347 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1348 atomic_add(contrib, &tg->runnable_avg);
1349 cfs_rq->tg_runnable_contrib += contrib;
1350 }
1351}
1352
1353static inline void __update_group_entity_contrib(struct sched_entity *se)
1354{
1355 struct cfs_rq *cfs_rq = group_cfs_rq(se);
1356 struct task_group *tg = cfs_rq->tg;
1357 int runnable_avg;
1358
1359 u64 contrib;
1360
1361 contrib = cfs_rq->tg_load_contrib * tg->shares;
1362 se->avg.load_avg_contrib = div64_u64(contrib,
1363 atomic64_read(&tg->load_avg) + 1);
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388 runnable_avg = atomic_read(&tg->runnable_avg);
1389 if (runnable_avg < NICE_0_LOAD) {
1390 se->avg.load_avg_contrib *= runnable_avg;
1391 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1392 }
1393}
1394#else
1395static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1396 int force_update) {}
1397static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1398 struct cfs_rq *cfs_rq) {}
1399static inline void __update_group_entity_contrib(struct sched_entity *se) {}
1400#endif
1401
1402static inline void __update_task_entity_contrib(struct sched_entity *se)
1403{
1404 u32 contrib;
1405
1406
1407 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1408 contrib /= (se->avg.runnable_avg_period + 1);
1409 se->avg.load_avg_contrib = scale_load(contrib);
1410}
1411
1412
1413static long __update_entity_load_avg_contrib(struct sched_entity *se)
1414{
1415 long old_contrib = se->avg.load_avg_contrib;
1416
1417 if (entity_is_task(se)) {
1418 __update_task_entity_contrib(se);
1419 } else {
1420 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
1421 __update_group_entity_contrib(se);
1422 }
1423
1424 return se->avg.load_avg_contrib - old_contrib;
1425}
1426
1427static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1428 long load_contrib)
1429{
1430 if (likely(load_contrib < cfs_rq->blocked_load_avg))
1431 cfs_rq->blocked_load_avg -= load_contrib;
1432 else
1433 cfs_rq->blocked_load_avg = 0;
1434}
1435
1436static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1437
1438
1439static inline void update_entity_load_avg(struct sched_entity *se,
1440 int update_cfs_rq)
1441{
1442 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1443 long contrib_delta;
1444 u64 now;
1445
1446
1447
1448
1449
1450 if (entity_is_task(se))
1451 now = cfs_rq_clock_task(cfs_rq);
1452 else
1453 now = cfs_rq_clock_task(group_cfs_rq(se));
1454
1455 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
1456 return;
1457
1458 contrib_delta = __update_entity_load_avg_contrib(se);
1459
1460 if (!update_cfs_rq)
1461 return;
1462
1463 if (se->on_rq)
1464 cfs_rq->runnable_load_avg += contrib_delta;
1465 else
1466 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1467}
1468
1469
1470
1471
1472
1473static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
1474{
1475 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
1476 u64 decays;
1477
1478 decays = now - cfs_rq->last_decay;
1479 if (!decays && !force_update)
1480 return;
1481
1482 if (atomic64_read(&cfs_rq->removed_load)) {
1483 u64 removed_load = atomic64_xchg(&cfs_rq->removed_load, 0);
1484 subtract_blocked_load_contrib(cfs_rq, removed_load);
1485 }
1486
1487 if (decays) {
1488 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1489 decays);
1490 atomic64_add(decays, &cfs_rq->decay_counter);
1491 cfs_rq->last_decay = now;
1492 }
1493
1494 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
1495}
1496
1497static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1498{
1499 __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
1500 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
1501}
1502
1503
1504static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
1505 struct sched_entity *se,
1506 int wakeup)
1507{
1508
1509
1510
1511
1512
1513 if (unlikely(se->avg.decay_count <= 0)) {
1514 se->avg.last_runnable_update = rq_of(cfs_rq)->clock_task;
1515 if (se->avg.decay_count) {
1516
1517
1518
1519
1520
1521
1522
1523
1524 se->avg.last_runnable_update -= (-se->avg.decay_count)
1525 << 20;
1526 update_entity_load_avg(se, 0);
1527
1528 se->avg.decay_count = 0;
1529 }
1530 wakeup = 0;
1531 } else {
1532 __synchronize_entity_decay(se);
1533 }
1534
1535
1536 if (wakeup) {
1537 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
1538 update_entity_load_avg(se, 0);
1539 }
1540
1541 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
1542
1543 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
1544}
1545
1546
1547
1548
1549
1550
1551static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
1552 struct sched_entity *se,
1553 int sleep)
1554{
1555 update_entity_load_avg(se, 1);
1556
1557 update_cfs_rq_blocked_load(cfs_rq, !sleep);
1558
1559 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
1560 if (sleep) {
1561 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1562 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1563 }
1564}
1565#else
1566static inline void update_entity_load_avg(struct sched_entity *se,
1567 int update_cfs_rq) {}
1568static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
1569static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
1570 struct sched_entity *se,
1571 int wakeup) {}
1572static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
1573 struct sched_entity *se,
1574 int sleep) {}
1575static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1576 int force_update) {}
1577#endif
1578
1579static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
1580{
1581#ifdef CONFIG_SCHEDSTATS
1582 struct task_struct *tsk = NULL;
1583
1584 if (entity_is_task(se))
1585 tsk = task_of(se);
1586
1587 if (se->statistics.sleep_start) {
1588 u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
1589
1590 if ((s64)delta < 0)
1591 delta = 0;
1592
1593 if (unlikely(delta > se->statistics.sleep_max))
1594 se->statistics.sleep_max = delta;
1595
1596 se->statistics.sleep_start = 0;
1597 se->statistics.sum_sleep_runtime += delta;
1598
1599 if (tsk) {
1600 account_scheduler_latency(tsk, delta >> 10, 1);
1601 trace_sched_stat_sleep(tsk, delta);
1602 }
1603 }
1604 if (se->statistics.block_start) {
1605 u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
1606
1607 if ((s64)delta < 0)
1608 delta = 0;
1609
1610 if (unlikely(delta > se->statistics.block_max))
1611 se->statistics.block_max = delta;
1612
1613 se->statistics.block_start = 0;
1614 se->statistics.sum_sleep_runtime += delta;
1615
1616 if (tsk) {
1617 if (tsk->in_iowait) {
1618 se->statistics.iowait_sum += delta;
1619 se->statistics.iowait_count++;
1620 trace_sched_stat_iowait(tsk, delta);
1621 }
1622
1623 trace_sched_stat_blocked(tsk, delta);
1624
1625
1626
1627
1628
1629
1630 if (unlikely(prof_on == SLEEP_PROFILING)) {
1631 profile_hits(SLEEP_PROFILING,
1632 (void *)get_wchan(tsk),
1633 delta >> 20);
1634 }
1635 account_scheduler_latency(tsk, delta >> 10, 0);
1636 }
1637 }
1638#endif
1639}
1640
1641static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1642{
1643#ifdef CONFIG_SCHED_DEBUG
1644 s64 d = se->vruntime - cfs_rq->min_vruntime;
1645
1646 if (d < 0)
1647 d = -d;
1648
1649 if (d > 3*sysctl_sched_latency)
1650 schedstat_inc(cfs_rq, nr_spread_over);
1651#endif
1652}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial) {
		unsigned long thresh = sysctl_sched_latency;

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* ensure we never gain time by being placed backwards. */
	vruntime = max_vruntime(se->vruntime, vruntime);

	se->vruntime = vruntime;
}
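
/*
 * Worked example (illustrative, not part of the original source): with the
 * default 6ms sysctl_sched_latency and GENTLE_FAIR_SLEEPERS enabled, a task
 * waking from sleep is placed at min_vruntime - 3ms (half a latency of
 * credit), but never earlier than its own previous vruntime, so a task that
 * sleeps only briefly cannot bank extra runtime from repeated short sleeps.
 */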
1687
1688static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1689
1690static void
1691enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1692{
1693
1694
1695
1696
1697 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
1698 se->vruntime += cfs_rq->min_vruntime;
1699
1700
1701
1702
1703 update_curr(cfs_rq);
1704 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
1705 account_entity_enqueue(cfs_rq, se);
1706 update_cfs_shares(cfs_rq);
1707
1708 if (flags & ENQUEUE_WAKEUP) {
1709 place_entity(cfs_rq, se, 0);
1710 enqueue_sleeper(cfs_rq, se);
1711 }
1712
1713 update_stats_enqueue(cfs_rq, se);
1714 check_spread(cfs_rq, se);
1715 if (se != cfs_rq->curr)
1716 __enqueue_entity(cfs_rq, se);
1717 se->on_rq = 1;
1718
1719 if (cfs_rq->nr_running == 1) {
1720 list_add_leaf_cfs_rq(cfs_rq);
1721 check_enqueue_throttle(cfs_rq);
1722 }
1723}
1724
1725static void __clear_buddies_last(struct sched_entity *se)
1726{
1727 for_each_sched_entity(se) {
1728 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1729 if (cfs_rq->last == se)
1730 cfs_rq->last = NULL;
1731 else
1732 break;
1733 }
1734}
1735
1736static void __clear_buddies_next(struct sched_entity *se)
1737{
1738 for_each_sched_entity(se) {
1739 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1740 if (cfs_rq->next == se)
1741 cfs_rq->next = NULL;
1742 else
1743 break;
1744 }
1745}
1746
1747static void __clear_buddies_skip(struct sched_entity *se)
1748{
1749 for_each_sched_entity(se) {
1750 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1751 if (cfs_rq->skip == se)
1752 cfs_rq->skip = NULL;
1753 else
1754 break;
1755 }
1756}
1757
1758static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1759{
1760 if (cfs_rq->last == se)
1761 __clear_buddies_last(se);
1762
1763 if (cfs_rq->next == se)
1764 __clear_buddies_next(se);
1765
1766 if (cfs_rq->skip == se)
1767 __clear_buddies_skip(se);
1768}
1769
1770static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1771
1772static void
1773dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1774{
1775
1776
1777
1778 update_curr(cfs_rq);
1779 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
1780
1781 update_stats_dequeue(cfs_rq, se);
1782 if (flags & DEQUEUE_SLEEP) {
1783#ifdef CONFIG_SCHEDSTATS
1784 if (entity_is_task(se)) {
1785 struct task_struct *tsk = task_of(se);
1786
1787 if (tsk->state & TASK_INTERRUPTIBLE)
1788 se->statistics.sleep_start = rq_of(cfs_rq)->clock;
1789 if (tsk->state & TASK_UNINTERRUPTIBLE)
1790 se->statistics.block_start = rq_of(cfs_rq)->clock;
1791 }
1792#endif
1793 }
1794
1795 clear_buddies(cfs_rq, se);
1796
1797 if (se != cfs_rq->curr)
1798 __dequeue_entity(cfs_rq, se);
1799 se->on_rq = 0;
1800 account_entity_dequeue(cfs_rq, se);
1801
1802
1803
1804
1805
1806
1807 if (!(flags & DEQUEUE_SLEEP))
1808 se->vruntime -= cfs_rq->min_vruntime;
1809
1810
1811 return_cfs_rq_runtime(cfs_rq);
1812
1813 update_min_vruntime(cfs_rq);
1814 update_cfs_shares(cfs_rq);
1815}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;
	struct sched_entity *se;
	s64 delta;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (delta_exec < sysctl_sched_min_granularity)
		return;

	se = __pick_first_entity(cfs_rq);
	delta = curr->vruntime - se->vruntime;

	if (delta < 0)
		return;

	if (delta > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}
1856
1857static void
1858set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
1859{
1860
1861 if (se->on_rq) {
1862
1863
1864
1865
1866
1867 update_stats_wait_end(cfs_rq, se);
1868 __dequeue_entity(cfs_rq, se);
1869 }
1870
1871 update_stats_curr_start(cfs_rq, se);
1872 cfs_rq->curr = se;
1873#ifdef CONFIG_SCHEDSTATS
1874
1875
1876
1877
1878
1879 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
1880 se->statistics.slice_max = max(se->statistics.slice_max,
1881 se->sum_exec_runtime - se->prev_sum_exec_runtime);
1882 }
1883#endif
1884 se->prev_sum_exec_runtime = se->sum_exec_runtime;
1885}
1886
1887static int
1888wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1889
1890
1891
1892
1893
1894
1895
1896
1897static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
1898{
1899 struct sched_entity *se = __pick_first_entity(cfs_rq);
1900 struct sched_entity *left = se;
1901
1902
1903
1904
1905
1906 if (cfs_rq->skip == se) {
1907 struct sched_entity *second = __pick_next_entity(se);
1908 if (second && wakeup_preempt_entity(second, left) < 1)
1909 se = second;
1910 }
1911
1912
1913
1914
1915 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1916 se = cfs_rq->last;
1917
1918
1919
1920
1921 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1922 se = cfs_rq->next;
1923
1924 clear_buddies(cfs_rq, se);
1925
1926 return se;
1927}
1928
1929static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1930
1931static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
1932{
1933
1934
1935
1936
1937 if (prev->on_rq)
1938 update_curr(cfs_rq);
1939
1940
1941 check_cfs_rq_runtime(cfs_rq);
1942
1943 check_spread(cfs_rq, prev);
1944 if (prev->on_rq) {
1945 update_stats_wait_start(cfs_rq, prev);
1946
1947 __enqueue_entity(cfs_rq, prev);
1948
1949 update_entity_load_avg(prev, 1);
1950 }
1951 cfs_rq->curr = NULL;
1952}
1953
1954static void
1955entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
1956{
1957
1958
1959
1960 update_curr(cfs_rq);
1961
1962
1963
1964
1965 update_entity_load_avg(curr, 1);
1966 update_cfs_rq_blocked_load(cfs_rq, 1);
1967
1968#ifdef CONFIG_SCHED_HRTICK
1969
1970
1971
1972
1973 if (queued) {
1974 resched_task(rq_of(cfs_rq)->curr);
1975 return;
1976 }
1977
1978
1979
1980 if (!sched_feat(DOUBLE_TICK) &&
1981 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
1982 return;
1983#endif
1984
1985 if (cfs_rq->nr_running > 1)
1986 check_preempt_tick(cfs_rq, curr);
1987}
1988
1989
1990
1991
1992
1993
1994#ifdef CONFIG_CFS_BANDWIDTH
1995
1996#ifdef HAVE_JUMP_LABEL
1997static struct static_key __cfs_bandwidth_used;
1998
1999static inline bool cfs_bandwidth_used(void)
2000{
2001 return static_key_false(&__cfs_bandwidth_used);
2002}
2003
2004void account_cfs_bandwidth_used(int enabled, int was_enabled)
2005{
2006
2007 if (enabled && !was_enabled)
2008 static_key_slow_inc(&__cfs_bandwidth_used);
2009 else if (!enabled && was_enabled)
2010 static_key_slow_dec(&__cfs_bandwidth_used);
2011}
2012#else
2013static bool cfs_bandwidth_used(void)
2014{
2015 return true;
2016}
2017
2018void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2019#endif

/*
 * default period for cfs group bandwidth.
 * default: 0.1s, units: nanoseconds
 */
static inline u64 default_cfs_period(void)
{
	return 100000000ULL;
}

static inline u64 sched_cfs_bandwidth_slice(void)
{
	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
}
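
/*
 * Worked example (illustrative, not part of the original source): with the
 * default 100ms period and sysctl_sched_cfs_bandwidth_slice = 5000us, a
 * group whose quota is 20ms per period hands out runtime to its per-cpu
 * cfs_rqs in 5ms chunks, so at most four CPUs can draw a full slice from the
 * global pool before it must be refilled at the next period.
 */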
2034
2035
2036
2037
2038
2039
2040
2041
2042void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
2043{
2044 u64 now;
2045
2046 if (cfs_b->quota == RUNTIME_INF)
2047 return;
2048
2049 now = sched_clock_cpu(smp_processor_id());
2050 cfs_b->runtime = cfs_b->quota;
2051 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2052}
2053
2054static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2055{
2056 return &tg->cfs_bandwidth;
2057}
2058
2059
2060static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2061{
2062 if (unlikely(cfs_rq->throttle_count))
2063 return cfs_rq->throttled_clock_task;
2064
2065 return rq_of(cfs_rq)->clock_task - cfs_rq->throttled_clock_task_time;
2066}
2067
2068
2069static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2070{
2071 struct task_group *tg = cfs_rq->tg;
2072 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
2073 u64 amount = 0, min_amount, expires;
2074
2075
2076 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2077
2078 raw_spin_lock(&cfs_b->lock);
2079 if (cfs_b->quota == RUNTIME_INF)
2080 amount = min_amount;
2081 else {
2082
2083
2084
2085
2086
2087
2088 if (!cfs_b->timer_active) {
2089 __refill_cfs_bandwidth_runtime(cfs_b);
2090 __start_cfs_bandwidth(cfs_b);
2091 }
2092
2093 if (cfs_b->runtime > 0) {
2094 amount = min(cfs_b->runtime, min_amount);
2095 cfs_b->runtime -= amount;
2096 cfs_b->idle = 0;
2097 }
2098 }
2099 expires = cfs_b->runtime_expires;
2100 raw_spin_unlock(&cfs_b->lock);
2101
2102 cfs_rq->runtime_remaining += amount;
2103
2104
2105
2106
2107
2108 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2109 cfs_rq->runtime_expires = expires;
2110
2111 return cfs_rq->runtime_remaining > 0;
2112}
2113
2114
2115
2116
2117
2118static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2119{
2120 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2121 struct rq *rq = rq_of(cfs_rq);
2122
2123
2124 if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0))
2125 return;
2126
2127 if (cfs_rq->runtime_remaining < 0)
2128 return;
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2140
2141 cfs_rq->runtime_expires += TICK_NSEC;
2142 } else {
2143
2144 cfs_rq->runtime_remaining = 0;
2145 }
2146}
2147
2148static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2149 unsigned long delta_exec)
2150{
2151
2152 cfs_rq->runtime_remaining -= delta_exec;
2153 expire_cfs_rq_runtime(cfs_rq);
2154
2155 if (likely(cfs_rq->runtime_remaining > 0))
2156 return;
2157
2158
2159
2160
2161
2162 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2163 resched_task(rq_of(cfs_rq)->curr);
2164}
2165
2166static __always_inline
2167void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
2168{
2169 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
2170 return;
2171
2172 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2173}
2174
2175static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2176{
2177 return cfs_bandwidth_used() && cfs_rq->throttled;
2178}
2179
2180
2181static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2182{
2183 return cfs_bandwidth_used() && cfs_rq->throttle_count;
2184}
2185
2186
2187
2188
2189
2190
2191static inline int throttled_lb_pair(struct task_group *tg,
2192 int src_cpu, int dest_cpu)
2193{
2194 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2195
2196 src_cfs_rq = tg->cfs_rq[src_cpu];
2197 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2198
2199 return throttled_hierarchy(src_cfs_rq) ||
2200 throttled_hierarchy(dest_cfs_rq);
2201}
2202
2203
2204static int tg_unthrottle_up(struct task_group *tg, void *data)
2205{
2206 struct rq *rq = data;
2207 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2208
2209 cfs_rq->throttle_count--;
2210#ifdef CONFIG_SMP
2211 if (!cfs_rq->throttle_count) {
2212
2213 cfs_rq->throttled_clock_task_time += rq->clock_task -
2214 cfs_rq->throttled_clock_task;
2215 }
2216#endif
2217
2218 return 0;
2219}
2220
2221static int tg_throttle_down(struct task_group *tg, void *data)
2222{
2223 struct rq *rq = data;
2224 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2225
2226
2227 if (!cfs_rq->throttle_count)
2228 cfs_rq->throttled_clock_task = rq->clock_task;
2229 cfs_rq->throttle_count++;
2230
2231 return 0;
2232}
2233
2234static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
2235{
2236 struct rq *rq = rq_of(cfs_rq);
2237 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2238 struct sched_entity *se;
2239 long task_delta, dequeue = 1;
2240
2241 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2242
2243
2244 rcu_read_lock();
2245 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2246 rcu_read_unlock();
2247
2248 task_delta = cfs_rq->h_nr_running;
2249 for_each_sched_entity(se) {
2250 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2251
2252 if (!se->on_rq)
2253 break;
2254
2255 if (dequeue)
2256 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2257 qcfs_rq->h_nr_running -= task_delta;
2258
2259 if (qcfs_rq->load.weight)
2260 dequeue = 0;
2261 }
2262
2263 if (!se)
2264 rq->nr_running -= task_delta;
2265
2266 cfs_rq->throttled = 1;
2267 cfs_rq->throttled_clock = rq->clock;
2268 raw_spin_lock(&cfs_b->lock);
2269 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2270 raw_spin_unlock(&cfs_b->lock);
2271}
2272
2273void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
2274{
2275 struct rq *rq = rq_of(cfs_rq);
2276 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2277 struct sched_entity *se;
2278 int enqueue = 1;
2279 long task_delta;
2280
2281 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2282
2283 cfs_rq->throttled = 0;
2284 raw_spin_lock(&cfs_b->lock);
2285 cfs_b->throttled_time += rq->clock - cfs_rq->throttled_clock;
2286 list_del_rcu(&cfs_rq->throttled_list);
2287 raw_spin_unlock(&cfs_b->lock);
2288
2289 update_rq_clock(rq);
2290
2291 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2292
2293 if (!cfs_rq->load.weight)
2294 return;
2295
2296 task_delta = cfs_rq->h_nr_running;
2297 for_each_sched_entity(se) {
2298 if (se->on_rq)
2299 enqueue = 0;
2300
2301 cfs_rq = cfs_rq_of(se);
2302 if (enqueue)
2303 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2304 cfs_rq->h_nr_running += task_delta;
2305
2306 if (cfs_rq_throttled(cfs_rq))
2307 break;
2308 }
2309
2310 if (!se)
2311 rq->nr_running += task_delta;
2312
2313
2314 if (rq->curr == rq->idle && rq->cfs.nr_running)
2315 resched_task(rq->curr);
2316}
2317
2318static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2319 u64 remaining, u64 expires)
2320{
2321 struct cfs_rq *cfs_rq;
2322 u64 runtime = remaining;
2323
2324 rcu_read_lock();
2325 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2326 throttled_list) {
2327 struct rq *rq = rq_of(cfs_rq);
2328
2329 raw_spin_lock(&rq->lock);
2330 if (!cfs_rq_throttled(cfs_rq))
2331 goto next;
2332
2333 runtime = -cfs_rq->runtime_remaining + 1;
2334 if (runtime > remaining)
2335 runtime = remaining;
2336 remaining -= runtime;
2337
2338 cfs_rq->runtime_remaining += runtime;
2339 cfs_rq->runtime_expires = expires;
2340
2341
2342 if (cfs_rq->runtime_remaining > 0)
2343 unthrottle_cfs_rq(cfs_rq);
2344
2345next:
2346 raw_spin_unlock(&rq->lock);
2347
2348 if (!remaining)
2349 break;
2350 }
2351 rcu_read_unlock();
2352
2353 return remaining;
2354}
2355
2356
2357
2358
2359
2360
2361
2362static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2363{
2364 u64 runtime, runtime_expires;
2365 int idle = 1, throttled;
2366
2367 raw_spin_lock(&cfs_b->lock);
2368
2369 if (cfs_b->quota == RUNTIME_INF)
2370 goto out_unlock;
2371
2372 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2373
2374 idle = cfs_b->idle && !throttled;
2375 cfs_b->nr_periods += overrun;
2376
2377
2378 if (idle)
2379 goto out_unlock;
2380
2381 __refill_cfs_bandwidth_runtime(cfs_b);
2382
2383 if (!throttled) {
2384
2385 cfs_b->idle = 1;
2386 goto out_unlock;
2387 }
2388
2389
2390 cfs_b->nr_throttled += overrun;
2391
2392
2393
2394
2395
2396
2397
2398 runtime = cfs_b->runtime;
2399 runtime_expires = cfs_b->runtime_expires;
2400 cfs_b->runtime = 0;
2401
2402
2403
2404
2405
2406
2407 while (throttled && runtime > 0) {
2408 raw_spin_unlock(&cfs_b->lock);
2409
2410 runtime = distribute_cfs_runtime(cfs_b, runtime,
2411 runtime_expires);
2412 raw_spin_lock(&cfs_b->lock);
2413
2414 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2415 }
2416
2417
2418 cfs_b->runtime = runtime;
2419
2420
2421
2422
2423
2424
2425 cfs_b->idle = 0;
2426out_unlock:
2427 if (idle)
2428 cfs_b->timer_active = 0;
2429 raw_spin_unlock(&cfs_b->lock);
2430
2431 return idle;
2432}
2433
2434
2435static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2436
2437static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2438
2439static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2440
2441
2442static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2443{
2444 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2445 u64 remaining;
2446
2447
2448 if (hrtimer_callback_running(refresh_timer))
2449 return 1;
2450
2451
2452 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2453 if (remaining < min_expire)
2454 return 1;
2455
2456 return 0;
2457}
2458
2459static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2460{
2461 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2462
2463
2464 if (runtime_refresh_within(cfs_b, min_left))
2465 return;
2466
2467 start_bandwidth_timer(&cfs_b->slack_timer,
2468 ns_to_ktime(cfs_bandwidth_slack_period));
2469}
2470
2471
2472static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2473{
2474 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2475 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2476
2477 if (slack_runtime <= 0)
2478 return;
2479
2480 raw_spin_lock(&cfs_b->lock);
2481 if (cfs_b->quota != RUNTIME_INF &&
2482 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2483 cfs_b->runtime += slack_runtime;
2484
2485
2486 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2487 !list_empty(&cfs_b->throttled_cfs_rq))
2488 start_cfs_slack_bandwidth(cfs_b);
2489 }
2490 raw_spin_unlock(&cfs_b->lock);
2491
2492
2493 cfs_rq->runtime_remaining -= slack_runtime;
2494}
2495
2496static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2497{
2498 if (!cfs_bandwidth_used())
2499 return;
2500
2501 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
2502 return;
2503
2504 __return_cfs_rq_runtime(cfs_rq);
2505}
2506
2507
2508
2509
2510
2511static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2512{
2513 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2514 u64 expires;
2515
2516
2517 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2518 return;
2519
2520 raw_spin_lock(&cfs_b->lock);
2521 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2522 runtime = cfs_b->runtime;
2523 cfs_b->runtime = 0;
2524 }
2525 expires = cfs_b->runtime_expires;
2526 raw_spin_unlock(&cfs_b->lock);
2527
2528 if (!runtime)
2529 return;
2530
2531 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2532
2533 raw_spin_lock(&cfs_b->lock);
2534 if (expires == cfs_b->runtime_expires)
2535 cfs_b->runtime = runtime;
2536 raw_spin_unlock(&cfs_b->lock);
2537}
2538
2539
2540
2541
2542
2543
2544static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2545{
2546 if (!cfs_bandwidth_used())
2547 return;
2548
2549
2550 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2551 return;
2552
2553
2554 if (cfs_rq_throttled(cfs_rq))
2555 return;
2556
2557
2558 account_cfs_rq_runtime(cfs_rq, 0);
2559 if (cfs_rq->runtime_remaining <= 0)
2560 throttle_cfs_rq(cfs_rq);
2561}
2562
2563
2564static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2565{
2566 if (!cfs_bandwidth_used())
2567 return;
2568
2569 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2570 return;
2571
2572
2573
2574
2575
2576 if (cfs_rq_throttled(cfs_rq))
2577 return;
2578
2579 throttle_cfs_rq(cfs_rq);
2580}
2581
2582static inline u64 default_cfs_period(void);
2583static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
2584static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
2585
2586static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2587{
2588 struct cfs_bandwidth *cfs_b =
2589 container_of(timer, struct cfs_bandwidth, slack_timer);
2590 do_sched_cfs_slack_timer(cfs_b);
2591
2592 return HRTIMER_NORESTART;
2593}
2594
2595static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2596{
2597 struct cfs_bandwidth *cfs_b =
2598 container_of(timer, struct cfs_bandwidth, period_timer);
2599 ktime_t now;
2600 int overrun;
2601 int idle = 0;
2602
2603 for (;;) {
2604 now = hrtimer_cb_get_time(timer);
2605 overrun = hrtimer_forward(timer, now, cfs_b->period);
2606
2607 if (!overrun)
2608 break;
2609
2610 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2611 }
2612
2613 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2614}
2615
2616void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2617{
2618 raw_spin_lock_init(&cfs_b->lock);
2619 cfs_b->runtime = 0;
2620 cfs_b->quota = RUNTIME_INF;
2621 cfs_b->period = ns_to_ktime(default_cfs_period());
2622
2623 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2624 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2625 cfs_b->period_timer.function = sched_cfs_period_timer;
2626 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2627 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2628}
2629
2630static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2631{
2632 cfs_rq->runtime_enabled = 0;
2633 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2634}
2635
2636
2637void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2638{
2639
2640
2641
2642
2643
2644
2645 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2646 raw_spin_unlock(&cfs_b->lock);
2647
2648 hrtimer_cancel(&cfs_b->period_timer);
2649
2650 raw_spin_lock(&cfs_b->lock);
2651
2652 if (cfs_b->timer_active)
2653 return;
2654 }
2655
2656 cfs_b->timer_active = 1;
2657 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2658}
2659
2660static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2661{
2662 hrtimer_cancel(&cfs_b->period_timer);
2663 hrtimer_cancel(&cfs_b->slack_timer);
2664}
2665
2666static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
2667{
2668 struct cfs_rq *cfs_rq;
2669
2670 for_each_leaf_cfs_rq(rq, cfs_rq) {
2671 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2672
2673 if (!cfs_rq->runtime_enabled)
2674 continue;
2675
2676
2677
2678
2679
2680 cfs_rq->runtime_remaining = cfs_b->quota;
2681 if (cfs_rq_throttled(cfs_rq))
2682 unthrottle_cfs_rq(cfs_rq);
2683 }
2684}
2685
2686#else
2687static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2688{
2689 return rq_of(cfs_rq)->clock_task;
2690}
2691
2692static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2693 unsigned long delta_exec) {}
2694static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2695static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
2696static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2697
2698static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2699{
2700 return 0;
2701}
2702
2703static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2704{
2705 return 0;
2706}
2707
2708static inline int throttled_lb_pair(struct task_group *tg,
2709 int src_cpu, int dest_cpu)
2710{
2711 return 0;
2712}
2713
2714void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2715
2716#ifdef CONFIG_FAIR_GROUP_SCHED
2717static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2718#endif
2719
2720static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2721{
2722 return NULL;
2723}
2724static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2725static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
2726
2727#endif
2728
2729
2730
2731
2732
2733#ifdef CONFIG_SCHED_HRTICK
2734static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2735{
2736 struct sched_entity *se = &p->se;
2737 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2738
2739 WARN_ON(task_rq(p) != rq);
2740
2741 if (cfs_rq->nr_running > 1) {
2742 u64 slice = sched_slice(cfs_rq, se);
2743 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2744 s64 delta = slice - ran;
2745
2746 if (delta < 0) {
2747 if (rq->curr == p)
2748 resched_task(p);
2749 return;
2750 }
2751
2752
2753
2754
2755
2756 if (rq->curr != p)
2757 delta = max_t(s64, 10000LL, delta);
2758
2759 hrtick_start(rq, delta);
2760 }
2761}
2762
2763
2764
2765
2766
2767
2768static void hrtick_update(struct rq *rq)
2769{
2770 struct task_struct *curr = rq->curr;
2771
2772 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
2773 return;
2774
2775 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2776 hrtick_start_fair(rq, curr);
2777}
2778#else
2779static inline void
2780hrtick_start_fair(struct rq *rq, struct task_struct *p)
2781{
2782}
2783
2784static inline void hrtick_update(struct rq *rq)
2785{
2786}
2787#endif
2788
2789
2790
2791
2792
2793
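/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */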
2794static void
2795enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2796{
2797 struct cfs_rq *cfs_rq;
2798 struct sched_entity *se = &p->se;
2799
2800 for_each_sched_entity(se) {
2801 if (se->on_rq)
2802 break;
2803 cfs_rq = cfs_rq_of(se);
2804 enqueue_entity(cfs_rq, se, flags);
2805
2806
2807
2808
2809
2810
2811
2812 if (cfs_rq_throttled(cfs_rq))
2813 break;
2814 cfs_rq->h_nr_running++;
2815
2816 flags = ENQUEUE_WAKEUP;
2817 }
2818
2819 for_each_sched_entity(se) {
2820 cfs_rq = cfs_rq_of(se);
2821 cfs_rq->h_nr_running++;
2822
2823 if (cfs_rq_throttled(cfs_rq))
2824 break;
2825
2826 update_cfs_shares(cfs_rq);
2827 update_entity_load_avg(se, 1);
2828 }
2829
2830 if (!se) {
2831 update_rq_runnable_avg(rq, rq->nr_running);
2832 inc_nr_running(rq);
2833 }
2834 hrtick_update(rq);
2835}
2836
2837static void set_next_buddy(struct sched_entity *se);
2838
2839
2840
2841
2842
2843
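/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */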
2844static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2845{
2846 struct cfs_rq *cfs_rq;
2847 struct sched_entity *se = &p->se;
2848 int task_sleep = flags & DEQUEUE_SLEEP;
2849
2850 for_each_sched_entity(se) {
2851 cfs_rq = cfs_rq_of(se);
2852 dequeue_entity(cfs_rq, se, flags);
2853
2854
2855
2856
2857
2858
2859
2860 if (cfs_rq_throttled(cfs_rq))
2861 break;
2862 cfs_rq->h_nr_running--;
2863
2864
2865 if (cfs_rq->load.weight) {
2866
2867
2868
2869
2870 if (task_sleep && parent_entity(se))
2871 set_next_buddy(parent_entity(se));
2872
2873
2874 se = parent_entity(se);
2875 break;
2876 }
2877 flags |= DEQUEUE_SLEEP;
2878 }
2879
2880 for_each_sched_entity(se) {
2881 cfs_rq = cfs_rq_of(se);
2882 cfs_rq->h_nr_running--;
2883
2884 if (cfs_rq_throttled(cfs_rq))
2885 break;
2886
2887 update_cfs_shares(cfs_rq);
2888 update_entity_load_avg(se, 1);
2889 }
2890
2891 if (!se) {
2892 dec_nr_running(rq);
2893 update_rq_runnable_avg(rq, 1);
2894 }
2895 hrtick_update(rq);
2896}
2897
2898#ifdef CONFIG_SMP
2899
2900static unsigned long weighted_cpuload(const int cpu)
2901{
2902 return cpu_rq(cpu)->load.weight;
2903}
2904
2905
2906
2907
2908
2909
2910
2911
2912static unsigned long source_load(int cpu, int type)
2913{
2914 struct rq *rq = cpu_rq(cpu);
2915 unsigned long total = weighted_cpuload(cpu);
2916
2917 if (type == 0 || !sched_feat(LB_BIAS))
2918 return total;
2919
2920 return min(rq->cpu_load[type-1], total);
2921}
2922
2923
2924
2925
2926
2927static unsigned long target_load(int cpu, int type)
2928{
2929 struct rq *rq = cpu_rq(cpu);
2930 unsigned long total = weighted_cpuload(cpu);
2931
2932 if (type == 0 || !sched_feat(LB_BIAS))
2933 return total;
2934
2935 return max(rq->cpu_load[type-1], total);
2936}
2937
2938static unsigned long power_of(int cpu)
2939{
2940 return cpu_rq(cpu)->cpu_power;
2941}
2942
2943static unsigned long cpu_avg_load_per_task(int cpu)
2944{
2945 struct rq *rq = cpu_rq(cpu);
2946 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
2947
2948 if (nr_running)
2949 return rq->load.weight / nr_running;
2950
2951 return 0;
2952}
2953
2954
2955static void task_waking_fair(struct task_struct *p)
2956{
2957 struct sched_entity *se = &p->se;
2958 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2959 u64 min_vruntime;
2960
2961#ifndef CONFIG_64BIT
2962 u64 min_vruntime_copy;
2963
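	/*
	 * On 32-bit the 64-bit min_vruntime cannot be read atomically, so
	 * pair the read with min_vruntime_copy and a read barrier and retry
	 * until both halves are consistent.
	 */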
2964 do {
2965 min_vruntime_copy = cfs_rq->min_vruntime_copy;
2966 smp_rmb();
2967 min_vruntime = cfs_rq->min_vruntime;
2968 } while (min_vruntime != min_vruntime_copy);
2969#else
2970 min_vruntime = cfs_rq->min_vruntime;
2971#endif
2972
2973 se->vruntime -= min_vruntime;
2974}
2975
2976#ifdef CONFIG_FAIR_GROUP_SCHED
2977
3026
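/*
 * effective_load() estimates how the load seen by each level of the group
 * hierarchy changes when 'wl' units of weight are added to (or, if negative,
 * removed from) the cfs_rq of @tg on @cpu; 'wg' is extra group weight that
 * arrives together with the task and is only applied at the lowest level.
 *
 * At every level the group entity's share is roughly recomputed as
 *
 *	new_share = clamp(tg->shares * (rw_i + wl) / (wg + W),
 *			  MIN_SHARES, tg->shares)
 *
 * where rw_i is this cpu's runqueue weight and W the total group weight;
 * the difference new_share - se->load.weight then becomes the 'wl'
 * propagated to the parent level.
 */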
3027static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
3028{
3029 struct sched_entity *se = tg->se[cpu];
3030
3031 if (!tg->parent)
3032 return wl;
3033
3034 for_each_sched_entity(se) {
3035 long w, W;
3036
3037 tg = se->my_q->tg;
3038
3039
3040
3041
3042 W = wg + calc_tg_weight(tg, se->my_q);
3043
3044
3045
3046
3047 w = se->my_q->load.weight + wl;
3048
3049
3050
3051
3052 if (W > 0 && w < W)
3053 wl = (w * tg->shares) / W;
3054 else
3055 wl = tg->shares;
3056
3057
3058
3059
3060
3061
3062 if (wl < MIN_SHARES)
3063 wl = MIN_SHARES;
3064
3065
3066
3067
3068 wl -= se->load.weight;
3069
3070
3071
3072
3073
3074
3075
3076
3077 wg = 0;
3078 }
3079
3080 return wl;
3081}
3082#else
3083
static inline long effective_load(struct task_group *tg, int cpu,
				  long wl, long wg)
{
	return wl;
}
3089
3090#endif
3091
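/*
 * wake_affine() decides whether it pays off to run a waking task on the
 * waking cpu (this_cpu) instead of its previous cpu.  Both candidate loads
 * are adjusted by effective_load() for the weight the task (and, for sync
 * wakeups, the current task) would add or remove, scaled by cpu power and
 * the domain's imbalance_pct; returns 1 when the affine target looks at
 * least as good.
 */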
3092static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
3093{
3094 s64 this_load, load;
3095 int idx, this_cpu, prev_cpu;
3096 unsigned long tl_per_task;
3097 struct task_group *tg;
3098 unsigned long weight;
3099 int balanced;
3100
3101 idx = sd->wake_idx;
3102 this_cpu = smp_processor_id();
3103 prev_cpu = task_cpu(p);
3104 load = source_load(prev_cpu, idx);
3105 this_load = target_load(this_cpu, idx);
3106
3107
3108
3109
3110
3111
3112 if (sync) {
3113 tg = task_group(current);
3114 weight = current->se.load.weight;
3115
3116 this_load += effective_load(tg, this_cpu, -weight, -weight);
3117 load += effective_load(tg, prev_cpu, 0, -weight);
3118 }
3119
3120 tg = task_group(p);
3121 weight = p->se.load.weight;
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132 if (this_load > 0) {
3133 s64 this_eff_load, prev_eff_load;
3134
3135 this_eff_load = 100;
3136 this_eff_load *= power_of(prev_cpu);
3137 this_eff_load *= this_load +
3138 effective_load(tg, this_cpu, weight, weight);
3139
3140 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3141 prev_eff_load *= power_of(this_cpu);
3142 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3143
3144 balanced = this_eff_load <= prev_eff_load;
3145 } else
3146 balanced = true;
3147
3148
3149
3150
3151
3152
3153 if (sync && balanced)
3154 return 1;
3155
3156 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
3157 tl_per_task = cpu_avg_load_per_task(this_cpu);
3158
3159 if (balanced ||
3160 (this_load <= load &&
3161 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
3162
3163
3164
3165
3166
3167 schedstat_inc(sd, ttwu_move_affine);
3168 schedstat_inc(p, se.statistics.nr_wakeups_affine);
3169
3170 return 1;
3171 }
3172 return 0;
3173}
3174
3175
3176
3177
3178
3179static struct sched_group *
3180find_idlest_group(struct sched_domain *sd, struct task_struct *p,
3181 int this_cpu, int load_idx)
3182{
3183 struct sched_group *idlest = NULL, *group = sd->groups;
3184 unsigned long min_load = ULONG_MAX, this_load = 0;
3185 int imbalance = 100 + (sd->imbalance_pct-100)/2;
3186
3187 do {
3188 unsigned long load, avg_load;
3189 int local_group;
3190 int i;
3191
3192
3193 if (!cpumask_intersects(sched_group_cpus(group),
3194 tsk_cpus_allowed(p)))
3195 continue;
3196
3197 local_group = cpumask_test_cpu(this_cpu,
3198 sched_group_cpus(group));
3199
3200
3201 avg_load = 0;
3202
3203 for_each_cpu(i, sched_group_cpus(group)) {
3204
3205 if (local_group)
3206 load = source_load(i, load_idx);
3207 else
3208 load = target_load(i, load_idx);
3209
3210 avg_load += load;
3211 }
3212
3213
3214 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
3215
3216 if (local_group) {
3217 this_load = avg_load;
3218 } else if (avg_load < min_load) {
3219 min_load = avg_load;
3220 idlest = group;
3221 }
3222 } while (group = group->next, group != sd->groups);
3223
3224 if (!idlest || 100*this_load < imbalance*min_load)
3225 return NULL;
3226 return idlest;
3227}
3228
3229
3230
3231
3232static int
3233find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3234{
3235 unsigned long load, min_load = ULONG_MAX;
3236 int idlest = -1;
3237 int i;
3238
3239
3240 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
3241 load = weighted_cpuload(i);
3242
3243 if (load < min_load || (load == min_load && i == this_cpu)) {
3244 min_load = load;
3245 idlest = i;
3246 }
3247 }
3248
3249 return idlest;
3250}
3251
3252
3253
3254
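/*
 * Try and locate an idle CPU in the sched_domain that shares a last-level
 * cache with @target; prefer @target or the task's previous cpu when either
 * is already idle, otherwise look for a fully idle group.
 */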
3255static int select_idle_sibling(struct task_struct *p, int target)
3256{
3257 int cpu = smp_processor_id();
3258 int prev_cpu = task_cpu(p);
3259 struct sched_domain *sd;
3260 struct sched_group *sg;
3261 int i;
3262
3263
3264
3265
3266
3267 if (target == cpu && idle_cpu(cpu))
3268 return cpu;
3269
3270
3271
3272
3273
3274 if (target == prev_cpu && idle_cpu(prev_cpu))
3275 return prev_cpu;
3276
3277
3278
3279
3280 sd = rcu_dereference(per_cpu(sd_llc, target));
3281 for_each_lower_domain(sd) {
3282 sg = sd->groups;
3283 do {
3284 if (!cpumask_intersects(sched_group_cpus(sg),
3285 tsk_cpus_allowed(p)))
3286 goto next;
3287
3288 for_each_cpu(i, sched_group_cpus(sg)) {
3289 if (!idle_cpu(i))
3290 goto next;
3291 }
3292
3293 target = cpumask_first_and(sched_group_cpus(sg),
3294 tsk_cpus_allowed(p));
3295 goto done;
3296next:
3297 sg = sg->next;
3298 } while (sg != sd->groups);
3299 }
3300done:
3301 return target;
3302}
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
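/*
 * select_task_rq_fair: pick a runqueue for a task that is waking up, being
 * forked or exec'ing (as selected by sd_flag).  Wake-affine wakeups are
 * steered towards an idle sibling of the waking cpu; otherwise the domains
 * are walked looking for the idlest group/cpu.
 *
 * Preemption must be disabled by the caller.
 */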
3315static int
3316select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
3317{
3318 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
3319 int cpu = smp_processor_id();
3320 int prev_cpu = task_cpu(p);
3321 int new_cpu = cpu;
3322 int want_affine = 0;
3323 int sync = wake_flags & WF_SYNC;
3324
3325 if (p->nr_cpus_allowed == 1)
3326 return prev_cpu;
3327
3328 if (sd_flag & SD_BALANCE_WAKE) {
3329 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
3330 want_affine = 1;
3331 new_cpu = prev_cpu;
3332 }
3333
3334 rcu_read_lock();
3335 for_each_domain(cpu, tmp) {
3336 if (!(tmp->flags & SD_LOAD_BALANCE))
3337 continue;
3338
3339
3340
3341
3342
3343 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3344 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3345 affine_sd = tmp;
3346 break;
3347 }
3348
3349 if (tmp->flags & sd_flag)
3350 sd = tmp;
3351 }
3352
3353 if (affine_sd) {
3354 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
3355 prev_cpu = cpu;
3356
3357 new_cpu = select_idle_sibling(p, prev_cpu);
3358 goto unlock;
3359 }
3360
3361 while (sd) {
3362 int load_idx = sd->forkexec_idx;
3363 struct sched_group *group;
3364 int weight;
3365
3366 if (!(sd->flags & sd_flag)) {
3367 sd = sd->child;
3368 continue;
3369 }
3370
3371 if (sd_flag & SD_BALANCE_WAKE)
3372 load_idx = sd->wake_idx;
3373
3374 group = find_idlest_group(sd, p, cpu, load_idx);
3375 if (!group) {
3376 sd = sd->child;
3377 continue;
3378 }
3379
3380 new_cpu = find_idlest_cpu(group, p, cpu);
3381 if (new_cpu == -1 || new_cpu == cpu) {
3382
3383 sd = sd->child;
3384 continue;
3385 }
3386
3387
3388 cpu = new_cpu;
3389 weight = sd->span_weight;
3390 sd = NULL;
3391 for_each_domain(cpu, tmp) {
3392 if (weight <= tmp->span_weight)
3393 break;
3394 if (tmp->flags & sd_flag)
3395 sd = tmp;
3396 }
3397
3398 }
3399unlock:
3400 rcu_read_unlock();
3401
3402 return new_cpu;
3403}
3404
3405
3406
3407
3408
3409
3410#ifdef CONFIG_FAIR_GROUP_SCHED
3411
3412
3413
3414
3415
3416
3417static void
3418migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3419{
3420 struct sched_entity *se = &p->se;
3421 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3422
3423
3424
3425
3426
3427
3428
3429 if (se->avg.decay_count) {
3430 se->avg.decay_count = -__synchronize_entity_decay(se);
3431 atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
3432 }
3433}
3434#endif
3435#endif
3436
3437static unsigned long
3438wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
3439{
3440 unsigned long gran = sysctl_sched_wakeup_granularity;
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455 return calc_delta_fair(gran, se);
3456}
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
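/*
 * Should 'se' preempt 'curr'?  Returns -1 when curr's vruntime is not ahead
 * of se's, 1 when the difference exceeds the (vruntime-scaled) wakeup
 * granularity, and 0 otherwise.
 */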
3472static int
3473wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3474{
3475 s64 gran, vdiff = curr->vruntime - se->vruntime;
3476
3477 if (vdiff <= 0)
3478 return -1;
3479
3480 gran = wakeup_gran(curr, se);
3481 if (vdiff > gran)
3482 return 1;
3483
3484 return 0;
3485}
3486
3487static void set_last_buddy(struct sched_entity *se)
3488{
3489 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3490 return;
3491
3492 for_each_sched_entity(se)
3493 cfs_rq_of(se)->last = se;
3494}
3495
3496static void set_next_buddy(struct sched_entity *se)
3497{
3498 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3499 return;
3500
3501 for_each_sched_entity(se)
3502 cfs_rq_of(se)->next = se;
3503}
3504
3505static void set_skip_buddy(struct sched_entity *se)
3506{
3507 for_each_sched_entity(se)
3508 cfs_rq_of(se)->skip = se;
3509}
3510
3511
3512
3513
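/*
 * Preempt the current task with a newly woken task if needed:
 */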
3514static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
3515{
3516 struct task_struct *curr = rq->curr;
3517 struct sched_entity *se = &curr->se, *pse = &p->se;
3518 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3519 int scale = cfs_rq->nr_running >= sched_nr_latency;
3520 int next_buddy_marked = 0;
3521
3522 if (unlikely(se == pse))
3523 return;
3524
3525
3526
3527
3528
3529
3530
3531 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3532 return;
3533
3534 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3535 set_next_buddy(pse);
3536 next_buddy_marked = 1;
3537 }
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549 if (test_tsk_need_resched(curr))
3550 return;
3551
3552
3553 if (unlikely(curr->policy == SCHED_IDLE) &&
3554 likely(p->policy != SCHED_IDLE))
3555 goto preempt;
3556
3557
3558
3559
3560
3561 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
3562 return;
3563
3564 find_matching_se(&se, &pse);
3565 update_curr(cfs_rq_of(se));
3566 BUG_ON(!pse);
3567 if (wakeup_preempt_entity(se, pse) == 1) {
3568
3569
3570
3571
3572 if (!next_buddy_marked)
3573 set_next_buddy(pse);
3574 goto preempt;
3575 }
3576
3577 return;
3578
3579preempt:
3580 resched_task(curr);
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590 if (unlikely(!se->on_rq || curr == rq->idle))
3591 return;
3592
3593 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3594 set_last_buddy(se);
3595}
3596
3597static struct task_struct *pick_next_task_fair(struct rq *rq)
3598{
3599 struct task_struct *p;
3600 struct cfs_rq *cfs_rq = &rq->cfs;
3601 struct sched_entity *se;
3602
3603 if (!cfs_rq->nr_running)
3604 return NULL;
3605
3606 do {
3607 se = pick_next_entity(cfs_rq);
3608 set_next_entity(cfs_rq, se);
3609 cfs_rq = group_cfs_rq(se);
3610 } while (cfs_rq);
3611
3612 p = task_of(se);
3613 if (hrtick_enabled(rq))
3614 hrtick_start_fair(rq, p);
3615
3616 return p;
3617}
3618
3619
3620
3621
3622static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
3623{
3624 struct sched_entity *se = &prev->se;
3625 struct cfs_rq *cfs_rq;
3626
3627 for_each_sched_entity(se) {
3628 cfs_rq = cfs_rq_of(se);
3629 put_prev_entity(cfs_rq, se);
3630 }
3631}
3632
3633
3634
3635
3636
3637
3638static void yield_task_fair(struct rq *rq)
3639{
3640 struct task_struct *curr = rq->curr;
3641 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3642 struct sched_entity *se = &curr->se;
3643
3644
3645
3646
3647 if (unlikely(rq->nr_running == 1))
3648 return;
3649
3650 clear_buddies(cfs_rq, se);
3651
3652 if (curr->policy != SCHED_BATCH) {
3653 update_rq_clock(rq);
3654
3655
3656
3657 update_curr(cfs_rq);
3658
3659
3660
3661
3662
3663 rq->skip_clock_update = 1;
3664 }
3665
3666 set_skip_buddy(se);
3667}
3668
3669static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3670{
3671 struct sched_entity *se = &p->se;
3672
3673
3674 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
3675 return false;
3676
3677
3678 set_next_buddy(se);
3679
3680 yield_task_fair(rq);
3681
3682 return true;
3683}
3684
3685#ifdef CONFIG_SMP
3803
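/**************************************************
 * Fair scheduling class load-balancing methods.
 *
 * The code below pulls weighted task load between runqueues so that each
 * cpu carries roughly its fair share: rebalance_domains() walks the
 * sched_domain hierarchy, find_busiest_group()/find_busiest_queue() pick a
 * source, and move_tasks() migrates load under control of the struct lb_env
 * defined below.
 */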
3804static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3805
3806#define LBF_ALL_PINNED 0x01
3807#define LBF_NEED_BREAK 0x02
3808#define LBF_SOME_PINNED 0x04
3809
3810struct lb_env {
3811 struct sched_domain *sd;
3812
3813 struct rq *src_rq;
3814 int src_cpu;
3815
3816 int dst_cpu;
3817 struct rq *dst_rq;
3818
3819 struct cpumask *dst_grpmask;
3820 int new_dst_cpu;
3821 enum cpu_idle_type idle;
3822 long imbalance;
3823
3824 struct cpumask *cpus;
3825
3826 unsigned int flags;
3827
3828 unsigned int loop;
3829 unsigned int loop_break;
3830 unsigned int loop_max;
3831};
3832
3833
3834
3835
3836
3837static void move_task(struct task_struct *p, struct lb_env *env)
3838{
3839 deactivate_task(env->src_rq, p, 0);
3840 set_task_cpu(p, env->dst_cpu);
3841 activate_task(env->dst_rq, p, 0);
3842 check_preempt_curr(env->dst_rq, p, 0);
3843}
3844
3845
3846
3847
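/*
 * Is this task likely cache-hot?
 */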
3848static int
3849task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3850{
3851 s64 delta;
3852
3853 if (p->sched_class != &fair_sched_class)
3854 return 0;
3855
3856 if (unlikely(p->policy == SCHED_IDLE))
3857 return 0;
3858
3859
3860
3861
3862 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3863 (&p->se == cfs_rq_of(&p->se)->next ||
3864 &p->se == cfs_rq_of(&p->se)->last))
3865 return 1;
3866
3867 if (sysctl_sched_migration_cost == -1)
3868 return 1;
3869 if (sysctl_sched_migration_cost == 0)
3870 return 0;
3871
3872 delta = now - p->se.exec_start;
3873
3874 return delta < (s64)sysctl_sched_migration_cost;
3875}
3876
3877
3878
3879
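/*
 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
 */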
3880static
3881int can_migrate_task(struct task_struct *p, struct lb_env *env)
3882{
3883 int tsk_cache_hot = 0;
3884
3885
3886
3887
3888
3889
3890 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
3891 int new_dst_cpu;
3892
3893 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903 if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
3904 return 0;
3905
3906 new_dst_cpu = cpumask_first_and(env->dst_grpmask,
3907 tsk_cpus_allowed(p));
3908 if (new_dst_cpu < nr_cpu_ids) {
3909 env->flags |= LBF_SOME_PINNED;
3910 env->new_dst_cpu = new_dst_cpu;
3911 }
3912 return 0;
3913 }
3914
3915
3916 env->flags &= ~LBF_ALL_PINNED;
3917
3918 if (task_running(env->src_rq, p)) {
3919 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
3920 return 0;
3921 }
3922
3923
3924
3925
3926
3927
3928
3929 tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
3930 if (!tsk_cache_hot ||
3931 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
3932#ifdef CONFIG_SCHEDSTATS
3933 if (tsk_cache_hot) {
3934 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
3935 schedstat_inc(p, se.statistics.nr_forced_migrations);
3936 }
3937#endif
3938 return 1;
3939 }
3940
3941 if (tsk_cache_hot) {
3942 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
3943 return 0;
3944 }
3945 return 1;
3946}
3947
3948
3949
3950
3951
3952
3953
3954
3955static int move_one_task(struct lb_env *env)
3956{
3957 struct task_struct *p, *n;
3958
3959 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
3960 if (throttled_lb_pair(task_group(p), env->src_rq->cpu, env->dst_cpu))
3961 continue;
3962
3963 if (!can_migrate_task(p, env))
3964 continue;
3965
3966 move_task(p, env);
3967
3968
3969
3970
3971
3972 schedstat_inc(env->sd, lb_gained[env->idle]);
3973 return 1;
3974 }
3975 return 0;
3976}
3977
3978static unsigned long task_h_load(struct task_struct *p);
3979
3980static const unsigned int sched_nr_migrate_break = 32;
3981
3982
3983
3984
3985
3986
3987
3988
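/*
 * move_tasks tries to move up to imbalance weighted load from busiest to
 * this_rq, as part of a balancing operation within domain "sd".
 * Returns the number of tasks pulled (0 if no load was moved).
 *
 * Called with both runqueues locked.
 */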
3989static int move_tasks(struct lb_env *env)
3990{
3991 struct list_head *tasks = &env->src_rq->cfs_tasks;
3992 struct task_struct *p;
3993 unsigned long load;
3994 int pulled = 0;
3995
3996 if (env->imbalance <= 0)
3997 return 0;
3998
3999 while (!list_empty(tasks)) {
4000 p = list_first_entry(tasks, struct task_struct, se.group_node);
4001
4002 env->loop++;
4003
4004 if (env->loop > env->loop_max)
4005 break;
4006
4007
4008 if (env->loop > env->loop_break) {
4009 env->loop_break += sched_nr_migrate_break;
4010 env->flags |= LBF_NEED_BREAK;
4011 break;
4012 }
4013
4014 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
4015 goto next;
4016
4017 load = task_h_load(p);
4018
4019 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
4020 goto next;
4021
4022 if ((load / 2) > env->imbalance)
4023 goto next;
4024
4025 if (!can_migrate_task(p, env))
4026 goto next;
4027
4028 move_task(p, env);
4029 pulled++;
4030 env->imbalance -= load;
4031
4032#ifdef CONFIG_PREEMPT
4033
4034
4035
4036
4037
4038 if (env->idle == CPU_NEWLY_IDLE)
4039 break;
4040#endif
4041
4042
4043
4044
4045
4046 if (env->imbalance <= 0)
4047 break;
4048
4049 continue;
4050next:
4051 list_move_tail(&p->se.group_node, tasks);
4052 }
4053
4054
4055
4056
4057
4058
4059 schedstat_add(env->sd, lb_gained[env->idle], pulled);
4060
4061 return pulled;
4062}
4063
4064#ifdef CONFIG_FAIR_GROUP_SCHED
4065
4066
4067
4068static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
4069{
4070 struct sched_entity *se = tg->se[cpu];
4071 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
4072
4073
4074 if (throttled_hierarchy(cfs_rq))
4075 return;
4076
4077 update_cfs_rq_blocked_load(cfs_rq, 1);
4078
4079 if (se) {
4080 update_entity_load_avg(se, 1);
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4091 list_del_leaf_cfs_rq(cfs_rq);
4092 } else {
4093 struct rq *rq = rq_of(cfs_rq);
4094 update_rq_runnable_avg(rq, rq->nr_running);
4095 }
4096}
4097
4098static void update_blocked_averages(int cpu)
4099{
4100 struct rq *rq = cpu_rq(cpu);
4101 struct cfs_rq *cfs_rq;
4102 unsigned long flags;
4103
4104 raw_spin_lock_irqsave(&rq->lock, flags);
4105 update_rq_clock(rq);
4106
4107
4108
4109
4110 for_each_leaf_cfs_rq(rq, cfs_rq) {
4111
4112
4113
4114
4115
4116 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
4117 }
4118
4119 raw_spin_unlock_irqrestore(&rq->lock, flags);
4120}
4121
4122
4123
4124
4125
4126
4127static int tg_load_down(struct task_group *tg, void *data)
4128{
4129 unsigned long load;
4130 long cpu = (long)data;
4131
4132 if (!tg->parent) {
4133 load = cpu_rq(cpu)->load.weight;
4134 } else {
4135 load = tg->parent->cfs_rq[cpu]->h_load;
4136 load *= tg->se[cpu]->load.weight;
4137 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
4138 }
4139
4140 tg->cfs_rq[cpu]->h_load = load;
4141
4142 return 0;
4143}
4144
4145static void update_h_load(long cpu)
4146{
4147 struct rq *rq = cpu_rq(cpu);
4148 unsigned long now = jiffies;
4149
4150 if (rq->h_load_throttle == now)
4151 return;
4152
4153 rq->h_load_throttle = now;
4154
4155 rcu_read_lock();
4156 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
4157 rcu_read_unlock();
4158}
4159
4160static unsigned long task_h_load(struct task_struct *p)
4161{
4162 struct cfs_rq *cfs_rq = task_cfs_rq(p);
4163 unsigned long load;
4164
4165 load = p->se.load.weight;
4166 load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
4167
4168 return load;
4169}
4170#else
4171static inline void update_blocked_averages(int cpu)
4172{
4173}
4174
4175static inline void update_h_load(long cpu)
4176{
4177}
4178
4179static unsigned long task_h_load(struct task_struct *p)
4180{
4181 return p->se.load.weight;
4182}
4183#endif
4184
4185
4186
4187
4188
4189
4190struct sd_lb_stats {
4191 struct sched_group *busiest;
4192 struct sched_group *this;
4193 unsigned long total_load;
4194 unsigned long total_pwr;
4195 unsigned long avg_load;
4196
4197
4198 unsigned long this_load;
4199 unsigned long this_load_per_task;
4200 unsigned long this_nr_running;
4201 unsigned long this_has_capacity;
4202 unsigned int this_idle_cpus;
4203
4204
4205 unsigned int busiest_idle_cpus;
4206 unsigned long max_load;
4207 unsigned long busiest_load_per_task;
4208 unsigned long busiest_nr_running;
4209 unsigned long busiest_group_capacity;
4210 unsigned long busiest_has_capacity;
4211 unsigned int busiest_group_weight;
4212
4213 int group_imb;
4214};
4215
4216
4217
4218
4219struct sg_lb_stats {
4220 unsigned long avg_load;
4221 unsigned long group_load;
4222 unsigned long sum_nr_running;
4223 unsigned long sum_weighted_load;
4224 unsigned long group_capacity;
4225 unsigned long idle_cpus;
4226 unsigned long group_weight;
4227 int group_imb;
4228 int group_has_capacity;
4229};
4230
4231
4232
4233
4234
4235
4236static inline int get_sd_load_idx(struct sched_domain *sd,
4237 enum cpu_idle_type idle)
4238{
4239 int load_idx;
4240
4241 switch (idle) {
4242 case CPU_NOT_IDLE:
4243 load_idx = sd->busy_idx;
4244 break;
4245
4246 case CPU_NEWLY_IDLE:
4247 load_idx = sd->newidle_idx;
4248 break;
4249 default:
4250 load_idx = sd->idle_idx;
4251 break;
4252 }
4253
4254 return load_idx;
4255}
4256
4257unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
4258{
4259 return SCHED_POWER_SCALE;
4260}
4261
4262unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4263{
4264 return default_scale_freq_power(sd, cpu);
4265}
4266
4267unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
4268{
4269 unsigned long weight = sd->span_weight;
4270 unsigned long smt_gain = sd->smt_gain;
4271
4272 smt_gain /= weight;
4273
4274 return smt_gain;
4275}
4276
4277unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4278{
4279 return default_scale_smt_power(sd, cpu);
4280}
4281
4282unsigned long scale_rt_power(int cpu)
4283{
4284 struct rq *rq = cpu_rq(cpu);
4285 u64 total, available, age_stamp, avg;
4286
4287
4288
4289
4290
4291 age_stamp = ACCESS_ONCE(rq->age_stamp);
4292 avg = ACCESS_ONCE(rq->rt_avg);
4293
4294 total = sched_avg_period() + (rq->clock - age_stamp);
4295
4296 if (unlikely(total < avg)) {
4297
4298 available = 0;
4299 } else {
4300 available = total - avg;
4301 }
4302
4303 if (unlikely((s64)total < SCHED_POWER_SCALE))
4304 total = SCHED_POWER_SCALE;
4305
4306 total >>= SCHED_POWER_SHIFT;
4307
4308 return div_u64(available, total);
4309}
4310
4311static void update_cpu_power(struct sched_domain *sd, int cpu)
4312{
4313 unsigned long weight = sd->span_weight;
4314 unsigned long power = SCHED_POWER_SCALE;
4315 struct sched_group *sdg = sd->groups;
4316
4317 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4318 if (sched_feat(ARCH_POWER))
4319 power *= arch_scale_smt_power(sd, cpu);
4320 else
4321 power *= default_scale_smt_power(sd, cpu);
4322
4323 power >>= SCHED_POWER_SHIFT;
4324 }
4325
4326 sdg->sgp->power_orig = power;
4327
4328 if (sched_feat(ARCH_POWER))
4329 power *= arch_scale_freq_power(sd, cpu);
4330 else
4331 power *= default_scale_freq_power(sd, cpu);
4332
4333 power >>= SCHED_POWER_SHIFT;
4334
4335 power *= scale_rt_power(cpu);
4336 power >>= SCHED_POWER_SHIFT;
4337
4338 if (!power)
4339 power = 1;
4340
4341 cpu_rq(cpu)->cpu_power = power;
4342 sdg->sgp->power = power;
4343}
4344
4345void update_group_power(struct sched_domain *sd, int cpu)
4346{
4347 struct sched_domain *child = sd->child;
4348 struct sched_group *group, *sdg = sd->groups;
4349 unsigned long power;
4350 unsigned long interval;
4351
4352 interval = msecs_to_jiffies(sd->balance_interval);
4353 interval = clamp(interval, 1UL, max_load_balance_interval);
4354 sdg->sgp->next_update = jiffies + interval;
4355
4356 if (!child) {
4357 update_cpu_power(sd, cpu);
4358 return;
4359 }
4360
4361 power = 0;
4362
4363 if (child->flags & SD_OVERLAP) {
4364
4365
4366
4367
4368
4369 for_each_cpu(cpu, sched_group_cpus(sdg))
4370 power += power_of(cpu);
4371 } else {
4372
4373
4374
4375
4376
4377 group = child->groups;
4378 do {
4379 power += group->sgp->power;
4380 group = group->next;
4381 } while (group != child->groups);
4382 }
4383
4384 sdg->sgp->power_orig = sdg->sgp->power = power;
4385}
4386
4387
4388
4389
4390
4391
4392
4393
4394static inline int
4395fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4396{
4397
4398
4399
4400 if (!(sd->flags & SD_SHARE_CPUPOWER))
4401 return 0;
4402
4403
4404
4405
4406 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
4407 return 1;
4408
4409 return 0;
4410}
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
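/**
 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
 * @env: The load balancing environment.
 * @group: sched_group whose statistics are to be updated.
 * @load_idx: Load index of sched_domain of this_cpu for load calc.
 * @local_group: Does group contain this_cpu.
 * @balance: Should we balance.
 * @sgs: variable to hold the statistics for this group.
 */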
4421static inline void update_sg_lb_stats(struct lb_env *env,
4422 struct sched_group *group, int load_idx,
4423 int local_group, int *balance, struct sg_lb_stats *sgs)
4424{
4425 unsigned long nr_running, max_nr_running, min_nr_running;
4426 unsigned long load, max_cpu_load, min_cpu_load;
4427 unsigned int balance_cpu = -1, first_idle_cpu = 0;
4428 unsigned long avg_load_per_task = 0;
4429 int i;
4430
4431 if (local_group)
4432 balance_cpu = group_balance_cpu(group);
4433
4434
4435 max_cpu_load = 0;
4436 min_cpu_load = ~0UL;
4437 max_nr_running = 0;
4438 min_nr_running = ~0UL;
4439
4440 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
4441 struct rq *rq = cpu_rq(i);
4442
4443 nr_running = rq->nr_running;
4444
4445
4446 if (local_group) {
4447 if (idle_cpu(i) && !first_idle_cpu &&
4448 cpumask_test_cpu(i, sched_group_mask(group))) {
4449 first_idle_cpu = 1;
4450 balance_cpu = i;
4451 }
4452
4453 load = target_load(i, load_idx);
4454 } else {
4455 load = source_load(i, load_idx);
4456 if (load > max_cpu_load)
4457 max_cpu_load = load;
4458 if (min_cpu_load > load)
4459 min_cpu_load = load;
4460
4461 if (nr_running > max_nr_running)
4462 max_nr_running = nr_running;
4463 if (min_nr_running > nr_running)
4464 min_nr_running = nr_running;
4465 }
4466
4467 sgs->group_load += load;
4468 sgs->sum_nr_running += nr_running;
4469 sgs->sum_weighted_load += weighted_cpuload(i);
4470 if (idle_cpu(i))
4471 sgs->idle_cpus++;
4472 }
4473
4474
4475
4476
4477
4478
4479
4480 if (local_group) {
4481 if (env->idle != CPU_NEWLY_IDLE) {
4482 if (balance_cpu != env->dst_cpu) {
4483 *balance = 0;
4484 return;
4485 }
4486 update_group_power(env->sd, env->dst_cpu);
4487 } else if (time_after_eq(jiffies, group->sgp->next_update))
4488 update_group_power(env->sd, env->dst_cpu);
4489 }
4490
4491
4492 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503 if (sgs->sum_nr_running)
4504 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
4505
4506 if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
4507 (max_nr_running - min_nr_running) > 1)
4508 sgs->group_imb = 1;
4509
4510 sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
4511 SCHED_POWER_SCALE);
4512 if (!sgs->group_capacity)
4513 sgs->group_capacity = fix_small_capacity(env->sd, group);
4514 sgs->group_weight = group->group_weight;
4515
4516 if (sgs->group_capacity > sgs->sum_nr_running)
4517 sgs->group_has_capacity = 1;
4518}
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
4530static bool update_sd_pick_busiest(struct lb_env *env,
4531 struct sd_lb_stats *sds,
4532 struct sched_group *sg,
4533 struct sg_lb_stats *sgs)
4534{
4535 if (sgs->avg_load <= sds->max_load)
4536 return false;
4537
4538 if (sgs->sum_nr_running > sgs->group_capacity)
4539 return true;
4540
4541 if (sgs->group_imb)
4542 return true;
4543
4544
4545
4546
4547
4548
4549 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4550 env->dst_cpu < group_first_cpu(sg)) {
4551 if (!sds->busiest)
4552 return true;
4553
4554 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4555 return true;
4556 }
4557
4558 return false;
4559}
4560
4561
4562
4563
4564
4565
4566
4567static inline void update_sd_lb_stats(struct lb_env *env,
4568 int *balance, struct sd_lb_stats *sds)
4569{
4570 struct sched_domain *child = env->sd->child;
4571 struct sched_group *sg = env->sd->groups;
4572 struct sg_lb_stats sgs;
4573 int load_idx, prefer_sibling = 0;
4574
4575 if (child && child->flags & SD_PREFER_SIBLING)
4576 prefer_sibling = 1;
4577
4578 load_idx = get_sd_load_idx(env->sd, env->idle);
4579
4580 do {
4581 int local_group;
4582
4583 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
4584 memset(&sgs, 0, sizeof(sgs));
4585 update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
4586
4587 if (local_group && !(*balance))
4588 return;
4589
4590 sds->total_load += sgs.group_load;
4591 sds->total_pwr += sg->sgp->power;
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603 if (prefer_sibling && !local_group && sds->this_has_capacity)
4604 sgs.group_capacity = min(sgs.group_capacity, 1UL);
4605
4606 if (local_group) {
4607 sds->this_load = sgs.avg_load;
4608 sds->this = sg;
4609 sds->this_nr_running = sgs.sum_nr_running;
4610 sds->this_load_per_task = sgs.sum_weighted_load;
4611 sds->this_has_capacity = sgs.group_has_capacity;
4612 sds->this_idle_cpus = sgs.idle_cpus;
4613 } else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
4614 sds->max_load = sgs.avg_load;
4615 sds->busiest = sg;
4616 sds->busiest_nr_running = sgs.sum_nr_running;
4617 sds->busiest_idle_cpus = sgs.idle_cpus;
4618 sds->busiest_group_capacity = sgs.group_capacity;
4619 sds->busiest_load_per_task = sgs.sum_weighted_load;
4620 sds->busiest_has_capacity = sgs.group_has_capacity;
4621 sds->busiest_group_weight = sgs.group_weight;
4622 sds->group_imb = sgs.group_imb;
4623 }
4624
4625 sg = sg->next;
4626 } while (sg != env->sd->groups);
4627}
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
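/*
 * check_asym_packing - check whether the busiest group should be packed
 * onto this (lower numbered) cpu.
 *
 * With SD_ASYM_PACKING set, lower numbered cpus are to be used first; if
 * the busiest group sits on a higher numbered cpu than the destination,
 * compute the imbalance needed to pull its load down and return 1.
 */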
4652static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
4653{
4654 int busiest_cpu;
4655
4656 if (!(env->sd->flags & SD_ASYM_PACKING))
4657 return 0;
4658
4659 if (!sds->busiest)
4660 return 0;
4661
4662 busiest_cpu = group_first_cpu(sds->busiest);
4663 if (env->dst_cpu > busiest_cpu)
4664 return 0;
4665
4666 env->imbalance = DIV_ROUND_CLOSEST(
4667 sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
4668
4669 return 1;
4670}
4671
4672
4673
4674
4675
4676
4677
4678
4679static inline
4680void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4681{
4682 unsigned long tmp, pwr_now = 0, pwr_move = 0;
4683 unsigned int imbn = 2;
4684 unsigned long scaled_busy_load_per_task;
4685
4686 if (sds->this_nr_running) {
4687 sds->this_load_per_task /= sds->this_nr_running;
4688 if (sds->busiest_load_per_task >
4689 sds->this_load_per_task)
4690 imbn = 1;
4691 } else {
4692 sds->this_load_per_task =
4693 cpu_avg_load_per_task(env->dst_cpu);
4694 }
4695
4696 scaled_busy_load_per_task = sds->busiest_load_per_task
4697 * SCHED_POWER_SCALE;
4698 scaled_busy_load_per_task /= sds->busiest->sgp->power;
4699
4700 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4701 (scaled_busy_load_per_task * imbn)) {
4702 env->imbalance = sds->busiest_load_per_task;
4703 return;
4704 }
4705
4706
4707
4708
4709
4710
4711
4712 pwr_now += sds->busiest->sgp->power *
4713 min(sds->busiest_load_per_task, sds->max_load);
4714 pwr_now += sds->this->sgp->power *
4715 min(sds->this_load_per_task, sds->this_load);
4716 pwr_now /= SCHED_POWER_SCALE;
4717
4718
4719 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4720 sds->busiest->sgp->power;
4721 if (sds->max_load > tmp)
4722 pwr_move += sds->busiest->sgp->power *
4723 min(sds->busiest_load_per_task, sds->max_load - tmp);
4724
4725
4726 if (sds->max_load * sds->busiest->sgp->power <
4727 sds->busiest_load_per_task * SCHED_POWER_SCALE)
4728 tmp = (sds->max_load * sds->busiest->sgp->power) /
4729 sds->this->sgp->power;
4730 else
4731 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4732 sds->this->sgp->power;
4733 pwr_move += sds->this->sgp->power *
4734 min(sds->this_load_per_task, sds->this_load + tmp);
4735 pwr_move /= SCHED_POWER_SCALE;
4736
4737
4738 if (pwr_move > pwr_now)
4739 env->imbalance = sds->busiest_load_per_task;
4740}
4741
4742
4743
4744
4745
4746
4747
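/**
 * calculate_imbalance - Calculate the amount of imbalance present within the
 *			 groups of a given sched_domain during load balance.
 * @env: load balance environment
 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
 */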
4748static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4749{
4750 unsigned long max_pull, load_above_capacity = ~0UL;
4751
4752 sds->busiest_load_per_task /= sds->busiest_nr_running;
4753 if (sds->group_imb) {
4754 sds->busiest_load_per_task =
4755 min(sds->busiest_load_per_task, sds->avg_load);
4756 }
4757
4758
4759
4760
4761
4762
4763 if (sds->max_load < sds->avg_load) {
4764 env->imbalance = 0;
4765 return fix_small_imbalance(env, sds);
4766 }
4767
4768 if (!sds->group_imb) {
4769
4770
4771
4772 load_above_capacity = (sds->busiest_nr_running -
4773 sds->busiest_group_capacity);
4774
4775 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
4776
4777 load_above_capacity /= sds->busiest->sgp->power;
4778 }
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
4791
4792
4793 env->imbalance = min(max_pull * sds->busiest->sgp->power,
4794 (sds->avg_load - sds->this_load) * sds->this->sgp->power)
4795 / SCHED_POWER_SCALE;
4796
4797
4798
4799
4800
4801
4802
4803 if (env->imbalance < sds->busiest_load_per_task)
4804 return fix_small_imbalance(env, sds);
4805
4806}
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
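/**
 * find_busiest_group - Return the busiest group within the sched_domain
 * if there is an imbalance, and calculate the amount of weighted load that
 * should be moved to restore balance (stored in env->imbalance).
 *
 * @env: The load balancing environment.
 * @balance: Cleared when this_cpu is not the appropriate cpu to perform
 *	     load balancing at this level.
 *
 * Returns NULL when no imbalance (or no suitable group) was found.
 */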
4829static struct sched_group *
4830find_busiest_group(struct lb_env *env, int *balance)
4831{
4832 struct sd_lb_stats sds;
4833
4834 memset(&sds, 0, sizeof(sds));
4835
4836
4837
4838
4839
4840 update_sd_lb_stats(env, balance, &sds);
4841
4842
4843
4844
4845
4846 if (!(*balance))
4847 goto ret;
4848
4849 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4850 check_asym_packing(env, &sds))
4851 return sds.busiest;
4852
4853
4854 if (!sds.busiest || sds.busiest_nr_running == 0)
4855 goto out_balanced;
4856
4857 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
4858
4859
4860
4861
4862
4863
4864 if (sds.group_imb)
4865 goto force_balance;
4866
4867
4868 if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
4869 !sds.busiest_has_capacity)
4870 goto force_balance;
4871
4872
4873
4874
4875
4876 if (sds.this_load >= sds.max_load)
4877 goto out_balanced;
4878
4879
4880
4881
4882
4883 if (sds.this_load >= sds.avg_load)
4884 goto out_balanced;
4885
4886 if (env->idle == CPU_IDLE) {
4887
4888
4889
4890
4891
4892
4893 if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
4894 sds.busiest_nr_running <= sds.busiest_group_weight)
4895 goto out_balanced;
4896 } else {
4897
4898
4899
4900
4901 if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
4902 goto out_balanced;
4903 }
4904
4905force_balance:
4906
4907 calculate_imbalance(env, &sds);
4908 return sds.busiest;
4909
4910out_balanced:
4911ret:
4912 env->imbalance = 0;
4913 return NULL;
4914}
4915
4916
4917
4918
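/*
 * find_busiest_queue - find the busiest runqueue among the cpus in group.
 */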
4919static struct rq *find_busiest_queue(struct lb_env *env,
4920 struct sched_group *group)
4921{
4922 struct rq *busiest = NULL, *rq;
4923 unsigned long max_load = 0;
4924 int i;
4925
4926 for_each_cpu(i, sched_group_cpus(group)) {
4927 unsigned long power = power_of(i);
4928 unsigned long capacity = DIV_ROUND_CLOSEST(power,
4929 SCHED_POWER_SCALE);
4930 unsigned long wl;
4931
4932 if (!capacity)
4933 capacity = fix_small_capacity(env->sd, group);
4934
4935 if (!cpumask_test_cpu(i, env->cpus))
4936 continue;
4937
4938 rq = cpu_rq(i);
4939 wl = weighted_cpuload(i);
4940
4941
4942
4943
4944
4945 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
4946 continue;
4947
4948
4949
4950
4951
4952
4953
4954 wl = (wl * SCHED_POWER_SCALE) / power;
4955
4956 if (wl > max_load) {
4957 max_load = wl;
4958 busiest = rq;
4959 }
4960 }
4961
4962 return busiest;
4963}
4964
4965
4966
4967
4968
4969#define MAX_PINNED_INTERVAL 512
4970
4971
4972DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
4973
4974static int need_active_balance(struct lb_env *env)
4975{
4976 struct sched_domain *sd = env->sd;
4977
4978 if (env->idle == CPU_NEWLY_IDLE) {
4979
4980
4981
4982
4983
4984
4985 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
4986 return 1;
4987 }
4988
4989 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
4990}
4991
4992static int active_load_balance_cpu_stop(void *data);
4993
4994
4995
4996
4997
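/*
 * Check this_cpu to ensure it is balanced within domain. Attempt to move
 * tasks if there is an imbalance.
 */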
4998static int load_balance(int this_cpu, struct rq *this_rq,
4999 struct sched_domain *sd, enum cpu_idle_type idle,
5000 int *balance)
5001{
5002 int ld_moved, cur_ld_moved, active_balance = 0;
5003 int lb_iterations, max_lb_iterations;
5004 struct sched_group *group;
5005 struct rq *busiest;
5006 unsigned long flags;
5007 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
5008
5009 struct lb_env env = {
5010 .sd = sd,
5011 .dst_cpu = this_cpu,
5012 .dst_rq = this_rq,
5013 .dst_grpmask = sched_group_cpus(sd->groups),
5014 .idle = idle,
5015 .loop_break = sched_nr_migrate_break,
5016 .cpus = cpus,
5017 };
5018
5019 cpumask_copy(cpus, cpu_active_mask);
5020 max_lb_iterations = cpumask_weight(env.dst_grpmask);
5021
5022 schedstat_inc(sd, lb_count[idle]);
5023
5024redo:
5025 group = find_busiest_group(&env, balance);
5026
5027 if (*balance == 0)
5028 goto out_balanced;
5029
5030 if (!group) {
5031 schedstat_inc(sd, lb_nobusyg[idle]);
5032 goto out_balanced;
5033 }
5034
5035 busiest = find_busiest_queue(&env, group);
5036 if (!busiest) {
5037 schedstat_inc(sd, lb_nobusyq[idle]);
5038 goto out_balanced;
5039 }
5040
5041 BUG_ON(busiest == env.dst_rq);
5042
5043 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
5044
5045 ld_moved = 0;
5046 lb_iterations = 1;
5047 if (busiest->nr_running > 1) {
5048
5049
5050
5051
5052
5053
5054 env.flags |= LBF_ALL_PINNED;
5055 env.src_cpu = busiest->cpu;
5056 env.src_rq = busiest;
5057 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
5058
5059 update_h_load(env.src_cpu);
5060more_balance:
5061 local_irq_save(flags);
5062 double_rq_lock(env.dst_rq, busiest);
5063
5064
5065
5066
5067
5068 cur_ld_moved = move_tasks(&env);
5069 ld_moved += cur_ld_moved;
5070 double_rq_unlock(env.dst_rq, busiest);
5071 local_irq_restore(flags);
5072
5073 if (env.flags & LBF_NEED_BREAK) {
5074 env.flags &= ~LBF_NEED_BREAK;
5075 goto more_balance;
5076 }
5077
5078
5079
5080
5081 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5082 resched_cpu(env.dst_cpu);
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096
5097
5098
5099
5100
5101
5102
5103 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0 &&
5104 lb_iterations++ < max_lb_iterations) {
5105
5106 env.dst_rq = cpu_rq(env.new_dst_cpu);
5107 env.dst_cpu = env.new_dst_cpu;
5108 env.flags &= ~LBF_SOME_PINNED;
5109 env.loop = 0;
5110 env.loop_break = sched_nr_migrate_break;
5111
5112
5113
5114
5115 goto more_balance;
5116 }
5117
5118
5119 if (unlikely(env.flags & LBF_ALL_PINNED)) {
5120 cpumask_clear_cpu(cpu_of(busiest), cpus);
5121 if (!cpumask_empty(cpus)) {
5122 env.loop = 0;
5123 env.loop_break = sched_nr_migrate_break;
5124 goto redo;
5125 }
5126 goto out_balanced;
5127 }
5128 }
5129
5130 if (!ld_moved) {
5131 schedstat_inc(sd, lb_failed[idle]);
5132
5133
5134
5135
5136
5137
5138 if (idle != CPU_NEWLY_IDLE)
5139 sd->nr_balance_failed++;
5140
5141 if (need_active_balance(&env)) {
5142 raw_spin_lock_irqsave(&busiest->lock, flags);
5143
5144
5145
5146
5147
5148 if (!cpumask_test_cpu(this_cpu,
5149 tsk_cpus_allowed(busiest->curr))) {
5150 raw_spin_unlock_irqrestore(&busiest->lock,
5151 flags);
5152 env.flags |= LBF_ALL_PINNED;
5153 goto out_one_pinned;
5154 }
5155
5156
5157
5158
5159
5160
5161 if (!busiest->active_balance) {
5162 busiest->active_balance = 1;
5163 busiest->push_cpu = this_cpu;
5164 active_balance = 1;
5165 }
5166 raw_spin_unlock_irqrestore(&busiest->lock, flags);
5167
5168 if (active_balance) {
5169 stop_one_cpu_nowait(cpu_of(busiest),
5170 active_load_balance_cpu_stop, busiest,
5171 &busiest->active_balance_work);
5172 }
5173
5174
5175
5176
5177
5178 sd->nr_balance_failed = sd->cache_nice_tries+1;
5179 }
5180 } else
5181 sd->nr_balance_failed = 0;
5182
5183 if (likely(!active_balance)) {
5184
5185 sd->balance_interval = sd->min_interval;
5186 } else {
5187
5188
5189
5190
5191
5192
5193 if (sd->balance_interval < sd->max_interval)
5194 sd->balance_interval *= 2;
5195 }
5196
5197 goto out;
5198
5199out_balanced:
5200 schedstat_inc(sd, lb_balanced[idle]);
5201
5202 sd->nr_balance_failed = 0;
5203
5204out_one_pinned:
5205
5206 if (((env.flags & LBF_ALL_PINNED) &&
5207 sd->balance_interval < MAX_PINNED_INTERVAL) ||
5208 (sd->balance_interval < sd->max_interval))
5209 sd->balance_interval *= 2;
5210
5211 ld_moved = 0;
5212out:
5213 return ld_moved;
5214}
5215
5216
5217
5218
5219
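/*
 * idle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 */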
5220void idle_balance(int this_cpu, struct rq *this_rq)
5221{
5222 struct sched_domain *sd;
5223 int pulled_task = 0;
5224 unsigned long next_balance = jiffies + HZ;
5225
5226 this_rq->idle_stamp = this_rq->clock;
5227
5228 if (this_rq->avg_idle < sysctl_sched_migration_cost)
5229 return;
5230
5231 update_rq_runnable_avg(this_rq, 1);
5232
5233
5234
5235
5236 raw_spin_unlock(&this_rq->lock);
5237
5238 update_blocked_averages(this_cpu);
5239 rcu_read_lock();
5240 for_each_domain(this_cpu, sd) {
5241 unsigned long interval;
5242 int balance = 1;
5243
5244 if (!(sd->flags & SD_LOAD_BALANCE))
5245 continue;
5246
5247 if (sd->flags & SD_BALANCE_NEWIDLE) {
5248
5249 pulled_task = load_balance(this_cpu, this_rq,
5250 sd, CPU_NEWLY_IDLE, &balance);
5251 }
5252
5253 interval = msecs_to_jiffies(sd->balance_interval);
5254 if (time_after(next_balance, sd->last_balance + interval))
5255 next_balance = sd->last_balance + interval;
5256 if (pulled_task) {
5257 this_rq->idle_stamp = 0;
5258 break;
5259 }
5260 }
5261 rcu_read_unlock();
5262
5263 raw_spin_lock(&this_rq->lock);
5264
5265 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5266
5267
5268
5269
5270 this_rq->next_balance = next_balance;
5271 }
5272}
5273
5274
5275
5276
5277
5278
5279
5280static int active_load_balance_cpu_stop(void *data)
5281{
5282 struct rq *busiest_rq = data;
5283 int busiest_cpu = cpu_of(busiest_rq);
5284 int target_cpu = busiest_rq->push_cpu;
5285 struct rq *target_rq = cpu_rq(target_cpu);
5286 struct sched_domain *sd;
5287
5288 raw_spin_lock_irq(&busiest_rq->lock);
5289
5290
5291 if (unlikely(busiest_cpu != smp_processor_id() ||
5292 !busiest_rq->active_balance))
5293 goto out_unlock;
5294
5295
5296 if (busiest_rq->nr_running <= 1)
5297 goto out_unlock;
5298
5299
5300
5301
5302
5303
5304 BUG_ON(busiest_rq == target_rq);
5305
5306
5307 double_lock_balance(busiest_rq, target_rq);
5308
5309
5310 rcu_read_lock();
5311 for_each_domain(target_cpu, sd) {
5312 if ((sd->flags & SD_LOAD_BALANCE) &&
5313 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5314 break;
5315 }
5316
5317 if (likely(sd)) {
5318 struct lb_env env = {
5319 .sd = sd,
5320 .dst_cpu = target_cpu,
5321 .dst_rq = target_rq,
5322 .src_cpu = busiest_rq->cpu,
5323 .src_rq = busiest_rq,
5324 .idle = CPU_IDLE,
5325 };
5326
5327 schedstat_inc(sd, alb_count);
5328
5329 if (move_one_task(&env))
5330 schedstat_inc(sd, alb_pushed);
5331 else
5332 schedstat_inc(sd, alb_failed);
5333 }
5334 rcu_read_unlock();
5335 double_unlock_balance(busiest_rq, target_rq);
5336out_unlock:
5337 busiest_rq->active_balance = 0;
5338 raw_spin_unlock_irq(&busiest_rq->lock);
5339 return 0;
5340}
5341
5342#ifdef CONFIG_NO_HZ
5343
5344
5345
5346
5347
5348
5349static struct {
5350 cpumask_var_t idle_cpus_mask;
5351 atomic_t nr_cpus;
5352 unsigned long next_balance;
5353} nohz ____cacheline_aligned;
5354
5355static inline int find_new_ilb(int call_cpu)
5356{
5357 int ilb = cpumask_first(nohz.idle_cpus_mask);
5358
5359 if (ilb < nr_cpu_ids && idle_cpu(ilb))
5360 return ilb;
5361
5362 return nr_cpu_ids;
5363}
5364
5365
5366
5367
5368
5369
5370static void nohz_balancer_kick(int cpu)
5371{
5372 int ilb_cpu;
5373
5374 nohz.next_balance++;
5375
5376 ilb_cpu = find_new_ilb(cpu);
5377
5378 if (ilb_cpu >= nr_cpu_ids)
5379 return;
5380
5381 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
5382 return;
5383
5384
5385
5386
5387
5388
5389 smp_send_reschedule(ilb_cpu);
5390 return;
5391}
5392
5393static inline void nohz_balance_exit_idle(int cpu)
5394{
5395 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5396 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5397 atomic_dec(&nohz.nr_cpus);
5398 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5399 }
5400}
5401
5402static inline void set_cpu_sd_state_busy(void)
5403{
5404 struct sched_domain *sd;
5405 int cpu = smp_processor_id();
5406
5407 if (!test_bit(NOHZ_IDLE, nohz_flags(cpu)))
5408 return;
5409 clear_bit(NOHZ_IDLE, nohz_flags(cpu));
5410
5411 rcu_read_lock();
5412 for_each_domain(cpu, sd)
5413 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
5414 rcu_read_unlock();
5415}
5416
5417void set_cpu_sd_state_idle(void)
5418{
5419 struct sched_domain *sd;
5420 int cpu = smp_processor_id();
5421
5422 if (test_bit(NOHZ_IDLE, nohz_flags(cpu)))
5423 return;
5424 set_bit(NOHZ_IDLE, nohz_flags(cpu));
5425
5426 rcu_read_lock();
5427 for_each_domain(cpu, sd)
5428 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
5429 rcu_read_unlock();
5430}
5431
5432
5433
5434
5435
5436void nohz_balance_enter_idle(int cpu)
5437{
5438
5439
5440
5441 if (!cpu_active(cpu))
5442 return;
5443
5444 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5445 return;
5446
5447 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5448 atomic_inc(&nohz.nr_cpus);
5449 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5450}
5451
5452static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
5453 unsigned long action, void *hcpu)
5454{
5455 switch (action & ~CPU_TASKS_FROZEN) {
5456 case CPU_DYING:
5457 nohz_balance_exit_idle(smp_processor_id());
5458 return NOTIFY_OK;
5459 default:
5460 return NOTIFY_DONE;
5461 }
5462}
5463#endif
5464
5465static DEFINE_SPINLOCK(balancing);
5466
5467
5468
5469
5470
5471void update_max_interval(void)
5472{
5473 max_load_balance_interval = HZ*num_online_cpus()/10;
5474}
5475
5476
5477
5478
5479
5480
5481
5482static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5483{
5484 int balance = 1;
5485 struct rq *rq = cpu_rq(cpu);
5486 unsigned long interval;
5487 struct sched_domain *sd;
5488
5489 unsigned long next_balance = jiffies + 60*HZ;
5490 int update_next_balance = 0;
5491 int need_serialize;
5492
5493 update_blocked_averages(cpu);
5494
5495 rcu_read_lock();
5496 for_each_domain(cpu, sd) {
5497 if (!(sd->flags & SD_LOAD_BALANCE))
5498 continue;
5499
5500 interval = sd->balance_interval;
5501 if (idle != CPU_IDLE)
5502 interval *= sd->busy_factor;
5503
5504
5505 interval = msecs_to_jiffies(interval);
5506 interval = clamp(interval, 1UL, max_load_balance_interval);
5507
5508 need_serialize = sd->flags & SD_SERIALIZE;
5509
5510 if (need_serialize) {
5511 if (!spin_trylock(&balancing))
5512 goto out;
5513 }
5514
5515 if (time_after_eq(jiffies, sd->last_balance + interval)) {
5516 if (load_balance(cpu, rq, sd, idle, &balance)) {
5517
5518
5519
5520
5521 idle = CPU_NOT_IDLE;
5522 }
5523 sd->last_balance = jiffies;
5524 }
5525 if (need_serialize)
5526 spin_unlock(&balancing);
5527out:
5528 if (time_after(next_balance, sd->last_balance + interval)) {
5529 next_balance = sd->last_balance + interval;
5530 update_next_balance = 1;
5531 }
5532
5533
5534
5535
5536
5537
5538 if (!balance)
5539 break;
5540 }
5541 rcu_read_unlock();
5542
5543
5544
5545
5546
5547
5548 if (likely(update_next_balance))
5549 rq->next_balance = next_balance;
5550}
5551
5552#ifdef CONFIG_NO_HZ
5553
5554
5555
5556
5557static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5558{
5559 struct rq *this_rq = cpu_rq(this_cpu);
5560 struct rq *rq;
5561 int balance_cpu;
5562
5563 if (idle != CPU_IDLE ||
5564 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5565 goto end;
5566
5567 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
5568 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
5569 continue;
5570
5571
5572
5573
5574
5575
5576 if (need_resched())
5577 break;
5578
5579 rq = cpu_rq(balance_cpu);
5580
5581 raw_spin_lock_irq(&rq->lock);
5582 update_rq_clock(rq);
5583 update_idle_cpu_load(rq);
5584 raw_spin_unlock_irq(&rq->lock);
5585
5586 rebalance_domains(balance_cpu, CPU_IDLE);
5587
5588 if (time_after(this_rq->next_balance, rq->next_balance))
5589 this_rq->next_balance = rq->next_balance;
5590 }
5591 nohz.next_balance = this_rq->next_balance;
5592end:
5593 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
5594}
5595
5596
5597
5598
5599
5600
5601
5602
5603
5604
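/*
 * Current heuristic for kicking the idle load balancer in the presence
 * of an idle cpu in the system:
 *   - this_rq has more than one runnable task, or
 *   - a sched_group in this cpu's domains shares package resources and
 *     has more than one busy cpu, or
 *   - an SD_ASYM_PACKING domain has busy cpus that should be packed onto
 *     a lower-numbered idle cpu.
 */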
5605static inline int nohz_kick_needed(struct rq *rq, int cpu)
5606{
5607 unsigned long now = jiffies;
5608 struct sched_domain *sd;
5609
5610 if (unlikely(idle_cpu(cpu)))
5611 return 0;
5612
5613
5614
5615
5616
5617 set_cpu_sd_state_busy();
5618 nohz_balance_exit_idle(cpu);
5619
5620
5621
5622
5623
5624 if (likely(!atomic_read(&nohz.nr_cpus)))
5625 return 0;
5626
5627 if (time_before(now, nohz.next_balance))
5628 return 0;
5629
5630 if (rq->nr_running >= 2)
5631 goto need_kick;
5632
5633 rcu_read_lock();
5634 for_each_domain(cpu, sd) {
5635 struct sched_group *sg = sd->groups;
5636 struct sched_group_power *sgp = sg->sgp;
5637 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
5638
5639 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
5640 goto need_kick_unlock;
5641
5642 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5643 && (cpumask_first_and(nohz.idle_cpus_mask,
5644 sched_domain_span(sd)) < cpu))
5645 goto need_kick_unlock;
5646
5647 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5648 break;
5649 }
5650 rcu_read_unlock();
5651 return 0;
5652
5653need_kick_unlock:
5654 rcu_read_unlock();
5655need_kick:
5656 return 1;
5657}
5658#else
5659static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5660#endif
5661
5662
5663
5664
5665
5666static void run_rebalance_domains(struct softirq_action *h)
5667{
5668 int this_cpu = smp_processor_id();
5669 struct rq *this_rq = cpu_rq(this_cpu);
5670 enum cpu_idle_type idle = this_rq->idle_balance ?
5671 CPU_IDLE : CPU_NOT_IDLE;
5672
5673 rebalance_domains(this_cpu, idle);
5674
5675
5676
5677
5678
5679
5680 nohz_idle_balance(this_cpu, idle);
5681}
5682
5683static inline int on_null_domain(int cpu)
5684{
5685 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
5686}
5687
5688
5689
5690
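/*
 * Called from the scheduler tick: raise SCHED_SOFTIRQ when it is time
 * for the periodic load balance, and kick the nohz balancer if needed.
 */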
5691void trigger_load_balance(struct rq *rq, int cpu)
5692{
5693
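/* Don't need to rebalance while attached to NULL domain */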
5694 if (time_after_eq(jiffies, rq->next_balance) &&
5695 likely(!on_null_domain(cpu)))
5696 raise_softirq(SCHED_SOFTIRQ);
5697#ifdef CONFIG_NO_HZ
5698 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
5699 nohz_balancer_kick(cpu);
5700#endif
5701}
5702
5703static void rq_online_fair(struct rq *rq)
5704{
5705 update_sysctl();
5706}
5707
5708static void rq_offline_fair(struct rq *rq)
5709{
5710 update_sysctl();
5711
5712
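/* Ensure that any throttled cfs_rqs on this rq are unthrottled */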
5713 unthrottle_offline_cfs_rqs(rq);
5714}
5715
5716 #endif /* CONFIG_SMP */
5717
5718
5719
5720
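/*
 * Scheduler tick hitting a task of our scheduling class.
 */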
5721static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
5722{
5723 struct cfs_rq *cfs_rq;
5724 struct sched_entity *se = &curr->se;
5725
5726 for_each_sched_entity(se) {
5727 cfs_rq = cfs_rq_of(se);
5728 entity_tick(cfs_rq, se, queued);
5729 }
5730
5731 if (sched_feat_numa(NUMA))
5732 task_tick_numa(rq, curr);
5733
5734 update_rq_runnable_avg(rq, 1);
5735}
5736
5737
5738
5739
5740
5741
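/*
 * Called at fork with the child task as argument from the parent's
 * context: the child is not yet on the task list and preemption is
 * disabled.
 */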
5742static void task_fork_fair(struct task_struct *p)
5743{
5744 struct cfs_rq *cfs_rq;
5745 struct sched_entity *se = &p->se, *curr;
5746 int this_cpu = smp_processor_id();
5747 struct rq *rq = this_rq();
5748 unsigned long flags;
5749
5750 raw_spin_lock_irqsave(&rq->lock, flags);
5751
5752 update_rq_clock(rq);
5753
5754 cfs_rq = task_cfs_rq(current);
5755 curr = cfs_rq->curr;
5756
5757 if (unlikely(task_cpu(p) != this_cpu)) {
5758 rcu_read_lock();
5759 __set_task_cpu(p, this_cpu);
5760 rcu_read_unlock();
5761 }
5762
5763 update_curr(cfs_rq);
5764
5765 if (curr)
5766 se->vruntime = curr->vruntime;
5767 place_entity(cfs_rq, se, 1);
5768
5769 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
5770
5771
5772
5773
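/*
 * Upon rescheduling, sched_class::put_prev_task() will place
 * 'current' within the tree based on its new key value.
 */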
5774 swap(curr->vruntime, se->vruntime);
5775 resched_task(rq->curr);
5776 }
5777
5778 se->vruntime -= cfs_rq->min_vruntime;
5779
5780 raw_spin_unlock_irqrestore(&rq->lock, flags);
5781}
5782
5783
5784
5785
5786
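/*
 * Priority of the task has changed. Check whether the current task
 * needs to be preempted as a result.
 */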
5787static void
5788prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
5789{
5790 if (!p->se.on_rq)
5791 return;
5792
5793
5794
5795
5796
5797
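/*
 * Reschedule if we are currently running on this runqueue and our
 * priority decreased, or if we are not running and our priority is
 * higher than the current task's.
 */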
5798 if (rq->curr == p) {
5799 if (p->prio > oldprio)
5800 resched_task(rq->curr);
5801 } else
5802 check_preempt_curr(rq, p, 0);
5803}
5804
5805static void switched_from_fair(struct rq *rq, struct task_struct *p)
5806{
5807 struct sched_entity *se = &p->se;
5808 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5809
5810
5811
5812
5813
5814
5815
5816
5817
5818
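/*
 * Ensure the task's vruntime is normalized so that a later enqueue
 * does the right thing: if it is on_rq the dequeue has already
 * normalized it, and if it is !on_rq only a sleeping task still
 * carries a non-normalized vruntime.
 */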
5819 if (!se->on_rq && p->state != TASK_RUNNING) {
5820
5821
5822
5823
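/*
 * Fix up our vruntime so that the current sleep does not grant an
 * unbounded sleep bonus when the task returns to this class.
 */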
5824 place_entity(cfs_rq, se, 0);
5825 se->vruntime -= cfs_rq->min_vruntime;
5826 }
5827
5828#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
5829
5830
5831
5832
5833
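/*
 * Remove our blocked load contribution when we leave sched_fair and
 * make sure we don't carry a stale decay_count if we switch back.
 */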
5834 if (p->se.avg.decay_count) {
5835 struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
5836 __synchronize_entity_decay(&p->se);
5837 subtract_blocked_load_contrib(cfs_rq,
5838 p->se.avg.load_avg_contrib);
5839 }
5840#endif
5841}
5842
5843
5844
5845
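/*
 * We switched to the fair scheduling class.
 */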
5846static void switched_to_fair(struct rq *rq, struct task_struct *p)
5847{
5848 if (!p->se.on_rq)
5849 return;
5850
5851
5852
5853
5854
5855
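/*
 * We were most likely switched from sched_rt, so kick off a
 * reschedule if we are running, otherwise just check whether we can
 * preempt the current task.
 */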
5856 if (rq->curr == p)
5857 resched_task(rq->curr);
5858 else
5859 check_preempt_curr(rq, p, 0);
5860}
5861
5862
5863
5864
5865
5866
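/*
 * Account for a task changing its policy or group.
 *
 * This is mostly used to set cfs_rq->curr when a task migrates
 * between groups or classes.
 */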
5867static void set_curr_task_fair(struct rq *rq)
5868{
5869 struct sched_entity *se = &rq->curr->se;
5870
5871 for_each_sched_entity(se) {
5872 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5873
5874 set_next_entity(cfs_rq, se);
5875
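/* ensure bandwidth has been allocated on our new cfs_rq */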
5876 account_cfs_rq_runtime(cfs_rq, 0);
5877 }
5878}
5879
5880void init_cfs_rq(struct cfs_rq *cfs_rq)
5881{
5882 cfs_rq->tasks_timeline = RB_ROOT;
5883 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
5884#ifndef CONFIG_64BIT
5885 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5886#endif
5887#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
5888 atomic64_set(&cfs_rq->decay_counter, 1);
5889 atomic64_set(&cfs_rq->removed_load, 0);
5890#endif
5891}
5892
5893#ifdef CONFIG_FAIR_GROUP_SCHED
5894static void task_move_group_fair(struct task_struct *p, int on_rq)
5895{
5896 struct cfs_rq *cfs_rq;
5897
5898
5899
5900
5901
5902
5903
5904
5905
5906
5907
5908
5909
5910
5911
5912
5913
5914
5915
5916
5917
5918
5919
5920
5921
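/*
 * If the task was not on the rq at the time of this cgroup move it
 * must have been asleep; sleeping tasks keep an absolute ->vruntime
 * on their old cfs_rq until wakeup. If it was on the rq, ->vruntime
 * is already relative to min_vruntime. Both cases must convert their
 * position when moving to the new group's cfs_rq.
 *
 * Some !on_rq cases already hold a normalized vruntime (a freshly
 * forked child that has never run, or a task still in TASK_WAKING),
 * so treat those as if they were on_rq and skip the adjustment.
 */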
5922 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
5923 on_rq = 1;
5924
5925 if (!on_rq)
5926 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
5927 set_task_rq(p, task_cpu(p));
5928 if (!on_rq) {
5929 cfs_rq = cfs_rq_of(&p->se);
5930 p->se.vruntime += cfs_rq->min_vruntime;
5931#ifdef CONFIG_SMP
5932
5933
5934
5935
5936
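/*
 * Synchronize with the new cfs_rq's decay counter and account our
 * blocked load contribution against it.
 */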
5937 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
5938 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
5939#endif
5940 }
5941}
5942
5943void free_fair_sched_group(struct task_group *tg)
5944{
5945 int i;
5946
5947 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
5948
5949 for_each_possible_cpu(i) {
5950 if (tg->cfs_rq)
5951 kfree(tg->cfs_rq[i]);
5952 if (tg->se)
5953 kfree(tg->se[i]);
5954 }
5955
5956 kfree(tg->cfs_rq);
5957 kfree(tg->se);
5958}
5959
5960int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
5961{
5962 struct cfs_rq *cfs_rq;
5963 struct sched_entity *se;
5964 int i;
5965
5966 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
5967 if (!tg->cfs_rq)
5968 goto err;
5969 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
5970 if (!tg->se)
5971 goto err;
5972
5973 tg->shares = NICE_0_LOAD;
5974
5975 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
5976
5977 for_each_possible_cpu(i) {
5978 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
5979 GFP_KERNEL, cpu_to_node(i));
5980 if (!cfs_rq)
5981 goto err;
5982
5983 se = kzalloc_node(sizeof(struct sched_entity),
5984 GFP_KERNEL, cpu_to_node(i));
5985 if (!se)
5986 goto err_free_rq;
5987
5988 init_cfs_rq(cfs_rq);
5989 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
5990 }
5991
5992 return 1;
5993
5994err_free_rq:
5995 kfree(cfs_rq);
5996err:
5997 return 0;
5998}
5999
6000void unregister_fair_sched_group(struct task_group *tg, int cpu)
6001{
6002 struct rq *rq = cpu_rq(cpu);
6003 unsigned long flags;
6004
6005
6006
6007
6008
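/*
 * Only empty task groups can be destroyed, so checking on_list
 * without the rq lock is safe; bail out early if the cfs_rq was
 * never added to the leaf list.
 */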
6009 if (!tg->cfs_rq[cpu]->on_list)
6010 return;
6011
6012 raw_spin_lock_irqsave(&rq->lock, flags);
6013 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6014 raw_spin_unlock_irqrestore(&rq->lock, flags);
6015}
6016
6017void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6018 struct sched_entity *se, int cpu,
6019 struct sched_entity *parent)
6020{
6021 struct rq *rq = cpu_rq(cpu);
6022
6023 cfs_rq->tg = tg;
6024 cfs_rq->rq = rq;
6025 init_cfs_rq_runtime(cfs_rq);
6026
6027 tg->cfs_rq[cpu] = cfs_rq;
6028 tg->se[cpu] = se;
6029
6030
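/* se could be NULL for root_task_group */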
6031 if (!se)
6032 return;
6033
6034 if (!parent)
6035 se->cfs_rq = &rq->cfs;
6036 else
6037 se->cfs_rq = parent->my_q;
6038
6039 se->my_q = cfs_rq;
6040 update_load_set(&se->load, 0);
6041 se->parent = parent;
6042}
6043
6044static DEFINE_MUTEX(shares_mutex);
6045
6046int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6047{
6048 int i;
6049 unsigned long flags;
6050
6051
6052
6053
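/*
 * We can't change the weight of the root cgroup.
 */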
6054 if (!tg->se[0])
6055 return -EINVAL;
6056
6057 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
6058
6059 mutex_lock(&shares_mutex);
6060 if (tg->shares == shares)
6061 goto done;
6062
6063 tg->shares = shares;
6064 for_each_possible_cpu(i) {
6065 struct rq *rq = cpu_rq(i);
6066 struct sched_entity *se;
6067
6068 se = tg->se[i];
6069
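/* Propagate the new shares up this CPU's group entity hierarchy */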
6070 raw_spin_lock_irqsave(&rq->lock, flags);
6071 for_each_sched_entity(se)
6072 update_cfs_shares(group_cfs_rq(se));
6073 raw_spin_unlock_irqrestore(&rq->lock, flags);
6074 }
6075
6076done:
6077 mutex_unlock(&shares_mutex);
6078 return 0;
6079}
6080#else
6081
6082void free_fair_sched_group(struct task_group *tg) { }
6083
6084int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6085{
6086 return 1;
6087}
6088
6089void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6090
6091 #endif /* CONFIG_FAIR_GROUP_SCHED */
6092
6093
6094static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
6095{
6096 struct sched_entity *se = &task->se;
6097 unsigned int rr_interval = 0;
6098
6099
6100
6101
6102
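/*
 * The time slice is zero for SCHED_OTHER tasks that sit on an
 * otherwise idle runqueue.
 */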
6103 if (rq->cfs.load.weight)
6104 rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
6105
6106 return rr_interval;
6107}
6108
6109
6110
6111
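/*
 * All the scheduling class methods:
 */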
6112const struct sched_class fair_sched_class = {
6113 .next = &idle_sched_class,
6114 .enqueue_task = enqueue_task_fair,
6115 .dequeue_task = dequeue_task_fair,
6116 .yield_task = yield_task_fair,
6117 .yield_to_task = yield_to_task_fair,
6118
6119 .check_preempt_curr = check_preempt_wakeup,
6120
6121 .pick_next_task = pick_next_task_fair,
6122 .put_prev_task = put_prev_task_fair,
6123
6124#ifdef CONFIG_SMP
6125 .select_task_rq = select_task_rq_fair,
6126#ifdef CONFIG_FAIR_GROUP_SCHED
6127 .migrate_task_rq = migrate_task_rq_fair,
6128#endif
6129 .rq_online = rq_online_fair,
6130 .rq_offline = rq_offline_fair,
6131
6132 .task_waking = task_waking_fair,
6133#endif
6134
6135 .set_curr_task = set_curr_task_fair,
6136 .task_tick = task_tick_fair,
6137 .task_fork = task_fork_fair,
6138
6139 .prio_changed = prio_changed_fair,
6140 .switched_from = switched_from_fair,
6141 .switched_to = switched_to_fair,
6142
6143 .get_rr_interval = get_rr_interval_fair,
6144
6145#ifdef CONFIG_FAIR_GROUP_SCHED
6146 .task_move_group = task_move_group_fair,
6147#endif
6148};
6149
6150#ifdef CONFIG_SCHED_DEBUG
6151void print_cfs_stats(struct seq_file *m, int cpu)
6152{
6153 struct cfs_rq *cfs_rq;
6154
6155 rcu_read_lock();
6156 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
6157 print_cfs_rq(m, cpu, cfs_rq);
6158 rcu_read_unlock();
6159}
6160#endif
6161
6162__init void init_sched_fair_class(void)
6163{
6164#ifdef CONFIG_SMP
6165 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6166
6167#ifdef CONFIG_NO_HZ
6168 nohz.next_balance = jiffies;
6169 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
6170 cpu_notifier(sched_ilb_notifier, 0);
6171#endif
6172#endif
6173
6174}
6175