/*
 * Completely Fair Scheduling (CFS) class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Implements per-entity vruntime accounting, the red-black run-queue
 * timeline, per-entity load tracking, NUMA balancing and group (cgroup)
 * scheduling support.
 */
#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *(1+ilog(ncpus))
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * Kept at sysctl_sched_latency / sysctl_sched_min_granularity.
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10 msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from the global (tg) pool to the local
 * (per-cpu) pool each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * (default: 5 msec, units: microseconds)
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
116
117static inline void update_load_add(struct load_weight *lw, unsigned long inc)
118{
119 lw->weight += inc;
120 lw->inv_weight = 0;
121}
122
123static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
124{
125 lw->weight -= dec;
126 lw->inv_weight = 0;
127}
128
129static inline void update_load_set(struct load_weight *lw, unsigned long w)
130{
131 lw->weight = w;
132 lw->inv_weight = 0;
133}
134

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs (capped at 8 CPUs below).
 */
144static unsigned int get_update_sysctl_factor(void)
145{
146 unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
147 unsigned int factor;
148
149 switch (sysctl_sched_tunable_scaling) {
150 case SCHED_TUNABLESCALING_NONE:
151 factor = 1;
152 break;
153 case SCHED_TUNABLESCALING_LINEAR:
154 factor = cpus;
155 break;
156 case SCHED_TUNABLESCALING_LOG:
157 default:
158 factor = 1 + ilog2(cpus);
159 break;
160 }
161
162 return factor;
163}
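
/*
 * Example: on a machine with 8 or more online CPUs the default LOG scaling
 * yields factor = 1 + ilog2(8) = 4, so the 6ms normalized latency becomes
 * an effective sysctl_sched_latency of 24ms (see update_sysctl() below).
 */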
164
165static void update_sysctl(void)
166{
167 unsigned int factor = get_update_sysctl_factor();
168
169#define SET_SYSCTL(name) \
170 (sysctl_##name = (factor) * normalized_sysctl_##name)
171 SET_SYSCTL(sched_min_granularity);
172 SET_SYSCTL(sched_latency);
173 SET_SYSCTL(sched_wakeup_granularity);
174#undef SET_SYSCTL
175}
176
177void sched_init_granularity(void)
178{
179 update_sysctl();
180}
181
182#define WMULT_CONST (~0U)
183#define WMULT_SHIFT 32
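
/*
 * Fixed-point weight arithmetic: lw->inv_weight caches approximately
 * 2^WMULT_SHIFT / weight (lazily, in __update_inv_weight() below) so that
 * __calc_delta() can replace a division by the runqueue weight with a
 * multiply and a right shift.
 */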
184
185static void __update_inv_weight(struct load_weight *lw)
186{
187 unsigned long w;
188
189 if (likely(lw->inv_weight))
190 return;
191
192 w = scale_load_down(lw->weight);
193
194 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
195 lw->inv_weight = 1;
196 else if (unlikely(!w))
197 lw->inv_weight = WMULT_CONST;
198 else
199 lw->inv_weight = WMULT_CONST / w;
200}
201

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw comes from the precomputed inverse
 * weight table, in which case shift stays positive because inv_weight fits
 * in 32 bits and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
214static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
215{
216 u64 fact = scale_load_down(weight);
217 int shift = WMULT_SHIFT;
218
219 __update_inv_weight(lw);
220
221 if (unlikely(fact >> 32)) {
222 while (fact >> 32) {
223 fact >>= 1;
224 shift--;
225 }
226 }
227
228
229 fact = (u64)(u32)fact * lw->inv_weight;
230
231 while (fact >> 32) {
232 fact >>= 1;
233 shift--;
234 }
235
236 return mul_u64_u32_shr(delta_exec, fact, shift);
237}
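
/*
 * Rough example: with delta_exec = 1000000ns, weight = NICE_0_LOAD and
 * lw->weight twice NICE_0_LOAD, __calc_delta() returns approximately
 * 500000ns, i.e. delta_exec * weight / lw->weight.
 */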

const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */
#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
249static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
250{
251 return cfs_rq->rq;
252}
253

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se) (!se->my_q)
256
257static inline struct task_struct *task_of(struct sched_entity *se)
258{
259#ifdef CONFIG_SCHED_DEBUG
260 WARN_ON_ONCE(!entity_is_task(se));
261#endif
262 return container_of(se, struct task_struct, se);
263}
264
265
266#define for_each_sched_entity(se) \
267 for (; se; se = se->parent)
268
269static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
270{
271 return p->se.cfs_rq;
272}
273
274
275static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
276{
277 return se->cfs_rq;
278}
279
280
281static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
282{
283 return grp->my_q;
284}
285
286static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
287 int force_update);
288
289static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
290{
291 if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
298 if (cfs_rq->tg->parent &&
299 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
300 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
301 &rq_of(cfs_rq)->leaf_cfs_rq_list);
302 } else {
303 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
304 &rq_of(cfs_rq)->leaf_cfs_rq_list);
305 }
306
307 cfs_rq->on_list = 1;
308
309 update_cfs_rq_blocked_load(cfs_rq, 0);
310 }
311}
312
313static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
314{
315 if (cfs_rq->on_list) {
316 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
317 cfs_rq->on_list = 0;
318 }
319}
320
321
322#define for_each_leaf_cfs_rq(rq, cfs_rq) \
323 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
324
325
326static inline struct cfs_rq *
327is_same_group(struct sched_entity *se, struct sched_entity *pse)
328{
329 if (se->cfs_rq == pse->cfs_rq)
330 return se->cfs_rq;
331
332 return NULL;
333}
334
335static inline struct sched_entity *parent_entity(struct sched_entity *se)
336{
337 return se->parent;
338}
339
340static void
341find_matching_se(struct sched_entity **se, struct sched_entity **pse)
342{
343 int se_depth, pse_depth;
344
	/*
	 * The preemption test can only be made between sibling entities
	 * that are in the same cfs_rq, i.e. that have a common parent.
	 * Walk up the hierarchy of both tasks until we find ancestors that
	 * are siblings of a common parent.
	 */
353 se_depth = (*se)->depth;
354 pse_depth = (*pse)->depth;
355
356 while (se_depth > pse_depth) {
357 se_depth--;
358 *se = parent_entity(*se);
359 }
360
361 while (pse_depth > se_depth) {
362 pse_depth--;
363 *pse = parent_entity(*pse);
364 }
365
366 while (!is_same_group(*se, *pse)) {
367 *se = parent_entity(*se);
368 *pse = parent_entity(*pse);
369 }
370}
371
372#else
373
374static inline struct task_struct *task_of(struct sched_entity *se)
375{
376 return container_of(se, struct task_struct, se);
377}
378
379static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
380{
381 return container_of(cfs_rq, struct rq, cfs);
382}
383
384#define entity_is_task(se) 1
385
386#define for_each_sched_entity(se) \
387 for (; se; se = NULL)
388
389static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
390{
391 return &task_rq(p)->cfs;
392}
393
394static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
395{
396 struct task_struct *p = task_of(se);
397 struct rq *rq = task_rq(p);
398
399 return &rq->cfs;
400}
401
402
403static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
404{
405 return NULL;
406}
407
408static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
409{
410}
411
412static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
413{
414}
415
416#define for_each_leaf_cfs_rq(rq, cfs_rq) \
417 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
418
419static inline struct sched_entity *parent_entity(struct sched_entity *se)
420{
421 return NULL;
422}
423
424static inline void
425find_matching_se(struct sched_entity **se, struct sched_entity **pse)
426{
427}
428
429#endif
430
431static __always_inline
432void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */
438static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
439{
440 s64 delta = (s64)(vruntime - max_vruntime);
441 if (delta > 0)
442 max_vruntime = vruntime;
443
444 return max_vruntime;
445}
446
447static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
448{
449 s64 delta = (s64)(vruntime - min_vruntime);
450 if (delta < 0)
451 min_vruntime = vruntime;
452
453 return min_vruntime;
454}
455
456static inline int entity_before(struct sched_entity *a,
457 struct sched_entity *b)
458{
459 return (s64)(a->vruntime - b->vruntime) < 0;
460}
461
462static void update_min_vruntime(struct cfs_rq *cfs_rq)
463{
464 u64 vruntime = cfs_rq->min_vruntime;
465
466 if (cfs_rq->curr)
467 vruntime = cfs_rq->curr->vruntime;
468
469 if (cfs_rq->rb_leftmost) {
470 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
471 struct sched_entity,
472 run_node);
473
474 if (!cfs_rq->curr)
475 vruntime = se->vruntime;
476 else
477 vruntime = min_vruntime(vruntime, se->vruntime);
478 }
479
480
481 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
482#ifndef CONFIG_64BIT
483 smp_wmb();
484 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
485#endif
486}
487
/*
 * Enqueue an entity into the rb-tree:
 */
491static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
492{
493 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
494 struct rb_node *parent = NULL;
495 struct sched_entity *entry;
496 int leftmost = 1;
497
498
499
500
501 while (*link) {
502 parent = *link;
503 entry = rb_entry(parent, struct sched_entity, run_node);
504
505
506
507
508 if (entity_before(se, entry)) {
509 link = &parent->rb_left;
510 } else {
511 link = &parent->rb_right;
512 leftmost = 0;
513 }
514 }
515
516
517
518
519
520 if (leftmost)
521 cfs_rq->rb_leftmost = &se->run_node;
522
523 rb_link_node(&se->run_node, parent, link);
524 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
525}
526
527static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
528{
529 if (cfs_rq->rb_leftmost == &se->run_node) {
530 struct rb_node *next_node;
531
532 next_node = rb_next(&se->run_node);
533 cfs_rq->rb_leftmost = next_node;
534 }
535
536 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
537}
538
539struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
540{
541 struct rb_node *left = cfs_rq->rb_leftmost;
542
543 if (!left)
544 return NULL;
545
546 return rb_entry(left, struct sched_entity, run_node);
547}
548
549static struct sched_entity *__pick_next_entity(struct sched_entity *se)
550{
551 struct rb_node *next = rb_next(&se->run_node);
552
553 if (!next)
554 return NULL;
555
556 return rb_entry(next, struct sched_entity, run_node);
557}
558
559#ifdef CONFIG_SCHED_DEBUG
560struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
561{
562 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
563
564 if (!last)
565 return NULL;
566
567 return rb_entry(last, struct sched_entity, run_node);
568}
569
/**************************************************************
 * Scheduling class statistics methods:
 */
574int sched_proc_update_handler(struct ctl_table *table, int write,
575 void __user *buffer, size_t *lenp,
576 loff_t *ppos)
577{
578 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
579 unsigned int factor = get_update_sysctl_factor();
580
581 if (ret || !write)
582 return ret;
583
584 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
585 sysctl_sched_min_granularity);
586
587#define WRT_SYSCTL(name) \
588 (normalized_sysctl_##name = sysctl_##name / (factor))
589 WRT_SYSCTL(sched_min_granularity);
590 WRT_SYSCTL(sched_latency);
591 WRT_SYSCTL(sched_wakeup_granularity);
592#undef WRT_SYSCTL
593
594 return 0;
595}
596#endif
597
/*
 * delta /= w
 */
601static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
602{
603 if (unlikely(se->load.weight != NICE_0_LOAD))
604 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
605
606 return delta;
607}
608
/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
617static u64 __sched_period(unsigned long nr_running)
618{
619 u64 period = sysctl_sched_latency;
620 unsigned long nr_latency = sched_nr_latency;
621
622 if (unlikely(nr_running > nr_latency)) {
623 period = sysctl_sched_min_granularity;
624 period *= nr_running;
625 }
626
627 return period;
628}
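
/*
 * With the default tunables (latency 6ms, min granularity 0.75ms,
 * sched_nr_latency 8, unscaled): 4 runnable tasks share a 6ms period,
 * while 16 runnable tasks stretch the period to 16 * 0.75ms = 12ms.
 */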
629
/*
 * We calculate the wall-clock slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
636static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
637{
638 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
639
640 for_each_sched_entity(se) {
641 struct load_weight *load;
642 struct load_weight lw;
643
644 cfs_rq = cfs_rq_of(se);
645 load = &cfs_rq->load;
646
647 if (unlikely(!se->on_rq)) {
648 lw = cfs_rq->load;
649
650 update_load_add(&lw, se->load.weight);
651 load = &lw;
652 }
653 slice = __calc_delta(slice, se->load.weight, load);
654 }
655 return slice;
656}
657
/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
663static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
664{
665 return calc_delta_fair(sched_slice(cfs_rq, se), se);
666}
667
668#ifdef CONFIG_SMP
669static int select_idle_sibling(struct task_struct *p, int cpu);
670static unsigned long task_h_load(struct task_struct *p);
671
672static inline void __update_task_entity_contrib(struct sched_entity *se);
673static inline void __update_task_entity_utilization(struct sched_entity *se);

/* Give a new task initial runnable-average values so it carries load from the start */
676void init_task_runnable_average(struct task_struct *p)
677{
678 u32 slice;
679
680 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
681 p->se.avg.runnable_avg_sum = p->se.avg.running_avg_sum = slice;
682 p->se.avg.avg_period = slice;
683 __update_task_entity_contrib(&p->se);
684 __update_task_entity_utilization(&p->se);
685}
686#else
687void init_task_runnable_average(struct task_struct *p)
688{
689}
690#endif
691
/*
 * Update the current task's runtime statistics.
 */
695static void update_curr(struct cfs_rq *cfs_rq)
696{
697 struct sched_entity *curr = cfs_rq->curr;
698 u64 now = rq_clock_task(rq_of(cfs_rq));
699 u64 delta_exec;
700
701 if (unlikely(!curr))
702 return;
703
704 delta_exec = now - curr->exec_start;
705 if (unlikely((s64)delta_exec <= 0))
706 return;
707
708 curr->exec_start = now;
709
710 schedstat_set(curr->statistics.exec_max,
711 max(delta_exec, curr->statistics.exec_max));
712
713 curr->sum_exec_runtime += delta_exec;
714 schedstat_add(cfs_rq, exec_clock, delta_exec);
715
716 curr->vruntime += calc_delta_fair(delta_exec, curr);
717 update_min_vruntime(cfs_rq);
718
719 if (entity_is_task(curr)) {
720 struct task_struct *curtask = task_of(curr);
721
722 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
723 cpuacct_charge(curtask, delta_exec);
724 account_group_exec_runtime(curtask, delta_exec);
725 }
726
727 account_cfs_rq_runtime(cfs_rq, delta_exec);
728}
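
/*
 * Note that vruntime advances at wall-clock rate for a nice-0 task and at
 * half that rate for a task with twice the nice-0 weight, which is what
 * makes accumulated vruntime comparable across differently weighted entities.
 */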
729
730static void update_curr_fair(struct rq *rq)
731{
732 update_curr(cfs_rq_of(&rq->curr->se));
733}
734
735static inline void
736update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
737{
738 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
739}
740
/*
 * Task is being enqueued - update stats:
 */
744static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
745{
746
747
748
749
750 if (se != cfs_rq->curr)
751 update_stats_wait_start(cfs_rq, se);
752}
753
754static void
755update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
756{
757 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
758 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
759 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
760 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
761 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
762#ifdef CONFIG_SCHEDSTATS
763 if (entity_is_task(se)) {
764 trace_sched_stat_wait(task_of(se),
765 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
766 }
767#endif
768 schedstat_set(se->statistics.wait_start, 0);
769}
770
771static inline void
772update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
773{
774
775
776
777
778 if (se != cfs_rq->curr)
779 update_stats_wait_end(cfs_rq, se);
780}
781
782
783
784
785static inline void
786update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
787{
788
789
790
791 se->exec_start = rq_clock_task(rq_of(cfs_rq));
792}
793
/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;
812
813static unsigned int task_nr_scan_windows(struct task_struct *p)
814{
815 unsigned long rss = 0;
816 unsigned long nr_scan_pages;
817
818
819
820
821
822
823 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
824 rss = get_mm_rss(p->mm);
825 if (!rss)
826 rss = nr_scan_pages;
827
828 rss = round_up(rss, nr_scan_pages);
829 return rss / nr_scan_pages;
830}
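
/*
 * Example: with the default numa_balancing_scan_size of 256MB, a task with
 * a 1GB RSS is covered in four scan windows; a task with no RSS counts as a
 * single window.
 */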
831
832
833#define MAX_SCAN_WINDOW 2560
834
835static unsigned int task_scan_min(struct task_struct *p)
836{
837 unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
838 unsigned int scan, floor;
839 unsigned int windows = 1;
840
841 if (scan_size < MAX_SCAN_WINDOW)
842 windows = MAX_SCAN_WINDOW / scan_size;
843 floor = 1000 / windows;
844
845 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
846 return max_t(unsigned int, floor, scan);
847}
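
/*
 * Example: with the defaults (scan_size 256MB, scan_period_min 1000ms) the
 * floor is 1000ms / (2560/256) = 100ms, so even a task with a huge RSS never
 * scans more often than every 100ms.
 */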
848
849static unsigned int task_scan_max(struct task_struct *p)
850{
851 unsigned int smin = task_scan_min(p);
852 unsigned int smax;
853
854
855 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
856 return max(smin, smax);
857}
858
859static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
860{
861 rq->nr_numa_running += (p->numa_preferred_nid != -1);
862 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
863}
864
865static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
866{
867 rq->nr_numa_running -= (p->numa_preferred_nid != -1);
868 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
869}
870
871struct numa_group {
872 atomic_t refcount;
873
874 spinlock_t lock;
875 int nr_tasks;
876 pid_t gid;
877
878 struct rcu_head rcu;
879 nodemask_t active_nodes;
880 unsigned long total_faults;
881
882
883
884
885
886 unsigned long *faults_cpu;
887 unsigned long faults[0];
888};
889
/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
898
899pid_t task_numa_group_id(struct task_struct *p)
900{
901 return p->numa_group ? p->numa_group->gid : 0;
902}
903
/*
 * The averaged statistics, shared & private, memory & cpu,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
910static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
911{
912 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
913}
914
915static inline unsigned long task_faults(struct task_struct *p, int nid)
916{
917 if (!p->numa_faults)
918 return 0;
919
920 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
921 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
922}
923
924static inline unsigned long group_faults(struct task_struct *p, int nid)
925{
926 if (!p->numa_group)
927 return 0;
928
929 return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
930 p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
931}
932
933static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
934{
935 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
936 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
937}
938
939
940static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
941 int maxdist, bool task)
942{
943 unsigned long score = 0;
944 int node;
945
946
947
948
949
950 if (sched_numa_topology_type == NUMA_DIRECT)
951 return 0;
952
953
954
955
956
957 for_each_online_node(node) {
958 unsigned long faults;
959 int dist = node_distance(nid, node);
960
961
962
963
964
965 if (dist == sched_max_numa_distance || node == nid)
966 continue;
967
968
969
970
971
972
973
974
975 if (sched_numa_topology_type == NUMA_BACKPLANE &&
976 dist > maxdist)
977 continue;
978
979
980 if (task)
981 faults = task_faults(p, node);
982 else
983 faults = group_faults(p, node);
984
985
986
987
988
989
990
991
992
993 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
994 faults *= (sched_max_numa_distance - dist);
995 faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
996 }
997
998 score += faults;
999 }
1000
1001 return score;
1002}
1003
/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group together tasks that are almost
 * evenly spread out between numa nodes.
 */
1010static inline unsigned long task_weight(struct task_struct *p, int nid,
1011 int dist)
1012{
1013 unsigned long faults, total_faults;
1014
1015 if (!p->numa_faults)
1016 return 0;
1017
1018 total_faults = p->total_numa_faults;
1019
1020 if (!total_faults)
1021 return 0;
1022
1023 faults = task_faults(p, nid);
1024 faults += score_nearby_nodes(p, nid, dist, true);
1025
1026 return 1000 * faults / total_faults;
1027}
1028
1029static inline unsigned long group_weight(struct task_struct *p, int nid,
1030 int dist)
1031{
1032 unsigned long faults, total_faults;
1033
1034 if (!p->numa_group)
1035 return 0;
1036
1037 total_faults = p->numa_group->total_faults;
1038
1039 if (!total_faults)
1040 return 0;
1041
1042 faults = group_faults(p, nid);
1043 faults += score_nearby_nodes(p, nid, dist, false);
1044
1045 return 1000 * faults / total_faults;
1046}
1047
1048bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1049 int src_nid, int dst_cpu)
1050{
1051 struct numa_group *ng = p->numa_group;
1052 int dst_nid = cpu_to_node(dst_cpu);
1053 int last_cpupid, this_cpupid;
1054
1055 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1075 if (!cpupid_pid_unset(last_cpupid) &&
1076 cpupid_to_nid(last_cpupid) != dst_nid)
1077 return false;
1078
1079
1080 if (cpupid_match_pid(p, last_cpupid))
1081 return true;
1082
1083
1084 if (!ng)
1085 return true;
1086
1087
1088
1089
1090
1091 if (!node_isset(dst_nid, ng->active_nodes))
1092 return false;
1093
1094
1095
1096
1097
1098 if (!node_isset(src_nid, ng->active_nodes))
1099 return true;
1100
1101
1102
1103
1104
1105
1106
1107
1108 return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
1109}
1110
1111static unsigned long weighted_cpuload(const int cpu);
1112static unsigned long source_load(int cpu, int type);
1113static unsigned long target_load(int cpu, int type);
1114static unsigned long capacity_of(int cpu);
1115static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1116
1117
1118struct numa_stats {
1119 unsigned long nr_running;
1120 unsigned long load;
1121
1122
1123 unsigned long compute_capacity;
1124
1125
1126 unsigned long task_capacity;
1127 int has_free_capacity;
1128};
1129
1130
1131
1132
1133static void update_numa_stats(struct numa_stats *ns, int nid)
1134{
1135 int smt, cpu, cpus = 0;
1136 unsigned long capacity;
1137
1138 memset(ns, 0, sizeof(*ns));
1139 for_each_cpu(cpu, cpumask_of_node(nid)) {
1140 struct rq *rq = cpu_rq(cpu);
1141
1142 ns->nr_running += rq->nr_running;
1143 ns->load += weighted_cpuload(cpu);
1144 ns->compute_capacity += capacity_of(cpu);
1145
1146 cpus++;
1147 }
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157 if (!cpus)
1158 return;
1159
1160
1161 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1162 capacity = cpus / smt;
1163
1164 ns->task_capacity = min_t(unsigned, capacity,
1165 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
1166 ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1167}
1168
1169struct task_numa_env {
1170 struct task_struct *p;
1171
1172 int src_cpu, src_nid;
1173 int dst_cpu, dst_nid;
1174
1175 struct numa_stats src_stats, dst_stats;
1176
1177 int imbalance_pct;
1178 int dist;
1179
1180 struct task_struct *best_task;
1181 long best_imp;
1182 int best_cpu;
1183};
1184
1185static void task_numa_assign(struct task_numa_env *env,
1186 struct task_struct *p, long imp)
1187{
1188 if (env->best_task)
1189 put_task_struct(env->best_task);
1190 if (p)
1191 get_task_struct(p);
1192
1193 env->best_task = p;
1194 env->best_imp = imp;
1195 env->best_cpu = env->dst_cpu;
1196}
1197
1198static bool load_too_imbalanced(long src_load, long dst_load,
1199 struct task_numa_env *env)
1200{
1201 long imb, old_imb;
1202 long orig_src_load, orig_dst_load;
1203 long src_capacity, dst_capacity;
1204
1205
1206
1207
1208
1209
1210
1211
1212 src_capacity = env->src_stats.compute_capacity;
1213 dst_capacity = env->dst_stats.compute_capacity;
1214
1215
1216 if (dst_load < src_load)
1217 swap(dst_load, src_load);
1218
1219
1220 imb = dst_load * src_capacity * 100 -
1221 src_load * dst_capacity * env->imbalance_pct;
1222 if (imb <= 0)
1223 return false;
1224
1225
1226
1227
1228
1229 orig_src_load = env->src_stats.load;
1230 orig_dst_load = env->dst_stats.load;
1231
1232 if (orig_dst_load < orig_src_load)
1233 swap(orig_dst_load, orig_src_load);
1234
1235 old_imb = orig_dst_load * src_capacity * 100 -
1236 orig_src_load * dst_capacity * env->imbalance_pct;
1237
1238
1239 return (imb > old_imb);
1240}
1241
/*
 * This checks whether the overall compute and NUMA accesses of the system
 * would be improved if the source task were migrated to the target dst_cpu,
 * taking into account that the task currently running on dst_cpu may be
 * best exchanged with the source task.
 */
1248static void task_numa_compare(struct task_numa_env *env,
1249 long taskimp, long groupimp)
1250{
1251 struct rq *src_rq = cpu_rq(env->src_cpu);
1252 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1253 struct task_struct *cur;
1254 long src_load, dst_load;
1255 long load;
1256 long imp = env->p->numa_group ? groupimp : taskimp;
1257 long moveimp = imp;
1258 int dist = env->dist;
1259
1260 rcu_read_lock();
1261
1262 raw_spin_lock_irq(&dst_rq->lock);
1263 cur = dst_rq->curr;
1264
1265
1266
1267
1268
1269
1270
1271 if ((cur->flags & PF_EXITING) || is_idle_task(cur))
1272 cur = NULL;
1273 raw_spin_unlock_irq(&dst_rq->lock);
1274
1275
1276
1277
1278
1279 if (cur == env->p)
1280 goto unlock;
1281
1282
1283
1284
1285
1286
1287
1288
1289 if (cur) {
1290
1291 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1292 goto unlock;
1293
1294
1295
1296
1297
1298 if (cur->numa_group == env->p->numa_group) {
1299 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1300 task_weight(cur, env->dst_nid, dist);
1301
1302
1303
1304
1305 if (cur->numa_group)
1306 imp -= imp/16;
1307 } else {
1308
1309
1310
1311
1312
1313 if (cur->numa_group)
1314 imp += group_weight(cur, env->src_nid, dist) -
1315 group_weight(cur, env->dst_nid, dist);
1316 else
1317 imp += task_weight(cur, env->src_nid, dist) -
1318 task_weight(cur, env->dst_nid, dist);
1319 }
1320 }
1321
1322 if (imp <= env->best_imp && moveimp <= env->best_imp)
1323 goto unlock;
1324
1325 if (!cur) {
1326
1327 if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
1328 !env->dst_stats.has_free_capacity)
1329 goto unlock;
1330
1331 goto balance;
1332 }
1333
1334
1335 if (imp > env->best_imp && src_rq->nr_running == 1 &&
1336 dst_rq->nr_running == 1)
1337 goto assign;
1338
1339
1340
1341
1342balance:
1343 load = task_h_load(env->p);
1344 dst_load = env->dst_stats.load + load;
1345 src_load = env->src_stats.load - load;
1346
1347 if (moveimp > imp && moveimp > env->best_imp) {
1348
1349
1350
1351
1352
1353
1354 if (!load_too_imbalanced(src_load, dst_load, env)) {
1355 imp = moveimp - 1;
1356 cur = NULL;
1357 goto assign;
1358 }
1359 }
1360
1361 if (imp <= env->best_imp)
1362 goto unlock;
1363
1364 if (cur) {
1365 load = task_h_load(cur);
1366 dst_load -= load;
1367 src_load += load;
1368 }
1369
1370 if (load_too_imbalanced(src_load, dst_load, env))
1371 goto unlock;
1372
1373
1374
1375
1376
1377 if (!cur)
1378 env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
1379
1380assign:
1381 task_numa_assign(env, cur, imp);
1382unlock:
1383 rcu_read_unlock();
1384}
1385
1386static void task_numa_find_cpu(struct task_numa_env *env,
1387 long taskimp, long groupimp)
1388{
1389 int cpu;
1390
1391 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1392
1393 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1394 continue;
1395
1396 env->dst_cpu = cpu;
1397 task_numa_compare(env, taskimp, groupimp);
1398 }
1399}
1400
/*
 * Decide whether the destination node can take another task: refuse if the
 * source still has free capacity while the destination does not, otherwise
 * allow the move when the destination is less loaded relative to its
 * compute capacity.
 */
1402static bool numa_has_capacity(struct task_numa_env *env)
1403{
1404 struct numa_stats *src = &env->src_stats;
1405 struct numa_stats *dst = &env->dst_stats;
1406
1407 if (src->has_free_capacity && !dst->has_free_capacity)
1408 return false;
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418 if (src->load * dst->compute_capacity >
1419 dst->load * src->compute_capacity)
1420 return true;
1421
1422 return false;
1423}
1424
1425static int task_numa_migrate(struct task_struct *p)
1426{
1427 struct task_numa_env env = {
1428 .p = p,
1429
1430 .src_cpu = task_cpu(p),
1431 .src_nid = task_node(p),
1432
1433 .imbalance_pct = 112,
1434
1435 .best_task = NULL,
1436 .best_imp = 0,
1437 .best_cpu = -1
1438 };
1439 struct sched_domain *sd;
1440 unsigned long taskweight, groupweight;
1441 int nid, ret, dist;
1442 long taskimp, groupimp;
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452 rcu_read_lock();
1453 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1454 if (sd)
1455 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1456 rcu_read_unlock();
1457
1458
1459
1460
1461
1462
1463
1464 if (unlikely(!sd)) {
1465 p->numa_preferred_nid = task_node(p);
1466 return -EINVAL;
1467 }
1468
1469 env.dst_nid = p->numa_preferred_nid;
1470 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1471 taskweight = task_weight(p, env.src_nid, dist);
1472 groupweight = group_weight(p, env.src_nid, dist);
1473 update_numa_stats(&env.src_stats, env.src_nid);
1474 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1475 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
1476 update_numa_stats(&env.dst_stats, env.dst_nid);
1477
1478
1479 if (numa_has_capacity(&env))
1480 task_numa_find_cpu(&env, taskimp, groupimp);
1481
1482
1483
1484
1485
1486
1487
1488
1489 if (env.best_cpu == -1 || (p->numa_group &&
1490 nodes_weight(p->numa_group->active_nodes) > 1)) {
1491 for_each_online_node(nid) {
1492 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1493 continue;
1494
1495 dist = node_distance(env.src_nid, env.dst_nid);
1496 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1497 dist != env.dist) {
1498 taskweight = task_weight(p, env.src_nid, dist);
1499 groupweight = group_weight(p, env.src_nid, dist);
1500 }
1501
1502
1503 taskimp = task_weight(p, nid, dist) - taskweight;
1504 groupimp = group_weight(p, nid, dist) - groupweight;
1505 if (taskimp < 0 && groupimp < 0)
1506 continue;
1507
1508 env.dist = dist;
1509 env.dst_nid = nid;
1510 update_numa_stats(&env.dst_stats, env.dst_nid);
1511 if (numa_has_capacity(&env))
1512 task_numa_find_cpu(&env, taskimp, groupimp);
1513 }
1514 }
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524 if (p->numa_group) {
1525 if (env.best_cpu == -1)
1526 nid = env.src_nid;
1527 else
1528 nid = env.dst_nid;
1529
1530 if (node_isset(nid, p->numa_group->active_nodes))
1531 sched_setnuma(p, env.dst_nid);
1532 }
1533
1534
1535 if (env.best_cpu == -1)
1536 return -EAGAIN;
1537
1538
1539
1540
1541
1542 p->numa_scan_period = task_scan_min(p);
1543
1544 if (env.best_task == NULL) {
1545 ret = migrate_task_to(p, env.best_cpu);
1546 if (ret != 0)
1547 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1548 return ret;
1549 }
1550
1551 ret = migrate_swap(p, env.best_task);
1552 if (ret != 0)
1553 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1554 put_task_struct(env.best_task);
1555 return ret;
1556}

/* Attempt to migrate a task to a CPU on the preferred node. */
1559static void numa_migrate_preferred(struct task_struct *p)
1560{
1561 unsigned long interval = HZ;
1562
1563
1564 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
1565 return;
1566
1567
1568 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1569 p->numa_migrate_retry = jiffies + interval;
1570
1571
1572 if (task_node(p) == p->numa_preferred_nid)
1573 return;
1574
1575
1576 task_numa_migrate(p);
1577}
1578

/*
 * Find the nodes on which the workload is actively running. We do this by
 * tracking the nodes from which NUMA hinting faults are triggered. This can
 * be different from the set of nodes where the workload's memory is currently
 * located.
 *
 * The bitmask is used to make smarter decisions about when to do NUMA page
 * migrations etc. The hysteresis below (join above 6/16 of the maximum fault
 * count, leave below 3/16) keeps the mask from flapping.
 */
1590static void update_numa_active_node_mask(struct numa_group *numa_group)
1591{
1592 unsigned long faults, max_faults = 0;
1593 int nid;
1594
1595 for_each_online_node(nid) {
1596 faults = group_faults_cpu(numa_group, nid);
1597 if (faults > max_faults)
1598 max_faults = faults;
1599 }
1600
1601 for_each_online_node(nid) {
1602 faults = group_faults_cpu(numa_group, nid);
1603 if (!node_isset(nid, numa_group->active_nodes)) {
1604 if (faults > max_faults * 6 / 16)
1605 node_set(nid, numa_group->active_nodes);
1606 } else if (faults < max_faults * 3 / 16)
1607 node_clear(nid, numa_group->active_nodes);
1608 }
1609}
1610

/*
 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
 * increments. The more local the fault statistics are, the longer the scan
 * period will be for the next scan window. If the local/(local+remote) ratio
 * is below NUMA_PERIOD_THRESHOLD (where the range of the ratio is
 * 1..NUMA_PERIOD_SLOTS) the scan period will decrease. Aim for 70% local
 * accesses.
 */
#define NUMA_PERIOD_SLOTS 10
#define NUMA_PERIOD_THRESHOLD 7
1620
1621
1622
1623
1624
1625
1626
1627static void update_task_scan_period(struct task_struct *p,
1628 unsigned long shared, unsigned long private)
1629{
1630 unsigned int period_slot;
1631 int ratio;
1632 int diff;
1633
1634 unsigned long remote = p->numa_faults_locality[0];
1635 unsigned long local = p->numa_faults_locality[1];
1636
1637
1638
1639
1640
1641
1642
1643
1644 if (local + shared == 0 || p->numa_faults_locality[2]) {
1645 p->numa_scan_period = min(p->numa_scan_period_max,
1646 p->numa_scan_period << 1);
1647
1648 p->mm->numa_next_scan = jiffies +
1649 msecs_to_jiffies(p->numa_scan_period);
1650
1651 return;
1652 }
1653
1654
1655
1656
1657
1658
1659
1660 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1661 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1662 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1663 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1664 if (!slot)
1665 slot = 1;
1666 diff = slot * period_slot;
1667 } else {
1668 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
1679 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1680 }
1681
1682 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1683 task_scan_min(p), task_scan_max(p));
1684 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1685}
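
/*
 * Example of the adjustment above: with a 1000ms scan period
 * (period_slot = 100ms), a window with 90% local faults (ratio 9) grows the
 * period by (9 - 7) * 100ms = 200ms, while 30% local faults shrink it by up
 * to (7 - 3) * 100ms = 400ms, scaled down by the private-fault fraction.
 */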
1686
/*
 * Get the fraction of time the task has been running since the last
 * NUMA placement cycle. The scheduler keeps similar statistics, but
 * decays those on a 32ms period, which is orders of magnitude shorter
 * than the dozens-of-seconds NUMA balancing period. Use the scheduler
 * runtime accounting instead, falling back to the runnable average for
 * a task that has not yet been through a placement cycle.
 */
1694static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1695{
1696 u64 runtime, delta, now;
1697
1698 now = p->se.exec_start;
1699 runtime = p->se.sum_exec_runtime;
1700
1701 if (p->last_task_numa_placement) {
1702 delta = runtime - p->last_sum_exec_runtime;
1703 *period = now - p->last_task_numa_placement;
1704 } else {
1705 delta = p->se.avg.runnable_avg_sum;
1706 *period = p->se.avg.avg_period;
1707 }
1708
1709 p->last_sum_exec_runtime = runtime;
1710 p->last_task_numa_placement = now;
1711
1712 return delta;
1713}
1714
/*
 * Determine the preferred nid for a task in a numa_group. This needs to
 * be done in a way that produces consistent results with group_weight,
 * otherwise workloads might not converge.
 */
1720static int preferred_group_nid(struct task_struct *p, int nid)
1721{
1722 nodemask_t nodes;
1723 int dist;
1724
1725
1726 if (sched_numa_topology_type == NUMA_DIRECT)
1727 return nid;
1728
1729
1730
1731
1732
1733
1734 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1735 unsigned long score, max_score = 0;
1736 int node, max_node = nid;
1737
1738 dist = sched_max_numa_distance;
1739
1740 for_each_online_node(node) {
1741 score = group_weight(p, node, dist);
1742 if (score > max_score) {
1743 max_score = score;
1744 max_node = node;
1745 }
1746 }
1747 return max_node;
1748 }
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759 nodes = node_online_map;
1760 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
1761 unsigned long max_faults = 0;
1762 nodemask_t max_group = NODE_MASK_NONE;
1763 int a, b;
1764
1765
1766 if (!find_numa_distance(dist))
1767 continue;
1768
1769 for_each_node_mask(a, nodes) {
1770 unsigned long faults = 0;
1771 nodemask_t this_group;
1772 nodes_clear(this_group);
1773
1774
1775 for_each_node_mask(b, nodes) {
1776 if (node_distance(a, b) < dist) {
1777 faults += group_faults(p, b);
1778 node_set(b, this_group);
1779 node_clear(b, nodes);
1780 }
1781 }
1782
1783
1784 if (faults > max_faults) {
1785 max_faults = faults;
1786 max_group = this_group;
1787
1788
1789
1790
1791
1792 nid = a;
1793 }
1794 }
1795
1796 if (!max_faults)
1797 break;
1798 nodes = max_group;
1799 }
1800 return nid;
1801}
1802
1803static void task_numa_placement(struct task_struct *p)
1804{
1805 int seq, nid, max_nid = -1, max_group_nid = -1;
1806 unsigned long max_faults = 0, max_group_faults = 0;
1807 unsigned long fault_types[2] = { 0, 0 };
1808 unsigned long total_faults;
1809 u64 runtime, period;
1810 spinlock_t *group_lock = NULL;
1811
1812
1813
1814
1815
1816
1817 seq = READ_ONCE(p->mm->numa_scan_seq);
1818 if (p->numa_scan_seq == seq)
1819 return;
1820 p->numa_scan_seq = seq;
1821 p->numa_scan_period_max = task_scan_max(p);
1822
1823 total_faults = p->numa_faults_locality[0] +
1824 p->numa_faults_locality[1];
1825 runtime = numa_get_avg_runtime(p, &period);
1826
1827
1828 if (p->numa_group) {
1829 group_lock = &p->numa_group->lock;
1830 spin_lock_irq(group_lock);
1831 }
1832
1833
1834 for_each_online_node(nid) {
1835
1836 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
1837 unsigned long faults = 0, group_faults = 0;
1838 int priv;
1839
1840 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
1841 long diff, f_diff, f_weight;
1842
1843 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
1844 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
1845 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
1846 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
1847
1848
1849 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
1850 fault_types[priv] += p->numa_faults[membuf_idx];
1851 p->numa_faults[membuf_idx] = 0;
1852
1853
1854
1855
1856
1857
1858
1859
1860 f_weight = div64_u64(runtime << 16, period + 1);
1861 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
1862 (total_faults + 1);
1863 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
1864 p->numa_faults[cpubuf_idx] = 0;
1865
1866 p->numa_faults[mem_idx] += diff;
1867 p->numa_faults[cpu_idx] += f_diff;
1868 faults += p->numa_faults[mem_idx];
1869 p->total_numa_faults += diff;
1870 if (p->numa_group) {
1871
1872
1873
1874
1875
1876
1877
1878 p->numa_group->faults[mem_idx] += diff;
1879 p->numa_group->faults_cpu[mem_idx] += f_diff;
1880 p->numa_group->total_faults += diff;
1881 group_faults += p->numa_group->faults[mem_idx];
1882 }
1883 }
1884
1885 if (faults > max_faults) {
1886 max_faults = faults;
1887 max_nid = nid;
1888 }
1889
1890 if (group_faults > max_group_faults) {
1891 max_group_faults = group_faults;
1892 max_group_nid = nid;
1893 }
1894 }
1895
1896 update_task_scan_period(p, fault_types[0], fault_types[1]);
1897
1898 if (p->numa_group) {
1899 update_numa_active_node_mask(p->numa_group);
1900 spin_unlock_irq(group_lock);
1901 max_nid = preferred_group_nid(p, max_group_nid);
1902 }
1903
1904 if (max_faults) {
1905
1906 if (max_nid != p->numa_preferred_nid)
1907 sched_setnuma(p, max_nid);
1908
1909 if (task_node(p) != p->numa_preferred_nid)
1910 numa_migrate_preferred(p);
1911 }
1912}
1913
1914static inline int get_numa_group(struct numa_group *grp)
1915{
1916 return atomic_inc_not_zero(&grp->refcount);
1917}
1918
1919static inline void put_numa_group(struct numa_group *grp)
1920{
1921 if (atomic_dec_and_test(&grp->refcount))
1922 kfree_rcu(grp, rcu);
1923}
1924
1925static void task_numa_group(struct task_struct *p, int cpupid, int flags,
1926 int *priv)
1927{
1928 struct numa_group *grp, *my_grp;
1929 struct task_struct *tsk;
1930 bool join = false;
1931 int cpu = cpupid_to_cpu(cpupid);
1932 int i;
1933
1934 if (unlikely(!p->numa_group)) {
1935 unsigned int size = sizeof(struct numa_group) +
1936 4*nr_node_ids*sizeof(unsigned long);
1937
1938 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1939 if (!grp)
1940 return;
1941
1942 atomic_set(&grp->refcount, 1);
1943 spin_lock_init(&grp->lock);
1944 grp->gid = p->pid;
1945
1946 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
1947 nr_node_ids;
1948
1949 node_set(task_node(current), grp->active_nodes);
1950
1951 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1952 grp->faults[i] = p->numa_faults[i];
1953
1954 grp->total_faults = p->total_numa_faults;
1955
1956 grp->nr_tasks++;
1957 rcu_assign_pointer(p->numa_group, grp);
1958 }
1959
1960 rcu_read_lock();
1961 tsk = READ_ONCE(cpu_rq(cpu)->curr);
1962
1963 if (!cpupid_match_pid(tsk, cpupid))
1964 goto no_join;
1965
1966 grp = rcu_dereference(tsk->numa_group);
1967 if (!grp)
1968 goto no_join;
1969
1970 my_grp = p->numa_group;
1971 if (grp == my_grp)
1972 goto no_join;
1973
1974
1975
1976
1977
1978 if (my_grp->nr_tasks > grp->nr_tasks)
1979 goto no_join;
1980
1981
1982
1983
1984 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
1985 goto no_join;
1986
1987
1988 if (tsk->mm == current->mm)
1989 join = true;
1990
1991
1992 if (flags & TNF_SHARED)
1993 join = true;
1994
1995
1996 *priv = !join;
1997
1998 if (join && !get_numa_group(grp))
1999 goto no_join;
2000
2001 rcu_read_unlock();
2002
2003 if (!join)
2004 return;
2005
2006 BUG_ON(irqs_disabled());
2007 double_lock_irq(&my_grp->lock, &grp->lock);
2008
2009 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2010 my_grp->faults[i] -= p->numa_faults[i];
2011 grp->faults[i] += p->numa_faults[i];
2012 }
2013 my_grp->total_faults -= p->total_numa_faults;
2014 grp->total_faults += p->total_numa_faults;
2015
2016 my_grp->nr_tasks--;
2017 grp->nr_tasks++;
2018
2019 spin_unlock(&my_grp->lock);
2020 spin_unlock_irq(&grp->lock);
2021
2022 rcu_assign_pointer(p->numa_group, grp);
2023
2024 put_numa_group(my_grp);
2025 return;
2026
2027no_join:
2028 rcu_read_unlock();
2029 return;
2030}
2031
2032void task_numa_free(struct task_struct *p)
2033{
2034 struct numa_group *grp = p->numa_group;
2035 void *numa_faults = p->numa_faults;
2036 unsigned long flags;
2037 int i;
2038
2039 if (grp) {
2040 spin_lock_irqsave(&grp->lock, flags);
2041 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2042 grp->faults[i] -= p->numa_faults[i];
2043 grp->total_faults -= p->total_numa_faults;
2044
2045 grp->nr_tasks--;
2046 spin_unlock_irqrestore(&grp->lock, flags);
2047 RCU_INIT_POINTER(p->numa_group, NULL);
2048 put_numa_group(grp);
2049 }
2050
2051 p->numa_faults = NULL;
2052 kfree(numa_faults);
2053}
2054
/*
 * Got a PROT_NONE hinting fault for a page on @mem_node.
 */
2058void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2059{
2060 struct task_struct *p = current;
2061 bool migrated = flags & TNF_MIGRATED;
2062 int cpu_node = task_node(current);
2063 int local = !!(flags & TNF_FAULT_LOCAL);
2064 int priv;
2065
2066 if (!numabalancing_enabled)
2067 return;
2068
2069
2070 if (!p->mm)
2071 return;
2072
2073
2074 if (unlikely(!p->numa_faults)) {
2075 int size = sizeof(*p->numa_faults) *
2076 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2077
2078 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2079 if (!p->numa_faults)
2080 return;
2081
2082 p->total_numa_faults = 0;
2083 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2084 }
2085
2086
2087
2088
2089
2090 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2091 priv = 1;
2092 } else {
2093 priv = cpupid_match_pid(p, last_cpupid);
2094 if (!priv && !(flags & TNF_NO_GROUP))
2095 task_numa_group(p, last_cpupid, flags, &priv);
2096 }
2097
2098
2099
2100
2101
2102
2103
2104 if (!priv && !local && p->numa_group &&
2105 node_isset(cpu_node, p->numa_group->active_nodes) &&
2106 node_isset(mem_node, p->numa_group->active_nodes))
2107 local = 1;
2108
2109 task_numa_placement(p);
2110
2111
2112
2113
2114
2115 if (time_after(jiffies, p->numa_migrate_retry))
2116 numa_migrate_preferred(p);
2117
2118 if (migrated)
2119 p->numa_pages_migrated += pages;
2120 if (flags & TNF_MIGRATE_FAIL)
2121 p->numa_faults_locality[2] += pages;
2122
2123 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2124 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2125 p->numa_faults_locality[local] += pages;
2126}
2127
2128static void reset_ptenuma_scan(struct task_struct *p)
2129{
2130
2131
2132
2133
2134
2135
2136
2137
2138 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2139 p->mm->numa_scan_offset = 0;
2140}
2141
/*
 * The expensive part of numa migration is done from task_work context.
 * Triggered from task_tick_numa().
 */
2146void task_numa_work(struct callback_head *work)
2147{
2148 unsigned long migrate, next_scan, now = jiffies;
2149 struct task_struct *p = current;
2150 struct mm_struct *mm = p->mm;
2151 struct vm_area_struct *vma;
2152 unsigned long start, end;
2153 unsigned long nr_pte_updates = 0;
2154 long pages;
2155
2156 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
2157
2158 work->next = work;
2159
2160
2161
2162
2163
2164
2165
2166
2167 if (p->flags & PF_EXITING)
2168 return;
2169
2170 if (!mm->numa_next_scan) {
2171 mm->numa_next_scan = now +
2172 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2173 }
2174
2175
2176
2177
2178 migrate = mm->numa_next_scan;
2179 if (time_before(now, migrate))
2180 return;
2181
2182 if (p->numa_scan_period == 0) {
2183 p->numa_scan_period_max = task_scan_max(p);
2184 p->numa_scan_period = task_scan_min(p);
2185 }
2186
2187 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2188 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2189 return;
2190
2191
2192
2193
2194
2195 p->node_stamp += 2 * TICK_NSEC;
2196
2197 start = mm->numa_scan_offset;
2198 pages = sysctl_numa_balancing_scan_size;
2199 pages <<= 20 - PAGE_SHIFT;
2200 if (!pages)
2201 return;
2202
2203 down_read(&mm->mmap_sem);
2204 vma = find_vma(mm, start);
2205 if (!vma) {
2206 reset_ptenuma_scan(p);
2207 start = 0;
2208 vma = mm->mmap;
2209 }
2210 for (; vma; vma = vma->vm_next) {
2211 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
2212 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2213 continue;
2214 }
2215
2216
2217
2218
2219
2220
2221
2222 if (!vma->vm_mm ||
2223 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2224 continue;
2225
2226
2227
2228
2229
2230 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2231 continue;
2232
2233 do {
2234 start = max(start, vma->vm_start);
2235 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2236 end = min(end, vma->vm_end);
2237 nr_pte_updates += change_prot_numa(vma, start, end);
2238
2239
2240
2241
2242
2243
2244 if (nr_pte_updates)
2245 pages -= (end - start) >> PAGE_SHIFT;
2246
2247 start = end;
2248 if (pages <= 0)
2249 goto out;
2250
2251 cond_resched();
2252 } while (end != vma->vm_end);
2253 }
2254
2255out:
2256
2257
2258
2259
2260
2261
2262 if (vma)
2263 mm->numa_scan_offset = start;
2264 else
2265 reset_ptenuma_scan(p);
2266 up_read(&mm->mmap_sem);
2267}
2268
/*
 * Drive the periodic NUMA hinting faults from the scheduler tick.
 */
2272void task_tick_numa(struct rq *rq, struct task_struct *curr)
2273{
2274 struct callback_head *work = &curr->numa_work;
2275 u64 period, now;
2276
2277
2278
2279
2280 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2281 return;
2282
2283
2284
2285
2286
2287
2288
2289 now = curr->se.sum_exec_runtime;
2290 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2291
2292 if (now - curr->node_stamp > period) {
2293 if (!curr->node_stamp)
2294 curr->numa_scan_period = task_scan_min(curr);
2295 curr->node_stamp += period;
2296
2297 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2298 init_task_work(work, task_numa_work);
2299 task_work_add(curr, work, true);
2300 }
2301 }
2302}
2303#else
2304static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2305{
2306}
2307
2308static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2309{
2310}
2311
2312static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2313{
2314}
2315#endif
2316
2317static void
2318account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2319{
2320 update_load_add(&cfs_rq->load, se->load.weight);
2321 if (!parent_entity(se))
2322 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2323#ifdef CONFIG_SMP
2324 if (entity_is_task(se)) {
2325 struct rq *rq = rq_of(cfs_rq);
2326
2327 account_numa_enqueue(rq, task_of(se));
2328 list_add(&se->group_node, &rq->cfs_tasks);
2329 }
2330#endif
2331 cfs_rq->nr_running++;
2332}
2333
2334static void
2335account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2336{
2337 update_load_sub(&cfs_rq->load, se->load.weight);
2338 if (!parent_entity(se))
2339 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2340 if (entity_is_task(se)) {
2341 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2342 list_del_init(&se->group_node);
2343 }
2344 cfs_rq->nr_running--;
2345}
2346
2347#ifdef CONFIG_FAIR_GROUP_SCHED
2348# ifdef CONFIG_SMP
2349static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2350{
2351 long tg_weight;
2352
2353
2354
2355
2356
2357
2358 tg_weight = atomic_long_read(&tg->load_avg);
2359 tg_weight -= cfs_rq->tg_load_contrib;
2360 tg_weight += cfs_rq->load.weight;
2361
2362 return tg_weight;
2363}
2364
2365static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2366{
2367 long tg_weight, load, shares;
2368
2369 tg_weight = calc_tg_weight(tg, cfs_rq);
2370 load = cfs_rq->load.weight;
2371
2372 shares = (tg->shares * load);
2373 if (tg_weight)
2374 shares /= tg_weight;
2375
2376 if (shares < MIN_SHARES)
2377 shares = MIN_SHARES;
2378 if (shares > tg->shares)
2379 shares = tg->shares;
2380
2381 return shares;
2382}
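
/*
 * Rough example: a group with tg->shares = 1024 whose runqueue weight is
 * split 3:1 between two CPUs ends up with per-CPU group-entity weights of
 * about 768 and 256 (clamped to [MIN_SHARES, tg->shares]); exact values
 * depend on the averaged tg load used by calc_tg_weight().
 */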
2383# else
2384static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2385{
2386 return tg->shares;
2387}
2388# endif
2389static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2390 unsigned long weight)
2391{
2392 if (se->on_rq) {
2393
2394 if (cfs_rq->curr == se)
2395 update_curr(cfs_rq);
2396 account_entity_dequeue(cfs_rq, se);
2397 }
2398
2399 update_load_set(&se->load, weight);
2400
2401 if (se->on_rq)
2402 account_entity_enqueue(cfs_rq, se);
2403}
2404
2405static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2406
2407static void update_cfs_shares(struct cfs_rq *cfs_rq)
2408{
2409 struct task_group *tg;
2410 struct sched_entity *se;
2411 long shares;
2412
2413 tg = cfs_rq->tg;
2414 se = tg->se[cpu_of(rq_of(cfs_rq))];
2415 if (!se || throttled_hierarchy(cfs_rq))
2416 return;
2417#ifndef CONFIG_SMP
2418 if (likely(se->load.weight == tg->shares))
2419 return;
2420#endif
2421 shares = calc_cfs_shares(cfs_rq, tg);
2422
2423 reweight_entity(cfs_rq_of(se), se, shares);
2424}
2425#else
2426static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2427{
2428}
2429#endif
2430
2431#ifdef CONFIG_SMP
/*
 * We choose a half-life close to 1 scheduling period.
 * Note: the tables runnable_avg_yN_inv and runnable_avg_yN_sum below are
 * dependent on this value.
 */
#define LOAD_AVG_PERIOD 32
#define LOAD_AVG_MAX 47742	/* maximum possible load avg */
#define LOAD_AVG_MAX_N 345	/* number of full periods to produce LOAD_AVG_MAX */

/* Precomputed fixed inverse multiplies of multiplication by y^n */
static const u32 runnable_avg_yN_inv[] = {
2442 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2443 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2444 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2445 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2446 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2447 0x85aac367, 0x82cd8698,
2448};
2449

/*
 * Precomputed \Sum 1024*y^k { 1<=k<=n }, used by __compute_runnable_contrib()
 * for updates that fully span one or more 1024us periods.
 */
static const u32 runnable_avg_yN_sum[] = {
2455 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2456 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2457 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2458};
2459
/*
 * Approximate:
 *   val * y^n,		where y^32 ~= 0.5 (~1 scheduling period)
 */
2464static __always_inline u64 decay_load(u64 val, u64 n)
2465{
2466 unsigned int local_n;
2467
2468 if (!n)
2469 return val;
2470 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2471 return 0;
2472
2473
2474 local_n = n;
2475
2476
2477
2478
2479
2480
2481
2482
2483 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2484 val >>= local_n / LOAD_AVG_PERIOD;
2485 local_n %= LOAD_AVG_PERIOD;
2486 }
2487
2488 val *= runnable_avg_yN_inv[local_n];
2489
2490 return val >> 32;
2491}
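
/*
 * Example: decay_load(1024, 32) is roughly 512, since y^32 == 1/2; the
 * shift-by-periods fast path above handles the multiple-of-32 part and the
 * runnable_avg_yN_inv[] table handles the remainder.
 */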
2492
/*
 * For updates fully spanning n periods, the contribution to the runnable
 * average will be: \Sum 1024*y^n
 *
 * We can compute this reasonably efficiently by combining:
 *   y^PERIOD = 1/2 with the precomputed \Sum 1024*y^n { for n < PERIOD }
 */
2500static u32 __compute_runnable_contrib(u64 n)
2501{
2502 u32 contrib = 0;
2503
2504 if (likely(n <= LOAD_AVG_PERIOD))
2505 return runnable_avg_yN_sum[n];
2506 else if (unlikely(n >= LOAD_AVG_MAX_N))
2507 return LOAD_AVG_MAX;
2508
2509
2510 do {
2511 contrib /= 2;
2512 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2513
2514 n -= LOAD_AVG_PERIOD;
2515 } while (n > LOAD_AVG_PERIOD);
2516
2517 contrib = decay_load(contrib, n);
2518 return contrib + runnable_avg_yN_sum[n];
2519}
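
/*
 * Example: an entity runnable for 32 consecutive 1024us periods contributes
 * runnable_avg_yN_sum[32] = 23371, roughly half of LOAD_AVG_MAX (47742),
 * which is the limit approached after LOAD_AVG_MAX_N full periods.
 */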
2520
/*
 * We can represent the historical contribution to the runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our coefficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have a new u_0', multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0' + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0' + u_1*y + u_2*y^2 + ...  [re-labeling u_i --> u_{i+1}]
 */
2549static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
2550 struct sched_avg *sa,
2551 int runnable,
2552 int running)
2553{
2554 u64 delta, periods;
2555 u32 runnable_contrib;
2556 int delta_w, decayed = 0;
2557 unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
2558
2559 delta = now - sa->last_runnable_update;
2560
2561
2562
2563
2564 if ((s64)delta < 0) {
2565 sa->last_runnable_update = now;
2566 return 0;
2567 }
2568
2569
2570
2571
2572
2573 delta >>= 10;
2574 if (!delta)
2575 return 0;
2576 sa->last_runnable_update = now;
2577
2578
2579 delta_w = sa->avg_period % 1024;
2580 if (delta + delta_w >= 1024) {
2581
2582 decayed = 1;
2583
2584
2585
2586
2587
2588
2589 delta_w = 1024 - delta_w;
2590 if (runnable)
2591 sa->runnable_avg_sum += delta_w;
2592 if (running)
2593 sa->running_avg_sum += delta_w * scale_freq
2594 >> SCHED_CAPACITY_SHIFT;
2595 sa->avg_period += delta_w;
2596
2597 delta -= delta_w;
2598
2599
2600 periods = delta / 1024;
2601 delta %= 1024;
2602
2603 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
2604 periods + 1);
2605 sa->running_avg_sum = decay_load(sa->running_avg_sum,
2606 periods + 1);
2607 sa->avg_period = decay_load(sa->avg_period,
2608 periods + 1);
2609
2610
2611 runnable_contrib = __compute_runnable_contrib(periods);
2612 if (runnable)
2613 sa->runnable_avg_sum += runnable_contrib;
2614 if (running)
2615 sa->running_avg_sum += runnable_contrib * scale_freq
2616 >> SCHED_CAPACITY_SHIFT;
2617 sa->avg_period += runnable_contrib;
2618 }
2619
2620
2621 if (runnable)
2622 sa->runnable_avg_sum += delta;
2623 if (running)
2624 sa->running_avg_sum += delta * scale_freq
2625 >> SCHED_CAPACITY_SHIFT;
2626 sa->avg_period += delta;
2627
2628 return decayed;
2629}
2630
2631
2632static inline u64 __synchronize_entity_decay(struct sched_entity *se)
2633{
2634 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2635 u64 decays = atomic64_read(&cfs_rq->decay_counter);
2636
2637 decays -= se->avg.decay_count;
2638 se->avg.decay_count = 0;
2639 if (!decays)
2640 return 0;
2641
2642 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2643 se->avg.utilization_avg_contrib =
2644 decay_load(se->avg.utilization_avg_contrib, decays);
2645
2646 return decays;
2647}
2648
2649#ifdef CONFIG_FAIR_GROUP_SCHED
2650static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2651 int force_update)
2652{
2653 struct task_group *tg = cfs_rq->tg;
2654 long tg_contrib;
2655
2656 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2657 tg_contrib -= cfs_rq->tg_load_contrib;
2658
2659 if (!tg_contrib)
2660 return;
2661
2662 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2663 atomic_long_add(tg_contrib, &tg->load_avg);
2664 cfs_rq->tg_load_contrib += tg_contrib;
2665 }
2666}
2667
2668
2669
2670
2671
2672static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2673 struct cfs_rq *cfs_rq)
2674{
2675 struct task_group *tg = cfs_rq->tg;
2676 long contrib;
2677
2678
2679 contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
2680 sa->avg_period + 1);
2681 contrib -= cfs_rq->tg_runnable_contrib;
2682
2683 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2684 atomic_add(contrib, &tg->runnable_avg);
2685 cfs_rq->tg_runnable_contrib += contrib;
2686 }
2687}
2688
2689static inline void __update_group_entity_contrib(struct sched_entity *se)
2690{
2691 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2692 struct task_group *tg = cfs_rq->tg;
2693 int runnable_avg;
2694
2695 u64 contrib;
2696
2697 contrib = cfs_rq->tg_load_contrib * tg->shares;
2698 se->avg.load_avg_contrib = div_u64(contrib,
2699 atomic_long_read(&tg->load_avg) + 1);
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724 runnable_avg = atomic_read(&tg->runnable_avg);
2725 if (runnable_avg < NICE_0_LOAD) {
2726 se->avg.load_avg_contrib *= runnable_avg;
2727 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
2728 }
2729}
2730
2731static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2732{
2733 __update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg,
2734 runnable, runnable);
2735 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
2736}
2737#else
2738static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2739 int force_update) {}
2740static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2741 struct cfs_rq *cfs_rq) {}
2742static inline void __update_group_entity_contrib(struct sched_entity *se) {}
2743static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2744#endif
2745
2746static inline void __update_task_entity_contrib(struct sched_entity *se)
2747{
2748 u32 contrib;
2749
2750
2751 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2752 contrib /= (se->avg.avg_period + 1);
2753 se->avg.load_avg_contrib = scale_load(contrib);
2754}
2755
2756
2757static long __update_entity_load_avg_contrib(struct sched_entity *se)
2758{
2759 long old_contrib = se->avg.load_avg_contrib;
2760
2761 if (entity_is_task(se)) {
2762 __update_task_entity_contrib(se);
2763 } else {
2764 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
2765 __update_group_entity_contrib(se);
2766 }
2767
2768 return se->avg.load_avg_contrib - old_contrib;
2769}
2770
2771
2772static inline void __update_task_entity_utilization(struct sched_entity *se)
2773{
2774 u32 contrib;
2775
2776
2777 contrib = se->avg.running_avg_sum * scale_load_down(SCHED_LOAD_SCALE);
2778 contrib /= (se->avg.avg_period + 1);
2779 se->avg.utilization_avg_contrib = scale_load(contrib);
2780}
2781
2782static long __update_entity_utilization_avg_contrib(struct sched_entity *se)
2783{
2784 long old_contrib = se->avg.utilization_avg_contrib;
2785
2786 if (entity_is_task(se))
2787 __update_task_entity_utilization(se);
2788 else
2789 se->avg.utilization_avg_contrib =
2790 group_cfs_rq(se)->utilization_load_avg;
2791
2792 return se->avg.utilization_avg_contrib - old_contrib;
2793}
2794
2795static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2796 long load_contrib)
2797{
2798 if (likely(load_contrib < cfs_rq->blocked_load_avg))
2799 cfs_rq->blocked_load_avg -= load_contrib;
2800 else
2801 cfs_rq->blocked_load_avg = 0;
2802}
2803
2804static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2805
2806
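/* Update a sched_entity's runnable average */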
2807static inline void update_entity_load_avg(struct sched_entity *se,
2808 int update_cfs_rq)
2809{
2810 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2811 long contrib_delta, utilization_delta;
2812 int cpu = cpu_of(rq_of(cfs_rq));
2813 u64 now;
2814
2815
2816
2817
2818
2819 if (entity_is_task(se))
2820 now = cfs_rq_clock_task(cfs_rq);
2821 else
2822 now = cfs_rq_clock_task(group_cfs_rq(se));
2823
2824 if (!__update_entity_runnable_avg(now, cpu, &se->avg, se->on_rq,
2825 cfs_rq->curr == se))
2826 return;
2827
2828 contrib_delta = __update_entity_load_avg_contrib(se);
2829 utilization_delta = __update_entity_utilization_avg_contrib(se);
2830
2831 if (!update_cfs_rq)
2832 return;
2833
2834 if (se->on_rq) {
2835 cfs_rq->runnable_load_avg += contrib_delta;
2836 cfs_rq->utilization_load_avg += utilization_delta;
2837 } else {
2838 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2839 }
2840}
2841
2842
2843
2844
2845
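/*
 * Decay the load contributed by all blocked children and account this so that
 * their contribution may appropriately be discounted when they wake up.
 */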
2846static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
2847{
2848 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
2849 u64 decays;
2850
2851 decays = now - cfs_rq->last_decay;
2852 if (!decays && !force_update)
2853 return;
2854
2855 if (atomic_long_read(&cfs_rq->removed_load)) {
2856 unsigned long removed_load;
2857 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
2858 subtract_blocked_load_contrib(cfs_rq, removed_load);
2859 }
2860
2861 if (decays) {
2862 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2863 decays);
2864 atomic64_add(decays, &cfs_rq->decay_counter);
2865 cfs_rq->last_decay = now;
2866 }
2867
2868 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
2869}
2870
2871
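/* Add the load generated by se into cfs_rq's child load-average */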
2872static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2873 struct sched_entity *se,
2874 int wakeup)
2875{
2876
2877
2878
2879
2880
2881
2882
2883
2884
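	/*
	 * We track migrations using entity decay_count <= 0, on a wake-up
	 * migration we use a negative decay count to track the remote decays
	 * accumulated while sleeping.
	 *
	 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
	 * are seen by enqueue_entity_load_avg() as a migration with an already
	 * constructed load_avg_contrib.
	 */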
2885 if (unlikely(se->avg.decay_count <= 0)) {
2886 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
2887 if (se->avg.decay_count) {
2888
2889
2890
2891
2892
2893
2894
2895
2896 se->avg.last_runnable_update -= (-se->avg.decay_count)
2897 << 20;
2898 update_entity_load_avg(se, 0);
2899
2900 se->avg.decay_count = 0;
2901 }
2902 wakeup = 0;
2903 } else {
2904 __synchronize_entity_decay(se);
2905 }
2906
2907
2908 if (wakeup) {
2909 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
2910 update_entity_load_avg(se, 0);
2911 }
2912
2913 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
2914 cfs_rq->utilization_load_avg += se->avg.utilization_avg_contrib;
2915
2916 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
2917}
2918
2919
2920
2921
2922
2923
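/*
 * Remove se's load from this cfs_rq child load-average, if the entity is
 * transitioning to a blocked state we track its projected decay using
 * blocked_load_avg.
 */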
2924static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2925 struct sched_entity *se,
2926 int sleep)
2927{
2928 update_entity_load_avg(se, 1);
2929
2930 update_cfs_rq_blocked_load(cfs_rq, !sleep);
2931
2932 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
2933 cfs_rq->utilization_load_avg -= se->avg.utilization_avg_contrib;
2934 if (sleep) {
2935 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2936 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2937 }
2938}
2939
2940
2941
2942
2943
2944
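/*
 * Update the rq's load with the elapsed running time before entering
 * idle. If the last scheduled task is not a CFS task, idle_enter will
 * be the only way to update the runnable statistic.
 */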
2945void idle_enter_fair(struct rq *this_rq)
2946{
2947 update_rq_runnable_avg(this_rq, 1);
2948}
2949
2950
2951
2952
2953
2954
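/*
 * Update the rq's load with the elapsed idle time before a task is
 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
 * be the only way to update the runnable statistic.
 */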
2955void idle_exit_fair(struct rq *this_rq)
2956{
2957 update_rq_runnable_avg(this_rq, 0);
2958}
2959
2960static int idle_balance(struct rq *this_rq);
2961
2962#else
2963
2964static inline void update_entity_load_avg(struct sched_entity *se,
2965 int update_cfs_rq) {}
2966static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2967static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2968 struct sched_entity *se,
2969 int wakeup) {}
2970static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2971 struct sched_entity *se,
2972 int sleep) {}
2973static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2974 int force_update) {}
2975
2976static inline int idle_balance(struct rq *rq)
2977{
2978 return 0;
2979}
2980
2981#endif
2982
2983static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
2984{
2985#ifdef CONFIG_SCHEDSTATS
2986 struct task_struct *tsk = NULL;
2987
2988 if (entity_is_task(se))
2989 tsk = task_of(se);
2990
2991 if (se->statistics.sleep_start) {
2992 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
2993
2994 if ((s64)delta < 0)
2995 delta = 0;
2996
2997 if (unlikely(delta > se->statistics.sleep_max))
2998 se->statistics.sleep_max = delta;
2999
3000 se->statistics.sleep_start = 0;
3001 se->statistics.sum_sleep_runtime += delta;
3002
3003 if (tsk) {
3004 account_scheduler_latency(tsk, delta >> 10, 1);
3005 trace_sched_stat_sleep(tsk, delta);
3006 }
3007 }
3008 if (se->statistics.block_start) {
3009 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
3010
3011 if ((s64)delta < 0)
3012 delta = 0;
3013
3014 if (unlikely(delta > se->statistics.block_max))
3015 se->statistics.block_max = delta;
3016
3017 se->statistics.block_start = 0;
3018 se->statistics.sum_sleep_runtime += delta;
3019
3020 if (tsk) {
3021 if (tsk->in_iowait) {
3022 se->statistics.iowait_sum += delta;
3023 se->statistics.iowait_count++;
3024 trace_sched_stat_iowait(tsk, delta);
3025 }
3026
3027 trace_sched_stat_blocked(tsk, delta);
3028
3029
3030
3031
3032
3033
3034 if (unlikely(prof_on == SLEEP_PROFILING)) {
3035 profile_hits(SLEEP_PROFILING,
3036 (void *)get_wchan(tsk),
3037 delta >> 20);
3038 }
3039 account_scheduler_latency(tsk, delta >> 10, 0);
3040 }
3041 }
3042#endif
3043}
3044
3045static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3046{
3047#ifdef CONFIG_SCHED_DEBUG
3048 s64 d = se->vruntime - cfs_rq->min_vruntime;
3049
3050 if (d < 0)
3051 d = -d;
3052
3053 if (d > 3*sysctl_sched_latency)
3054 schedstat_inc(cfs_rq, nr_spread_over);
3055#endif
3056}
3057
3058static void
3059place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3060{
3061 u64 vruntime = cfs_rq->min_vruntime;
3062
3063
3064
3065
3066
3067
3068
3069 if (initial && sched_feat(START_DEBIT))
3070 vruntime += sched_vslice(cfs_rq, se);
3071
3072
3073 if (!initial) {
3074 unsigned long thresh = sysctl_sched_latency;
3075
3076
3077
3078
3079
3080 if (sched_feat(GENTLE_FAIR_SLEEPERS))
3081 thresh >>= 1;
3082
3083 vruntime -= thresh;
3084 }
3085
3086
3087 se->vruntime = max_vruntime(se->vruntime, vruntime);
3088}
3089
3090static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3091
3092static void
3093enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3094{
3095
3096
3097
3098
3099 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
3100 se->vruntime += cfs_rq->min_vruntime;
3101
3102
3103
3104
3105 update_curr(cfs_rq);
3106 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
3107 account_entity_enqueue(cfs_rq, se);
3108 update_cfs_shares(cfs_rq);
3109
3110 if (flags & ENQUEUE_WAKEUP) {
3111 place_entity(cfs_rq, se, 0);
3112 enqueue_sleeper(cfs_rq, se);
3113 }
3114
3115 update_stats_enqueue(cfs_rq, se);
3116 check_spread(cfs_rq, se);
3117 if (se != cfs_rq->curr)
3118 __enqueue_entity(cfs_rq, se);
3119 se->on_rq = 1;
3120
3121 if (cfs_rq->nr_running == 1) {
3122 list_add_leaf_cfs_rq(cfs_rq);
3123 check_enqueue_throttle(cfs_rq);
3124 }
3125}
3126
3127static void __clear_buddies_last(struct sched_entity *se)
3128{
3129 for_each_sched_entity(se) {
3130 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3131 if (cfs_rq->last != se)
3132 break;
3133
3134 cfs_rq->last = NULL;
3135 }
3136}
3137
3138static void __clear_buddies_next(struct sched_entity *se)
3139{
3140 for_each_sched_entity(se) {
3141 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3142 if (cfs_rq->next != se)
3143 break;
3144
3145 cfs_rq->next = NULL;
3146 }
3147}
3148
3149static void __clear_buddies_skip(struct sched_entity *se)
3150{
3151 for_each_sched_entity(se) {
3152 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3153 if (cfs_rq->skip != se)
3154 break;
3155
3156 cfs_rq->skip = NULL;
3157 }
3158}
3159
3160static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3161{
3162 if (cfs_rq->last == se)
3163 __clear_buddies_last(se);
3164
3165 if (cfs_rq->next == se)
3166 __clear_buddies_next(se);
3167
3168 if (cfs_rq->skip == se)
3169 __clear_buddies_skip(se);
3170}
3171
3172static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3173
3174static void
3175dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3176{
3177
3178
3179
3180 update_curr(cfs_rq);
3181 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
3182
3183 update_stats_dequeue(cfs_rq, se);
3184 if (flags & DEQUEUE_SLEEP) {
3185#ifdef CONFIG_SCHEDSTATS
3186 if (entity_is_task(se)) {
3187 struct task_struct *tsk = task_of(se);
3188
3189 if (tsk->state & TASK_INTERRUPTIBLE)
3190 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
3191 if (tsk->state & TASK_UNINTERRUPTIBLE)
3192 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
3193 }
3194#endif
3195 }
3196
3197 clear_buddies(cfs_rq, se);
3198
3199 if (se != cfs_rq->curr)
3200 __dequeue_entity(cfs_rq, se);
3201 se->on_rq = 0;
3202 account_entity_dequeue(cfs_rq, se);
3203
3204
3205
3206
3207
3208
3209 if (!(flags & DEQUEUE_SLEEP))
3210 se->vruntime -= cfs_rq->min_vruntime;
3211
3212
3213 return_cfs_rq_runtime(cfs_rq);
3214
3215 update_min_vruntime(cfs_rq);
3216 update_cfs_shares(cfs_rq);
3217}
3218
3219
3220
3221
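/*
 * Preempt the current task with a newly woken task if needed:
 */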
3222static void
3223check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3224{
3225 unsigned long ideal_runtime, delta_exec;
3226 struct sched_entity *se;
3227 s64 delta;
3228
3229 ideal_runtime = sched_slice(cfs_rq, curr);
3230 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
3231 if (delta_exec > ideal_runtime) {
3232 resched_curr(rq_of(cfs_rq));
3233
3234
3235
3236
3237 clear_buddies(cfs_rq, curr);
3238 return;
3239 }
3240
3241
3242
3243
3244
3245
3246 if (delta_exec < sysctl_sched_min_granularity)
3247 return;
3248
3249 se = __pick_first_entity(cfs_rq);
3250 delta = curr->vruntime - se->vruntime;
3251
3252 if (delta < 0)
3253 return;
3254
3255 if (delta > ideal_runtime)
3256 resched_curr(rq_of(cfs_rq));
3257}
3258
3259static void
3260set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
3261{
3262
3263 if (se->on_rq) {
3264
3265
3266
3267
3268
3269 update_stats_wait_end(cfs_rq, se);
3270 __dequeue_entity(cfs_rq, se);
3271 update_entity_load_avg(se, 1);
3272 }
3273
3274 update_stats_curr_start(cfs_rq, se);
3275 cfs_rq->curr = se;
3276#ifdef CONFIG_SCHEDSTATS
3277
3278
3279
3280
3281
3282 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
3283 se->statistics.slice_max = max(se->statistics.slice_max,
3284 se->sum_exec_runtime - se->prev_sum_exec_runtime);
3285 }
3286#endif
3287 se->prev_sum_exec_runtime = se->sum_exec_runtime;
3288}
3289
3290static int
3291wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3292
3293
3294
3295
3296
3297
3298
3299
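/*
 * Pick the next process, keeping these things in mind, in this order:
 * 1) keep things fair between processes/task groups
 * 2) pick the "next" process, since someone really wants that to run
 * 3) pick the "last" process, for cache locality
 * 4) do not run the "skip" process, if something else is available
 */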
3300static struct sched_entity *
3301pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3302{
3303 struct sched_entity *left = __pick_first_entity(cfs_rq);
3304 struct sched_entity *se;
3305
3306
3307
3308
3309
3310 if (!left || (curr && entity_before(curr, left)))
3311 left = curr;
3312
3313 se = left;
3314
3315
3316
3317
3318
3319 if (cfs_rq->skip == se) {
3320 struct sched_entity *second;
3321
3322 if (se == curr) {
3323 second = __pick_first_entity(cfs_rq);
3324 } else {
3325 second = __pick_next_entity(se);
3326 if (!second || (curr && entity_before(curr, second)))
3327 second = curr;
3328 }
3329
3330 if (second && wakeup_preempt_entity(second, left) < 1)
3331 se = second;
3332 }
3333
3334
3335
3336
3337 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3338 se = cfs_rq->last;
3339
3340
3341
3342
3343 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3344 se = cfs_rq->next;
3345
3346 clear_buddies(cfs_rq, se);
3347
3348 return se;
3349}
3350
3351static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3352
3353static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
3354{
3355
3356
3357
3358
3359 if (prev->on_rq)
3360 update_curr(cfs_rq);
3361
3362
3363 check_cfs_rq_runtime(cfs_rq);
3364
3365 check_spread(cfs_rq, prev);
3366 if (prev->on_rq) {
3367 update_stats_wait_start(cfs_rq, prev);
3368
3369 __enqueue_entity(cfs_rq, prev);
3370
3371 update_entity_load_avg(prev, 1);
3372 }
3373 cfs_rq->curr = NULL;
3374}
3375
3376static void
3377entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3378{
3379
3380
3381
3382 update_curr(cfs_rq);
3383
3384
3385
3386
3387 update_entity_load_avg(curr, 1);
3388 update_cfs_rq_blocked_load(cfs_rq, 1);
3389 update_cfs_shares(cfs_rq);
3390
3391#ifdef CONFIG_SCHED_HRTICK
3392
3393
3394
3395
3396 if (queued) {
3397 resched_curr(rq_of(cfs_rq));
3398 return;
3399 }
3400
3401
3402
3403 if (!sched_feat(DOUBLE_TICK) &&
3404 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3405 return;
3406#endif
3407
3408 if (cfs_rq->nr_running > 1)
3409 check_preempt_tick(cfs_rq, curr);
3410}
3411
3412
3413
3414
3415
3416
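/**************************************************
 * CFS bandwidth control machinery
 */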
3417#ifdef CONFIG_CFS_BANDWIDTH
3418
3419#ifdef HAVE_JUMP_LABEL
3420static struct static_key __cfs_bandwidth_used;
3421
3422static inline bool cfs_bandwidth_used(void)
3423{
3424 return static_key_false(&__cfs_bandwidth_used);
3425}
3426
3427void cfs_bandwidth_usage_inc(void)
3428{
3429 static_key_slow_inc(&__cfs_bandwidth_used);
3430}
3431
3432void cfs_bandwidth_usage_dec(void)
3433{
3434 static_key_slow_dec(&__cfs_bandwidth_used);
3435}
3436#else
3437static bool cfs_bandwidth_used(void)
3438{
3439 return true;
3440}
3441
3442void cfs_bandwidth_usage_inc(void) {}
3443void cfs_bandwidth_usage_dec(void) {}
3444#endif
3445
3446
3447
3448
3449
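/*
 * default period for cfs group bandwidth.
 * default: 0.1s, units: nanoseconds
 */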
3450static inline u64 default_cfs_period(void)
3451{
3452 return 100000000ULL;
3453}
3454
3455static inline u64 sched_cfs_bandwidth_slice(void)
3456{
3457 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3458}
3459
3460
3461
3462
3463
3464
3465
3466
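/*
 * Replenish runtime according to assigned quota and update expiration time.
 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
 * additional synchronization around rq->lock.
 *
 * requires cfs_b->lock
 */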
3467void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
3468{
3469 u64 now;
3470
3471 if (cfs_b->quota == RUNTIME_INF)
3472 return;
3473
3474 now = sched_clock_cpu(smp_processor_id());
3475 cfs_b->runtime = cfs_b->quota;
3476 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3477}
3478
3479static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3480{
3481 return &tg->cfs_bandwidth;
3482}
3483
3484
3485static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3486{
3487 if (unlikely(cfs_rq->throttle_count))
3488 return cfs_rq->throttled_clock_task;
3489
3490 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
3491}
3492
3493
3494static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3495{
3496 struct task_group *tg = cfs_rq->tg;
3497 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
3498 u64 amount = 0, min_amount, expires;
3499
3500
3501 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3502
3503 raw_spin_lock(&cfs_b->lock);
3504 if (cfs_b->quota == RUNTIME_INF)
3505 amount = min_amount;
3506 else {
3507 start_cfs_bandwidth(cfs_b);
3508
3509 if (cfs_b->runtime > 0) {
3510 amount = min(cfs_b->runtime, min_amount);
3511 cfs_b->runtime -= amount;
3512 cfs_b->idle = 0;
3513 }
3514 }
3515 expires = cfs_b->runtime_expires;
3516 raw_spin_unlock(&cfs_b->lock);
3517
3518 cfs_rq->runtime_remaining += amount;
3519
3520
3521
3522
3523
3524 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3525 cfs_rq->runtime_expires = expires;
3526
3527 return cfs_rq->runtime_remaining > 0;
3528}
3529
3530
3531
3532
3533
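/*
 * Note: This depends on the synchronization provided by sched_clock and the
 * fact that rq->clock snapshots this value.
 */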
3534static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3535{
3536 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3537
3538
3539 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
3540 return;
3541
3542 if (cfs_rq->runtime_remaining < 0)
3543 return;
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
3557
3558 cfs_rq->runtime_expires += TICK_NSEC;
3559 } else {
3560
3561 cfs_rq->runtime_remaining = 0;
3562 }
3563}
3564
3565static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3566{
3567
3568 cfs_rq->runtime_remaining -= delta_exec;
3569 expire_cfs_rq_runtime(cfs_rq);
3570
3571 if (likely(cfs_rq->runtime_remaining > 0))
3572 return;
3573
3574
3575
3576
3577
3578 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3579 resched_curr(rq_of(cfs_rq));
3580}
3581
3582static __always_inline
3583void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3584{
3585 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3586 return;
3587
3588 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3589}
3590
3591static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3592{
3593 return cfs_bandwidth_used() && cfs_rq->throttled;
3594}
3595
3596
3597static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3598{
3599 return cfs_bandwidth_used() && cfs_rq->throttle_count;
3600}
3601
3602
3603
3604
3605
3606
3607static inline int throttled_lb_pair(struct task_group *tg,
3608 int src_cpu, int dest_cpu)
3609{
3610 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3611
3612 src_cfs_rq = tg->cfs_rq[src_cpu];
3613 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3614
3615 return throttled_hierarchy(src_cfs_rq) ||
3616 throttled_hierarchy(dest_cfs_rq);
3617}
3618
3619
3620static int tg_unthrottle_up(struct task_group *tg, void *data)
3621{
3622 struct rq *rq = data;
3623 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3624
3625 cfs_rq->throttle_count--;
3626#ifdef CONFIG_SMP
3627 if (!cfs_rq->throttle_count) {
3628
3629 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3630 cfs_rq->throttled_clock_task;
3631 }
3632#endif
3633
3634 return 0;
3635}
3636
3637static int tg_throttle_down(struct task_group *tg, void *data)
3638{
3639 struct rq *rq = data;
3640 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3641
3642
3643 if (!cfs_rq->throttle_count)
3644 cfs_rq->throttled_clock_task = rq_clock_task(rq);
3645 cfs_rq->throttle_count++;
3646
3647 return 0;
3648}
3649
3650static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3651{
3652 struct rq *rq = rq_of(cfs_rq);
3653 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3654 struct sched_entity *se;
3655 long task_delta, dequeue = 1;
3656 bool empty;
3657
3658 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3659
3660
3661 rcu_read_lock();
3662 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3663 rcu_read_unlock();
3664
3665 task_delta = cfs_rq->h_nr_running;
3666 for_each_sched_entity(se) {
3667 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3668
3669 if (!se->on_rq)
3670 break;
3671
3672 if (dequeue)
3673 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3674 qcfs_rq->h_nr_running -= task_delta;
3675
3676 if (qcfs_rq->load.weight)
3677 dequeue = 0;
3678 }
3679
3680 if (!se)
3681 sub_nr_running(rq, task_delta);
3682
3683 cfs_rq->throttled = 1;
3684 cfs_rq->throttled_clock = rq_clock(rq);
3685 raw_spin_lock(&cfs_b->lock);
3686 empty = list_empty(&cfs_b->throttled_cfs_rq);
3687
3688
3689
3690
3691
3692 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3693
3694
3695
3696
3697
3698 if (empty)
3699 start_cfs_bandwidth(cfs_b);
3700
3701 raw_spin_unlock(&cfs_b->lock);
3702}
3703
3704void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3705{
3706 struct rq *rq = rq_of(cfs_rq);
3707 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3708 struct sched_entity *se;
3709 int enqueue = 1;
3710 long task_delta;
3711
3712 se = cfs_rq->tg->se[cpu_of(rq)];
3713
3714 cfs_rq->throttled = 0;
3715
3716 update_rq_clock(rq);
3717
3718 raw_spin_lock(&cfs_b->lock);
3719 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
3720 list_del_rcu(&cfs_rq->throttled_list);
3721 raw_spin_unlock(&cfs_b->lock);
3722
3723
3724 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3725
3726 if (!cfs_rq->load.weight)
3727 return;
3728
3729 task_delta = cfs_rq->h_nr_running;
3730 for_each_sched_entity(se) {
3731 if (se->on_rq)
3732 enqueue = 0;
3733
3734 cfs_rq = cfs_rq_of(se);
3735 if (enqueue)
3736 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3737 cfs_rq->h_nr_running += task_delta;
3738
3739 if (cfs_rq_throttled(cfs_rq))
3740 break;
3741 }
3742
3743 if (!se)
3744 add_nr_running(rq, task_delta);
3745
3746
3747 if (rq->curr == rq->idle && rq->cfs.nr_running)
3748 resched_curr(rq);
3749}
3750
3751static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3752 u64 remaining, u64 expires)
3753{
3754 struct cfs_rq *cfs_rq;
3755 u64 runtime;
3756 u64 starting_runtime = remaining;
3757
3758 rcu_read_lock();
3759 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3760 throttled_list) {
3761 struct rq *rq = rq_of(cfs_rq);
3762
3763 raw_spin_lock(&rq->lock);
3764 if (!cfs_rq_throttled(cfs_rq))
3765 goto next;
3766
3767 runtime = -cfs_rq->runtime_remaining + 1;
3768 if (runtime > remaining)
3769 runtime = remaining;
3770 remaining -= runtime;
3771
3772 cfs_rq->runtime_remaining += runtime;
3773 cfs_rq->runtime_expires = expires;
3774
3775
3776 if (cfs_rq->runtime_remaining > 0)
3777 unthrottle_cfs_rq(cfs_rq);
3778
3779next:
3780 raw_spin_unlock(&rq->lock);
3781
3782 if (!remaining)
3783 break;
3784 }
3785 rcu_read_unlock();
3786
3787 return starting_runtime - remaining;
3788}
3789
3790
3791
3792
3793
3794
3795
3796static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3797{
3798 u64 runtime, runtime_expires;
3799 int throttled;
3800
3801
3802 if (cfs_b->quota == RUNTIME_INF)
3803 goto out_deactivate;
3804
3805 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3806 cfs_b->nr_periods += overrun;
3807
3808
3809
3810
3811
3812 if (cfs_b->idle && !throttled)
3813 goto out_deactivate;
3814
3815 __refill_cfs_bandwidth_runtime(cfs_b);
3816
3817 if (!throttled) {
3818
3819 cfs_b->idle = 1;
3820 return 0;
3821 }
3822
3823
3824 cfs_b->nr_throttled += overrun;
3825
3826 runtime_expires = cfs_b->runtime_expires;
3827
3828
3829
3830
3831
3832
3833
3834
3835 while (throttled && cfs_b->runtime > 0) {
3836 runtime = cfs_b->runtime;
3837 raw_spin_unlock(&cfs_b->lock);
3838
3839 runtime = distribute_cfs_runtime(cfs_b, runtime,
3840 runtime_expires);
3841 raw_spin_lock(&cfs_b->lock);
3842
3843 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3844
3845 cfs_b->runtime -= min(runtime, cfs_b->runtime);
3846 }
3847
3848
3849
3850
3851
3852
3853
3854 cfs_b->idle = 0;
3855
3856 return 0;
3857
3858out_deactivate:
3859 return 1;
3860}
3861
3862
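/*
 * Slack quota handling: a cfs_rq won't donate quota below
 * min_cfs_rq_runtime, slack is only redistributed when at least
 * min_bandwidth_expiration of the period remains, and we wait
 * cfs_bandwidth_slack_period to gather additional slack before
 * distributing it.
 */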
3863static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3864
3865static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3866
3867static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3868
3869
3870
3871
3872
3873
3874
3875
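/*
 * Are we near the end of the current quota period?
 *
 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
 * hrtimer base being cleared by hrtimer_start.
 */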
3876static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3877{
3878 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3879 u64 remaining;
3880
3881
3882 if (hrtimer_callback_running(refresh_timer))
3883 return 1;
3884
3885
3886 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3887 if (remaining < min_expire)
3888 return 1;
3889
3890 return 0;
3891}
3892
3893static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3894{
3895 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3896
3897
3898 if (runtime_refresh_within(cfs_b, min_left))
3899 return;
3900
3901 hrtimer_start(&cfs_b->slack_timer,
3902 ns_to_ktime(cfs_bandwidth_slack_period),
3903 HRTIMER_MODE_REL);
3904}
3905
3906
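/* we know any runtime found here is valid as update_curr() precedes return */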
3907static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3908{
3909 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3910 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3911
3912 if (slack_runtime <= 0)
3913 return;
3914
3915 raw_spin_lock(&cfs_b->lock);
3916 if (cfs_b->quota != RUNTIME_INF &&
3917 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3918 cfs_b->runtime += slack_runtime;
3919
3920
3921 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3922 !list_empty(&cfs_b->throttled_cfs_rq))
3923 start_cfs_slack_bandwidth(cfs_b);
3924 }
3925 raw_spin_unlock(&cfs_b->lock);
3926
3927
3928 cfs_rq->runtime_remaining -= slack_runtime;
3929}
3930
3931static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3932{
3933 if (!cfs_bandwidth_used())
3934 return;
3935
3936 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
3937 return;
3938
3939 __return_cfs_rq_runtime(cfs_rq);
3940}
3941
3942
3943
3944
3945
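/*
 * This is done with a timer (instead of inline with bandwidth return) since
 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
 */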
3946static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3947{
3948 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3949 u64 expires;
3950
3951
3952 raw_spin_lock(&cfs_b->lock);
3953 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3954 raw_spin_unlock(&cfs_b->lock);
3955 return;
3956 }
3957
3958 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
3959 runtime = cfs_b->runtime;
3960
3961 expires = cfs_b->runtime_expires;
3962 raw_spin_unlock(&cfs_b->lock);
3963
3964 if (!runtime)
3965 return;
3966
3967 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3968
3969 raw_spin_lock(&cfs_b->lock);
3970 if (expires == cfs_b->runtime_expires)
3971 cfs_b->runtime -= min(runtime, cfs_b->runtime);
3972 raw_spin_unlock(&cfs_b->lock);
3973}
3974
3975
3976
3977
3978
3979
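/*
 * When a group wakes up we want to make sure that its quota is not already
 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 * runtime since update_curr() throttling can not trigger until it's on-rq.
 */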
3980static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3981{
3982 if (!cfs_bandwidth_used())
3983 return;
3984
3985
3986 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3987 return;
3988
3989
3990 if (cfs_rq_throttled(cfs_rq))
3991 return;
3992
3993
3994 account_cfs_rq_runtime(cfs_rq, 0);
3995 if (cfs_rq->runtime_remaining <= 0)
3996 throttle_cfs_rq(cfs_rq);
3997}
3998
3999
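/* conditionally throttle active cfs_rq's from put_prev_entity() */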
4000static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4001{
4002 if (!cfs_bandwidth_used())
4003 return false;
4004
4005 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
4006 return false;
4007
4008
4009
4010
4011
4012 if (cfs_rq_throttled(cfs_rq))
4013 return true;
4014
4015 throttle_cfs_rq(cfs_rq);
4016 return true;
4017}
4018
4019static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
4020{
4021 struct cfs_bandwidth *cfs_b =
4022 container_of(timer, struct cfs_bandwidth, slack_timer);
4023
4024 do_sched_cfs_slack_timer(cfs_b);
4025
4026 return HRTIMER_NORESTART;
4027}
4028
4029static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4030{
4031 struct cfs_bandwidth *cfs_b =
4032 container_of(timer, struct cfs_bandwidth, period_timer);
4033 int overrun;
4034 int idle = 0;
4035
4036 raw_spin_lock(&cfs_b->lock);
4037 for (;;) {
4038 overrun = hrtimer_forward_now(timer, cfs_b->period);
4039 if (!overrun)
4040 break;
4041
4042 idle = do_sched_cfs_period_timer(cfs_b, overrun);
4043 }
4044 if (idle)
4045 cfs_b->period_active = 0;
4046 raw_spin_unlock(&cfs_b->lock);
4047
4048 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4049}
4050
4051void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4052{
4053 raw_spin_lock_init(&cfs_b->lock);
4054 cfs_b->runtime = 0;
4055 cfs_b->quota = RUNTIME_INF;
4056 cfs_b->period = ns_to_ktime(default_cfs_period());
4057
4058 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4059 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
4060 cfs_b->period_timer.function = sched_cfs_period_timer;
4061 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4062 cfs_b->slack_timer.function = sched_cfs_slack_timer;
4063}
4064
4065static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4066{
4067 cfs_rq->runtime_enabled = 0;
4068 INIT_LIST_HEAD(&cfs_rq->throttled_list);
4069}
4070
4071void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4072{
4073 lockdep_assert_held(&cfs_b->lock);
4074
4075 if (!cfs_b->period_active) {
4076 cfs_b->period_active = 1;
4077 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
4078 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
4079 }
4080}
4081
4082static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4083{
4084
4085 if (!cfs_b->throttled_cfs_rq.next)
4086 return;
4087
4088 hrtimer_cancel(&cfs_b->period_timer);
4089 hrtimer_cancel(&cfs_b->slack_timer);
4090}
4091
4092static void __maybe_unused update_runtime_enabled(struct rq *rq)
4093{
4094 struct cfs_rq *cfs_rq;
4095
4096 for_each_leaf_cfs_rq(rq, cfs_rq) {
4097 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4098
4099 raw_spin_lock(&cfs_b->lock);
4100 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4101 raw_spin_unlock(&cfs_b->lock);
4102 }
4103}
4104
4105static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
4106{
4107 struct cfs_rq *cfs_rq;
4108
4109 for_each_leaf_cfs_rq(rq, cfs_rq) {
4110 if (!cfs_rq->runtime_enabled)
4111 continue;
4112
4113
4114
4115
4116
4117 cfs_rq->runtime_remaining = 1;
4118
4119
4120
4121
4122 cfs_rq->runtime_enabled = 0;
4123
4124 if (cfs_rq_throttled(cfs_rq))
4125 unthrottle_cfs_rq(cfs_rq);
4126 }
4127}
4128
4129#else
4130static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4131{
4132 return rq_clock_task(rq_of(cfs_rq));
4133}
4134
4135static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
4136static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
4137static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
4138static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4139
4140static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4141{
4142 return 0;
4143}
4144
4145static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4146{
4147 return 0;
4148}
4149
4150static inline int throttled_lb_pair(struct task_group *tg,
4151 int src_cpu, int dest_cpu)
4152{
4153 return 0;
4154}
4155
4156void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4157
4158#ifdef CONFIG_FAIR_GROUP_SCHED
4159static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4160#endif
4161
4162static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4163{
4164 return NULL;
4165}
4166static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4167static inline void update_runtime_enabled(struct rq *rq) {}
4168static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
4169
4170#endif
4171
4172
4173
4174
4175
4176#ifdef CONFIG_SCHED_HRTICK
4177static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4178{
4179 struct sched_entity *se = &p->se;
4180 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4181
4182 WARN_ON(task_rq(p) != rq);
4183
4184 if (cfs_rq->nr_running > 1) {
4185 u64 slice = sched_slice(cfs_rq, se);
4186 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4187 s64 delta = slice - ran;
4188
4189 if (delta < 0) {
4190 if (rq->curr == p)
4191 resched_curr(rq);
4192 return;
4193 }
4194 hrtick_start(rq, delta);
4195 }
4196}
4197
4198
4199
4200
4201
4202
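/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */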
4203static void hrtick_update(struct rq *rq)
4204{
4205 struct task_struct *curr = rq->curr;
4206
4207 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
4208 return;
4209
4210 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4211 hrtick_start_fair(rq, curr);
4212}
4213#else
4214static inline void
4215hrtick_start_fair(struct rq *rq, struct task_struct *p)
4216{
4217}
4218
4219static inline void hrtick_update(struct rq *rq)
4220{
4221}
4222#endif
4223
4224
4225
4226
4227
4228
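/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */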
4229static void
4230enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4231{
4232 struct cfs_rq *cfs_rq;
4233 struct sched_entity *se = &p->se;
4234
4235 for_each_sched_entity(se) {
4236 if (se->on_rq)
4237 break;
4238 cfs_rq = cfs_rq_of(se);
4239 enqueue_entity(cfs_rq, se, flags);
4240
4241
4242
4243
4244
4245
4246
4247 if (cfs_rq_throttled(cfs_rq))
4248 break;
4249 cfs_rq->h_nr_running++;
4250
4251 flags = ENQUEUE_WAKEUP;
4252 }
4253
4254 for_each_sched_entity(se) {
4255 cfs_rq = cfs_rq_of(se);
4256 cfs_rq->h_nr_running++;
4257
4258 if (cfs_rq_throttled(cfs_rq))
4259 break;
4260
4261 update_cfs_shares(cfs_rq);
4262 update_entity_load_avg(se, 1);
4263 }
4264
4265 if (!se) {
4266 update_rq_runnable_avg(rq, rq->nr_running);
4267 add_nr_running(rq, 1);
4268 }
4269 hrtick_update(rq);
4270}
4271
4272static void set_next_buddy(struct sched_entity *se);
4273
4274
4275
4276
4277
4278
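/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */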
4279static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4280{
4281 struct cfs_rq *cfs_rq;
4282 struct sched_entity *se = &p->se;
4283 int task_sleep = flags & DEQUEUE_SLEEP;
4284
4285 for_each_sched_entity(se) {
4286 cfs_rq = cfs_rq_of(se);
4287 dequeue_entity(cfs_rq, se, flags);
4288
4289
4290
4291
4292
4293
4294
4295 if (cfs_rq_throttled(cfs_rq))
4296 break;
4297 cfs_rq->h_nr_running--;
4298
4299
4300 if (cfs_rq->load.weight) {
4301
4302
4303
4304
4305 if (task_sleep && parent_entity(se))
4306 set_next_buddy(parent_entity(se));
4307
4308
4309 se = parent_entity(se);
4310 break;
4311 }
4312 flags |= DEQUEUE_SLEEP;
4313 }
4314
4315 for_each_sched_entity(se) {
4316 cfs_rq = cfs_rq_of(se);
4317 cfs_rq->h_nr_running--;
4318
4319 if (cfs_rq_throttled(cfs_rq))
4320 break;
4321
4322 update_cfs_shares(cfs_rq);
4323 update_entity_load_avg(se, 1);
4324 }
4325
4326 if (!se) {
4327 sub_nr_running(rq, 1);
4328 update_rq_runnable_avg(rq, 1);
4329 }
4330 hrtick_update(rq);
4331}
4332
4333#ifdef CONFIG_SMP
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
4363
4364
4365
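/*
 * Per-rq cpu_load decay.
 *
 * The exact cpuload at various idx values, calculated at every tick would be
 *   load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
 *
 * If a cpu misses updates for n-1 ticks (as it was idle) and the update gets
 * called on the n-th tick when the cpu may be busy, then we have:
 *   load = ((2^idx - 1) / 2^idx)^(n-1) * load
 *   load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
 *
 * decay_load_missed() below does an efficient calculation of
 *   load = ((2^idx - 1) / 2^idx)^(n-1) * load
 * avoiding a 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load.
 *
 * The calculation is approximated on a 128 point scale:
 * degrade_zero_ticks is the number of ticks after which load at any
 * particular idx is approximated to be zero.
 * degrade_factor is a precomputed table, a row for each load idx.
 * Each column corresponds to the degradation factor for a power-of-two
 * number of ticks, based on the 128 point scale.
 * Example: row 2, col 3 (=12) says that the degradation at load idx 2 after
 * 8 ticks is 12/128 (an approximation of the exact factor 3^8/4^8).
 */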
4366#define DEGRADE_SHIFT 7
4367static const unsigned char
4368 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
4369static const unsigned char
4370 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
4371 {0, 0, 0, 0, 0, 0, 0, 0},
4372 {64, 32, 8, 0, 0, 0, 0, 0},
4373 {96, 72, 40, 12, 1, 0, 0},
4374 {112, 98, 75, 43, 15, 1, 0},
4375 {120, 112, 98, 76, 45, 16, 2} };
4376
4377
4378
4379
4380
4381
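/*
 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
 * would be when the CPU is idle and so we just decay the old load without
 * adding any new load.
 */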
4382static unsigned long
4383decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4384{
4385 int j = 0;
4386
4387 if (!missed_updates)
4388 return load;
4389
4390 if (missed_updates >= degrade_zero_ticks[idx])
4391 return 0;
4392
4393 if (idx == 1)
4394 return load >> missed_updates;
4395
4396 while (missed_updates) {
4397 if (missed_updates % 2)
4398 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
4399
4400 missed_updates >>= 1;
4401 j++;
4402 }
4403 return load;
4404}
4405
4406
4407
4408
4409
4410
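/*
 * Update rq->cpu_load[] statistics. This function is usually called every
 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
 * every tick. We fix it up based on jiffies.
 */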
4411static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
4412 unsigned long pending_updates)
4413{
4414 int i, scale;
4415
4416 this_rq->nr_load_updates++;
4417
4418
4419 this_rq->cpu_load[0] = this_load;
4420 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
4421 unsigned long old_load, new_load;
4422
4423
4424
4425 old_load = this_rq->cpu_load[i];
4426 old_load = decay_load_missed(old_load, pending_updates - 1, i);
4427 new_load = this_load;
4428
4429
4430
4431
4432
4433 if (new_load > old_load)
4434 new_load += scale - 1;
4435
4436 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
4437 }
4438
4439 sched_avg_update(this_rq);
4440}
4441
4442#ifdef CONFIG_NO_HZ_COMMON
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458
4459
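/*
 * There is no sane way to deal with nohz on smp when using jiffies because the
 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
 *
 * Therefore we only fix things up from the paths that know how many ticks
 * were missed: update_idle_cpu_load() is called from nohz_idle_balance() to
 * update the load ratings before doing the idle balance.
 */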
4460static void update_idle_cpu_load(struct rq *this_rq)
4461{
4462 unsigned long curr_jiffies = READ_ONCE(jiffies);
4463 unsigned long load = this_rq->cfs.runnable_load_avg;
4464 unsigned long pending_updates;
4465
4466
4467
4468
4469 if (load || curr_jiffies == this_rq->last_load_update_tick)
4470 return;
4471
4472 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
4473 this_rq->last_load_update_tick = curr_jiffies;
4474
4475 __update_cpu_load(this_rq, load, pending_updates);
4476}
4477
4478
4479
4480
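/*
 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
 */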
4481void update_cpu_load_nohz(void)
4482{
4483 struct rq *this_rq = this_rq();
4484 unsigned long curr_jiffies = READ_ONCE(jiffies);
4485 unsigned long pending_updates;
4486
4487 if (curr_jiffies == this_rq->last_load_update_tick)
4488 return;
4489
4490 raw_spin_lock(&this_rq->lock);
4491 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
4492 if (pending_updates) {
4493 this_rq->last_load_update_tick = curr_jiffies;
4494
4495
4496
4497
4498 __update_cpu_load(this_rq, 0, pending_updates);
4499 }
4500 raw_spin_unlock(&this_rq->lock);
4501}
4502#endif
4503
4504
4505
4506
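/*
 * Called from scheduler_tick()
 */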
4507void update_cpu_load_active(struct rq *this_rq)
4508{
4509 unsigned long load = this_rq->cfs.runnable_load_avg;
4510
4511
4512
4513 this_rq->last_load_update_tick = jiffies;
4514 __update_cpu_load(this_rq, load, 1);
4515}
4516
4517
4518static unsigned long weighted_cpuload(const int cpu)
4519{
4520 return cpu_rq(cpu)->cfs.runnable_load_avg;
4521}
4522
4523
4524
4525
4526
4527
4528
4529
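/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */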
4530static unsigned long source_load(int cpu, int type)
4531{
4532 struct rq *rq = cpu_rq(cpu);
4533 unsigned long total = weighted_cpuload(cpu);
4534
4535 if (type == 0 || !sched_feat(LB_BIAS))
4536 return total;
4537
4538 return min(rq->cpu_load[type-1], total);
4539}
4540
4541
4542
4543
4544
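/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */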
4545static unsigned long target_load(int cpu, int type)
4546{
4547 struct rq *rq = cpu_rq(cpu);
4548 unsigned long total = weighted_cpuload(cpu);
4549
4550 if (type == 0 || !sched_feat(LB_BIAS))
4551 return total;
4552
4553 return max(rq->cpu_load[type-1], total);
4554}
4555
4556static unsigned long capacity_of(int cpu)
4557{
4558 return cpu_rq(cpu)->cpu_capacity;
4559}
4560
4561static unsigned long capacity_orig_of(int cpu)
4562{
4563 return cpu_rq(cpu)->cpu_capacity_orig;
4564}
4565
4566static unsigned long cpu_avg_load_per_task(int cpu)
4567{
4568 struct rq *rq = cpu_rq(cpu);
4569 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
4570 unsigned long load_avg = rq->cfs.runnable_load_avg;
4571
4572 if (nr_running)
4573 return load_avg / nr_running;
4574
4575 return 0;
4576}
4577
4578static void record_wakee(struct task_struct *p)
4579{
4580
4581
4582
4583
4584
4585 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
4586 current->wakee_flips >>= 1;
4587 current->wakee_flip_decay_ts = jiffies;
4588 }
4589
4590 if (current->last_wakee != p) {
4591 current->last_wakee = p;
4592 current->wakee_flips++;
4593 }
4594}
4595
4596static void task_waking_fair(struct task_struct *p)
4597{
4598 struct sched_entity *se = &p->se;
4599 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4600 u64 min_vruntime;
4601
4602#ifndef CONFIG_64BIT
4603 u64 min_vruntime_copy;
4604
4605 do {
4606 min_vruntime_copy = cfs_rq->min_vruntime_copy;
4607 smp_rmb();
4608 min_vruntime = cfs_rq->min_vruntime;
4609 } while (min_vruntime != min_vruntime_copy);
4610#else
4611 min_vruntime = cfs_rq->min_vruntime;
4612#endif
4613
4614 se->vruntime -= min_vruntime;
4615 record_wakee(p);
4616}
4617
4618#ifdef CONFIG_FAIR_GROUP_SCHED
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668
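/*
 * effective_load() calculates the load change as seen from the root_task_group
 * if @wl is added (subtracted) to @tg on this @cpu, resulting in a total
 * addition (subtraction) of @wg to the total group weight.
 *
 * Given a runqueue weight distribution (rw_i), the shares distribution is
 *
 *   s_i = rw_i / \Sum rw_j
 *
 * so adding weight on one cpu shifts shares between cpus.  The loop below
 * walks the entity hierarchy upwards; at each level it approximates the new
 * local share as w * tg->shares / W, where w is the local runqueue weight
 * after the change and W is the (approximated) total group weight, and then
 * feeds the resulting weight delta into the parent level.
 */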
4669static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4670{
4671 struct sched_entity *se = tg->se[cpu];
4672
4673 if (!tg->parent)
4674 return wl;
4675
4676 for_each_sched_entity(se) {
4677 long w, W;
4678
4679 tg = se->my_q->tg;
4680
4681
4682
4683
4684 W = wg + calc_tg_weight(tg, se->my_q);
4685
4686
4687
4688
4689 w = se->my_q->load.weight + wl;
4690
4691
4692
4693
4694 if (W > 0 && w < W)
4695 wl = (w * (long)tg->shares) / W;
4696 else
4697 wl = tg->shares;
4698
4699
4700
4701
4702
4703
4704 if (wl < MIN_SHARES)
4705 wl = MIN_SHARES;
4706
4707
4708
4709
4710 wl -= se->load.weight;
4711
4712
4713
4714
4715
4716
4717
4718
4719 wg = 0;
4720 }
4721
4722 return wl;
4723}
4724#else
4725
4726static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4727{
4728 return wl;
4729}
4730
4731#endif
4732
4733static int wake_wide(struct task_struct *p)
4734{
4735 int factor = this_cpu_read(sd_llc_size);
4736
4737
4738
4739
4740
4741
4742 if (p->wakee_flips > factor) {
4743
4744
4745
4746
4747
4748 if (current->wakee_flips > (factor * p->wakee_flips))
4749 return 1;
4750 }
4751
4752 return 0;
4753}
4754
4755static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4756{
4757 s64 this_load, load;
4758 s64 this_eff_load, prev_eff_load;
4759 int idx, this_cpu, prev_cpu;
4760 struct task_group *tg;
4761 unsigned long weight;
4762 int balanced;
4763
4764
4765
4766
4767
4768 if (wake_wide(p))
4769 return 0;
4770
4771 idx = sd->wake_idx;
4772 this_cpu = smp_processor_id();
4773 prev_cpu = task_cpu(p);
4774 load = source_load(prev_cpu, idx);
4775 this_load = target_load(this_cpu, idx);
4776
4777
4778
4779
4780
4781
4782 if (sync) {
4783 tg = task_group(current);
4784 weight = current->se.load.weight;
4785
4786 this_load += effective_load(tg, this_cpu, -weight, -weight);
4787 load += effective_load(tg, prev_cpu, 0, -weight);
4788 }
4789
4790 tg = task_group(p);
4791 weight = p->se.load.weight;
4792
4793
4794
4795
4796
4797
4798
4799
4800
4801
4802 this_eff_load = 100;
4803 this_eff_load *= capacity_of(prev_cpu);
4804
4805 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4806 prev_eff_load *= capacity_of(this_cpu);
4807
4808 if (this_load > 0) {
4809 this_eff_load *= this_load +
4810 effective_load(tg, this_cpu, weight, weight);
4811
4812 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4813 }
4814
4815 balanced = this_eff_load <= prev_eff_load;
4816
4817 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
4818
4819 if (!balanced)
4820 return 0;
4821
4822 schedstat_inc(sd, ttwu_move_affine);
4823 schedstat_inc(p, se.statistics.nr_wakeups_affine);
4824
4825 return 1;
4826}
4827
4828
4829
4830
4831
4832static struct sched_group *
4833find_idlest_group(struct sched_domain *sd, struct task_struct *p,
4834 int this_cpu, int sd_flag)
4835{
4836 struct sched_group *idlest = NULL, *group = sd->groups;
4837 unsigned long min_load = ULONG_MAX, this_load = 0;
4838 int load_idx = sd->forkexec_idx;
4839 int imbalance = 100 + (sd->imbalance_pct-100)/2;
4840
4841 if (sd_flag & SD_BALANCE_WAKE)
4842 load_idx = sd->wake_idx;
4843
4844 do {
4845 unsigned long load, avg_load;
4846 int local_group;
4847 int i;
4848
4849
4850 if (!cpumask_intersects(sched_group_cpus(group),
4851 tsk_cpus_allowed(p)))
4852 continue;
4853
4854 local_group = cpumask_test_cpu(this_cpu,
4855 sched_group_cpus(group));
4856
4857
4858 avg_load = 0;
4859
4860 for_each_cpu(i, sched_group_cpus(group)) {
4861
4862 if (local_group)
4863 load = source_load(i, load_idx);
4864 else
4865 load = target_load(i, load_idx);
4866
4867 avg_load += load;
4868 }
4869
4870
4871 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
4872
4873 if (local_group) {
4874 this_load = avg_load;
4875 } else if (avg_load < min_load) {
4876 min_load = avg_load;
4877 idlest = group;
4878 }
4879 } while (group = group->next, group != sd->groups);
4880
4881 if (!idlest || 100*this_load < imbalance*min_load)
4882 return NULL;
4883 return idlest;
4884}
4885
4886
4887
4888
4889static int
4890find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4891{
4892 unsigned long load, min_load = ULONG_MAX;
4893 unsigned int min_exit_latency = UINT_MAX;
4894 u64 latest_idle_timestamp = 0;
4895 int least_loaded_cpu = this_cpu;
4896 int shallowest_idle_cpu = -1;
4897 int i;
4898
4899
4900 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
4901 if (idle_cpu(i)) {
4902 struct rq *rq = cpu_rq(i);
4903 struct cpuidle_state *idle = idle_get_state(rq);
4904 if (idle && idle->exit_latency < min_exit_latency) {
4905
4906
4907
4908
4909
4910 min_exit_latency = idle->exit_latency;
4911 latest_idle_timestamp = rq->idle_stamp;
4912 shallowest_idle_cpu = i;
4913 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
4914 rq->idle_stamp > latest_idle_timestamp) {
4915
4916
4917
4918
4919
4920 latest_idle_timestamp = rq->idle_stamp;
4921 shallowest_idle_cpu = i;
4922 }
4923 } else if (shallowest_idle_cpu == -1) {
4924 load = weighted_cpuload(i);
4925 if (load < min_load || (load == min_load && i == this_cpu)) {
4926 min_load = load;
4927 least_loaded_cpu = i;
4928 }
4929 }
4930 }
4931
4932 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
4933}
4934
4935
4936
4937
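/*
 * Try and locate an idle CPU in the sched_domain.
 */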
4938static int select_idle_sibling(struct task_struct *p, int target)
4939{
4940 struct sched_domain *sd;
4941 struct sched_group *sg;
4942 int i = task_cpu(p);
4943
4944 if (idle_cpu(target))
4945 return target;
4946
4947
4948
4949
4950 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4951 return i;
4952
4953
4954
4955
4956 sd = rcu_dereference(per_cpu(sd_llc, target));
4957 for_each_lower_domain(sd) {
4958 sg = sd->groups;
4959 do {
4960 if (!cpumask_intersects(sched_group_cpus(sg),
4961 tsk_cpus_allowed(p)))
4962 goto next;
4963
4964 for_each_cpu(i, sched_group_cpus(sg)) {
4965 if (i == target || !idle_cpu(i))
4966 goto next;
4967 }
4968
4969 target = cpumask_first_and(sched_group_cpus(sg),
4970 tsk_cpus_allowed(p));
4971 goto done;
4972next:
4973 sg = sg->next;
4974 } while (sg != sd->groups);
4975 }
4976done:
4977 return target;
4978}
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992
4993
4994
4995
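/*
 * get_cpu_usage returns the amount of capacity of a CPU that is used by CFS
 * tasks. The unit of the return value must be the one of capacity so we can
 * compare the usage with the capacity of the CPU that is available for CFS
 * tasks (ie cpu_capacity).  cfs.utilization_load_avg can briefly exceed the
 * full capacity (e.g. right after a migration), so clamp the result to
 * capacity_orig.
 */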
4996static int get_cpu_usage(int cpu)
4997{
4998 unsigned long usage = cpu_rq(cpu)->cfs.utilization_load_avg;
4999 unsigned long capacity = capacity_orig_of(cpu);
5000
5001 if (usage >= SCHED_LOAD_SCALE)
5002 return capacity;
5003
5004 return (usage * capacity) >> SCHED_LOAD_SHIFT;
5005}
5006
5007
5008
5009
5010
5011
5012
5013
5014
5015
5016
5017
5018
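/*
 * select_task_rq_fair: Select target runqueue for the waking task in domains
 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
 *
 * Balances load by selecting the idlest cpu in the idlest group, or under
 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
 *
 * Returns the target cpu number.
 *
 * preempt must be disabled.
 */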
5019static int
5020select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
5021{
5022 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
5023 int cpu = smp_processor_id();
5024 int new_cpu = cpu;
5025 int want_affine = 0;
5026 int sync = wake_flags & WF_SYNC;
5027
5028 if (sd_flag & SD_BALANCE_WAKE)
5029 want_affine = cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
5030
5031 rcu_read_lock();
5032 for_each_domain(cpu, tmp) {
5033 if (!(tmp->flags & SD_LOAD_BALANCE))
5034 continue;
5035
5036
5037
5038
5039
5040 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
5041 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
5042 affine_sd = tmp;
5043 break;
5044 }
5045
5046 if (tmp->flags & sd_flag)
5047 sd = tmp;
5048 }
5049
5050 if (affine_sd && cpu != prev_cpu && wake_affine(affine_sd, p, sync))
5051 prev_cpu = cpu;
5052
5053 if (sd_flag & SD_BALANCE_WAKE) {
5054 new_cpu = select_idle_sibling(p, prev_cpu);
5055 goto unlock;
5056 }
5057
5058 while (sd) {
5059 struct sched_group *group;
5060 int weight;
5061
5062 if (!(sd->flags & sd_flag)) {
5063 sd = sd->child;
5064 continue;
5065 }
5066
5067 group = find_idlest_group(sd, p, cpu, sd_flag);
5068 if (!group) {
5069 sd = sd->child;
5070 continue;
5071 }
5072
5073 new_cpu = find_idlest_cpu(group, p, cpu);
5074 if (new_cpu == -1 || new_cpu == cpu) {
5075
5076 sd = sd->child;
5077 continue;
5078 }
5079
5080
5081 cpu = new_cpu;
5082 weight = sd->span_weight;
5083 sd = NULL;
5084 for_each_domain(cpu, tmp) {
5085 if (weight <= tmp->span_weight)
5086 break;
5087 if (tmp->flags & sd_flag)
5088 sd = tmp;
5089 }
5090
5091 }
5092unlock:
5093 rcu_read_unlock();
5094
5095 return new_cpu;
5096}
5097
5098
5099
5100
5101
5102
5103
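/*
 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
 * cfs_rq_of(p) references at time of call are still valid and identify the
 * previous cpu.  However, the caller only guarantees p->pi_lock is held; no
 * other assumptions, including the state of rq->lock, should be made.
 */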
5104static void
5105migrate_task_rq_fair(struct task_struct *p, int next_cpu)
5106{
5107 struct sched_entity *se = &p->se;
5108 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5109
5110
5111
5112
5113
5114
5115
5116 if (se->avg.decay_count) {
5117 se->avg.decay_count = -__synchronize_entity_decay(se);
5118 atomic_long_add(se->avg.load_avg_contrib,
5119 &cfs_rq->removed_load);
5120 }
5121
5122
5123 se->exec_start = 0;
5124}
5125#endif
5126
5127static unsigned long
5128wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
5129{
5130 unsigned long gran = sysctl_sched_wakeup_granularity;
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
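	/*
	 * Since it is curr that is running, convert the granularity from
	 * real-time to virtual-time in se's units.
	 *
	 * Using 'se' instead of 'curr' penalizes light tasks: a lighter se
	 * yields a larger vruntime granularity and is therefore harder to
	 * preempt with, while a heavier se yields a smaller one.
	 */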
5145 return calc_delta_fair(gran, se);
5146}
5147
5148
5149
5150
5151
5152
5153
5154
5155
5156
5157
5158
5159
5160
5161
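/*
 * Should 'se' preempt 'curr'?
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 *  w(c, s1) = -1
 *  w(c, s2) =  0
 *  w(c, s3) =  1
 */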
5162static int
5163wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
5164{
5165 s64 gran, vdiff = curr->vruntime - se->vruntime;
5166
5167 if (vdiff <= 0)
5168 return -1;
5169
5170 gran = wakeup_gran(curr, se);
5171 if (vdiff > gran)
5172 return 1;
5173
5174 return 0;
5175}
5176
5177static void set_last_buddy(struct sched_entity *se)
5178{
5179 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5180 return;
5181
5182 for_each_sched_entity(se)
5183 cfs_rq_of(se)->last = se;
5184}
5185
5186static void set_next_buddy(struct sched_entity *se)
5187{
5188 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5189 return;
5190
5191 for_each_sched_entity(se)
5192 cfs_rq_of(se)->next = se;
5193}
5194
5195static void set_skip_buddy(struct sched_entity *se)
5196{
5197 for_each_sched_entity(se)
5198 cfs_rq_of(se)->skip = se;
5199}
5200
5201
5202
5203
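/*
 * Preempt the current task with a newly woken task if needed:
 */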
5204static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
5205{
5206 struct task_struct *curr = rq->curr;
5207 struct sched_entity *se = &curr->se, *pse = &p->se;
5208 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5209 int scale = cfs_rq->nr_running >= sched_nr_latency;
5210 int next_buddy_marked = 0;
5211
5212 if (unlikely(se == pse))
5213 return;
5214
5215
5216
5217
5218
5219
5220
5221 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
5222 return;
5223
5224 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
5225 set_next_buddy(pse);
5226 next_buddy_marked = 1;
5227 }
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237
5238
5239 if (test_tsk_need_resched(curr))
5240 return;
5241
5242
5243 if (unlikely(curr->policy == SCHED_IDLE) &&
5244 likely(p->policy != SCHED_IDLE))
5245 goto preempt;
5246
5247
5248
5249
5250
5251 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
5252 return;
5253
5254 find_matching_se(&se, &pse);
5255 update_curr(cfs_rq_of(se));
5256 BUG_ON(!pse);
5257 if (wakeup_preempt_entity(se, pse) == 1) {
5258
5259
5260
5261
5262 if (!next_buddy_marked)
5263 set_next_buddy(pse);
5264 goto preempt;
5265 }
5266
5267 return;
5268
5269preempt:
5270 resched_curr(rq);
5271
5272
5273
5274
5275
5276
5277
5278
5279
5280 if (unlikely(!se->on_rq || curr == rq->idle))
5281 return;
5282
5283 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
5284 set_last_buddy(se);
5285}
5286
5287static struct task_struct *
5288pick_next_task_fair(struct rq *rq, struct task_struct *prev)
5289{
5290 struct cfs_rq *cfs_rq = &rq->cfs;
5291 struct sched_entity *se;
5292 struct task_struct *p;
5293 int new_tasks;
5294
5295again:
5296#ifdef CONFIG_FAIR_GROUP_SCHED
5297 if (!cfs_rq->nr_running)
5298 goto idle;
5299
5300 if (prev->sched_class != &fair_sched_class)
5301 goto simple;
5302
5303
5304
5305
5306
5307
5308
5309
5310
5311 do {
5312 struct sched_entity *curr = cfs_rq->curr;
5313
5314
5315
5316
5317
5318
5319
5320 if (curr) {
5321 if (curr->on_rq)
5322 update_curr(cfs_rq);
5323 else
5324 curr = NULL;
5325
5326
5327
5328
5329
5330
5331
5332 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
5333 goto simple;
5334 }
5335
5336 se = pick_next_entity(cfs_rq, curr);
5337 cfs_rq = group_cfs_rq(se);
5338 } while (cfs_rq);
5339
5340 p = task_of(se);
5341
5342
5343
5344
5345
5346
5347 if (prev != p) {
5348 struct sched_entity *pse = &prev->se;
5349
5350 while (!(cfs_rq = is_same_group(se, pse))) {
5351 int se_depth = se->depth;
5352 int pse_depth = pse->depth;
5353
5354 if (se_depth <= pse_depth) {
5355 put_prev_entity(cfs_rq_of(pse), pse);
5356 pse = parent_entity(pse);
5357 }
5358 if (se_depth >= pse_depth) {
5359 set_next_entity(cfs_rq_of(se), se);
5360 se = parent_entity(se);
5361 }
5362 }
5363
5364 put_prev_entity(cfs_rq, pse);
5365 set_next_entity(cfs_rq, se);
5366 }
5367
5368 if (hrtick_enabled(rq))
5369 hrtick_start_fair(rq, p);
5370
5371 return p;
5372simple:
5373 cfs_rq = &rq->cfs;
5374#endif
5375
5376 if (!cfs_rq->nr_running)
5377 goto idle;
5378
5379 put_prev_task(rq, prev);
5380
5381 do {
5382 se = pick_next_entity(cfs_rq, NULL);
5383 set_next_entity(cfs_rq, se);
5384 cfs_rq = group_cfs_rq(se);
5385 } while (cfs_rq);
5386
5387 p = task_of(se);
5388
5389 if (hrtick_enabled(rq))
5390 hrtick_start_fair(rq, p);
5391
5392 return p;
5393
5394idle:
5395
5396
5397
5398
5399
5400
5401 lockdep_unpin_lock(&rq->lock);
5402 new_tasks = idle_balance(rq);
5403 lockdep_pin_lock(&rq->lock);
5404
5405
5406
5407
5408
5409 if (new_tasks < 0)
5410 return RETRY_TASK;
5411
5412 if (new_tasks > 0)
5413 goto again;
5414
5415 return NULL;
5416}
5417
5418
5419
5420
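/*
 * Account for a descheduled task:
 */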
5421static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
5422{
5423 struct sched_entity *se = &prev->se;
5424 struct cfs_rq *cfs_rq;
5425
5426 for_each_sched_entity(se) {
5427 cfs_rq = cfs_rq_of(se);
5428 put_prev_entity(cfs_rq, se);
5429 }
5430}
5431
5432
5433
5434
5435
5436
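/*
 * sched_yield() is very simple
 *
 * The magic of dealing with the ->skip buddy is in pick_next_entity.
 */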
5437static void yield_task_fair(struct rq *rq)
5438{
5439 struct task_struct *curr = rq->curr;
5440 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5441 struct sched_entity *se = &curr->se;
5442
5443
5444
5445
5446 if (unlikely(rq->nr_running == 1))
5447 return;
5448
5449 clear_buddies(cfs_rq, se);
5450
5451 if (curr->policy != SCHED_BATCH) {
5452 update_rq_clock(rq);
5453
5454
5455
5456 update_curr(cfs_rq);
5457
5458
5459
5460
5461
5462 rq_clock_skip_update(rq, true);
5463 }
5464
5465 set_skip_buddy(se);
5466}
5467
5468static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
5469{
5470 struct sched_entity *se = &p->se;
5471
5472
5473 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
5474 return false;
5475
5476
5477 set_next_buddy(se);
5478
5479 yield_task_fair(rq);
5480
5481 return true;
5482}
5483
5484#ifdef CONFIG_SMP
5485
5486
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498
5499
5500
5501
5502
5503
5504
5505
5506
5507
5508
5509
5510
5511
5512
5513
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532
5533
5534
5535
5536
5537
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548
5549
5550
5551
5552
5553
5554
5555
5556
5557
5558
5559
5560
5561
5562
5563
5564
5565
5566
5567
5568
5569
5570
5571
5572
5573
5574
5575
5576
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
5588
5589
5590
5591
5592
5593
5594
5595
5596
5597
5598
5599
5600
5601
5602
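/**************************************************
 * Fair scheduling class load-balancing methods.
 *
 * The goal of load-balancing is to extend the fairness the per-cpu scheduler
 * provides to the whole system: tasks are periodically migrated between
 * runqueues so that each cpu carries a share of the total weight proportional
 * to its capacity.  Balancing is driven along the sched_domain hierarchy so
 * that migrations respect cache and NUMA topology, and is rate limited by
 * max_load_balance_interval below.
 */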
5603static unsigned long __read_mostly max_load_balance_interval = HZ/10;
5604
5605enum fbq_type { regular, remote, all };
5606
5607#define LBF_ALL_PINNED 0x01
5608#define LBF_NEED_BREAK 0x02
5609#define LBF_DST_PINNED 0x04
5610#define LBF_SOME_PINNED 0x08
5611
5612struct lb_env {
5613 struct sched_domain *sd;
5614
5615 struct rq *src_rq;
5616 int src_cpu;
5617
5618 int dst_cpu;
5619 struct rq *dst_rq;
5620
5621 struct cpumask *dst_grpmask;
5622 int new_dst_cpu;
5623 enum cpu_idle_type idle;
5624 long imbalance;
5625
5626 struct cpumask *cpus;
5627
5628 unsigned int flags;
5629
5630 unsigned int loop;
5631 unsigned int loop_break;
5632 unsigned int loop_max;
5633
5634 enum fbq_type fbq_type;
5635 struct list_head tasks;
5636};
5637
5638
5639
5640
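/*
 * Is the task likely cache-hot on its current CPU? Next/last buddies are
 * treated as hot; otherwise the time since the task last ran is compared
 * against sysctl_sched_migration_cost (-1 means always hot, 0 never hot).
 */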
5641static int task_hot(struct task_struct *p, struct lb_env *env)
5642{
5643 s64 delta;
5644
5645 lockdep_assert_held(&env->src_rq->lock);
5646
5647 if (p->sched_class != &fair_sched_class)
5648 return 0;
5649
5650 if (unlikely(p->policy == SCHED_IDLE))
5651 return 0;
5652
5653
5654
5655
5656 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
5657 (&p->se == cfs_rq_of(&p->se)->next ||
5658 &p->se == cfs_rq_of(&p->se)->last))
5659 return 1;
5660
5661 if (sysctl_sched_migration_cost == -1)
5662 return 1;
5663 if (sysctl_sched_migration_cost == 0)
5664 return 0;
5665
5666 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
5667
5668 return delta < (s64)sysctl_sched_migration_cost;
5669}
5670
5671#ifdef CONFIG_NUMA_BALANCING
5672
5673
5674
5675
5676
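/*
 * Returns true if moving the task from src_cpu to dst_cpu would improve its
 * NUMA locality: the destination node is its preferred node, or it has more
 * recorded NUMA faults (task or numa_group) than the source node.
 */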
5677static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
5678{
5679 struct numa_group *numa_group = rcu_dereference(p->numa_group);
5680 unsigned long src_faults, dst_faults;
5681 int src_nid, dst_nid;
5682
5683 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
5684 !(env->sd->flags & SD_NUMA)) {
5685 return false;
5686 }
5687
5688 src_nid = cpu_to_node(env->src_cpu);
5689 dst_nid = cpu_to_node(env->dst_cpu);
5690
5691 if (src_nid == dst_nid)
5692 return false;
5693
5694
5695 if (dst_nid == p->numa_preferred_nid)
5696 return true;
5697
5698
5699 if (src_nid == p->numa_preferred_nid)
5700 return false;
5701
5702 if (numa_group) {
5703 src_faults = group_faults(p, src_nid);
5704 dst_faults = group_faults(p, dst_nid);
5705 } else {
5706 src_faults = task_faults(p, src_nid);
5707 dst_faults = task_faults(p, dst_nid);
5708 }
5709
5710 return dst_faults > src_faults;
5711}
5712
5713
5714static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
5715{
5716 struct numa_group *numa_group = rcu_dereference(p->numa_group);
5717 unsigned long src_faults, dst_faults;
5718 int src_nid, dst_nid;
5719
5720 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
5721 return false;
5722
5723 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
5724 return false;
5725
5726 src_nid = cpu_to_node(env->src_cpu);
5727 dst_nid = cpu_to_node(env->dst_cpu);
5728
5729 if (src_nid == dst_nid)
5730 return false;
5731
5732
5733 if (src_nid == p->numa_preferred_nid)
5734 return true;
5735
5736
5737 if (dst_nid == p->numa_preferred_nid)
5738 return false;
5739
5740 if (numa_group) {
5741 src_faults = group_faults(p, src_nid);
5742 dst_faults = group_faults(p, dst_nid);
5743 } else {
5744 src_faults = task_faults(p, src_nid);
5745 dst_faults = task_faults(p, dst_nid);
5746 }
5747
5748 return dst_faults < src_faults;
5749}
5750
5751#else
5752static inline bool migrate_improves_locality(struct task_struct *p,
5753 struct lb_env *env)
5754{
5755 return false;
5756}
5757
5758static inline bool migrate_degrades_locality(struct task_struct *p,
5759 struct lb_env *env)
5760{
5761 return false;
5762}
5763#endif
5764
5765
5766
5767
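/*
 * can_migrate_task - may task p from env->src_rq be migrated to env->dst_cpu?
 */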
5768static
5769int can_migrate_task(struct task_struct *p, struct lb_env *env)
5770{
5771 int tsk_cache_hot = 0;
5772
5773 lockdep_assert_held(&env->src_rq->lock);
5774
5775
5776
5777
5778
5779
5780
5781
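	/*
	 * We do not migrate tasks that are:
	 *   1) throttled by CFS bandwidth control on either the source or
	 *      the destination runqueue,
	 *   2) not allowed onto the destination CPU (cpus_allowed),
	 *   3) currently running, or
	 *   4) cache-hot on their current CPU (unless balancing has been
	 *      failing for a while).
	 */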
5782 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
5783 return 0;
5784
5785 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
5786 int cpu;
5787
5788 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
5789
5790 env->flags |= LBF_SOME_PINNED;
5791
5792
5793
5794
5795
5796
5797
5798
5799
5800 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
5801 return 0;
5802
5803
5804 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
5805 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
5806 env->flags |= LBF_DST_PINNED;
5807 env->new_dst_cpu = cpu;
5808 break;
5809 }
5810 }
5811
5812 return 0;
5813 }
5814
5815
5816 env->flags &= ~LBF_ALL_PINNED;
5817
5818 if (task_running(env->src_rq, p)) {
5819 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
5820 return 0;
5821 }
5822
5823
5824
5825
5826
5827
5828
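	/*
	 * Aggressive migration if:
	 *   1) the move improves NUMA locality,
	 *   2) the task is cache cold, or
	 *   3) too many balance attempts have failed.
	 */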
5829 tsk_cache_hot = task_hot(p, env);
5830 if (!tsk_cache_hot)
5831 tsk_cache_hot = migrate_degrades_locality(p, env);
5832
5833 if (migrate_improves_locality(p, env) || !tsk_cache_hot ||
5834 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
5835 if (tsk_cache_hot) {
5836 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5837 schedstat_inc(p, se.statistics.nr_forced_migrations);
5838 }
5839 return 1;
5840 }
5841
5842 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
5843 return 0;
5844}
5845
5846
5847
5848
5849static void detach_task(struct task_struct *p, struct lb_env *env)
5850{
5851 lockdep_assert_held(&env->src_rq->lock);
5852
5853 deactivate_task(env->src_rq, p, 0);
5854 p->on_rq = TASK_ON_RQ_MIGRATING;
5855 set_task_cpu(p, env->dst_cpu);
5856}
5857
5858
5859
5860
5861
5862
5863
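/*
 * detach_one_task() - try to dequeue exactly one migratable task from
 * env->src_rq. Returns the detached task, or NULL if none qualifies.
 */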
5864static struct task_struct *detach_one_task(struct lb_env *env)
5865{
5866 struct task_struct *p, *n;
5867
5868 lockdep_assert_held(&env->src_rq->lock);
5869
5870 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
5871 if (!can_migrate_task(p, env))
5872 continue;
5873
5874 detach_task(p, env);
5875
5876
5877
5878
5879
5880
5881
5882 schedstat_inc(env->sd, lb_gained[env->idle]);
5883 return p;
5884 }
5885 return NULL;
5886}
5887
5888static const unsigned int sched_nr_migrate_break = 32;
5889
5890
5891
5892
5893
5894
5895
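/*
 * detach_tasks() - detach up to env->imbalance worth of load from
 * env->src_rq and collect the tasks on env->tasks for a later
 * attach_tasks(). Returns the number of tasks detached.
 */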
5896static int detach_tasks(struct lb_env *env)
5897{
5898 struct list_head *tasks = &env->src_rq->cfs_tasks;
5899 struct task_struct *p;
5900 unsigned long load;
5901 int detached = 0;
5902
5903 lockdep_assert_held(&env->src_rq->lock);
5904
5905 if (env->imbalance <= 0)
5906 return 0;
5907
5908 while (!list_empty(tasks)) {
5909 p = list_first_entry(tasks, struct task_struct, se.group_node);
5910
5911 env->loop++;
5912
5913 if (env->loop > env->loop_max)
5914 break;
5915
5916
5917 if (env->loop > env->loop_break) {
5918 env->loop_break += sched_nr_migrate_break;
5919 env->flags |= LBF_NEED_BREAK;
5920 break;
5921 }
5922
5923 if (!can_migrate_task(p, env))
5924 goto next;
5925
5926 load = task_h_load(p);
5927
5928 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
5929 goto next;
5930
5931 if ((load / 2) > env->imbalance)
5932 goto next;
5933
5934 detach_task(p, env);
5935 list_add(&p->se.group_node, &env->tasks);
5936
5937 detached++;
5938 env->imbalance -= load;
5939
5940#ifdef CONFIG_PREEMPT
5941
5942
5943
5944
5945
5946 if (env->idle == CPU_NEWLY_IDLE)
5947 break;
5948#endif
5949
5950
5951
5952
5953
5954 if (env->imbalance <= 0)
5955 break;
5956
5957 continue;
5958next:
5959 list_move_tail(&p->se.group_node, tasks);
5960 }
5961
5962
5963
5964
5965
5966
5967 schedstat_add(env->sd, lb_gained[env->idle], detached);
5968
5969 return detached;
5970}
5971
5972
5973
5974
5975static void attach_task(struct rq *rq, struct task_struct *p)
5976{
5977 lockdep_assert_held(&rq->lock);
5978
5979 BUG_ON(task_rq(p) != rq);
5980 p->on_rq = TASK_ON_RQ_QUEUED;
5981 activate_task(rq, p, 0);
5982 check_preempt_curr(rq, p, 0);
5983}
5984
5985
5986
5987
5988
5989static void attach_one_task(struct rq *rq, struct task_struct *p)
5990{
5991 raw_spin_lock(&rq->lock);
5992 attach_task(rq, p);
5993 raw_spin_unlock(&rq->lock);
5994}
5995
5996
5997
5998
5999
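/*
 * attach_tasks() - attach all tasks detached by detach_tasks() to their new
 * runqueue, under a single dst_rq->lock.
 */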
6000static void attach_tasks(struct lb_env *env)
6001{
6002 struct list_head *tasks = &env->tasks;
6003 struct task_struct *p;
6004
6005 raw_spin_lock(&env->dst_rq->lock);
6006
6007 while (!list_empty(tasks)) {
6008 p = list_first_entry(tasks, struct task_struct, se.group_node);
6009 list_del_init(&p->se.group_node);
6010
6011 attach_task(env->dst_rq, p);
6012 }
6013
6014 raw_spin_unlock(&env->dst_rq->lock);
6015}
6016
6017#ifdef CONFIG_FAIR_GROUP_SCHED
6018
6019
6020
6021static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
6022{
6023 struct sched_entity *se = tg->se[cpu];
6024 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
6025
6026
6027 if (throttled_hierarchy(cfs_rq))
6028 return;
6029
6030 update_cfs_rq_blocked_load(cfs_rq, 1);
6031
6032 if (se) {
6033 update_entity_load_avg(se, 1);
6034
6035
6036
6037
6038
6039
6040
6041
6042
6043 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
6044 list_del_leaf_cfs_rq(cfs_rq);
6045 } else {
6046 struct rq *rq = rq_of(cfs_rq);
6047 update_rq_runnable_avg(rq, rq->nr_running);
6048 }
6049}
6050
6051static void update_blocked_averages(int cpu)
6052{
6053 struct rq *rq = cpu_rq(cpu);
6054 struct cfs_rq *cfs_rq;
6055 unsigned long flags;
6056
6057 raw_spin_lock_irqsave(&rq->lock, flags);
6058 update_rq_clock(rq);
6059
6060
6061
6062
6063 for_each_leaf_cfs_rq(rq, cfs_rq) {
6064
6065
6066
6067
6068
6069 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
6070 }
6071
6072 raw_spin_unlock_irqrestore(&rq->lock, flags);
6073}
6074
6075
6076
6077
6078
6079
6080static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
6081{
6082 struct rq *rq = rq_of(cfs_rq);
6083 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
6084 unsigned long now = jiffies;
6085 unsigned long load;
6086
6087 if (cfs_rq->last_h_load_update == now)
6088 return;
6089
6090 cfs_rq->h_load_next = NULL;
6091 for_each_sched_entity(se) {
6092 cfs_rq = cfs_rq_of(se);
6093 cfs_rq->h_load_next = se;
6094 if (cfs_rq->last_h_load_update == now)
6095 break;
6096 }
6097
6098 if (!se) {
6099 cfs_rq->h_load = cfs_rq->runnable_load_avg;
6100 cfs_rq->last_h_load_update = now;
6101 }
6102
6103 while ((se = cfs_rq->h_load_next) != NULL) {
6104 load = cfs_rq->h_load;
6105 load = div64_ul(load * se->avg.load_avg_contrib,
6106 cfs_rq->runnable_load_avg + 1);
6107 cfs_rq = group_cfs_rq(se);
6108 cfs_rq->h_load = load;
6109 cfs_rq->last_h_load_update = now;
6110 }
6111}
6112
6113static unsigned long task_h_load(struct task_struct *p)
6114{
6115 struct cfs_rq *cfs_rq = task_cfs_rq(p);
6116
6117 update_cfs_rq_h_load(cfs_rq);
6118 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
6119 cfs_rq->runnable_load_avg + 1);
6120}
6121#else
6122static inline void update_blocked_averages(int cpu)
6123{
6124}
6125
6126static unsigned long task_h_load(struct task_struct *p)
6127{
6128 return p->se.avg.load_avg_contrib;
6129}
6130#endif
6131
6132
6133
6134enum group_type {
6135 group_other = 0,
6136 group_imbalanced,
6137 group_overloaded,
6138};
6139
6140
6141
6142
6143struct sg_lb_stats {
6144 unsigned long avg_load;
6145 unsigned long group_load;
6146 unsigned long sum_weighted_load;
6147 unsigned long load_per_task;
6148 unsigned long group_capacity;
6149 unsigned long group_usage;
6150 unsigned int sum_nr_running;
6151 unsigned int idle_cpus;
6152 unsigned int group_weight;
6153 enum group_type group_type;
6154 int group_no_capacity;
6155#ifdef CONFIG_NUMA_BALANCING
6156 unsigned int nr_numa_running;
6157 unsigned int nr_preferred_running;
6158#endif
6159};
6160
6161
6162
6163
6164
6165struct sd_lb_stats {
6166 struct sched_group *busiest;
6167 struct sched_group *local;
6168 unsigned long total_load;
6169 unsigned long total_capacity;
6170 unsigned long avg_load;
6171
6172 struct sg_lb_stats busiest_stat;
6173 struct sg_lb_stats local_stat;
6174};
6175
6176static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
6177{
6178
6179
6180
6181
6182
6183
6184 *sds = (struct sd_lb_stats){
6185 .busiest = NULL,
6186 .local = NULL,
6187 .total_load = 0UL,
6188 .total_capacity = 0UL,
6189 .busiest_stat = {
6190 .avg_load = 0UL,
6191 .sum_nr_running = 0,
6192 .group_type = group_other,
6193 },
6194 };
6195}
6196
6197
6198
6199
6200
6201
6202
6203
6204static inline int get_sd_load_idx(struct sched_domain *sd,
6205 enum cpu_idle_type idle)
6206{
6207 int load_idx;
6208
6209 switch (idle) {
6210 case CPU_NOT_IDLE:
6211 load_idx = sd->busy_idx;
6212 break;
6213
6214 case CPU_NEWLY_IDLE:
6215 load_idx = sd->newidle_idx;
6216 break;
6217 default:
6218 load_idx = sd->idle_idx;
6219 break;
6220 }
6221
6222 return load_idx;
6223}
6224
6225static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
6226{
6227 if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
6228 return sd->smt_gain / sd->span_weight;
6229
6230 return SCHED_CAPACITY_SCALE;
6231}
6232
6233unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
6234{
6235 return default_scale_cpu_capacity(sd, cpu);
6236}
6237
6238static unsigned long scale_rt_capacity(int cpu)
6239{
6240 struct rq *rq = cpu_rq(cpu);
6241 u64 total, used, age_stamp, avg;
6242 s64 delta;
6243
6244
6245
6246
6247
6248 age_stamp = READ_ONCE(rq->age_stamp);
6249 avg = READ_ONCE(rq->rt_avg);
6250 delta = __rq_clock_broken(rq) - age_stamp;
6251
6252 if (unlikely(delta < 0))
6253 delta = 0;
6254
6255 total = sched_avg_period() + delta;
6256
6257 used = div_u64(avg, total);
6258
6259 if (likely(used < SCHED_CAPACITY_SCALE))
6260 return SCHED_CAPACITY_SCALE - used;
6261
6262 return 1;
6263}
6264
6265static void update_cpu_capacity(struct sched_domain *sd, int cpu)
6266{
6267 unsigned long capacity = SCHED_CAPACITY_SCALE;
6268 struct sched_group *sdg = sd->groups;
6269
6270 if (sched_feat(ARCH_CAPACITY))
6271 capacity *= arch_scale_cpu_capacity(sd, cpu);
6272 else
6273 capacity *= default_scale_cpu_capacity(sd, cpu);
6274
6275 capacity >>= SCHED_CAPACITY_SHIFT;
6276
6277 cpu_rq(cpu)->cpu_capacity_orig = capacity;
6278
6279 capacity *= scale_rt_capacity(cpu);
6280 capacity >>= SCHED_CAPACITY_SHIFT;
6281
6282 if (!capacity)
6283 capacity = 1;
6284
6285 cpu_rq(cpu)->cpu_capacity = capacity;
6286 sdg->sgc->capacity = capacity;
6287}
6288
6289void update_group_capacity(struct sched_domain *sd, int cpu)
6290{
6291 struct sched_domain *child = sd->child;
6292 struct sched_group *group, *sdg = sd->groups;
6293 unsigned long capacity;
6294 unsigned long interval;
6295
6296 interval = msecs_to_jiffies(sd->balance_interval);
6297 interval = clamp(interval, 1UL, max_load_balance_interval);
6298 sdg->sgc->next_update = jiffies + interval;
6299
6300 if (!child) {
6301 update_cpu_capacity(sd, cpu);
6302 return;
6303 }
6304
6305 capacity = 0;
6306
6307 if (child->flags & SD_OVERLAP) {
6308
6309
6310
6311
6312
6313 for_each_cpu(cpu, sched_group_cpus(sdg)) {
6314 struct sched_group_capacity *sgc;
6315 struct rq *rq = cpu_rq(cpu);
6316
6317
6318
6319
6320
6321
6322
6323
6324
6325
6326
6327
6328 if (unlikely(!rq->sd)) {
6329 capacity += capacity_of(cpu);
6330 continue;
6331 }
6332
6333 sgc = rq->sd->groups->sgc;
6334 capacity += sgc->capacity;
6335 }
6336 } else {
6337
6338
6339
6340
6341
6342 group = child->groups;
6343 do {
6344 capacity += group->sgc->capacity;
6345 group = group->next;
6346 } while (group != child->groups);
6347 }
6348
6349 sdg->sgc->capacity = capacity;
6350}
6351
6352
6353
6354
6355
6356
6357static inline int
6358check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
6359{
6360 return ((rq->cpu_capacity * sd->imbalance_pct) <
6361 (rq->cpu_capacity_orig * 100));
6362}
6363
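/*
 * group_imbalanced: a lower sched_domain level could not spread load evenly
 * because of affinity constraints; load_balance() records this in
 * sgc->imbalance (see LBF_SOME_PINNED) so that the parent level prefers to
 * move load away from such a group.
 */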
6393static inline int sg_imbalanced(struct sched_group *group)
6394{
6395 return group->sgc->imbalance;
6396}
6397
6398
6399
6400
6401
6402
6403
6404
6405
6406
6407
6408
6409
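/*
 * group_has_capacity: the group can take more work, i.e. it runs fewer
 * tasks than it has CPUs, or its usage is comfortably below its capacity
 * (scaled by the domain's imbalance_pct).
 */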
6410static inline bool
6411group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
6412{
6413 if (sgs->sum_nr_running < sgs->group_weight)
6414 return true;
6415
6416 if ((sgs->group_capacity * 100) >
6417 (sgs->group_usage * env->sd->imbalance_pct))
6418 return true;
6419
6420 return false;
6421}
6422
6423
6424
6425
6426
6427
6428
6429
6430
6431static inline bool
6432group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
6433{
6434 if (sgs->sum_nr_running <= sgs->group_weight)
6435 return false;
6436
6437 if ((sgs->group_capacity * 100) <
6438 (sgs->group_usage * env->sd->imbalance_pct))
6439 return true;
6440
6441 return false;
6442}
6443
6444static enum group_type group_classify(struct lb_env *env,
6445 struct sched_group *group,
6446 struct sg_lb_stats *sgs)
6447{
6448 if (sgs->group_no_capacity)
6449 return group_overloaded;
6450
6451 if (sg_imbalanced(group))
6452 return group_imbalanced;
6453
6454 return group_other;
6455}
6456
6457
6458
6459
6460
6461
6462
6463
6464
6465
6466static inline void update_sg_lb_stats(struct lb_env *env,
6467 struct sched_group *group, int load_idx,
6468 int local_group, struct sg_lb_stats *sgs,
6469 bool *overload)
6470{
6471 unsigned long load;
6472 int i;
6473
6474 memset(sgs, 0, sizeof(*sgs));
6475
6476 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6477 struct rq *rq = cpu_rq(i);
6478
6479
6480 if (local_group)
6481 load = target_load(i, load_idx);
6482 else
6483 load = source_load(i, load_idx);
6484
6485 sgs->group_load += load;
6486 sgs->group_usage += get_cpu_usage(i);
6487 sgs->sum_nr_running += rq->cfs.h_nr_running;
6488
6489 if (rq->nr_running > 1)
6490 *overload = true;
6491
6492#ifdef CONFIG_NUMA_BALANCING
6493 sgs->nr_numa_running += rq->nr_numa_running;
6494 sgs->nr_preferred_running += rq->nr_preferred_running;
6495#endif
6496 sgs->sum_weighted_load += weighted_cpuload(i);
6497 if (idle_cpu(i))
6498 sgs->idle_cpus++;
6499 }
6500
6501
6502 sgs->group_capacity = group->sgc->capacity;
6503 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
6504
6505 if (sgs->sum_nr_running)
6506 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
6507
6508 sgs->group_weight = group->group_weight;
6509
6510 sgs->group_no_capacity = group_is_overloaded(env, sgs);
6511 sgs->group_type = group_classify(env, group, sgs);
6512}
6513
6514
6515
6516
6517
6518
6519
6520
6521
6522
6523
6524
6525
6526
6527static bool update_sd_pick_busiest(struct lb_env *env,
6528 struct sd_lb_stats *sds,
6529 struct sched_group *sg,
6530 struct sg_lb_stats *sgs)
6531{
6532 struct sg_lb_stats *busiest = &sds->busiest_stat;
6533
6534 if (sgs->group_type > busiest->group_type)
6535 return true;
6536
6537 if (sgs->group_type < busiest->group_type)
6538 return false;
6539
6540 if (sgs->avg_load <= busiest->avg_load)
6541 return false;
6542
6543
6544 if (!(env->sd->flags & SD_ASYM_PACKING))
6545 return true;
6546
6547
6548
6549
6550
6551
6552 if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
6553 if (!sds->busiest)
6554 return true;
6555
6556 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
6557 return true;
6558 }
6559
6560 return false;
6561}
6562
6563#ifdef CONFIG_NUMA_BALANCING
6564static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6565{
6566 if (sgs->sum_nr_running > sgs->nr_numa_running)
6567 return regular;
6568 if (sgs->sum_nr_running > sgs->nr_preferred_running)
6569 return remote;
6570 return all;
6571}
6572
6573static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6574{
6575 if (rq->nr_running > rq->nr_numa_running)
6576 return regular;
6577 if (rq->nr_running > rq->nr_preferred_running)
6578 return remote;
6579 return all;
6580}
6581#else
6582static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6583{
6584 return all;
6585}
6586
6587static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6588{
6589 return regular;
6590}
6591#endif
6592
6593
6594
6595
6596
6597
6598static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
6599{
6600 struct sched_domain *child = env->sd->child;
6601 struct sched_group *sg = env->sd->groups;
6602 struct sg_lb_stats tmp_sgs;
6603 int load_idx, prefer_sibling = 0;
6604 bool overload = false;
6605
6606 if (child && child->flags & SD_PREFER_SIBLING)
6607 prefer_sibling = 1;
6608
6609 load_idx = get_sd_load_idx(env->sd, env->idle);
6610
6611 do {
6612 struct sg_lb_stats *sgs = &tmp_sgs;
6613 int local_group;
6614
6615 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
6616 if (local_group) {
6617 sds->local = sg;
6618 sgs = &sds->local_stat;
6619
6620 if (env->idle != CPU_NEWLY_IDLE ||
6621 time_after_eq(jiffies, sg->sgc->next_update))
6622 update_group_capacity(env->sd, env->dst_cpu);
6623 }
6624
6625 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
6626 &overload);
6627
6628 if (local_group)
6629 goto next_group;
6630
6631
6632
6633
6634
6635
6636
6637
6638
6639
6640
6641 if (prefer_sibling && sds->local &&
6642 group_has_capacity(env, &sds->local_stat) &&
6643 (sgs->sum_nr_running > 1)) {
6644 sgs->group_no_capacity = 1;
6645 sgs->group_type = group_overloaded;
6646 }
6647
6648 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
6649 sds->busiest = sg;
6650 sds->busiest_stat = *sgs;
6651 }
6652
6653next_group:
6654
6655 sds->total_load += sgs->group_load;
6656 sds->total_capacity += sgs->group_capacity;
6657
6658 sg = sg->next;
6659 } while (sg != env->sd->groups);
6660
6661 if (env->sd->flags & SD_NUMA)
6662 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
6663
6664 if (!env->sd->parent) {
6665
6666 if (env->dst_rq->rd->overload != overload)
6667 env->dst_rq->rd->overload = overload;
6668 }
6669
6670}
6671
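/*
 * check_asym_packing - for SD_ASYM_PACKING domains, pack work onto the
 * lowest-numbered CPUs: if this (dst) CPU is numbered below the busiest
 * group's first CPU, size the imbalance to pull that group's load here.
 */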
6695static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
6696{
6697 int busiest_cpu;
6698
6699 if (!(env->sd->flags & SD_ASYM_PACKING))
6700 return 0;
6701
6702 if (!sds->busiest)
6703 return 0;
6704
6705 busiest_cpu = group_first_cpu(sds->busiest);
6706 if (env->dst_cpu > busiest_cpu)
6707 return 0;
6708
6709 env->imbalance = DIV_ROUND_CLOSEST(
6710 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
6711 SCHED_CAPACITY_SCALE);
6712
6713 return 1;
6714}
6715
6716
6717
6718
6719
6720
6721
6722
6723static inline
6724void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6725{
6726 unsigned long tmp, capa_now = 0, capa_move = 0;
6727 unsigned int imbn = 2;
6728 unsigned long scaled_busy_load_per_task;
6729 struct sg_lb_stats *local, *busiest;
6730
6731 local = &sds->local_stat;
6732 busiest = &sds->busiest_stat;
6733
6734 if (!local->sum_nr_running)
6735 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
6736 else if (busiest->load_per_task > local->load_per_task)
6737 imbn = 1;
6738
6739 scaled_busy_load_per_task =
6740 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
6741 busiest->group_capacity;
6742
6743 if (busiest->avg_load + scaled_busy_load_per_task >=
6744 local->avg_load + (scaled_busy_load_per_task * imbn)) {
6745 env->imbalance = busiest->load_per_task;
6746 return;
6747 }
6748
6749
6750
6751
6752
6753
6754
6755 capa_now += busiest->group_capacity *
6756 min(busiest->load_per_task, busiest->avg_load);
6757 capa_now += local->group_capacity *
6758 min(local->load_per_task, local->avg_load);
6759 capa_now /= SCHED_CAPACITY_SCALE;
6760
6761
6762 if (busiest->avg_load > scaled_busy_load_per_task) {
6763 capa_move += busiest->group_capacity *
6764 min(busiest->load_per_task,
6765 busiest->avg_load - scaled_busy_load_per_task);
6766 }
6767
6768
6769 if (busiest->avg_load * busiest->group_capacity <
6770 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
6771 tmp = (busiest->avg_load * busiest->group_capacity) /
6772 local->group_capacity;
6773 } else {
6774 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
6775 local->group_capacity;
6776 }
6777 capa_move += local->group_capacity *
6778 min(local->load_per_task, local->avg_load + tmp);
6779 capa_move /= SCHED_CAPACITY_SCALE;
6780
6781
6782 if (capa_move > capa_now)
6783 env->imbalance = busiest->load_per_task;
6784}
6785
6786
6787
6788
6789
6790
6791
6792static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6793{
6794 unsigned long max_pull, load_above_capacity = ~0UL;
6795 struct sg_lb_stats *local, *busiest;
6796
6797 local = &sds->local_stat;
6798 busiest = &sds->busiest_stat;
6799
6800 if (busiest->group_type == group_imbalanced) {
6801
6802
6803
6804
6805 busiest->load_per_task =
6806 min(busiest->load_per_task, sds->avg_load);
6807 }
6808
6809
6810
6811
6812
6813
6814 if (busiest->avg_load <= sds->avg_load ||
6815 local->avg_load >= sds->avg_load) {
6816 env->imbalance = 0;
6817 return fix_small_imbalance(env, sds);
6818 }
6819
6820
6821
6822
6823 if (busiest->group_type == group_overloaded &&
6824 local->group_type == group_overloaded) {
6825 load_above_capacity = busiest->sum_nr_running *
6826 SCHED_LOAD_SCALE;
6827 if (load_above_capacity > busiest->group_capacity)
6828 load_above_capacity -= busiest->group_capacity;
6829 else
6830 load_above_capacity = ~0UL;
6831 }
6832
6833
6834
6835
6836
6837
6838
6839
6840
6841 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
6842
6843
6844 env->imbalance = min(
6845 max_pull * busiest->group_capacity,
6846 (sds->avg_load - local->avg_load) * local->group_capacity
6847 ) / SCHED_CAPACITY_SCALE;
6848
6849
6850
6851
6852
6853
6854
6855 if (env->imbalance < busiest->load_per_task)
6856 return fix_small_imbalance(env, sds);
6857}
6858
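/*
 * find_busiest_group - return the busiest group within the sched_domain if
 * there is an imbalance worth correcting, and set env->imbalance to the
 * amount of load to move; return NULL when the domain is balanced.
 */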
6878static struct sched_group *find_busiest_group(struct lb_env *env)
6879{
6880 struct sg_lb_stats *local, *busiest;
6881 struct sd_lb_stats sds;
6882
6883 init_sd_lb_stats(&sds);
6884
6885
6886
6887
6888
6889 update_sd_lb_stats(env, &sds);
6890 local = &sds.local_stat;
6891 busiest = &sds.busiest_stat;
6892
6893
6894 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
6895 check_asym_packing(env, &sds))
6896 return sds.busiest;
6897
6898
6899 if (!sds.busiest || busiest->sum_nr_running == 0)
6900 goto out_balanced;
6901
6902 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
6903 / sds.total_capacity;
6904
6905
6906
6907
6908
6909
6910 if (busiest->group_type == group_imbalanced)
6911 goto force_balance;
6912
6913
6914 if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
6915 busiest->group_no_capacity)
6916 goto force_balance;
6917
6918
6919
6920
6921
6922 if (local->avg_load >= busiest->avg_load)
6923 goto out_balanced;
6924
6925
6926
6927
6928
6929 if (local->avg_load >= sds.avg_load)
6930 goto out_balanced;
6931
6932 if (env->idle == CPU_IDLE) {
6933
6934
6935
6936
6937
6938
6939
6940 if ((busiest->group_type != group_overloaded) &&
6941 (local->idle_cpus <= (busiest->idle_cpus + 1)))
6942 goto out_balanced;
6943 } else {
6944
6945
6946
6947
6948 if (100 * busiest->avg_load <=
6949 env->sd->imbalance_pct * local->avg_load)
6950 goto out_balanced;
6951 }
6952
6953force_balance:
6954
6955 calculate_imbalance(env, &sds);
6956 return sds.busiest;
6957
6958out_balanced:
6959 env->imbalance = 0;
6960 return NULL;
6961}
6962
6963
6964
6965
6966static struct rq *find_busiest_queue(struct lb_env *env,
6967 struct sched_group *group)
6968{
6969 struct rq *busiest = NULL, *rq;
6970 unsigned long busiest_load = 0, busiest_capacity = 1;
6971 int i;
6972
6973 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6974 unsigned long capacity, wl;
6975 enum fbq_type rt;
6976
6977 rq = cpu_rq(i);
6978 rt = fbq_classify_rq(rq);
6979
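		/*
		 * Skip runqueues whose tasks are better placed (NUMA-wise)
		 * than what this balance pass targets: e.g. while the busiest
		 * group still has non-NUMA tasks to move, don't pull from a
		 * queue holding only properly placed NUMA tasks.
		 */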
6999 if (rt > env->fbq_type)
7000 continue;
7001
7002 capacity = capacity_of(i);
7003
7004 wl = weighted_cpuload(i);
7005
7006
7007
7008
7009
7010
7011 if (rq->nr_running == 1 && wl > env->imbalance &&
7012 !check_cpu_capacity(rq, env->sd))
7013 continue;
7014
7015
7016
7017
7018
7019
7020
7021
7022
7023
7024
7025
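		/*
		 * Pick the runqueue with the highest load relative to its
		 * capacity; the cross-multiplication compares wl/capacity
		 * ratios without doing a division.
		 */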
7026 if (wl * busiest_capacity > busiest_load * capacity) {
7027 busiest_load = wl;
7028 busiest_capacity = capacity;
7029 busiest = rq;
7030 }
7031 }
7032
7033 return busiest;
7034}
7035
7036
7037
7038
7039
7040#define MAX_PINNED_INTERVAL 512
7041
7042
7043DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
7044
7045static int need_active_balance(struct lb_env *env)
7046{
7047 struct sched_domain *sd = env->sd;
7048
7049 if (env->idle == CPU_NEWLY_IDLE) {
7050
7051
7052
7053
7054
7055
7056 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
7057 return 1;
7058 }
7059
7060
7061
7062
7063
7064
7065
7066 if ((env->idle != CPU_NOT_IDLE) &&
7067 (env->src_rq->cfs.h_nr_running == 1)) {
7068 if ((check_cpu_capacity(env->src_rq, sd)) &&
7069 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
7070 return 1;
7071 }
7072
7073 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
7074}
7075
7076static int active_load_balance_cpu_stop(void *data);
7077
7078static int should_we_balance(struct lb_env *env)
7079{
7080 struct sched_group *sg = env->sd->groups;
7081 struct cpumask *sg_cpus, *sg_mask;
7082 int cpu, balance_cpu = -1;
7083
7084
7085
7086
7087
7088 if (env->idle == CPU_NEWLY_IDLE)
7089 return 1;
7090
7091 sg_cpus = sched_group_cpus(sg);
7092 sg_mask = sched_group_mask(sg);
7093
7094 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
7095 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
7096 continue;
7097
7098 balance_cpu = cpu;
7099 break;
7100 }
7101
7102 if (balance_cpu == -1)
7103 balance_cpu = group_balance_cpu(sg);
7104
7105
7106
7107
7108
7109 return balance_cpu == env->dst_cpu;
7110}
7111
7112
7113
7114
7115
7116static int load_balance(int this_cpu, struct rq *this_rq,
7117 struct sched_domain *sd, enum cpu_idle_type idle,
7118 int *continue_balancing)
7119{
7120 int ld_moved, cur_ld_moved, active_balance = 0;
7121 struct sched_domain *sd_parent = sd->parent;
7122 struct sched_group *group;
7123 struct rq *busiest;
7124 unsigned long flags;
7125 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
7126
7127 struct lb_env env = {
7128 .sd = sd,
7129 .dst_cpu = this_cpu,
7130 .dst_rq = this_rq,
7131 .dst_grpmask = sched_group_cpus(sd->groups),
7132 .idle = idle,
7133 .loop_break = sched_nr_migrate_break,
7134 .cpus = cpus,
7135 .fbq_type = all,
7136 .tasks = LIST_HEAD_INIT(env.tasks),
7137 };
7138
7139
7140
7141
7142
7143 if (idle == CPU_NEWLY_IDLE)
7144 env.dst_grpmask = NULL;
7145
7146 cpumask_copy(cpus, cpu_active_mask);
7147
7148 schedstat_inc(sd, lb_count[idle]);
7149
7150redo:
7151 if (!should_we_balance(&env)) {
7152 *continue_balancing = 0;
7153 goto out_balanced;
7154 }
7155
7156 group = find_busiest_group(&env);
7157 if (!group) {
7158 schedstat_inc(sd, lb_nobusyg[idle]);
7159 goto out_balanced;
7160 }
7161
7162 busiest = find_busiest_queue(&env, group);
7163 if (!busiest) {
7164 schedstat_inc(sd, lb_nobusyq[idle]);
7165 goto out_balanced;
7166 }
7167
7168 BUG_ON(busiest == env.dst_rq);
7169
7170 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
7171
7172 env.src_cpu = busiest->cpu;
7173 env.src_rq = busiest;
7174
7175 ld_moved = 0;
7176 if (busiest->nr_running > 1) {
7177
7178
7179
7180
7181
7182
7183 env.flags |= LBF_ALL_PINNED;
7184 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
7185
7186more_balance:
7187 raw_spin_lock_irqsave(&busiest->lock, flags);
7188
7189
7190
7191
7192
7193 cur_ld_moved = detach_tasks(&env);
7194
7195
7196
7197
7198
7199
7200
7201
7202
7203 raw_spin_unlock(&busiest->lock);
7204
7205 if (cur_ld_moved) {
7206 attach_tasks(&env);
7207 ld_moved += cur_ld_moved;
7208 }
7209
7210 local_irq_restore(flags);
7211
7212 if (env.flags & LBF_NEED_BREAK) {
7213 env.flags &= ~LBF_NEED_BREAK;
7214 goto more_balance;
7215 }
7216
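		/*
		 * Some tasks could not move to dst_cpu because of affinity,
		 * but another CPU in our sched_group can take them
		 * (env.new_dst_cpu, recorded via LBF_DST_PINNED). Retry the
		 * balance with that CPU as the destination, excluding the
		 * old dst_cpu from the candidate mask.
		 */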
7236 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
7237
7238
7239 cpumask_clear_cpu(env.dst_cpu, env.cpus);
7240
7241 env.dst_rq = cpu_rq(env.new_dst_cpu);
7242 env.dst_cpu = env.new_dst_cpu;
7243 env.flags &= ~LBF_DST_PINNED;
7244 env.loop = 0;
7245 env.loop_break = sched_nr_migrate_break;
7246
7247
7248
7249
7250
7251 goto more_balance;
7252 }
7253
7254
7255
7256
7257 if (sd_parent) {
7258 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7259
7260 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
7261 *group_imbalance = 1;
7262 }
7263
7264
7265 if (unlikely(env.flags & LBF_ALL_PINNED)) {
7266 cpumask_clear_cpu(cpu_of(busiest), cpus);
7267 if (!cpumask_empty(cpus)) {
7268 env.loop = 0;
7269 env.loop_break = sched_nr_migrate_break;
7270 goto redo;
7271 }
7272 goto out_all_pinned;
7273 }
7274 }
7275
7276 if (!ld_moved) {
7277 schedstat_inc(sd, lb_failed[idle]);
7278
7279
7280
7281
7282
7283
7284 if (idle != CPU_NEWLY_IDLE)
7285 sd->nr_balance_failed++;
7286
7287 if (need_active_balance(&env)) {
7288 raw_spin_lock_irqsave(&busiest->lock, flags);
7289
7290
7291
7292
7293
7294 if (!cpumask_test_cpu(this_cpu,
7295 tsk_cpus_allowed(busiest->curr))) {
7296 raw_spin_unlock_irqrestore(&busiest->lock,
7297 flags);
7298 env.flags |= LBF_ALL_PINNED;
7299 goto out_one_pinned;
7300 }
7301
7302
7303
7304
7305
7306
7307 if (!busiest->active_balance) {
7308 busiest->active_balance = 1;
7309 busiest->push_cpu = this_cpu;
7310 active_balance = 1;
7311 }
7312 raw_spin_unlock_irqrestore(&busiest->lock, flags);
7313
7314 if (active_balance) {
7315 stop_one_cpu_nowait(cpu_of(busiest),
7316 active_load_balance_cpu_stop, busiest,
7317 &busiest->active_balance_work);
7318 }
7319
7320
7321
7322
7323
7324 sd->nr_balance_failed = sd->cache_nice_tries+1;
7325 }
7326 } else
7327 sd->nr_balance_failed = 0;
7328
7329 if (likely(!active_balance)) {
7330
7331 sd->balance_interval = sd->min_interval;
7332 } else {
7333
7334
7335
7336
7337
7338
7339 if (sd->balance_interval < sd->max_interval)
7340 sd->balance_interval *= 2;
7341 }
7342
7343 goto out;
7344
7345out_balanced:
7346
7347
7348
7349
7350 if (sd_parent) {
7351 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7352
7353 if (*group_imbalance)
7354 *group_imbalance = 0;
7355 }
7356
7357out_all_pinned:
7358
7359
7360
7361
7362
7363 schedstat_inc(sd, lb_balanced[idle]);
7364
7365 sd->nr_balance_failed = 0;
7366
7367out_one_pinned:
7368
7369 if (((env.flags & LBF_ALL_PINNED) &&
7370 sd->balance_interval < MAX_PINNED_INTERVAL) ||
7371 (sd->balance_interval < sd->max_interval))
7372 sd->balance_interval *= 2;
7373
7374 ld_moved = 0;
7375out:
7376 return ld_moved;
7377}
7378
7379static inline unsigned long
7380get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
7381{
7382 unsigned long interval = sd->balance_interval;
7383
7384 if (cpu_busy)
7385 interval *= sd->busy_factor;
7386
7387
7388 interval = msecs_to_jiffies(interval);
7389 interval = clamp(interval, 1UL, max_load_balance_interval);
7390
7391 return interval;
7392}
7393
7394static inline void
7395update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
7396{
7397 unsigned long interval, next;
7398
7399 interval = get_sd_balance_interval(sd, cpu_busy);
7400 next = sd->last_balance + interval;
7401
7402 if (time_after(*next_balance, next))
7403 *next_balance = next;
7404}
7405
7406
7407
7408
7409
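/*
 * idle_balance() is called when this_rq is about to become idle: try to
 * pull work from another CPU. Returns >0 if a task was pulled, <0 if a
 * task from a higher scheduling class appeared while rq->lock was dropped
 * (so the caller must retry the pick), and 0 otherwise.
 */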
7410static int idle_balance(struct rq *this_rq)
7411{
7412 unsigned long next_balance = jiffies + HZ;
7413 int this_cpu = this_rq->cpu;
7414 struct sched_domain *sd;
7415 int pulled_task = 0;
7416 u64 curr_cost = 0;
7417
7418 idle_enter_fair(this_rq);
7419
7420
7421
7422
7423
7424 this_rq->idle_stamp = rq_clock(this_rq);
7425
7426 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
7427 !this_rq->rd->overload) {
7428 rcu_read_lock();
7429 sd = rcu_dereference_check_sched_domain(this_rq->sd);
7430 if (sd)
7431 update_next_balance(sd, 0, &next_balance);
7432 rcu_read_unlock();
7433
7434 goto out;
7435 }
7436
7437 raw_spin_unlock(&this_rq->lock);
7438
7439 update_blocked_averages(this_cpu);
7440 rcu_read_lock();
7441 for_each_domain(this_cpu, sd) {
7442 int continue_balancing = 1;
7443 u64 t0, domain_cost;
7444
7445 if (!(sd->flags & SD_LOAD_BALANCE))
7446 continue;
7447
7448 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
7449 update_next_balance(sd, 0, &next_balance);
7450 break;
7451 }
7452
7453 if (sd->flags & SD_BALANCE_NEWIDLE) {
7454 t0 = sched_clock_cpu(this_cpu);
7455
7456 pulled_task = load_balance(this_cpu, this_rq,
7457 sd, CPU_NEWLY_IDLE,
7458 &continue_balancing);
7459
7460 domain_cost = sched_clock_cpu(this_cpu) - t0;
7461 if (domain_cost > sd->max_newidle_lb_cost)
7462 sd->max_newidle_lb_cost = domain_cost;
7463
7464 curr_cost += domain_cost;
7465 }
7466
7467 update_next_balance(sd, 0, &next_balance);
7468
7469
7470
7471
7472
7473 if (pulled_task || this_rq->nr_running > 0)
7474 break;
7475 }
7476 rcu_read_unlock();
7477
7478 raw_spin_lock(&this_rq->lock);
7479
7480 if (curr_cost > this_rq->max_idle_balance_cost)
7481 this_rq->max_idle_balance_cost = curr_cost;
7482
7483
7484
7485
7486
7487
7488 if (this_rq->cfs.h_nr_running && !pulled_task)
7489 pulled_task = 1;
7490
7491out:
7492
7493 if (time_after(this_rq->next_balance, next_balance))
7494 this_rq->next_balance = next_balance;
7495
7496
7497 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
7498 pulled_task = -1;
7499
7500 if (pulled_task) {
7501 idle_exit_fair(this_rq);
7502 this_rq->idle_stamp = 0;
7503 }
7504
7505 return pulled_task;
7506}
7507
7508
7509
7510
7511
7512
7513
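/*
 * active_load_balance_cpu_stop is run by the CPU stopper on the busiest
 * CPU. Because the stopper preempts whatever was running there, the
 * previously running task becomes migratable and can be pushed to the
 * target CPU (busiest_rq->push_cpu), typically the CPU that just failed
 * to pull load the normal way.
 */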
7514static int active_load_balance_cpu_stop(void *data)
7515{
7516 struct rq *busiest_rq = data;
7517 int busiest_cpu = cpu_of(busiest_rq);
7518 int target_cpu = busiest_rq->push_cpu;
7519 struct rq *target_rq = cpu_rq(target_cpu);
7520 struct sched_domain *sd;
7521 struct task_struct *p = NULL;
7522
7523 raw_spin_lock_irq(&busiest_rq->lock);
7524
7525
7526 if (unlikely(busiest_cpu != smp_processor_id() ||
7527 !busiest_rq->active_balance))
7528 goto out_unlock;
7529
7530
7531 if (busiest_rq->nr_running <= 1)
7532 goto out_unlock;
7533
7534
7535
7536
7537
7538
7539 BUG_ON(busiest_rq == target_rq);
7540
7541
7542 rcu_read_lock();
7543 for_each_domain(target_cpu, sd) {
7544 if ((sd->flags & SD_LOAD_BALANCE) &&
7545 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
7546 break;
7547 }
7548
7549 if (likely(sd)) {
7550 struct lb_env env = {
7551 .sd = sd,
7552 .dst_cpu = target_cpu,
7553 .dst_rq = target_rq,
7554 .src_cpu = busiest_rq->cpu,
7555 .src_rq = busiest_rq,
7556 .idle = CPU_IDLE,
7557 };
7558
7559 schedstat_inc(sd, alb_count);
7560
7561 p = detach_one_task(&env);
7562 if (p)
7563 schedstat_inc(sd, alb_pushed);
7564 else
7565 schedstat_inc(sd, alb_failed);
7566 }
7567 rcu_read_unlock();
7568out_unlock:
7569 busiest_rq->active_balance = 0;
7570 raw_spin_unlock(&busiest_rq->lock);
7571
7572 if (p)
7573 attach_one_task(target_rq, p);
7574
7575 local_irq_enable();
7576
7577 return 0;
7578}
7579
7580static inline int on_null_domain(struct rq *rq)
7581{
7582 return unlikely(!rcu_dereference_sched(rq->sd));
7583}
7584
7585#ifdef CONFIG_NO_HZ_COMMON
7586
7587
7588
7589
7590
7591
7592static struct {
7593 cpumask_var_t idle_cpus_mask;
7594 atomic_t nr_cpus;
7595 unsigned long next_balance;
7596} nohz ____cacheline_aligned;
7597
7598static inline int find_new_ilb(void)
7599{
7600 int ilb = cpumask_first(nohz.idle_cpus_mask);
7601
7602 if (ilb < nr_cpu_ids && idle_cpu(ilb))
7603 return ilb;
7604
7605 return nr_cpu_ids;
7606}
7607
7608
7609
7610
7611
7612
7613static void nohz_balancer_kick(void)
7614{
7615 int ilb_cpu;
7616
7617 nohz.next_balance++;
7618
7619 ilb_cpu = find_new_ilb();
7620
7621 if (ilb_cpu >= nr_cpu_ids)
7622 return;
7623
7624 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
7625 return;
7626
7627
7628
7629
7630
7631
7632 smp_send_reschedule(ilb_cpu);
7633 return;
7634}
7635
7636static inline void nohz_balance_exit_idle(int cpu)
7637{
7638 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
7639
7640
7641
7642 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
7643 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
7644 atomic_dec(&nohz.nr_cpus);
7645 }
7646 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7647 }
7648}
7649
7650static inline void set_cpu_sd_state_busy(void)
7651{
7652 struct sched_domain *sd;
7653 int cpu = smp_processor_id();
7654
7655 rcu_read_lock();
7656 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7657
7658 if (!sd || !sd->nohz_idle)
7659 goto unlock;
7660 sd->nohz_idle = 0;
7661
7662 atomic_inc(&sd->groups->sgc->nr_busy_cpus);
7663unlock:
7664 rcu_read_unlock();
7665}
7666
7667void set_cpu_sd_state_idle(void)
7668{
7669 struct sched_domain *sd;
7670 int cpu = smp_processor_id();
7671
7672 rcu_read_lock();
7673 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7674
7675 if (!sd || sd->nohz_idle)
7676 goto unlock;
7677 sd->nohz_idle = 1;
7678
7679 atomic_dec(&sd->groups->sgc->nr_busy_cpus);
7680unlock:
7681 rcu_read_unlock();
7682}
7683
7684
7685
7686
7687
7688void nohz_balance_enter_idle(int cpu)
7689{
7690
7691
7692
7693 if (!cpu_active(cpu))
7694 return;
7695
7696 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
7697 return;
7698
7699
7700
7701
7702 if (on_null_domain(cpu_rq(cpu)))
7703 return;
7704
7705 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
7706 atomic_inc(&nohz.nr_cpus);
7707 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7708}
7709
7710static int sched_ilb_notifier(struct notifier_block *nfb,
7711 unsigned long action, void *hcpu)
7712{
7713 switch (action & ~CPU_TASKS_FROZEN) {
7714 case CPU_DYING:
7715 nohz_balance_exit_idle(smp_processor_id());
7716 return NOTIFY_OK;
7717 default:
7718 return NOTIFY_DONE;
7719 }
7720}
7721#endif
7722
7723static DEFINE_SPINLOCK(balancing);
7724
7725
7726
7727
7728
7729void update_max_interval(void)
7730{
7731 max_load_balance_interval = HZ*num_online_cpus()/10;
7732}
7733
7734
7735
7736
7737
7738
7739
7740static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
7741{
7742 int continue_balancing = 1;
7743 int cpu = rq->cpu;
7744 unsigned long interval;
7745 struct sched_domain *sd;
7746
7747 unsigned long next_balance = jiffies + 60*HZ;
7748 int update_next_balance = 0;
7749 int need_serialize, need_decay = 0;
7750 u64 max_cost = 0;
7751
7752 update_blocked_averages(cpu);
7753
7754 rcu_read_lock();
7755 for_each_domain(cpu, sd) {
7756
7757
7758
7759
7760 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
7761 sd->max_newidle_lb_cost =
7762 (sd->max_newidle_lb_cost * 253) / 256;
7763 sd->next_decay_max_lb_cost = jiffies + HZ;
7764 need_decay = 1;
7765 }
7766 max_cost += sd->max_newidle_lb_cost;
7767
7768 if (!(sd->flags & SD_LOAD_BALANCE))
7769 continue;
7770
7771
7772
7773
7774
7775
7776 if (!continue_balancing) {
7777 if (need_decay)
7778 continue;
7779 break;
7780 }
7781
7782 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
7783
7784 need_serialize = sd->flags & SD_SERIALIZE;
7785 if (need_serialize) {
7786 if (!spin_trylock(&balancing))
7787 goto out;
7788 }
7789
7790 if (time_after_eq(jiffies, sd->last_balance + interval)) {
7791 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
7792
7793
7794
7795
7796
7797 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
7798 }
7799 sd->last_balance = jiffies;
7800 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
7801 }
7802 if (need_serialize)
7803 spin_unlock(&balancing);
7804out:
7805 if (time_after(next_balance, sd->last_balance + interval)) {
7806 next_balance = sd->last_balance + interval;
7807 update_next_balance = 1;
7808 }
7809 }
7810 if (need_decay) {
7811
7812
7813
7814
7815 rq->max_idle_balance_cost =
7816 max((u64)sysctl_sched_migration_cost, max_cost);
7817 }
7818 rcu_read_unlock();
7819
7820
7821
7822
7823
7824
7825 if (likely(update_next_balance))
7826 rq->next_balance = next_balance;
7827}
7828
7829#ifdef CONFIG_NO_HZ_COMMON
7830
7831
7832
7833
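/*
 * When this CPU has been kicked (NOHZ_BALANCE_KICK), rebalance on behalf of
 * all tickless idle CPUs whose next_balance time has passed, walking
 * nohz.idle_cpus_mask.
 */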
7834static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
7835{
7836 int this_cpu = this_rq->cpu;
7837 struct rq *rq;
7838 int balance_cpu;
7839
7840 if (idle != CPU_IDLE ||
7841 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
7842 goto end;
7843
7844 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
7845 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
7846 continue;
7847
7848
7849
7850
7851
7852
7853 if (need_resched())
7854 break;
7855
7856 rq = cpu_rq(balance_cpu);
7857
7858
7859
7860
7861
7862 if (time_after_eq(jiffies, rq->next_balance)) {
7863 raw_spin_lock_irq(&rq->lock);
7864 update_rq_clock(rq);
7865 update_idle_cpu_load(rq);
7866 raw_spin_unlock_irq(&rq->lock);
7867 rebalance_domains(rq, CPU_IDLE);
7868 }
7869
7870 if (time_after(this_rq->next_balance, rq->next_balance))
7871 this_rq->next_balance = rq->next_balance;
7872 }
7873 nohz.next_balance = this_rq->next_balance;
7874end:
7875 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
7876}
7877
7878
7879
7880
7881
7882
7883
7884
7885
7886
7887
7888
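/*
 * Does the busy current CPU need to kick the nohz idle balancer? Yes when
 * it has two or more runnable tasks, when more than one CPU in its
 * package-sharing domain is busy, when its capacity is significantly
 * reduced by RT/IRQ pressure, or when an SD_ASYM_PACKING domain has a
 * lower-numbered idle CPU available.
 */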
7889static inline bool nohz_kick_needed(struct rq *rq)
7890{
7891 unsigned long now = jiffies;
7892 struct sched_domain *sd;
7893 struct sched_group_capacity *sgc;
7894 int nr_busy, cpu = rq->cpu;
7895 bool kick = false;
7896
7897 if (unlikely(rq->idle_balance))
7898 return false;
7899
7900
7901
7902
7903
7904 set_cpu_sd_state_busy();
7905 nohz_balance_exit_idle(cpu);
7906
7907
7908
7909
7910
7911 if (likely(!atomic_read(&nohz.nr_cpus)))
7912 return false;
7913
7914 if (time_before(now, nohz.next_balance))
7915 return false;
7916
7917 if (rq->nr_running >= 2)
7918 return true;
7919
7920 rcu_read_lock();
7921 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7922 if (sd) {
7923 sgc = sd->groups->sgc;
7924 nr_busy = atomic_read(&sgc->nr_busy_cpus);
7925
7926 if (nr_busy > 1) {
7927 kick = true;
7928 goto unlock;
7929 }
7930
7931 }
7932
7933 sd = rcu_dereference(rq->sd);
7934 if (sd) {
7935 if ((rq->cfs.h_nr_running >= 1) &&
7936 check_cpu_capacity(rq, sd)) {
7937 kick = true;
7938 goto unlock;
7939 }
7940 }
7941
7942 sd = rcu_dereference(per_cpu(sd_asym, cpu));
7943 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
7944 sched_domain_span(sd)) < cpu)) {
7945 kick = true;
7946 goto unlock;
7947 }
7948
7949unlock:
7950 rcu_read_unlock();
7951 return kick;
7952}
7953#else
7954static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
7955#endif
7956
7957
7958
7959
7960
7961static void run_rebalance_domains(struct softirq_action *h)
7962{
7963 struct rq *this_rq = this_rq();
7964 enum cpu_idle_type idle = this_rq->idle_balance ?
7965 CPU_IDLE : CPU_NOT_IDLE;
7966
7967
7968
7969
7970
7971
7972
7973
7974
7975 nohz_idle_balance(this_rq, idle);
7976 rebalance_domains(this_rq, idle);
7977}
7978
7979
7980
7981
7982void trigger_load_balance(struct rq *rq)
7983{
7984
7985 if (unlikely(on_null_domain(rq)))
7986 return;
7987
7988 if (time_after_eq(jiffies, rq->next_balance))
7989 raise_softirq(SCHED_SOFTIRQ);
7990#ifdef CONFIG_NO_HZ_COMMON
7991 if (nohz_kick_needed(rq))
7992 nohz_balancer_kick();
7993#endif
7994}
7995
7996static void rq_online_fair(struct rq *rq)
7997{
7998 update_sysctl();
7999
8000 update_runtime_enabled(rq);
8001}
8002
8003static void rq_offline_fair(struct rq *rq)
8004{
8005 update_sysctl();
8006
8007
8008 unthrottle_offline_cfs_rqs(rq);
8009}
8010
8011#endif
8012
8013
8014
8015
8016static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
8017{
8018 struct cfs_rq *cfs_rq;
8019 struct sched_entity *se = &curr->se;
8020
8021 for_each_sched_entity(se) {
8022 cfs_rq = cfs_rq_of(se);
8023 entity_tick(cfs_rq, se, queued);
8024 }
8025
8026 if (numabalancing_enabled)
8027 task_tick_numa(rq, curr);
8028
8029 update_rq_runnable_avg(rq, 1);
8030}
8031
8032
8033
8034
8035
8036
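/*
 * Called at fork: place the new task's entity relative to its parent's
 * cfs_rq and, if sysctl_sched_child_runs_first is set, swap vruntimes so
 * the child runs before the parent.
 */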
8037static void task_fork_fair(struct task_struct *p)
8038{
8039 struct cfs_rq *cfs_rq;
8040 struct sched_entity *se = &p->se, *curr;
8041 int this_cpu = smp_processor_id();
8042 struct rq *rq = this_rq();
8043 unsigned long flags;
8044
8045 raw_spin_lock_irqsave(&rq->lock, flags);
8046
8047 update_rq_clock(rq);
8048
8049 cfs_rq = task_cfs_rq(current);
8050 curr = cfs_rq->curr;
8051
8052
8053
8054
8055
8056
8057
8058 rcu_read_lock();
8059 __set_task_cpu(p, this_cpu);
8060 rcu_read_unlock();
8061
8062 update_curr(cfs_rq);
8063
8064 if (curr)
8065 se->vruntime = curr->vruntime;
8066 place_entity(cfs_rq, se, 1);
8067
8068 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
8069
8070
8071
8072
8073 swap(curr->vruntime, se->vruntime);
8074 resched_curr(rq);
8075 }
8076
8077 se->vruntime -= cfs_rq->min_vruntime;
8078
8079 raw_spin_unlock_irqrestore(&rq->lock, flags);
8080}
8081
8082
8083
8084
8085
8086static void
8087prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
8088{
8089 if (!task_on_rq_queued(p))
8090 return;
8091
8092
8093
8094
8095
8096
8097 if (rq->curr == p) {
8098 if (p->prio > oldprio)
8099 resched_curr(rq);
8100 } else
8101 check_preempt_curr(rq, p, 0);
8102}
8103
8104static void switched_from_fair(struct rq *rq, struct task_struct *p)
8105{
8106 struct sched_entity *se = &p->se;
8107 struct cfs_rq *cfs_rq = cfs_rq_of(se);
8108
8109
8110
8111
8112
8113
8114
8115
8116
8117
8118 if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
8119
8120
8121
8122
8123 place_entity(cfs_rq, se, 0);
8124 se->vruntime -= cfs_rq->min_vruntime;
8125 }
8126
8127#ifdef CONFIG_SMP
8128
8129
8130
8131
8132
8133 if (se->avg.decay_count) {
8134 __synchronize_entity_decay(se);
8135 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
8136 }
8137#endif
8138}
8139
8140
8141
8142
8143static void switched_to_fair(struct rq *rq, struct task_struct *p)
8144{
8145#ifdef CONFIG_FAIR_GROUP_SCHED
8146 struct sched_entity *se = &p->se;
8147
8148
8149
8150
8151 se->depth = se->parent ? se->parent->depth + 1 : 0;
8152#endif
8153 if (!task_on_rq_queued(p))
8154 return;
8155
8156
8157
8158
8159
8160
8161 if (rq->curr == p)
8162 resched_curr(rq);
8163 else
8164 check_preempt_curr(rq, p, 0);
8165}
8166
8167
8168
8169
8170
8171
8172static void set_curr_task_fair(struct rq *rq)
8173{
8174 struct sched_entity *se = &rq->curr->se;
8175
8176 for_each_sched_entity(se) {
8177 struct cfs_rq *cfs_rq = cfs_rq_of(se);
8178
8179 set_next_entity(cfs_rq, se);
8180
8181 account_cfs_rq_runtime(cfs_rq, 0);
8182 }
8183}
8184
8185void init_cfs_rq(struct cfs_rq *cfs_rq)
8186{
8187 cfs_rq->tasks_timeline = RB_ROOT;
8188 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
8189#ifndef CONFIG_64BIT
8190 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
8191#endif
8192#ifdef CONFIG_SMP
8193 atomic64_set(&cfs_rq->decay_counter, 1);
8194 atomic_long_set(&cfs_rq->removed_load, 0);
8195#endif
8196}
8197
8198#ifdef CONFIG_FAIR_GROUP_SCHED
8199static void task_move_group_fair(struct task_struct *p, int queued)
8200{
8201 struct sched_entity *se = &p->se;
8202 struct cfs_rq *cfs_rq;
8203
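	/*
	 * A task's vruntime is only meaningful relative to the min_vruntime
	 * of its cfs_rq. For a dequeued task we therefore make vruntime
	 * relative before switching group and re-add the new cfs_rq's
	 * min_vruntime afterwards. Tasks that have never run, or that are
	 * in the middle of waking up, already carry a normalized vruntime
	 * and are treated as queued so it is not adjusted twice.
	 */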
8229 if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
8230 queued = 1;
8231
8232 if (!queued)
8233 se->vruntime -= cfs_rq_of(se)->min_vruntime;
8234 set_task_rq(p, task_cpu(p));
8235 se->depth = se->parent ? se->parent->depth + 1 : 0;
8236 if (!queued) {
8237 cfs_rq = cfs_rq_of(se);
8238 se->vruntime += cfs_rq->min_vruntime;
8239#ifdef CONFIG_SMP
8240
8241
8242
8243
8244
8245 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
8246 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
8247#endif
8248 }
8249}
8250
8251void free_fair_sched_group(struct task_group *tg)
8252{
8253 int i;
8254
8255 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
8256
8257 for_each_possible_cpu(i) {
8258 if (tg->cfs_rq)
8259 kfree(tg->cfs_rq[i]);
8260 if (tg->se)
8261 kfree(tg->se[i]);
8262 }
8263
8264 kfree(tg->cfs_rq);
8265 kfree(tg->se);
8266}
8267
8268int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8269{
8270 struct cfs_rq *cfs_rq;
8271 struct sched_entity *se;
8272 int i;
8273
8274 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8275 if (!tg->cfs_rq)
8276 goto err;
8277 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
8278 if (!tg->se)
8279 goto err;
8280
8281 tg->shares = NICE_0_LOAD;
8282
8283 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8284
8285 for_each_possible_cpu(i) {
8286 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8287 GFP_KERNEL, cpu_to_node(i));
8288 if (!cfs_rq)
8289 goto err;
8290
8291 se = kzalloc_node(sizeof(struct sched_entity),
8292 GFP_KERNEL, cpu_to_node(i));
8293 if (!se)
8294 goto err_free_rq;
8295
8296 init_cfs_rq(cfs_rq);
8297 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
8298 }
8299
8300 return 1;
8301
8302err_free_rq:
8303 kfree(cfs_rq);
8304err:
8305 return 0;
8306}
8307
8308void unregister_fair_sched_group(struct task_group *tg, int cpu)
8309{
8310 struct rq *rq = cpu_rq(cpu);
8311 unsigned long flags;
8312
8313
8314
8315
8316
8317 if (!tg->cfs_rq[cpu]->on_list)
8318 return;
8319
8320 raw_spin_lock_irqsave(&rq->lock, flags);
8321 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
8322 raw_spin_unlock_irqrestore(&rq->lock, flags);
8323}
8324
8325void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8326 struct sched_entity *se, int cpu,
8327 struct sched_entity *parent)
8328{
8329 struct rq *rq = cpu_rq(cpu);
8330
8331 cfs_rq->tg = tg;
8332 cfs_rq->rq = rq;
8333 init_cfs_rq_runtime(cfs_rq);
8334
8335 tg->cfs_rq[cpu] = cfs_rq;
8336 tg->se[cpu] = se;
8337
8338
8339 if (!se)
8340 return;
8341
8342 if (!parent) {
8343 se->cfs_rq = &rq->cfs;
8344 se->depth = 0;
8345 } else {
8346 se->cfs_rq = parent->my_q;
8347 se->depth = parent->depth + 1;
8348 }
8349
8350 se->my_q = cfs_rq;
8351
8352 update_load_set(&se->load, NICE_0_LOAD);
8353 se->parent = parent;
8354}
8355
8356static DEFINE_MUTEX(shares_mutex);
8357
8358int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8359{
8360 int i;
8361 unsigned long flags;
8362
8363
8364
8365
8366 if (!tg->se[0])
8367 return -EINVAL;
8368
8369 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
8370
8371 mutex_lock(&shares_mutex);
8372 if (tg->shares == shares)
8373 goto done;
8374
8375 tg->shares = shares;
8376 for_each_possible_cpu(i) {
8377 struct rq *rq = cpu_rq(i);
8378 struct sched_entity *se;
8379
8380 se = tg->se[i];
8381
8382 raw_spin_lock_irqsave(&rq->lock, flags);
8383
8384
8385 update_rq_clock(rq);
8386 for_each_sched_entity(se)
8387 update_cfs_shares(group_cfs_rq(se));
8388 raw_spin_unlock_irqrestore(&rq->lock, flags);
8389 }
8390
8391done:
8392 mutex_unlock(&shares_mutex);
8393 return 0;
8394}
8395#else
8396
8397void free_fair_sched_group(struct task_group *tg) { }
8398
8399int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8400{
8401 return 1;
8402}
8403
8404void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
8405
8406#endif
8407
8408
8409static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
8410{
8411 struct sched_entity *se = &task->se;
8412 unsigned int rr_interval = 0;
8413
8414
8415
8416
8417
8418 if (rq->cfs.load.weight)
8419 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
8420
8421 return rr_interval;
8422}
8423
8424
8425
8426
8427const struct sched_class fair_sched_class = {
8428 .next = &idle_sched_class,
8429 .enqueue_task = enqueue_task_fair,
8430 .dequeue_task = dequeue_task_fair,
8431 .yield_task = yield_task_fair,
8432 .yield_to_task = yield_to_task_fair,
8433
8434 .check_preempt_curr = check_preempt_wakeup,
8435
8436 .pick_next_task = pick_next_task_fair,
8437 .put_prev_task = put_prev_task_fair,
8438
8439#ifdef CONFIG_SMP
8440 .select_task_rq = select_task_rq_fair,
8441 .migrate_task_rq = migrate_task_rq_fair,
8442
8443 .rq_online = rq_online_fair,
8444 .rq_offline = rq_offline_fair,
8445
8446 .task_waking = task_waking_fair,
8447#endif
8448
8449 .set_curr_task = set_curr_task_fair,
8450 .task_tick = task_tick_fair,
8451 .task_fork = task_fork_fair,
8452
8453 .prio_changed = prio_changed_fair,
8454 .switched_from = switched_from_fair,
8455 .switched_to = switched_to_fair,
8456
8457 .get_rr_interval = get_rr_interval_fair,
8458
8459 .update_curr = update_curr_fair,
8460
8461#ifdef CONFIG_FAIR_GROUP_SCHED
8462 .task_move_group = task_move_group_fair,
8463#endif
8464};
8465
8466#ifdef CONFIG_SCHED_DEBUG
8467void print_cfs_stats(struct seq_file *m, int cpu)
8468{
8469 struct cfs_rq *cfs_rq;
8470
8471 rcu_read_lock();
8472 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
8473 print_cfs_rq(m, cpu, cfs_rq);
8474 rcu_read_unlock();
8475}
8476
8477#ifdef CONFIG_NUMA_BALANCING
8478void show_numa_stats(struct task_struct *p, struct seq_file *m)
8479{
8480 int node;
8481 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
8482
8483 for_each_online_node(node) {
8484 if (p->numa_faults) {
8485 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
8486 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
8487 }
8488 if (p->numa_group) {
8489 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)];
8490 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
8491 }
8492 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
8493 }
8494}
8495#endif
8496#endif
8497
8498__init void init_sched_fair_class(void)
8499{
8500#ifdef CONFIG_SMP
8501 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
8502
8503#ifdef CONFIG_NO_HZ_COMMON
8504 nohz.next_balance = jiffies;
8505 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8506 cpu_notifier(sched_ilb_notifier, 0);
8507#endif
8508#endif
8509
8510}
8511