/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *(1+ilog(ncpus))
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
        = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * is kept equal to sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution and load-balancing
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from the global (tg) pool to the local
 * (per-cpu) pool on each refresh.
 * (default: 5 msec, units: microseconds)
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
        lw->weight += inc;
        lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
        lw->weight -= dec;
        lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
        lw->weight = w;
        lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 */
static int get_update_sysctl_factor(void)
{
        unsigned int cpus = min_t(int, num_online_cpus(), 8);
        unsigned int factor;

        switch (sysctl_sched_tunable_scaling) {
        case SCHED_TUNABLESCALING_NONE:
                factor = 1;
                break;
        case SCHED_TUNABLESCALING_LINEAR:
                factor = cpus;
                break;
        case SCHED_TUNABLESCALING_LOG:
        default:
                factor = 1 + ilog2(cpus);
                break;
        }

        return factor;
}

static void update_sysctl(void)
{
        unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
        (sysctl_##name = (factor) * normalized_sysctl_##name)
        SET_SYSCTL(sched_min_granularity);
        SET_SYSCTL(sched_latency);
        SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
        update_sysctl();
}
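
/*
 * Worked example (added for illustration, not part of the original source):
 * with 8 online CPUs and the default SCHED_TUNABLESCALING_LOG policy the
 * factor is 1 + ilog2(8) = 4, so the effective values become:
 *
 *   sysctl_sched_latency            6ms    * 4 = 24ms
 *   sysctl_sched_min_granularity    0.75ms * 4 = 3ms
 *   sysctl_sched_wakeup_granularity 1ms    * 4 = 4ms
 *
 * num_online_cpus() is clamped to 8 above, so larger machines do not
 * scale these values any further.
 */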

#define WMULT_CONST     (~0U)
#define WMULT_SHIFT     32

static void __update_inv_weight(struct load_weight *lw)
{
        unsigned long w;

        if (likely(lw->inv_weight))
                return;

        w = scale_load_down(lw->weight);

        if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
                lw->inv_weight = 1;
        else if (unlikely(!w))
                lw->inv_weight = WMULT_CONST;
        else
                lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
        u64 fact = scale_load_down(weight);
        int shift = WMULT_SHIFT;

        __update_inv_weight(lw);

        if (unlikely(fact >> 32)) {
                while (fact >> 32) {
                        fact >>= 1;
                        shift--;
                }
        }

        /* hint to use a 32x32->64 mul */
        fact = (u64)(u32)fact * lw->inv_weight;

        while (fact >> 32) {
                fact >>= 1;
                shift--;
        }

        return mul_u64_u32_shr(delta_exec, fact, shift);
}
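
/*
 * Worked example (added for illustration, not part of the original source):
 * suppose delta_exec = 1000000ns, weight = NICE_0_LOAD (1024 after
 * scale_load_down()) and lw->weight = 2048, i.e. the entity owns half of
 * the queue's weight.  Then inv_weight = WMULT_CONST / 2048 and
 *
 *   delta_exec * weight * inv_weight >> 32
 *     ~= 1000000 * 1024 / 2048 = 500000ns,
 *
 * so the entity is charged (or granted) half of the wall-clock delta,
 * matching the exact division delta_exec * weight / lw->weight up to the
 * rounding of the fixed-point inverse.
 */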

const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!entity_is_task(se));
#endif
        return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
                                       int force_update);

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (!cfs_rq->on_list) {
                /*
                 * Ensure we either appear before our parent (if already
                 * enqueued) or force our parent to appear after us when it is
                 * enqueued.  The fact that we always enqueue bottom-up
                 * reduces this to two cases.
                 */
                if (cfs_rq->tg->parent &&
                    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
                        list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                } else {
                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                }

                cfs_rq->on_list = 1;
                /* We should have no load, but we need to update last_decay. */
                update_cfs_rq_blocked_load(cfs_rq, 0);
        }
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (cfs_rq->on_list) {
                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
                cfs_rq->on_list = 0;
        }
}

/* Iterate thr' all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return se->cfs_rq;

        return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
        int se_depth, pse_depth;

        /*
         * Preserve the pecking order: first walk the deeper of the two
         * entities up its hierarchy until both are at the same depth,
         * then walk both up in lockstep until they land in the same
         * group (cfs_rq), i.e. until we find their common ancestor.
         */
        se_depth = (*se)->depth;
        pse_depth = (*pse)->depth;

        while (se_depth > pse_depth) {
                se_depth--;
                *se = parent_entity(*se);
        }

        while (pse_depth > se_depth) {
                pse_depth--;
                *pse = parent_entity(*pse);
        }

        while (!is_same_group(*se, *pse)) {
                *se = parent_entity(*se);
                *pse = parent_entity(*pse);
        }
}

#else

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - max_vruntime);
        if (delta > 0)
                max_vruntime = vruntime;

        return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
                                struct sched_entity *b)
{
        return (s64)(a->vruntime - b->vruntime) < 0;
}
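
/*
 * Note (added for clarity): vruntime comparisons are done on the signed
 * difference rather than directly on the u64 values, so they keep giving
 * the right answer even after the 64-bit vruntime counters wrap around,
 * as long as the two values being compared are within 2^63 of each other.
 */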

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
        u64 vruntime = cfs_rq->min_vruntime;

        if (cfs_rq->curr)
                vruntime = cfs_rq->curr->vruntime;

        if (cfs_rq->rb_leftmost) {
                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
                                                   struct sched_entity,
                                                   run_node);

                if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
        }

        /* ensure we never gain time by being placed backwards. */
        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
        smp_wmb();
        cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We dont care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (entity_before(se, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node) {
                struct rb_node *next_node;

                next_node = rb_next(&se->run_node);
                cfs_rq->rb_leftmost = next_node;
        }

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *left = cfs_rq->rb_leftmost;

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
        struct rb_node *next = rb_next(&se->run_node);

        if (!next)
                return NULL;

        return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

        if (!last)
                return NULL;

        return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int factor = get_update_sysctl_factor();

        if (ret || !write)
                return ret;

        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
                                        sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
        (normalized_sysctl_##name = sysctl_##name / (factor))
        WRT_SYSCTL(sched_min_granularity);
        WRT_SYSCTL(sched_latency);
        WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

        return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

        return delta;
}
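
/*
 * Added note (illustration, not from the original source): this is what
 * turns wall-clock runtime into vruntime.  A NICE_0_LOAD task's vruntime
 * advances at wall-clock speed; a task with twice that weight advances
 * its vruntime at half speed, so for equal vruntime it receives twice
 * the CPU time.
 */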

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        u64 period = sysctl_sched_latency;
        unsigned long nr_latency = sched_nr_latency;

        if (unlikely(nr_running > nr_latency)) {
                period = sysctl_sched_min_granularity;
                period *= nr_running;
        }

        return period;
}
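
/*
 * Worked example (illustration only): with the defaults above (latency 6ms,
 * min granularity 0.75ms, sched_nr_latency = 8) and no tunable scaling,
 * 4 runnable tasks share a 6ms period, while 16 runnable tasks stretch
 * the period to 16 * 0.75ms = 12ms so that each slice never drops below
 * the minimum granularity.
 */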

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

        for_each_sched_entity(se) {
                struct load_weight *load;
                struct load_weight lw;

                cfs_rq = cfs_rq_of(se);
                load = &cfs_rq->load;

                if (unlikely(!se->on_rq)) {
                        lw = cfs_rq->load;

                        update_load_add(&lw, se->load.weight);
                        load = &lw;
                }
                slice = __calc_delta(slice, se->load.weight, load);
        }
        return slice;
}
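
/*
 * Worked example (illustration only, using the standard CFS nice-to-weight
 * table): two runnable nice-0 tasks (weight 1024 each) split a 6ms period
 * into two 3ms slices.  If one of them is reniced to -5 (weight 3121), the
 * runqueue weight becomes 4145 and the slices become roughly
 * 6ms * 3121/4145 ~= 4.5ms and 6ms * 1024/4145 ~= 1.5ms.
 */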

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP
static unsigned long task_h_load(struct task_struct *p);

static inline void __update_task_entity_contrib(struct sched_entity *se);

/* Give new task start runnable values to heavy its load in infant time */
void init_task_runnable_average(struct task_struct *p)
{
        u32 slice;

        p->se.avg.decay_count = 0;
        slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
        p->se.avg.runnable_avg_sum = slice;
        p->se.avg.runnable_avg_period = slice;
        __update_task_entity_contrib(&p->se);
}
#else
void init_task_runnable_average(struct task_struct *p)
{
}
#endif

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_clock_task(rq_of(cfs_rq));
        u64 delta_exec;

        if (unlikely(!curr))
                return;

        delta_exec = now - curr->exec_start;
        if (unlikely((s64)delta_exec <= 0))
                return;

        curr->exec_start = now;

        schedstat_set(curr->statistics.exec_max,
                      max(delta_exec, curr->statistics.exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);

        curr->vruntime += calc_delta_fair(delta_exec, curr);
        update_min_vruntime(cfs_rq);

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }

        account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
        schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
        schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
        if (entity_is_task(se)) {
                trace_sched_stat_wait(task_of(se),
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
        }
#endif
        schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

790#ifdef CONFIG_NUMA_BALANCING
791
792
793
794
795
796unsigned int sysctl_numa_balancing_scan_period_min = 1000;
797unsigned int sysctl_numa_balancing_scan_period_max = 60000;
798
799
800unsigned int sysctl_numa_balancing_scan_size = 256;
801
802
803unsigned int sysctl_numa_balancing_scan_delay = 1000;
804
805static unsigned int task_nr_scan_windows(struct task_struct *p)
806{
807 unsigned long rss = 0;
808 unsigned long nr_scan_pages;
809
810
811
812
813
814
815 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
816 rss = get_mm_rss(p->mm);
817 if (!rss)
818 rss = nr_scan_pages;
819
820 rss = round_up(rss, nr_scan_pages);
821 return rss / nr_scan_pages;
822}
823
824
825#define MAX_SCAN_WINDOW 2560
826
827static unsigned int task_scan_min(struct task_struct *p)
828{
829 unsigned int scan, floor;
830 unsigned int windows = 1;
831
832 if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
833 windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
834 floor = 1000 / windows;
835
836 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
837 return max_t(unsigned int, floor, scan);
838}
839
840static unsigned int task_scan_max(struct task_struct *p)
841{
842 unsigned int smin = task_scan_min(p);
843 unsigned int smax;
844
845
846 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
847 return max(smin, smax);
848}
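
/*
 * Worked example (illustration only, assuming 4KiB pages and the default
 * sysctls above): a task with 1GiB of RSS covers 4 scan windows of 256MB,
 * so task_scan_min() returns max(1000/10, 1000/4) = 250ms between scans
 * and task_scan_max() returns 60000/4 = 15000ms.  Larger address spaces
 * are therefore scanned proportionally less often per unit of memory.
 */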
849
850static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
851{
852 rq->nr_numa_running += (p->numa_preferred_nid != -1);
853 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
854}
855
856static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
857{
858 rq->nr_numa_running -= (p->numa_preferred_nid != -1);
859 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
860}
861
862struct numa_group {
863 atomic_t refcount;
864
865 spinlock_t lock;
866 int nr_tasks;
867 pid_t gid;
868 struct list_head task_list;
869
870 struct rcu_head rcu;
871 nodemask_t active_nodes;
872 unsigned long total_faults;
873
874
875
876
877
878 unsigned long *faults_cpu;
879 unsigned long faults[0];
880};
881
882
883#define NR_NUMA_HINT_FAULT_TYPES 2
884
885
886#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
887
888
889#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
890
891pid_t task_numa_group_id(struct task_struct *p)
892{
893 return p->numa_group ? p->numa_group->gid : 0;
894}
895
896static inline int task_faults_idx(int nid, int priv)
897{
898 return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
899}
900
901static inline unsigned long task_faults(struct task_struct *p, int nid)
902{
903 if (!p->numa_faults_memory)
904 return 0;
905
906 return p->numa_faults_memory[task_faults_idx(nid, 0)] +
907 p->numa_faults_memory[task_faults_idx(nid, 1)];
908}
909
910static inline unsigned long group_faults(struct task_struct *p, int nid)
911{
912 if (!p->numa_group)
913 return 0;
914
915 return p->numa_group->faults[task_faults_idx(nid, 0)] +
916 p->numa_group->faults[task_faults_idx(nid, 1)];
917}
918
919static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
920{
921 return group->faults_cpu[task_faults_idx(nid, 0)] +
922 group->faults_cpu[task_faults_idx(nid, 1)];
923}
924
925
926
927
928
929
930
931static inline unsigned long task_weight(struct task_struct *p, int nid)
932{
933 unsigned long total_faults;
934
935 if (!p->numa_faults_memory)
936 return 0;
937
938 total_faults = p->total_numa_faults;
939
940 if (!total_faults)
941 return 0;
942
943 return 1000 * task_faults(p, nid) / total_faults;
944}
945
946static inline unsigned long group_weight(struct task_struct *p, int nid)
947{
948 if (!p->numa_group || !p->numa_group->total_faults)
949 return 0;
950
951 return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
952}
953
954bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
955 int src_nid, int dst_cpu)
956{
957 struct numa_group *ng = p->numa_group;
958 int dst_nid = cpu_to_node(dst_cpu);
959 int last_cpupid, this_cpupid;
960
961 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
981 if (!cpupid_pid_unset(last_cpupid) &&
982 cpupid_to_nid(last_cpupid) != dst_nid)
983 return false;
984
985
986 if (cpupid_match_pid(p, last_cpupid))
987 return true;
988
989
990 if (!ng)
991 return true;
992
993
994
995
996
997 if (!node_isset(dst_nid, ng->active_nodes))
998 return false;
999
1000
1001
1002
1003
1004 if (!node_isset(src_nid, ng->active_nodes))
1005 return true;
1006
1007
1008
1009
1010
1011
1012
1013
1014 return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
1015}
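
/*
 * Added note (illustration): the final return above handles the case where
 * both nodes are in active use by the numa group: the page is migrated
 * towards the faulting node only while that node still has clearly fewer
 * of the group's faults (less than 3/4 of the source node's), which spreads
 * the group's memory over its active nodes and adds hysteresis against
 * pages bouncing back and forth.
 */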
1016
1017static unsigned long weighted_cpuload(const int cpu);
1018static unsigned long source_load(int cpu, int type);
1019static unsigned long target_load(int cpu, int type);
1020static unsigned long power_of(int cpu);
1021static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1022
1023
1024struct numa_stats {
1025 unsigned long nr_running;
1026 unsigned long load;
1027
1028
1029 unsigned long power;
1030
1031
1032 unsigned long capacity;
1033 int has_capacity;
1034};
1035
1036
1037
1038
1039static void update_numa_stats(struct numa_stats *ns, int nid)
1040{
1041 int cpu, cpus = 0;
1042
1043 memset(ns, 0, sizeof(*ns));
1044 for_each_cpu(cpu, cpumask_of_node(nid)) {
1045 struct rq *rq = cpu_rq(cpu);
1046
1047 ns->nr_running += rq->nr_running;
1048 ns->load += weighted_cpuload(cpu);
1049 ns->power += power_of(cpu);
1050
1051 cpus++;
1052 }
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062 if (!cpus)
1063 return;
1064
1065 ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
1066 ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
1067 ns->has_capacity = (ns->nr_running < ns->capacity);
1068}
1069
1070struct task_numa_env {
1071 struct task_struct *p;
1072
1073 int src_cpu, src_nid;
1074 int dst_cpu, dst_nid;
1075
1076 struct numa_stats src_stats, dst_stats;
1077
1078 int imbalance_pct;
1079
1080 struct task_struct *best_task;
1081 long best_imp;
1082 int best_cpu;
1083};
1084
1085static void task_numa_assign(struct task_numa_env *env,
1086 struct task_struct *p, long imp)
1087{
1088 if (env->best_task)
1089 put_task_struct(env->best_task);
1090 if (p)
1091 get_task_struct(p);
1092
1093 env->best_task = p;
1094 env->best_imp = imp;
1095 env->best_cpu = env->dst_cpu;
1096}
1097
1098
1099
1100
1101
1102
1103
1104static void task_numa_compare(struct task_numa_env *env,
1105 long taskimp, long groupimp)
1106{
1107 struct rq *src_rq = cpu_rq(env->src_cpu);
1108 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1109 struct task_struct *cur;
1110 long dst_load, src_load;
1111 long load;
1112 long imp = (groupimp > 0) ? groupimp : taskimp;
1113
1114 rcu_read_lock();
1115 cur = ACCESS_ONCE(dst_rq->curr);
1116 if (cur->pid == 0)
1117 cur = NULL;
1118
1119
1120
1121
1122
1123
1124
1125
1126 if (cur) {
1127
1128 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1129 goto unlock;
1130
1131
1132
1133
1134
1135 if (cur->numa_group == env->p->numa_group) {
1136 imp = taskimp + task_weight(cur, env->src_nid) -
1137 task_weight(cur, env->dst_nid);
1138
1139
1140
1141
1142 if (cur->numa_group)
1143 imp -= imp/16;
1144 } else {
1145
1146
1147
1148
1149
1150 if (env->p->numa_group)
1151 imp = groupimp;
1152 else
1153 imp = taskimp;
1154
1155 if (cur->numa_group)
1156 imp += group_weight(cur, env->src_nid) -
1157 group_weight(cur, env->dst_nid);
1158 else
1159 imp += task_weight(cur, env->src_nid) -
1160 task_weight(cur, env->dst_nid);
1161 }
1162 }
1163
1164 if (imp < env->best_imp)
1165 goto unlock;
1166
1167 if (!cur) {
1168
1169 if (env->src_stats.has_capacity &&
1170 !env->dst_stats.has_capacity)
1171 goto unlock;
1172
1173 goto balance;
1174 }
1175
1176
1177 if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
1178 goto assign;
1179
1180
1181
1182
1183balance:
1184 dst_load = env->dst_stats.load;
1185 src_load = env->src_stats.load;
1186
1187
1188 load = task_h_load(env->p);
1189 dst_load += load;
1190 src_load -= load;
1191
1192 if (cur) {
1193 load = task_h_load(cur);
1194 dst_load -= load;
1195 src_load += load;
1196 }
1197
1198
1199 if (dst_load < src_load)
1200 swap(dst_load, src_load);
1201
1202 if (src_load * env->imbalance_pct < dst_load * 100)
1203 goto unlock;
1204
1205assign:
1206 task_numa_assign(env, cur, imp);
1207unlock:
1208 rcu_read_unlock();
1209}
1210
1211static void task_numa_find_cpu(struct task_numa_env *env,
1212 long taskimp, long groupimp)
1213{
1214 int cpu;
1215
1216 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1217
1218 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1219 continue;
1220
1221 env->dst_cpu = cpu;
1222 task_numa_compare(env, taskimp, groupimp);
1223 }
1224}
1225
1226static int task_numa_migrate(struct task_struct *p)
1227{
1228 struct task_numa_env env = {
1229 .p = p,
1230
1231 .src_cpu = task_cpu(p),
1232 .src_nid = task_node(p),
1233
1234 .imbalance_pct = 112,
1235
1236 .best_task = NULL,
1237 .best_imp = 0,
1238 .best_cpu = -1
1239 };
1240 struct sched_domain *sd;
1241 unsigned long taskweight, groupweight;
1242 int nid, ret;
1243 long taskimp, groupimp;
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253 rcu_read_lock();
1254 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1255 if (sd)
1256 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1257 rcu_read_unlock();
1258
1259
1260
1261
1262
1263
1264
1265 if (unlikely(!sd)) {
1266 p->numa_preferred_nid = task_node(p);
1267 return -EINVAL;
1268 }
1269
1270 taskweight = task_weight(p, env.src_nid);
1271 groupweight = group_weight(p, env.src_nid);
1272 update_numa_stats(&env.src_stats, env.src_nid);
1273 env.dst_nid = p->numa_preferred_nid;
1274 taskimp = task_weight(p, env.dst_nid) - taskweight;
1275 groupimp = group_weight(p, env.dst_nid) - groupweight;
1276 update_numa_stats(&env.dst_stats, env.dst_nid);
1277
1278
1279 if (env.dst_stats.has_capacity)
1280 task_numa_find_cpu(&env, taskimp, groupimp);
1281
1282
1283 if (env.best_cpu == -1) {
1284 for_each_online_node(nid) {
1285 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1286 continue;
1287
1288
1289 taskimp = task_weight(p, nid) - taskweight;
1290 groupimp = group_weight(p, nid) - groupweight;
1291 if (taskimp < 0 && groupimp < 0)
1292 continue;
1293
1294 env.dst_nid = nid;
1295 update_numa_stats(&env.dst_stats, env.dst_nid);
1296 task_numa_find_cpu(&env, taskimp, groupimp);
1297 }
1298 }
1299
1300
1301 if (env.best_cpu == -1)
1302 return -EAGAIN;
1303
1304 sched_setnuma(p, env.dst_nid);
1305
1306
1307
1308
1309
1310 p->numa_scan_period = task_scan_min(p);
1311
1312 if (env.best_task == NULL) {
1313 ret = migrate_task_to(p, env.best_cpu);
1314 if (ret != 0)
1315 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1316 return ret;
1317 }
1318
1319 ret = migrate_swap(p, env.best_task);
1320 if (ret != 0)
1321 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1322 put_task_struct(env.best_task);
1323 return ret;
1324}
1325
1326
1327static void numa_migrate_preferred(struct task_struct *p)
1328{
1329
1330 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
1331 return;
1332
1333
1334 p->numa_migrate_retry = jiffies + HZ;
1335
1336
1337 if (task_node(p) == p->numa_preferred_nid)
1338 return;
1339
1340
1341 task_numa_migrate(p);
1342}
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355static void update_numa_active_node_mask(struct numa_group *numa_group)
1356{
1357 unsigned long faults, max_faults = 0;
1358 int nid;
1359
1360 for_each_online_node(nid) {
1361 faults = group_faults_cpu(numa_group, nid);
1362 if (faults > max_faults)
1363 max_faults = faults;
1364 }
1365
1366 for_each_online_node(nid) {
1367 faults = group_faults_cpu(numa_group, nid);
1368 if (!node_isset(nid, numa_group->active_nodes)) {
1369 if (faults > max_faults * 6 / 16)
1370 node_set(nid, numa_group->active_nodes);
1371 } else if (faults < max_faults * 3 / 16)
1372 node_clear(nid, numa_group->active_nodes);
1373 }
1374}
1375
1376
1377
1378
1379
1380
1381
1382
1383#define NUMA_PERIOD_SLOTS 10
1384#define NUMA_PERIOD_THRESHOLD 3
1385
1386
1387
1388
1389
1390
1391
1392static void update_task_scan_period(struct task_struct *p,
1393 unsigned long shared, unsigned long private)
1394{
1395 unsigned int period_slot;
1396 int ratio;
1397 int diff;
1398
1399 unsigned long remote = p->numa_faults_locality[0];
1400 unsigned long local = p->numa_faults_locality[1];
1401
1402
1403
1404
1405
1406
1407 if (local + shared == 0) {
1408 p->numa_scan_period = min(p->numa_scan_period_max,
1409 p->numa_scan_period << 1);
1410
1411 p->mm->numa_next_scan = jiffies +
1412 msecs_to_jiffies(p->numa_scan_period);
1413
1414 return;
1415 }
1416
1417
1418
1419
1420
1421
1422
1423 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1424 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1425 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1426 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1427 if (!slot)
1428 slot = 1;
1429 diff = slot * period_slot;
1430 } else {
1431 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
1442 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1443 }
1444
1445 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1446 task_scan_min(p), task_scan_max(p));
1447 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1448}
1449
1450
1451
1452
1453
1454
1455
1456
1457static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1458{
1459 u64 runtime, delta, now;
1460
1461 now = p->se.exec_start;
1462 runtime = p->se.sum_exec_runtime;
1463
1464 if (p->last_task_numa_placement) {
1465 delta = runtime - p->last_sum_exec_runtime;
1466 *period = now - p->last_task_numa_placement;
1467 } else {
1468 delta = p->se.avg.runnable_avg_sum;
1469 *period = p->se.avg.runnable_avg_period;
1470 }
1471
1472 p->last_sum_exec_runtime = runtime;
1473 p->last_task_numa_placement = now;
1474
1475 return delta;
1476}
1477
1478static void task_numa_placement(struct task_struct *p)
1479{
1480 int seq, nid, max_nid = -1, max_group_nid = -1;
1481 unsigned long max_faults = 0, max_group_faults = 0;
1482 unsigned long fault_types[2] = { 0, 0 };
1483 unsigned long total_faults;
1484 u64 runtime, period;
1485 spinlock_t *group_lock = NULL;
1486
1487 seq = ACCESS_ONCE(p->mm->numa_scan_seq);
1488 if (p->numa_scan_seq == seq)
1489 return;
1490 p->numa_scan_seq = seq;
1491 p->numa_scan_period_max = task_scan_max(p);
1492
1493 total_faults = p->numa_faults_locality[0] +
1494 p->numa_faults_locality[1];
1495 runtime = numa_get_avg_runtime(p, &period);
1496
1497
1498 if (p->numa_group) {
1499 group_lock = &p->numa_group->lock;
1500 spin_lock_irq(group_lock);
1501 }
1502
1503
1504 for_each_online_node(nid) {
1505 unsigned long faults = 0, group_faults = 0;
1506 int priv, i;
1507
1508 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
1509 long diff, f_diff, f_weight;
1510
1511 i = task_faults_idx(nid, priv);
1512
1513
1514 diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2;
1515 fault_types[priv] += p->numa_faults_buffer_memory[i];
1516 p->numa_faults_buffer_memory[i] = 0;
1517
1518
1519
1520
1521
1522
1523
1524
1525 f_weight = div64_u64(runtime << 16, period + 1);
1526 f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) /
1527 (total_faults + 1);
1528 f_diff = f_weight - p->numa_faults_cpu[i] / 2;
1529 p->numa_faults_buffer_cpu[i] = 0;
1530
1531 p->numa_faults_memory[i] += diff;
1532 p->numa_faults_cpu[i] += f_diff;
1533 faults += p->numa_faults_memory[i];
1534 p->total_numa_faults += diff;
1535 if (p->numa_group) {
1536
1537 p->numa_group->faults[i] += diff;
1538 p->numa_group->faults_cpu[i] += f_diff;
1539 p->numa_group->total_faults += diff;
1540 group_faults += p->numa_group->faults[i];
1541 }
1542 }
1543
1544 if (faults > max_faults) {
1545 max_faults = faults;
1546 max_nid = nid;
1547 }
1548
1549 if (group_faults > max_group_faults) {
1550 max_group_faults = group_faults;
1551 max_group_nid = nid;
1552 }
1553 }
1554
1555 update_task_scan_period(p, fault_types[0], fault_types[1]);
1556
1557 if (p->numa_group) {
1558 update_numa_active_node_mask(p->numa_group);
1559
1560
1561
1562
1563 if (max_nid != max_group_nid) {
1564 unsigned long weight, max_weight = 0;
1565
1566 for_each_online_node(nid) {
1567 weight = task_weight(p, nid) + group_weight(p, nid);
1568 if (weight > max_weight) {
1569 max_weight = weight;
1570 max_nid = nid;
1571 }
1572 }
1573 }
1574
1575 spin_unlock_irq(group_lock);
1576 }
1577
1578
1579 if (max_faults && max_nid != p->numa_preferred_nid) {
1580
1581 sched_setnuma(p, max_nid);
1582 numa_migrate_preferred(p);
1583 }
1584}
1585
1586static inline int get_numa_group(struct numa_group *grp)
1587{
1588 return atomic_inc_not_zero(&grp->refcount);
1589}
1590
1591static inline void put_numa_group(struct numa_group *grp)
1592{
1593 if (atomic_dec_and_test(&grp->refcount))
1594 kfree_rcu(grp, rcu);
1595}
1596
1597static void task_numa_group(struct task_struct *p, int cpupid, int flags,
1598 int *priv)
1599{
1600 struct numa_group *grp, *my_grp;
1601 struct task_struct *tsk;
1602 bool join = false;
1603 int cpu = cpupid_to_cpu(cpupid);
1604 int i;
1605
1606 if (unlikely(!p->numa_group)) {
1607 unsigned int size = sizeof(struct numa_group) +
1608 4*nr_node_ids*sizeof(unsigned long);
1609
1610 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1611 if (!grp)
1612 return;
1613
1614 atomic_set(&grp->refcount, 1);
1615 spin_lock_init(&grp->lock);
1616 INIT_LIST_HEAD(&grp->task_list);
1617 grp->gid = p->pid;
1618
1619 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
1620 nr_node_ids;
1621
1622 node_set(task_node(current), grp->active_nodes);
1623
1624 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1625 grp->faults[i] = p->numa_faults_memory[i];
1626
1627 grp->total_faults = p->total_numa_faults;
1628
1629 list_add(&p->numa_entry, &grp->task_list);
1630 grp->nr_tasks++;
1631 rcu_assign_pointer(p->numa_group, grp);
1632 }
1633
1634 rcu_read_lock();
1635 tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
1636
1637 if (!cpupid_match_pid(tsk, cpupid))
1638 goto no_join;
1639
1640 grp = rcu_dereference(tsk->numa_group);
1641 if (!grp)
1642 goto no_join;
1643
1644 my_grp = p->numa_group;
1645 if (grp == my_grp)
1646 goto no_join;
1647
1648
1649
1650
1651
1652 if (my_grp->nr_tasks > grp->nr_tasks)
1653 goto no_join;
1654
1655
1656
1657
1658 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
1659 goto no_join;
1660
1661
1662 if (tsk->mm == current->mm)
1663 join = true;
1664
1665
1666 if (flags & TNF_SHARED)
1667 join = true;
1668
1669
1670 *priv = !join;
1671
1672 if (join && !get_numa_group(grp))
1673 goto no_join;
1674
1675 rcu_read_unlock();
1676
1677 if (!join)
1678 return;
1679
1680 BUG_ON(irqs_disabled());
1681 double_lock_irq(&my_grp->lock, &grp->lock);
1682
1683 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
1684 my_grp->faults[i] -= p->numa_faults_memory[i];
1685 grp->faults[i] += p->numa_faults_memory[i];
1686 }
1687 my_grp->total_faults -= p->total_numa_faults;
1688 grp->total_faults += p->total_numa_faults;
1689
1690 list_move(&p->numa_entry, &grp->task_list);
1691 my_grp->nr_tasks--;
1692 grp->nr_tasks++;
1693
1694 spin_unlock(&my_grp->lock);
1695 spin_unlock_irq(&grp->lock);
1696
1697 rcu_assign_pointer(p->numa_group, grp);
1698
1699 put_numa_group(my_grp);
1700 return;
1701
1702no_join:
1703 rcu_read_unlock();
1704 return;
1705}
1706
1707void task_numa_free(struct task_struct *p)
1708{
1709 struct numa_group *grp = p->numa_group;
1710 void *numa_faults = p->numa_faults_memory;
1711 unsigned long flags;
1712 int i;
1713
1714 if (grp) {
1715 spin_lock_irqsave(&grp->lock, flags);
1716 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1717 grp->faults[i] -= p->numa_faults_memory[i];
1718 grp->total_faults -= p->total_numa_faults;
1719
1720 list_del(&p->numa_entry);
1721 grp->nr_tasks--;
1722 spin_unlock_irqrestore(&grp->lock, flags);
1723 rcu_assign_pointer(p->numa_group, NULL);
1724 put_numa_group(grp);
1725 }
1726
1727 p->numa_faults_memory = NULL;
1728 p->numa_faults_buffer_memory = NULL;
1729 p->numa_faults_cpu= NULL;
1730 p->numa_faults_buffer_cpu = NULL;
1731 kfree(numa_faults);
1732}
1733
1734
1735
1736
1737void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
1738{
1739 struct task_struct *p = current;
1740 bool migrated = flags & TNF_MIGRATED;
1741 int cpu_node = task_node(current);
1742 int priv;
1743
1744 if (!numabalancing_enabled)
1745 return;
1746
1747
1748 if (!p->mm)
1749 return;
1750
1751
1752 if (p->state == TASK_DEAD)
1753 return;
1754
1755
1756 if (unlikely(!p->numa_faults_memory)) {
1757 int size = sizeof(*p->numa_faults_memory) *
1758 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
1759
1760 p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
1761 if (!p->numa_faults_memory)
1762 return;
1763
1764 BUG_ON(p->numa_faults_buffer_memory);
1765
1766
1767
1768
1769
1770
1771 p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
1772 p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
1773 p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
1774 p->total_numa_faults = 0;
1775 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1776 }
1777
1778
1779
1780
1781
1782 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
1783 priv = 1;
1784 } else {
1785 priv = cpupid_match_pid(p, last_cpupid);
1786 if (!priv && !(flags & TNF_NO_GROUP))
1787 task_numa_group(p, last_cpupid, flags, &priv);
1788 }
1789
1790 task_numa_placement(p);
1791
1792
1793
1794
1795
1796 if (time_after(jiffies, p->numa_migrate_retry))
1797 numa_migrate_preferred(p);
1798
1799 if (migrated)
1800 p->numa_pages_migrated += pages;
1801
1802 p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
1803 p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
1804 p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
1805}
1806
1807static void reset_ptenuma_scan(struct task_struct *p)
1808{
1809 ACCESS_ONCE(p->mm->numa_scan_seq)++;
1810 p->mm->numa_scan_offset = 0;
1811}
1812
1813
1814
1815
1816
1817void task_numa_work(struct callback_head *work)
1818{
1819 unsigned long migrate, next_scan, now = jiffies;
1820 struct task_struct *p = current;
1821 struct mm_struct *mm = p->mm;
1822 struct vm_area_struct *vma;
1823 unsigned long start, end;
1824 unsigned long nr_pte_updates = 0;
1825 long pages;
1826
1827 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
1828
1829 work->next = work;
1830
1831
1832
1833
1834
1835
1836
1837
1838 if (p->flags & PF_EXITING)
1839 return;
1840
1841 if (!mm->numa_next_scan) {
1842 mm->numa_next_scan = now +
1843 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
1844 }
1845
1846
1847
1848
1849 migrate = mm->numa_next_scan;
1850 if (time_before(now, migrate))
1851 return;
1852
1853 if (p->numa_scan_period == 0) {
1854 p->numa_scan_period_max = task_scan_max(p);
1855 p->numa_scan_period = task_scan_min(p);
1856 }
1857
1858 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
1859 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
1860 return;
1861
1862
1863
1864
1865
1866 p->node_stamp += 2 * TICK_NSEC;
1867
1868 start = mm->numa_scan_offset;
1869 pages = sysctl_numa_balancing_scan_size;
1870 pages <<= 20 - PAGE_SHIFT;
1871 if (!pages)
1872 return;
1873
1874 down_read(&mm->mmap_sem);
1875 vma = find_vma(mm, start);
1876 if (!vma) {
1877 reset_ptenuma_scan(p);
1878 start = 0;
1879 vma = mm->mmap;
1880 }
1881 for (; vma; vma = vma->vm_next) {
1882 if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
1883 continue;
1884
1885
1886
1887
1888
1889
1890
1891 if (!vma->vm_mm ||
1892 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1893 continue;
1894
1895
1896
1897
1898
1899 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
1900 continue;
1901
1902 do {
1903 start = max(start, vma->vm_start);
1904 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
1905 end = min(end, vma->vm_end);
1906 nr_pte_updates += change_prot_numa(vma, start, end);
1907
1908
1909
1910
1911
1912
1913 if (nr_pte_updates)
1914 pages -= (end - start) >> PAGE_SHIFT;
1915
1916 start = end;
1917 if (pages <= 0)
1918 goto out;
1919
1920 cond_resched();
1921 } while (end != vma->vm_end);
1922 }
1923
1924out:
1925
1926
1927
1928
1929
1930
1931 if (vma)
1932 mm->numa_scan_offset = start;
1933 else
1934 reset_ptenuma_scan(p);
1935 up_read(&mm->mmap_sem);
1936}
1937
1938
1939
1940
1941void task_tick_numa(struct rq *rq, struct task_struct *curr)
1942{
1943 struct callback_head *work = &curr->numa_work;
1944 u64 period, now;
1945
1946
1947
1948
1949 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1950 return;
1951
1952
1953
1954
1955
1956
1957
1958 now = curr->se.sum_exec_runtime;
1959 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1960
1961 if (now - curr->node_stamp > period) {
1962 if (!curr->node_stamp)
1963 curr->numa_scan_period = task_scan_min(curr);
1964 curr->node_stamp += period;
1965
1966 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1967 init_task_work(work, task_numa_work);
1968 task_work_add(curr, work, true);
1969 }
1970 }
1971}
1972#else
1973static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1974{
1975}
1976
1977static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1978{
1979}
1980
1981static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1982{
1983}
1984#endif
1985
1986static void
1987account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1988{
1989 update_load_add(&cfs_rq->load, se->load.weight);
1990 if (!parent_entity(se))
1991 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
1992#ifdef CONFIG_SMP
1993 if (entity_is_task(se)) {
1994 struct rq *rq = rq_of(cfs_rq);
1995
1996 account_numa_enqueue(rq, task_of(se));
1997 list_add(&se->group_node, &rq->cfs_tasks);
1998 }
1999#endif
2000 cfs_rq->nr_running++;
2001}
2002
2003static void
2004account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2005{
2006 update_load_sub(&cfs_rq->load, se->load.weight);
2007 if (!parent_entity(se))
2008 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2009 if (entity_is_task(se)) {
2010 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2011 list_del_init(&se->group_node);
2012 }
2013 cfs_rq->nr_running--;
2014}
2015
2016#ifdef CONFIG_FAIR_GROUP_SCHED
2017# ifdef CONFIG_SMP
2018static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2019{
2020 long tg_weight;
2021
2022
2023
2024
2025
2026
2027 tg_weight = atomic_long_read(&tg->load_avg);
2028 tg_weight -= cfs_rq->tg_load_contrib;
2029 tg_weight += cfs_rq->load.weight;
2030
2031 return tg_weight;
2032}
2033
2034static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2035{
2036 long tg_weight, load, shares;
2037
2038 tg_weight = calc_tg_weight(tg, cfs_rq);
2039 load = cfs_rq->load.weight;
2040
2041 shares = (tg->shares * load);
2042 if (tg_weight)
2043 shares /= tg_weight;
2044
2045 if (shares < MIN_SHARES)
2046 shares = MIN_SHARES;
2047 if (shares > tg->shares)
2048 shares = tg->shares;
2049
2050 return shares;
2051}
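
/*
 * Worked example (illustration only): a task group with tg->shares = 1024
 * that has 2048 units of load on this CPU out of 4096 units of total group
 * load gets roughly 1024 * 2048 / 4096 = 512 shares here, clamped to the
 * [MIN_SHARES, tg->shares] range.
 */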
2052# else
2053static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2054{
2055 return tg->shares;
2056}
2057# endif
2058static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2059 unsigned long weight)
2060{
2061 if (se->on_rq) {
2062
2063 if (cfs_rq->curr == se)
2064 update_curr(cfs_rq);
2065 account_entity_dequeue(cfs_rq, se);
2066 }
2067
2068 update_load_set(&se->load, weight);
2069
2070 if (se->on_rq)
2071 account_entity_enqueue(cfs_rq, se);
2072}
2073
2074static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2075
2076static void update_cfs_shares(struct cfs_rq *cfs_rq)
2077{
2078 struct task_group *tg;
2079 struct sched_entity *se;
2080 long shares;
2081
2082 tg = cfs_rq->tg;
2083 se = tg->se[cpu_of(rq_of(cfs_rq))];
2084 if (!se || throttled_hierarchy(cfs_rq))
2085 return;
2086#ifndef CONFIG_SMP
2087 if (likely(se->load.weight == tg->shares))
2088 return;
2089#endif
2090 shares = calc_cfs_shares(cfs_rq, tg);
2091
2092 reweight_entity(cfs_rq_of(se), se, shares);
2093}
2094#else
2095static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2096{
2097}
2098#endif
2099
2100#ifdef CONFIG_SMP
2101
2102
2103
2104
2105#define LOAD_AVG_PERIOD 32
2106#define LOAD_AVG_MAX 47742
2107#define LOAD_AVG_MAX_N 345
2108
2109
2110static const u32 runnable_avg_yN_inv[] = {
2111 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2112 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2113 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2114 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2115 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2116 0x85aac367, 0x82cd8698,
2117};
2118
2119
2120
2121
2122
2123static const u32 runnable_avg_yN_sum[] = {
2124 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2125 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2126 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2127};
2128
2129
2130
2131
2132
2133static __always_inline u64 decay_load(u64 val, u64 n)
2134{
2135 unsigned int local_n;
2136
2137 if (!n)
2138 return val;
2139 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2140 return 0;
2141
2142
2143 local_n = n;
2144
2145
2146
2147
2148
2149
2150
2151
2152 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2153 val >>= local_n / LOAD_AVG_PERIOD;
2154 local_n %= LOAD_AVG_PERIOD;
2155 }
2156
2157 val *= runnable_avg_yN_inv[local_n];
2158
2159 return val >> 32;
2160}
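
/*
 * Added note (illustration): the decay factor y is chosen so that
 * y^LOAD_AVG_PERIOD = y^32 = 1/2, and runnable_avg_yN_inv[n] holds y^n
 * scaled by 2^32.  decay_load() therefore halves 'val' for every 32
 * periods (~32ms) of age: decay_load(x, 32) ~= x/2, decay_load(x, 64)
 * ~= x/4, and anything older than 32*63 periods decays to zero.
 */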
2161
2162
2163
2164
2165
2166
2167
2168
2169static u32 __compute_runnable_contrib(u64 n)
2170{
2171 u32 contrib = 0;
2172
2173 if (likely(n <= LOAD_AVG_PERIOD))
2174 return runnable_avg_yN_sum[n];
2175 else if (unlikely(n >= LOAD_AVG_MAX_N))
2176 return LOAD_AVG_MAX;
2177
2178
2179 do {
2180 contrib /= 2;
2181 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2182
2183 n -= LOAD_AVG_PERIOD;
2184 } while (n > LOAD_AVG_PERIOD);
2185
2186 contrib = decay_load(contrib, n);
2187 return contrib + runnable_avg_yN_sum[n];
2188}
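
/*
 * Added note (illustration): runnable_avg_yN_sum[n] is the precomputed
 * series 1024*(y + y^2 + ... + y^n).  For a task that has been runnable
 * without interruption the sum saturates after LOAD_AVG_MAX_N (345)
 * periods at LOAD_AVG_MAX (47742), which is why a fully busy entity's
 * runnable_avg_sum / runnable_avg_period ratio approaches 1.
 */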
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218static __always_inline int __update_entity_runnable_avg(u64 now,
2219 struct sched_avg *sa,
2220 int runnable)
2221{
2222 u64 delta, periods;
2223 u32 runnable_contrib;
2224 int delta_w, decayed = 0;
2225
2226 delta = now - sa->last_runnable_update;
2227
2228
2229
2230
2231 if ((s64)delta < 0) {
2232 sa->last_runnable_update = now;
2233 return 0;
2234 }
2235
2236
2237
2238
2239
2240 delta >>= 10;
2241 if (!delta)
2242 return 0;
2243 sa->last_runnable_update = now;
2244
2245
2246 delta_w = sa->runnable_avg_period % 1024;
2247 if (delta + delta_w >= 1024) {
2248
2249 decayed = 1;
2250
2251
2252
2253
2254
2255
2256 delta_w = 1024 - delta_w;
2257 if (runnable)
2258 sa->runnable_avg_sum += delta_w;
2259 sa->runnable_avg_period += delta_w;
2260
2261 delta -= delta_w;
2262
2263
2264 periods = delta / 1024;
2265 delta %= 1024;
2266
2267 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
2268 periods + 1);
2269 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
2270 periods + 1);
2271
2272
2273 runnable_contrib = __compute_runnable_contrib(periods);
2274 if (runnable)
2275 sa->runnable_avg_sum += runnable_contrib;
2276 sa->runnable_avg_period += runnable_contrib;
2277 }
2278
2279
2280 if (runnable)
2281 sa->runnable_avg_sum += delta;
2282 sa->runnable_avg_period += delta;
2283
2284 return decayed;
2285}
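
/*
 * Added note (illustration): time is accounted in 1024us (~1ms) chunks
 * (the "delta >>= 10" above), so a task that was runnable for the whole
 * of the last millisecond adds 1024 to runnable_avg_sum, while
 * runnable_avg_period grows by the same amount regardless of state; the
 * ratio of the two decayed sums is the entity's recent utilization.
 */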
2286
2287
2288static inline u64 __synchronize_entity_decay(struct sched_entity *se)
2289{
2290 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2291 u64 decays = atomic64_read(&cfs_rq->decay_counter);
2292
2293 decays -= se->avg.decay_count;
2294 if (!decays)
2295 return 0;
2296
2297 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2298 se->avg.decay_count = 0;
2299
2300 return decays;
2301}
2302
2303#ifdef CONFIG_FAIR_GROUP_SCHED
2304static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2305 int force_update)
2306{
2307 struct task_group *tg = cfs_rq->tg;
2308 long tg_contrib;
2309
2310 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2311 tg_contrib -= cfs_rq->tg_load_contrib;
2312
2313 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2314 atomic_long_add(tg_contrib, &tg->load_avg);
2315 cfs_rq->tg_load_contrib += tg_contrib;
2316 }
2317}
2318
2319
2320
2321
2322
2323static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2324 struct cfs_rq *cfs_rq)
2325{
2326 struct task_group *tg = cfs_rq->tg;
2327 long contrib;
2328
2329
2330 contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
2331 sa->runnable_avg_period + 1);
2332 contrib -= cfs_rq->tg_runnable_contrib;
2333
2334 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2335 atomic_add(contrib, &tg->runnable_avg);
2336 cfs_rq->tg_runnable_contrib += contrib;
2337 }
2338}
2339
2340static inline void __update_group_entity_contrib(struct sched_entity *se)
2341{
2342 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2343 struct task_group *tg = cfs_rq->tg;
2344 int runnable_avg;
2345
2346 u64 contrib;
2347
2348 contrib = cfs_rq->tg_load_contrib * tg->shares;
2349 se->avg.load_avg_contrib = div_u64(contrib,
2350 atomic_long_read(&tg->load_avg) + 1);
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375 runnable_avg = atomic_read(&tg->runnable_avg);
2376 if (runnable_avg < NICE_0_LOAD) {
2377 se->avg.load_avg_contrib *= runnable_avg;
2378 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
2379 }
2380}
2381
2382static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2383{
2384 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
2385 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
2386}
2387#else
2388static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2389 int force_update) {}
2390static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2391 struct cfs_rq *cfs_rq) {}
2392static inline void __update_group_entity_contrib(struct sched_entity *se) {}
2393static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2394#endif
2395
2396static inline void __update_task_entity_contrib(struct sched_entity *se)
2397{
2398 u32 contrib;
2399
2400
2401 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2402 contrib /= (se->avg.runnable_avg_period + 1);
2403 se->avg.load_avg_contrib = scale_load(contrib);
2404}
2405
2406
2407static long __update_entity_load_avg_contrib(struct sched_entity *se)
2408{
2409 long old_contrib = se->avg.load_avg_contrib;
2410
2411 if (entity_is_task(se)) {
2412 __update_task_entity_contrib(se);
2413 } else {
2414 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
2415 __update_group_entity_contrib(se);
2416 }
2417
2418 return se->avg.load_avg_contrib - old_contrib;
2419}
2420
2421static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2422 long load_contrib)
2423{
2424 if (likely(load_contrib < cfs_rq->blocked_load_avg))
2425 cfs_rq->blocked_load_avg -= load_contrib;
2426 else
2427 cfs_rq->blocked_load_avg = 0;
2428}
2429
2430static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2431
2432
2433static inline void update_entity_load_avg(struct sched_entity *se,
2434 int update_cfs_rq)
2435{
2436 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2437 long contrib_delta;
2438 u64 now;
2439
2440
2441
2442
2443
2444 if (entity_is_task(se))
2445 now = cfs_rq_clock_task(cfs_rq);
2446 else
2447 now = cfs_rq_clock_task(group_cfs_rq(se));
2448
2449 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
2450 return;
2451
2452 contrib_delta = __update_entity_load_avg_contrib(se);
2453
2454 if (!update_cfs_rq)
2455 return;
2456
2457 if (se->on_rq)
2458 cfs_rq->runnable_load_avg += contrib_delta;
2459 else
2460 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2461}
2462
2463
2464
2465
2466
2467static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
2468{
2469 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
2470 u64 decays;
2471
2472 decays = now - cfs_rq->last_decay;
2473 if (!decays && !force_update)
2474 return;
2475
2476 if (atomic_long_read(&cfs_rq->removed_load)) {
2477 unsigned long removed_load;
2478 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
2479 subtract_blocked_load_contrib(cfs_rq, removed_load);
2480 }
2481
2482 if (decays) {
2483 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2484 decays);
2485 atomic64_add(decays, &cfs_rq->decay_counter);
2486 cfs_rq->last_decay = now;
2487 }
2488
2489 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
2490}
2491
2492
2493static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2494 struct sched_entity *se,
2495 int wakeup)
2496{
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506 if (unlikely(se->avg.decay_count <= 0)) {
2507 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
2508 if (se->avg.decay_count) {
2509
2510
2511
2512
2513
2514
2515
2516
2517 se->avg.last_runnable_update -= (-se->avg.decay_count)
2518 << 20;
2519 update_entity_load_avg(se, 0);
2520
2521 se->avg.decay_count = 0;
2522 }
2523 wakeup = 0;
2524 } else {
2525 __synchronize_entity_decay(se);
2526 }
2527
2528
2529 if (wakeup) {
2530 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
2531 update_entity_load_avg(se, 0);
2532 }
2533
2534 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
2535
2536 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
2537}
2538
2539
2540
2541
2542
2543
2544static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2545 struct sched_entity *se,
2546 int sleep)
2547{
2548 update_entity_load_avg(se, 1);
2549
2550 update_cfs_rq_blocked_load(cfs_rq, !sleep);
2551
2552 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
2553 if (sleep) {
2554 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2555 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2556 }
2557}
2558
2559
2560
2561
2562
2563
2564void idle_enter_fair(struct rq *this_rq)
2565{
2566 update_rq_runnable_avg(this_rq, 1);
2567}
2568
2569
2570
2571
2572
2573
2574void idle_exit_fair(struct rq *this_rq)
2575{
2576 update_rq_runnable_avg(this_rq, 0);
2577}
2578
2579static int idle_balance(struct rq *this_rq);
2580
2581#else
2582
2583static inline void update_entity_load_avg(struct sched_entity *se,
2584 int update_cfs_rq) {}
2585static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2586static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2587 struct sched_entity *se,
2588 int wakeup) {}
2589static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2590 struct sched_entity *se,
2591 int sleep) {}
2592static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2593 int force_update) {}
2594
2595static inline int idle_balance(struct rq *rq)
2596{
2597 return 0;
2598}
2599
2600#endif
2601
2602static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
2603{
2604#ifdef CONFIG_SCHEDSTATS
2605 struct task_struct *tsk = NULL;
2606
2607 if (entity_is_task(se))
2608 tsk = task_of(se);
2609
2610 if (se->statistics.sleep_start) {
2611 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
2612
2613 if ((s64)delta < 0)
2614 delta = 0;
2615
2616 if (unlikely(delta > se->statistics.sleep_max))
2617 se->statistics.sleep_max = delta;
2618
2619 se->statistics.sleep_start = 0;
2620 se->statistics.sum_sleep_runtime += delta;
2621
2622 if (tsk) {
2623 account_scheduler_latency(tsk, delta >> 10, 1);
2624 trace_sched_stat_sleep(tsk, delta);
2625 }
2626 }
2627 if (se->statistics.block_start) {
2628 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
2629
2630 if ((s64)delta < 0)
2631 delta = 0;
2632
2633 if (unlikely(delta > se->statistics.block_max))
2634 se->statistics.block_max = delta;
2635
2636 se->statistics.block_start = 0;
2637 se->statistics.sum_sleep_runtime += delta;
2638
2639 if (tsk) {
2640 if (tsk->in_iowait) {
2641 se->statistics.iowait_sum += delta;
2642 se->statistics.iowait_count++;
2643 trace_sched_stat_iowait(tsk, delta);
2644 }
2645
2646 trace_sched_stat_blocked(tsk, delta);
2647
2648
2649
2650
2651
2652
2653 if (unlikely(prof_on == SLEEP_PROFILING)) {
2654 profile_hits(SLEEP_PROFILING,
2655 (void *)get_wchan(tsk),
2656 delta >> 20);
2657 }
2658 account_scheduler_latency(tsk, delta >> 10, 0);
2659 }
2660 }
2661#endif
2662}
2663
2664static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2665{
2666#ifdef CONFIG_SCHED_DEBUG
2667 s64 d = se->vruntime - cfs_rq->min_vruntime;
2668
2669 if (d < 0)
2670 d = -d;
2671
2672 if (d > 3*sysctl_sched_latency)
2673 schedstat_inc(cfs_rq, nr_spread_over);
2674#endif
2675}
2676
2677static void
2678place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2679{
2680 u64 vruntime = cfs_rq->min_vruntime;
2681
2682
2683
2684
2685
2686
2687
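	/*
	 * The 'current' period is already promised to the current tasks:
	 * with START_DEBIT a newly forked entity is charged one slice up
	 * front so it cannot immediately run ahead of the existing tasks.
	 */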
2688 if (initial && sched_feat(START_DEBIT))
2689 vruntime += sched_vslice(cfs_rq, se);
2690
2691
2692 if (!initial) {
2693 unsigned long thresh = sysctl_sched_latency;
2694
2695
2696
2697
2698
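		/*
		 * With GENTLE_FAIR_SLEEPERS the sleeper credit is halved,
		 * limiting the bonus to half a scheduling latency.
		 */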
2699 if (sched_feat(GENTLE_FAIR_SLEEPERS))
2700 thresh >>= 1;
2701
2702 vruntime -= thresh;
2703 }
2704
2705
2706 se->vruntime = max_vruntime(se->vruntime, vruntime);
2707}
2708
2709static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2710
2711static void
2712enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
2713{
2714
2715
2716
2717
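	/*
	 * The entity's vruntime was made relative to its old queue's
	 * min_vruntime when it was taken off a runqueue; unless this is a
	 * plain (non-migrating) wakeup, re-base it on this cfs_rq's
	 * min_vruntime before it is used below.
	 */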
2718 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
2719 se->vruntime += cfs_rq->min_vruntime;
2720
2721
2722
2723
2724 update_curr(cfs_rq);
2725 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
2726 account_entity_enqueue(cfs_rq, se);
2727 update_cfs_shares(cfs_rq);
2728
2729 if (flags & ENQUEUE_WAKEUP) {
2730 place_entity(cfs_rq, se, 0);
2731 enqueue_sleeper(cfs_rq, se);
2732 }
2733
2734 update_stats_enqueue(cfs_rq, se);
2735 check_spread(cfs_rq, se);
2736 if (se != cfs_rq->curr)
2737 __enqueue_entity(cfs_rq, se);
2738 se->on_rq = 1;
2739
2740 if (cfs_rq->nr_running == 1) {
2741 list_add_leaf_cfs_rq(cfs_rq);
2742 check_enqueue_throttle(cfs_rq);
2743 }
2744}
2745
2746static void __clear_buddies_last(struct sched_entity *se)
2747{
2748 for_each_sched_entity(se) {
2749 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2750 if (cfs_rq->last != se)
2751 break;
2752
2753 cfs_rq->last = NULL;
2754 }
2755}
2756
2757static void __clear_buddies_next(struct sched_entity *se)
2758{
2759 for_each_sched_entity(se) {
2760 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2761 if (cfs_rq->next != se)
2762 break;
2763
2764 cfs_rq->next = NULL;
2765 }
2766}
2767
2768static void __clear_buddies_skip(struct sched_entity *se)
2769{
2770 for_each_sched_entity(se) {
2771 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2772 if (cfs_rq->skip != se)
2773 break;
2774
2775 cfs_rq->skip = NULL;
2776 }
2777}
2778
2779static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2780{
2781 if (cfs_rq->last == se)
2782 __clear_buddies_last(se);
2783
2784 if (cfs_rq->next == se)
2785 __clear_buddies_next(se);
2786
2787 if (cfs_rq->skip == se)
2788 __clear_buddies_skip(se);
2789}
2790
2791static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2792
2793static void
2794dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
2795{
2796
2797
2798
2799 update_curr(cfs_rq);
2800 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
2801
2802 update_stats_dequeue(cfs_rq, se);
2803 if (flags & DEQUEUE_SLEEP) {
2804#ifdef CONFIG_SCHEDSTATS
2805 if (entity_is_task(se)) {
2806 struct task_struct *tsk = task_of(se);
2807
2808 if (tsk->state & TASK_INTERRUPTIBLE)
2809 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
2810 if (tsk->state & TASK_UNINTERRUPTIBLE)
2811 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
2812 }
2813#endif
2814 }
2815
2816 clear_buddies(cfs_rq, se);
2817
2818 if (se != cfs_rq->curr)
2819 __dequeue_entity(cfs_rq, se);
2820 se->on_rq = 0;
2821 account_entity_dequeue(cfs_rq, se);
2822
2823
2824
2825
2826
2827
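	/*
	 * An entity dequeued without going to sleep (e.g. for migration)
	 * has its vruntime made relative to min_vruntime here, so it can be
	 * re-based on the destination cfs_rq at enqueue time.
	 */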
2828 if (!(flags & DEQUEUE_SLEEP))
2829 se->vruntime -= cfs_rq->min_vruntime;
2830
2831
2832 return_cfs_rq_runtime(cfs_rq);
2833
2834 update_min_vruntime(cfs_rq);
2835 update_cfs_shares(cfs_rq);
2836}
2837
2838
2839
2840
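/*
 * Check whether the currently running entity should be preempted on this
 * tick: either it has consumed its ideal slice, or it has pulled far enough
 * ahead of the leftmost entity in the tree.
 */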
2841static void
2842check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
2843{
2844 unsigned long ideal_runtime, delta_exec;
2845 struct sched_entity *se;
2846 s64 delta;
2847
2848 ideal_runtime = sched_slice(cfs_rq, curr);
2849 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
2850 if (delta_exec > ideal_runtime) {
2851 resched_task(rq_of(cfs_rq)->curr);
2852
2853
2854
2855
2856 clear_buddies(cfs_rq, curr);
2857 return;
2858 }
2859
2860
2861
2862
2863
2864
2865 if (delta_exec < sysctl_sched_min_granularity)
2866 return;
2867
2868 se = __pick_first_entity(cfs_rq);
2869 delta = curr->vruntime - se->vruntime;
2870
2871 if (delta < 0)
2872 return;
2873
2874 if (delta > ideal_runtime)
2875 resched_task(rq_of(cfs_rq)->curr);
2876}
2877
2878static void
2879set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
2880{
2881
2882 if (se->on_rq) {
2883
2884
2885
2886
2887
2888 update_stats_wait_end(cfs_rq, se);
2889 __dequeue_entity(cfs_rq, se);
2890 }
2891
2892 update_stats_curr_start(cfs_rq, se);
2893 cfs_rq->curr = se;
2894#ifdef CONFIG_SCHEDSTATS
2895
2896
2897
2898
2899
2900 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
2901 se->statistics.slice_max = max(se->statistics.slice_max,
2902 se->sum_exec_runtime - se->prev_sum_exec_runtime);
2903 }
2904#endif
2905 se->prev_sum_exec_runtime = se->sum_exec_runtime;
2906}
2907
2908static int
2909wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2910
2911
2912
2913
2914
2915
2916
2917
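/*
 * Pick the next entity to run.  The leftmost entity is the fair choice, but
 * the skip, last and next buddies may override it as long as doing so does
 * not create too much unfairness (as judged by wakeup_preempt_entity()).
 */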
2918static struct sched_entity *
2919pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
2920{
2921 struct sched_entity *left = __pick_first_entity(cfs_rq);
2922 struct sched_entity *se;
2923
2924
2925
2926
2927
2928 if (!left || (curr && entity_before(curr, left)))
2929 left = curr;
2930
2931 se = left;
2932
2933
2934
2935
2936
2937 if (cfs_rq->skip == se) {
2938 struct sched_entity *second;
2939
2940 if (se == curr) {
2941 second = __pick_first_entity(cfs_rq);
2942 } else {
2943 second = __pick_next_entity(se);
2944 if (!second || (curr && entity_before(curr, second)))
2945 second = curr;
2946 }
2947
2948 if (second && wakeup_preempt_entity(second, left) < 1)
2949 se = second;
2950 }
2951
2952
2953
2954
2955 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
2956 se = cfs_rq->last;
2957
2958
2959
2960
2961 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
2962 se = cfs_rq->next;
2963
2964 clear_buddies(cfs_rq, se);
2965
2966 return se;
2967}
2968
2969static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2970
2971static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
2972{
2973
2974
2975
2976
2977 if (prev->on_rq)
2978 update_curr(cfs_rq);
2979
2980
2981 check_cfs_rq_runtime(cfs_rq);
2982
2983 check_spread(cfs_rq, prev);
2984 if (prev->on_rq) {
2985 update_stats_wait_start(cfs_rq, prev);
2986
2987 __enqueue_entity(cfs_rq, prev);
2988
2989 update_entity_load_avg(prev, 1);
2990 }
2991 cfs_rq->curr = NULL;
2992}
2993
2994static void
2995entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
2996{
2997
2998
2999
3000 update_curr(cfs_rq);
3001
3002
3003
3004
3005 update_entity_load_avg(curr, 1);
3006 update_cfs_rq_blocked_load(cfs_rq, 1);
3007 update_cfs_shares(cfs_rq);
3008
3009#ifdef CONFIG_SCHED_HRTICK
3010
3011
3012
3013
3014 if (queued) {
3015 resched_task(rq_of(cfs_rq)->curr);
3016 return;
3017 }
3018
3019
3020
3021 if (!sched_feat(DOUBLE_TICK) &&
3022 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3023 return;
3024#endif
3025
3026 if (cfs_rq->nr_running > 1)
3027 check_preempt_tick(cfs_rq, curr);
3028}
3029
3030
3031
3032
3033
3034
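/*
 * CFS bandwidth control: a task group may be given a runtime quota per
 * period.  Runtime is handed out to the per-cpu cfs_rqs in slices; once a
 * group's quota is exhausted its cfs_rqs are throttled until the period
 * timer refills the pool.
 */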
3035#ifdef CONFIG_CFS_BANDWIDTH
3036
3037#ifdef HAVE_JUMP_LABEL
3038static struct static_key __cfs_bandwidth_used;
3039
3040static inline bool cfs_bandwidth_used(void)
3041{
3042 return static_key_false(&__cfs_bandwidth_used);
3043}
3044
3045void cfs_bandwidth_usage_inc(void)
3046{
3047 static_key_slow_inc(&__cfs_bandwidth_used);
3048}
3049
3050void cfs_bandwidth_usage_dec(void)
3051{
3052 static_key_slow_dec(&__cfs_bandwidth_used);
3053}
3054#else
3055static bool cfs_bandwidth_used(void)
3056{
3057 return true;
3058}
3059
3060void cfs_bandwidth_usage_inc(void) {}
3061void cfs_bandwidth_usage_dec(void) {}
3062#endif
3063
3064
3065
3066
3067
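/* default period for cfs group bandwidth: 100ms, expressed in nanoseconds */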
3068static inline u64 default_cfs_period(void)
3069{
3070 return 100000000ULL;
3071}
3072
3073static inline u64 sched_cfs_bandwidth_slice(void)
3074{
3075 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3076}
3077
3078
3079
3080
3081
3082
3083
3084
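/*
 * Replenish runtime according to the assigned quota and push the expiration
 * deadline one period into the future.  sched_clock_cpu() is used directly
 * rather than rq->clock to avoid extra synchronization around rq->lock;
 * the caller must hold cfs_b->lock.
 */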
3085void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
3086{
3087 u64 now;
3088
3089 if (cfs_b->quota == RUNTIME_INF)
3090 return;
3091
3092 now = sched_clock_cpu(smp_processor_id());
3093 cfs_b->runtime = cfs_b->quota;
3094 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3095}
3096
3097static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3098{
3099 return &tg->cfs_bandwidth;
3100}
3101
3102
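/*
 * rq_clock_task() for this cfs_rq, normalized against the time the cfs_rq
 * has spent throttled.
 */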
3103static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3104{
3105 if (unlikely(cfs_rq->throttle_count))
3106 return cfs_rq->throttled_clock_task;
3107
3108 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
3109}
3110
3111
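/* returns 0 on failure to allocate runtime from the global pool */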
3112static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3113{
3114 struct task_group *tg = cfs_rq->tg;
3115 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
3116 u64 amount = 0, min_amount, expires;
3117
3118
3119 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3120
3121 raw_spin_lock(&cfs_b->lock);
3122 if (cfs_b->quota == RUNTIME_INF)
3123 amount = min_amount;
3124 else {
3125
3126
3127
3128
3129
3130
3131 if (!cfs_b->timer_active) {
3132 __refill_cfs_bandwidth_runtime(cfs_b);
3133 __start_cfs_bandwidth(cfs_b, false);
3134 }
3135
3136 if (cfs_b->runtime > 0) {
3137 amount = min(cfs_b->runtime, min_amount);
3138 cfs_b->runtime -= amount;
3139 cfs_b->idle = 0;
3140 }
3141 }
3142 expires = cfs_b->runtime_expires;
3143 raw_spin_unlock(&cfs_b->lock);
3144
3145 cfs_rq->runtime_remaining += amount;
3146
3147
3148
3149
3150
3151 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3152 cfs_rq->runtime_expires = expires;
3153
3154 return cfs_rq->runtime_remaining > 0;
3155}
3156
3157
3158
3159
3160
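/*
 * Check whether our locally cached runtime snapshot has expired.  If the
 * global deadline has not moved past our local one, the apparent expiry is
 * only clock drift between CPUs and the local deadline is extended by a
 * tick; otherwise a new period has started globally and the remaining local
 * runtime is voided so it must be re-acquired from the pool.
 */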
3161static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3162{
3163 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3164
3165
3166 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
3167 return;
3168
3169 if (cfs_rq->runtime_remaining < 0)
3170 return;
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
3182
3183 cfs_rq->runtime_expires += TICK_NSEC;
3184 } else {
3185
3186 cfs_rq->runtime_remaining = 0;
3187 }
3188}
3189
3190static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3191{
3192
3193 cfs_rq->runtime_remaining -= delta_exec;
3194 expire_cfs_rq_runtime(cfs_rq);
3195
3196 if (likely(cfs_rq->runtime_remaining > 0))
3197 return;
3198
3199
3200
3201
3202
3203 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3204 resched_task(rq_of(cfs_rq)->curr);
3205}
3206
3207static __always_inline
3208void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3209{
3210 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3211 return;
3212
3213 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3214}
3215
3216static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3217{
3218 return cfs_bandwidth_used() && cfs_rq->throttled;
3219}
3220
3221
3222static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3223{
3224 return cfs_bandwidth_used() && cfs_rq->throttle_count;
3225}
3226
3227
3228
3229
3230
3231
3232static inline int throttled_lb_pair(struct task_group *tg,
3233 int src_cpu, int dest_cpu)
3234{
3235 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3236
3237 src_cfs_rq = tg->cfs_rq[src_cpu];
3238 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3239
3240 return throttled_hierarchy(src_cfs_rq) ||
3241 throttled_hierarchy(dest_cfs_rq);
3242}
3243
3244
3245static int tg_unthrottle_up(struct task_group *tg, void *data)
3246{
3247 struct rq *rq = data;
3248 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3249
3250 cfs_rq->throttle_count--;
3251#ifdef CONFIG_SMP
3252 if (!cfs_rq->throttle_count) {
3253
3254 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3255 cfs_rq->throttled_clock_task;
3256 }
3257#endif
3258
3259 return 0;
3260}
3261
3262static int tg_throttle_down(struct task_group *tg, void *data)
3263{
3264 struct rq *rq = data;
3265 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3266
3267
3268 if (!cfs_rq->throttle_count)
3269 cfs_rq->throttled_clock_task = rq_clock_task(rq);
3270 cfs_rq->throttle_count++;
3271
3272 return 0;
3273}
3274
3275static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3276{
3277 struct rq *rq = rq_of(cfs_rq);
3278 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3279 struct sched_entity *se;
3280 long task_delta, dequeue = 1;
3281
3282 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3283
3284
3285 rcu_read_lock();
3286 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3287 rcu_read_unlock();
3288
3289 task_delta = cfs_rq->h_nr_running;
3290 for_each_sched_entity(se) {
3291 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3292
3293 if (!se->on_rq)
3294 break;
3295
3296 if (dequeue)
3297 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3298 qcfs_rq->h_nr_running -= task_delta;
3299
3300 if (qcfs_rq->load.weight)
3301 dequeue = 0;
3302 }
3303
3304 if (!se)
3305 rq->nr_running -= task_delta;
3306
3307 cfs_rq->throttled = 1;
3308 cfs_rq->throttled_clock = rq_clock(rq);
3309 raw_spin_lock(&cfs_b->lock);
3310 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3311 if (!cfs_b->timer_active)
3312 __start_cfs_bandwidth(cfs_b, false);
3313 raw_spin_unlock(&cfs_b->lock);
3314}
3315
3316void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3317{
3318 struct rq *rq = rq_of(cfs_rq);
3319 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3320 struct sched_entity *se;
3321 int enqueue = 1;
3322 long task_delta;
3323
3324 se = cfs_rq->tg->se[cpu_of(rq)];
3325
3326 cfs_rq->throttled = 0;
3327
3328 update_rq_clock(rq);
3329
3330 raw_spin_lock(&cfs_b->lock);
3331 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
3332 list_del_rcu(&cfs_rq->throttled_list);
3333 raw_spin_unlock(&cfs_b->lock);
3334
3335
3336 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3337
3338 if (!cfs_rq->load.weight)
3339 return;
3340
3341 task_delta = cfs_rq->h_nr_running;
3342 for_each_sched_entity(se) {
3343 if (se->on_rq)
3344 enqueue = 0;
3345
3346 cfs_rq = cfs_rq_of(se);
3347 if (enqueue)
3348 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3349 cfs_rq->h_nr_running += task_delta;
3350
3351 if (cfs_rq_throttled(cfs_rq))
3352 break;
3353 }
3354
3355 if (!se)
3356 rq->nr_running += task_delta;
3357
3358
3359 if (rq->curr == rq->idle && rq->cfs.nr_running)
3360 resched_task(rq->curr);
3361}
3362
3363static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3364 u64 remaining, u64 expires)
3365{
3366 struct cfs_rq *cfs_rq;
3367 u64 runtime = remaining;
3368
3369 rcu_read_lock();
3370 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3371 throttled_list) {
3372 struct rq *rq = rq_of(cfs_rq);
3373
3374 raw_spin_lock(&rq->lock);
3375 if (!cfs_rq_throttled(cfs_rq))
3376 goto next;
3377
3378 runtime = -cfs_rq->runtime_remaining + 1;
3379 if (runtime > remaining)
3380 runtime = remaining;
3381 remaining -= runtime;
3382
3383 cfs_rq->runtime_remaining += runtime;
3384 cfs_rq->runtime_expires = expires;
3385
3386
3387 if (cfs_rq->runtime_remaining > 0)
3388 unthrottle_cfs_rq(cfs_rq);
3389
3390next:
3391 raw_spin_unlock(&rq->lock);
3392
3393 if (!remaining)
3394 break;
3395 }
3396 rcu_read_unlock();
3397
3398 return remaining;
3399}
3400
3401
3402
3403
3404
3405
3406
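/*
 * Period-timer work: refill the global runtime pool and hand runtime back to
 * throttled cfs_rqs.  Returns non-zero when the group was idle for the whole
 * period, in which case the period timer may be left inactive.
 */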
3407static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3408{
3409 u64 runtime, runtime_expires;
3410 int idle = 1, throttled;
3411
3412 raw_spin_lock(&cfs_b->lock);
3413
3414 if (cfs_b->quota == RUNTIME_INF)
3415 goto out_unlock;
3416
3417 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3418
3419 idle = cfs_b->idle && !throttled;
3420 cfs_b->nr_periods += overrun;
3421
3422
3423 if (idle)
3424 goto out_unlock;
3425
3426
3427
3428
3429
3430
3431 cfs_b->timer_active = 1;
3432
3433 __refill_cfs_bandwidth_runtime(cfs_b);
3434
3435 if (!throttled) {
3436
3437 cfs_b->idle = 1;
3438 goto out_unlock;
3439 }
3440
3441
3442 cfs_b->nr_throttled += overrun;
3443
3444
3445
3446
3447
3448
3449
3450 runtime = cfs_b->runtime;
3451 runtime_expires = cfs_b->runtime_expires;
3452 cfs_b->runtime = 0;
3453
3454
3455
3456
3457
3458
3459 while (throttled && runtime > 0) {
3460 raw_spin_unlock(&cfs_b->lock);
3461
3462 runtime = distribute_cfs_runtime(cfs_b, runtime,
3463 runtime_expires);
3464 raw_spin_lock(&cfs_b->lock);
3465
3466 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3467 }
3468
3469
3470 cfs_b->runtime = runtime;
3471
3472
3473
3474
3475
3476
3477 cfs_b->idle = 0;
3478out_unlock:
3479 if (idle)
3480 cfs_b->timer_active = 0;
3481 raw_spin_unlock(&cfs_b->lock);
3482
3483 return idle;
3484}
3485
3486
3487static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3488
3489static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3490
3491static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3492
3493
3494
3495
3496
3497
3498
3499
3500static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3501{
3502 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3503 u64 remaining;
3504
3505
3506 if (hrtimer_callback_running(refresh_timer))
3507 return 1;
3508
3509
3510 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3511 if (remaining < min_expire)
3512 return 1;
3513
3514 return 0;
3515}
3516
3517static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3518{
3519 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3520
3521
3522 if (runtime_refresh_within(cfs_b, min_left))
3523 return;
3524
3525 start_bandwidth_timer(&cfs_b->slack_timer,
3526 ns_to_ktime(cfs_bandwidth_slack_period));
3527}
3528
3529
3530static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3531{
3532 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3533 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3534
3535 if (slack_runtime <= 0)
3536 return;
3537
3538 raw_spin_lock(&cfs_b->lock);
3539 if (cfs_b->quota != RUNTIME_INF &&
3540 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3541 cfs_b->runtime += slack_runtime;
3542
3543
3544 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3545 !list_empty(&cfs_b->throttled_cfs_rq))
3546 start_cfs_slack_bandwidth(cfs_b);
3547 }
3548 raw_spin_unlock(&cfs_b->lock);
3549
3550
3551 cfs_rq->runtime_remaining -= slack_runtime;
3552}
3553
3554static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3555{
3556 if (!cfs_bandwidth_used())
3557 return;
3558
3559 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
3560 return;
3561
3562 __return_cfs_rq_runtime(cfs_rq);
3563}
3564
3565
3566
3567
3568
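/*
 * Slack-timer work: distribute runtime that was returned by idling cfs_rqs
 * to cfs_rqs that are still throttled, unless a full period refresh is about
 * to happen anyway.
 */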
3569static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3570{
3571 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3572 u64 expires;
3573
3574
3575 raw_spin_lock(&cfs_b->lock);
3576 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3577 raw_spin_unlock(&cfs_b->lock);
3578 return;
3579 }
3580
3581 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
3582 runtime = cfs_b->runtime;
3583 cfs_b->runtime = 0;
3584 }
3585 expires = cfs_b->runtime_expires;
3586 raw_spin_unlock(&cfs_b->lock);
3587
3588 if (!runtime)
3589 return;
3590
3591 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3592
3593 raw_spin_lock(&cfs_b->lock);
3594 if (expires == cfs_b->runtime_expires)
3595 cfs_b->runtime = runtime;
3596 raw_spin_unlock(&cfs_b->lock);
3597}
3598
3599
3600
3601
3602
3603
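/*
 * When a cfs_rq that currently has no running entity is enqueued to, check
 * whether its runtime is already exhausted and throttle it right away;
 * otherwise throttling would be deferred until the next update_curr() and
 * the group could run beyond its quota.
 */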
3604static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3605{
3606 if (!cfs_bandwidth_used())
3607 return;
3608
3609
3610 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3611 return;
3612
3613
3614 if (cfs_rq_throttled(cfs_rq))
3615 return;
3616
3617
3618 account_cfs_rq_runtime(cfs_rq, 0);
3619 if (cfs_rq->runtime_remaining <= 0)
3620 throttle_cfs_rq(cfs_rq);
3621}
3622
3623
3624static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3625{
3626 if (!cfs_bandwidth_used())
3627 return false;
3628
3629 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
3630 return false;
3631
3632
3633
3634
3635
3636 if (cfs_rq_throttled(cfs_rq))
3637 return true;
3638
3639 throttle_cfs_rq(cfs_rq);
3640 return true;
3641}
3642
3643static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3644{
3645 struct cfs_bandwidth *cfs_b =
3646 container_of(timer, struct cfs_bandwidth, slack_timer);
3647 do_sched_cfs_slack_timer(cfs_b);
3648
3649 return HRTIMER_NORESTART;
3650}
3651
3652static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3653{
3654 struct cfs_bandwidth *cfs_b =
3655 container_of(timer, struct cfs_bandwidth, period_timer);
3656 ktime_t now;
3657 int overrun;
3658 int idle = 0;
3659
3660 for (;;) {
3661 now = hrtimer_cb_get_time(timer);
3662 overrun = hrtimer_forward(timer, now, cfs_b->period);
3663
3664 if (!overrun)
3665 break;
3666
3667 idle = do_sched_cfs_period_timer(cfs_b, overrun);
3668 }
3669
3670 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3671}
3672
3673void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3674{
3675 raw_spin_lock_init(&cfs_b->lock);
3676 cfs_b->runtime = 0;
3677 cfs_b->quota = RUNTIME_INF;
3678 cfs_b->period = ns_to_ktime(default_cfs_period());
3679
3680 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
3681 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3682 cfs_b->period_timer.function = sched_cfs_period_timer;
3683 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3684 cfs_b->slack_timer.function = sched_cfs_slack_timer;
3685}
3686
3687static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3688{
3689 cfs_rq->runtime_enabled = 0;
3690 INIT_LIST_HEAD(&cfs_rq->throttled_list);
3691}
3692
3693
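/* requires cfs_b->lock; may drop and re-take it while cancelling the timer */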
3694void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
3695{
3696
3697
3698
3699
3700
3701
3702 while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
3703 hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
3704
3705 raw_spin_unlock(&cfs_b->lock);
3706 cpu_relax();
3707 raw_spin_lock(&cfs_b->lock);
3708
3709 if (!force && cfs_b->timer_active)
3710 return;
3711 }
3712
3713 cfs_b->timer_active = 1;
3714 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
3715}
3716
3717static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3718{
3719 hrtimer_cancel(&cfs_b->period_timer);
3720 hrtimer_cancel(&cfs_b->slack_timer);
3721}
3722
3723static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
3724{
3725 struct cfs_rq *cfs_rq;
3726
3727 for_each_leaf_cfs_rq(rq, cfs_rq) {
3728 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3729
3730 if (!cfs_rq->runtime_enabled)
3731 continue;
3732
3733
3734
3735
3736
3737 cfs_rq->runtime_remaining = cfs_b->quota;
3738 if (cfs_rq_throttled(cfs_rq))
3739 unthrottle_cfs_rq(cfs_rq);
3740 }
3741}
3742
3743#else
3744static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3745{
3746 return rq_clock_task(rq_of(cfs_rq));
3747}
3748
3749static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
3750static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
3751static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
3752static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3753
3754static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3755{
3756 return 0;
3757}
3758
3759static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3760{
3761 return 0;
3762}
3763
3764static inline int throttled_lb_pair(struct task_group *tg,
3765 int src_cpu, int dest_cpu)
3766{
3767 return 0;
3768}
3769
3770void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3771
3772#ifdef CONFIG_FAIR_GROUP_SCHED
3773static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3774#endif
3775
3776static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3777{
3778 return NULL;
3779}
3780static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3781static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
3782
3783#endif
3784
3785
3786
3787
3788
3789#ifdef CONFIG_SCHED_HRTICK
3790static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
3791{
3792 struct sched_entity *se = &p->se;
3793 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3794
3795 WARN_ON(task_rq(p) != rq);
3796
3797 if (cfs_rq->nr_running > 1) {
3798 u64 slice = sched_slice(cfs_rq, se);
3799 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
3800 s64 delta = slice - ran;
3801
3802 if (delta < 0) {
3803 if (rq->curr == p)
3804 resched_task(p);
3805 return;
3806 }
3807
3808
3809
3810
3811
3812 if (rq->curr != p)
3813 delta = max_t(s64, 10000LL, delta);
3814
3815 hrtick_start(rq, delta);
3816 }
3817}
3818
3819
3820
3821
3822
3823
3824static void hrtick_update(struct rq *rq)
3825{
3826 struct task_struct *curr = rq->curr;
3827
3828 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
3829 return;
3830
3831 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
3832 hrtick_start_fair(rq, curr);
3833}
3834#else
3835static inline void
3836hrtick_start_fair(struct rq *rq, struct task_struct *p)
3837{
3838}
3839
3840static inline void hrtick_update(struct rq *rq)
3841{
3842}
3843#endif
3844
3845
3846
3847
3848
3849
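/*
 * The enqueue_task method is called before nr_running is incremented.
 * Update the fair scheduling stats and put the task (and any not-yet-queued
 * group entities above it) into the rbtree, stopping at a throttled ancestor.
 */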
3850static void
3851enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
3852{
3853 struct cfs_rq *cfs_rq;
3854 struct sched_entity *se = &p->se;
3855
3856 for_each_sched_entity(se) {
3857 if (se->on_rq)
3858 break;
3859 cfs_rq = cfs_rq_of(se);
3860 enqueue_entity(cfs_rq, se, flags);
3861
3862
3863
3864
3865
3866
3867
3868 if (cfs_rq_throttled(cfs_rq))
3869 break;
3870 cfs_rq->h_nr_running++;
3871
3872 flags = ENQUEUE_WAKEUP;
3873 }
3874
3875 for_each_sched_entity(se) {
3876 cfs_rq = cfs_rq_of(se);
3877 cfs_rq->h_nr_running++;
3878
3879 if (cfs_rq_throttled(cfs_rq))
3880 break;
3881
3882 update_cfs_shares(cfs_rq);
3883 update_entity_load_avg(se, 1);
3884 }
3885
3886 if (!se) {
3887 update_rq_runnable_avg(rq, rq->nr_running);
3888 inc_nr_running(rq);
3889 }
3890 hrtick_update(rq);
3891}
3892
3893static void set_next_buddy(struct sched_entity *se);
3894
3895
3896
3897
3898
3899
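/*
 * The dequeue_task method is called before nr_running is decremented.
 * Update the fair scheduling stats and remove the task from the rbtree,
 * walking up the hierarchy but keeping ancestors that still carry load.
 */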
3900static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
3901{
3902 struct cfs_rq *cfs_rq;
3903 struct sched_entity *se = &p->se;
3904 int task_sleep = flags & DEQUEUE_SLEEP;
3905
3906 for_each_sched_entity(se) {
3907 cfs_rq = cfs_rq_of(se);
3908 dequeue_entity(cfs_rq, se, flags);
3909
3910
3911
3912
3913
3914
3915
3916 if (cfs_rq_throttled(cfs_rq))
3917 break;
3918 cfs_rq->h_nr_running--;
3919
3920
3921 if (cfs_rq->load.weight) {
3922
3923
3924
3925
3926 if (task_sleep && parent_entity(se))
3927 set_next_buddy(parent_entity(se));
3928
3929
3930 se = parent_entity(se);
3931 break;
3932 }
3933 flags |= DEQUEUE_SLEEP;
3934 }
3935
3936 for_each_sched_entity(se) {
3937 cfs_rq = cfs_rq_of(se);
3938 cfs_rq->h_nr_running--;
3939
3940 if (cfs_rq_throttled(cfs_rq))
3941 break;
3942
3943 update_cfs_shares(cfs_rq);
3944 update_entity_load_avg(se, 1);
3945 }
3946
3947 if (!se) {
3948 dec_nr_running(rq);
3949 update_rq_runnable_avg(rq, 1);
3950 }
3951 hrtick_update(rq);
3952}
3953
3954#ifdef CONFIG_SMP
3955
3956static unsigned long weighted_cpuload(const int cpu)
3957{
3958 return cpu_rq(cpu)->cfs.runnable_load_avg;
3959}
3960
3961
3962
3963
3964
3965
3966
3967
3968static unsigned long source_load(int cpu, int type)
3969{
3970 struct rq *rq = cpu_rq(cpu);
3971 unsigned long total = weighted_cpuload(cpu);
3972
3973 if (type == 0 || !sched_feat(LB_BIAS))
3974 return total;
3975
3976 return min(rq->cpu_load[type-1], total);
3977}
3978
3979
3980
3981
3982
3983static unsigned long target_load(int cpu, int type)
3984{
3985 struct rq *rq = cpu_rq(cpu);
3986 unsigned long total = weighted_cpuload(cpu);
3987
3988 if (type == 0 || !sched_feat(LB_BIAS))
3989 return total;
3990
3991 return max(rq->cpu_load[type-1], total);
3992}
3993
3994static unsigned long power_of(int cpu)
3995{
3996 return cpu_rq(cpu)->cpu_power;
3997}
3998
3999static unsigned long cpu_avg_load_per_task(int cpu)
4000{
4001 struct rq *rq = cpu_rq(cpu);
4002 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
4003 unsigned long load_avg = rq->cfs.runnable_load_avg;
4004
4005 if (nr_running)
4006 return load_avg / nr_running;
4007
4008 return 0;
4009}
4010
4011static void record_wakee(struct task_struct *p)
4012{
4013
4014
4015
4016
4017
4018 if (jiffies > current->wakee_flip_decay_ts + HZ) {
4019 current->wakee_flips = 0;
4020 current->wakee_flip_decay_ts = jiffies;
4021 }
4022
4023 if (current->last_wakee != p) {
4024 current->last_wakee = p;
4025 current->wakee_flips++;
4026 }
4027}
4028
4029static void task_waking_fair(struct task_struct *p)
4030{
4031 struct sched_entity *se = &p->se;
4032 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4033 u64 min_vruntime;
4034
4035#ifndef CONFIG_64BIT
4036 u64 min_vruntime_copy;
4037
4038 do {
4039 min_vruntime_copy = cfs_rq->min_vruntime_copy;
4040 smp_rmb();
4041 min_vruntime = cfs_rq->min_vruntime;
4042 } while (min_vruntime != min_vruntime_copy);
4043#else
4044 min_vruntime = cfs_rq->min_vruntime;
4045#endif
4046
4047 se->vruntime -= min_vruntime;
4048 record_wakee(p);
4049}
4050
4051#ifdef CONFIG_FAIR_GROUP_SCHED
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
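/*
 * effective_load() estimates the change in hierarchical load on @cpu when a
 * load of @wl is added to (or, if negative, removed from) task group @tg on
 * that cpu, with @wg being the accompanying change in group weight.  The
 * change is propagated up the group hierarchy: at each level the entity's
 * share is recomputed as w * shares / W (the same distribution used for
 * group shares) and the difference to the current entity weight becomes the
 * load delta for the next level up.
 */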
4102static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4103{
4104 struct sched_entity *se = tg->se[cpu];
4105
4106 if (!tg->parent)
4107 return wl;
4108
4109 for_each_sched_entity(se) {
4110 long w, W;
4111
4112 tg = se->my_q->tg;
4113
4114
4115
4116
4117 W = wg + calc_tg_weight(tg, se->my_q);
4118
4119
4120
4121
4122 w = se->my_q->load.weight + wl;
4123
4124
4125
4126
4127 if (W > 0 && w < W)
4128 wl = (w * tg->shares) / W;
4129 else
4130 wl = tg->shares;
4131
4132
4133
4134
4135
4136
4137 if (wl < MIN_SHARES)
4138 wl = MIN_SHARES;
4139
4140
4141
4142
4143 wl -= se->load.weight;
4144
4145
4146
4147
4148
4149
4150
4151
4152 wg = 0;
4153 }
4154
4155 return wl;
4156}
4157#else
4158
4159static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4160{
4161 return wl;
4162}
4163
4164#endif
4165
4166static int wake_wide(struct task_struct *p)
4167{
4168 int factor = this_cpu_read(sd_llc_size);
4169
4170
4171
4172
4173
4174
4175 if (p->wakee_flips > factor) {
4176
4177
4178
4179
4180
4181 if (current->wakee_flips > (factor * p->wakee_flips))
4182 return 1;
4183 }
4184
4185 return 0;
4186}
4187
4188static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4189{
4190 s64 this_load, load;
4191 int idx, this_cpu, prev_cpu;
4192 unsigned long tl_per_task;
4193 struct task_group *tg;
4194 unsigned long weight;
4195 int balanced;
4196
4197
4198
4199
4200
4201 if (wake_wide(p))
4202 return 0;
4203
4204 idx = sd->wake_idx;
4205 this_cpu = smp_processor_id();
4206 prev_cpu = task_cpu(p);
4207 load = source_load(prev_cpu, idx);
4208 this_load = target_load(this_cpu, idx);
4209
4210
4211
4212
4213
4214
4215 if (sync) {
4216 tg = task_group(current);
4217 weight = current->se.load.weight;
4218
4219 this_load += effective_load(tg, this_cpu, -weight, -weight);
4220 load += effective_load(tg, prev_cpu, 0, -weight);
4221 }
4222
4223 tg = task_group(p);
4224 weight = p->se.load.weight;
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235 if (this_load > 0) {
4236 s64 this_eff_load, prev_eff_load;
4237
4238 this_eff_load = 100;
4239 this_eff_load *= power_of(prev_cpu);
4240 this_eff_load *= this_load +
4241 effective_load(tg, this_cpu, weight, weight);
4242
4243 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4244 prev_eff_load *= power_of(this_cpu);
4245 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4246
4247 balanced = this_eff_load <= prev_eff_load;
4248 } else
4249 balanced = true;
4250
4251
4252
4253
4254
4255
4256 if (sync && balanced)
4257 return 1;
4258
4259 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
4260 tl_per_task = cpu_avg_load_per_task(this_cpu);
4261
4262 if (balanced ||
4263 (this_load <= load &&
4264 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
4265
4266
4267
4268
4269
4270 schedstat_inc(sd, ttwu_move_affine);
4271 schedstat_inc(p, se.statistics.nr_wakeups_affine);
4272
4273 return 1;
4274 }
4275 return 0;
4276}
4277
4278
4279
4280
4281
4282static struct sched_group *
4283find_idlest_group(struct sched_domain *sd, struct task_struct *p,
4284 int this_cpu, int sd_flag)
4285{
4286 struct sched_group *idlest = NULL, *group = sd->groups;
4287 unsigned long min_load = ULONG_MAX, this_load = 0;
4288 int load_idx = sd->forkexec_idx;
4289 int imbalance = 100 + (sd->imbalance_pct-100)/2;
4290
4291 if (sd_flag & SD_BALANCE_WAKE)
4292 load_idx = sd->wake_idx;
4293
4294 do {
4295 unsigned long load, avg_load;
4296 int local_group;
4297 int i;
4298
4299
4300 if (!cpumask_intersects(sched_group_cpus(group),
4301 tsk_cpus_allowed(p)))
4302 continue;
4303
4304 local_group = cpumask_test_cpu(this_cpu,
4305 sched_group_cpus(group));
4306
4307
4308 avg_load = 0;
4309
4310 for_each_cpu(i, sched_group_cpus(group)) {
4311
4312 if (local_group)
4313 load = source_load(i, load_idx);
4314 else
4315 load = target_load(i, load_idx);
4316
4317 avg_load += load;
4318 }
4319
4320
4321 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
4322
4323 if (local_group) {
4324 this_load = avg_load;
4325 } else if (avg_load < min_load) {
4326 min_load = avg_load;
4327 idlest = group;
4328 }
4329 } while (group = group->next, group != sd->groups);
4330
4331 if (!idlest || 100*this_load < imbalance*min_load)
4332 return NULL;
4333 return idlest;
4334}
4335
4336
4337
4338
4339static int
4340find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4341{
4342 unsigned long load, min_load = ULONG_MAX;
4343 int idlest = -1;
4344 int i;
4345
4346
4347 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
4348 load = weighted_cpuload(i);
4349
4350 if (load < min_load || (load == min_load && i == this_cpu)) {
4351 min_load = load;
4352 idlest = i;
4353 }
4354 }
4355
4356 return idlest;
4357}
4358
4359
4360
4361
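/*
 * Try and locate an idle CPU in the last-level-cache domain of @target,
 * preferring @target itself and then the task's previous CPU.
 */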
4362static int select_idle_sibling(struct task_struct *p, int target)
4363{
4364 struct sched_domain *sd;
4365 struct sched_group *sg;
4366 int i = task_cpu(p);
4367
4368 if (idle_cpu(target))
4369 return target;
4370
4371
4372
4373
4374 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4375 return i;
4376
4377
4378
4379
4380 sd = rcu_dereference(per_cpu(sd_llc, target));
4381 for_each_lower_domain(sd) {
4382 sg = sd->groups;
4383 do {
4384 if (!cpumask_intersects(sched_group_cpus(sg),
4385 tsk_cpus_allowed(p)))
4386 goto next;
4387
4388 for_each_cpu(i, sched_group_cpus(sg)) {
4389 if (i == target || !idle_cpu(i))
4390 goto next;
4391 }
4392
4393 target = cpumask_first_and(sched_group_cpus(sg),
4394 tsk_cpus_allowed(p));
4395 goto done;
4396next:
4397 sg = sg->next;
4398 } while (sg != sd->groups);
4399 }
4400done:
4401 return target;
4402}
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
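/*
 * Select the target runqueue for a waking, forking or exec'ing task.  Either
 * perform an affine (cache-friendly) wakeup near the waker when
 * SD_WAKE_AFFINE allows it, or walk down from the highest domain with the
 * relevant SD_BALANCE_* flag looking for the idlest group and CPU.
 *
 * Returns the chosen CPU; runs with preemption disabled.
 */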
4416static int
4417select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
4418{
4419 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
4420 int cpu = smp_processor_id();
4421 int new_cpu = cpu;
4422 int want_affine = 0;
4423 int sync = wake_flags & WF_SYNC;
4424
4425 if (p->nr_cpus_allowed == 1)
4426 return prev_cpu;
4427
4428 if (sd_flag & SD_BALANCE_WAKE) {
4429 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
4430 want_affine = 1;
4431 new_cpu = prev_cpu;
4432 }
4433
4434 rcu_read_lock();
4435 for_each_domain(cpu, tmp) {
4436 if (!(tmp->flags & SD_LOAD_BALANCE))
4437 continue;
4438
4439
4440
4441
4442
4443 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4444 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4445 affine_sd = tmp;
4446 break;
4447 }
4448
4449 if (tmp->flags & sd_flag)
4450 sd = tmp;
4451 }
4452
4453 if (affine_sd) {
4454 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
4455 prev_cpu = cpu;
4456
4457 new_cpu = select_idle_sibling(p, prev_cpu);
4458 goto unlock;
4459 }
4460
4461 while (sd) {
4462 struct sched_group *group;
4463 int weight;
4464
4465 if (!(sd->flags & sd_flag)) {
4466 sd = sd->child;
4467 continue;
4468 }
4469
4470 group = find_idlest_group(sd, p, cpu, sd_flag);
4471 if (!group) {
4472 sd = sd->child;
4473 continue;
4474 }
4475
4476 new_cpu = find_idlest_cpu(group, p, cpu);
4477 if (new_cpu == -1 || new_cpu == cpu) {
4478
4479 sd = sd->child;
4480 continue;
4481 }
4482
4483
4484 cpu = new_cpu;
4485 weight = sd->span_weight;
4486 sd = NULL;
4487 for_each_domain(cpu, tmp) {
4488 if (weight <= tmp->span_weight)
4489 break;
4490 if (tmp->flags & sd_flag)
4491 sd = tmp;
4492 }
4493
4494 }
4495unlock:
4496 rcu_read_unlock();
4497
4498 return new_cpu;
4499}
4500
4501
4502
4503
4504
4505
4506
4507static void
4508migrate_task_rq_fair(struct task_struct *p, int next_cpu)
4509{
4510 struct sched_entity *se = &p->se;
4511 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4512
4513
4514
4515
4516
4517
4518
4519 if (se->avg.decay_count) {
4520 se->avg.decay_count = -__synchronize_entity_decay(se);
4521 atomic_long_add(se->avg.load_avg_contrib,
4522 &cfs_rq->removed_load);
4523 }
4524}
4525#endif
4526
4527static unsigned long
4528wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
4529{
4530 unsigned long gran = sysctl_sched_wakeup_granularity;
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545 return calc_delta_fair(gran, se);
4546}
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
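/*
 * Should 'se' preempt 'curr'?  Returns 1 when curr's vruntime leads se's by
 * more than the scaled wakeup granularity, -1 when curr does not lead at
 * all, and 0 otherwise.
 */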
4562static int
4563wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
4564{
4565 s64 gran, vdiff = curr->vruntime - se->vruntime;
4566
4567 if (vdiff <= 0)
4568 return -1;
4569
4570 gran = wakeup_gran(curr, se);
4571 if (vdiff > gran)
4572 return 1;
4573
4574 return 0;
4575}
4576
4577static void set_last_buddy(struct sched_entity *se)
4578{
4579 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4580 return;
4581
4582 for_each_sched_entity(se)
4583 cfs_rq_of(se)->last = se;
4584}
4585
4586static void set_next_buddy(struct sched_entity *se)
4587{
4588 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4589 return;
4590
4591 for_each_sched_entity(se)
4592 cfs_rq_of(se)->next = se;
4593}
4594
4595static void set_skip_buddy(struct sched_entity *se)
4596{
4597 for_each_sched_entity(se)
4598 cfs_rq_of(se)->skip = se;
4599}
4600
4601
4602
4603
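/*
 * Preempt the current task with a newly woken task if needed.
 */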
4604static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
4605{
4606 struct task_struct *curr = rq->curr;
4607 struct sched_entity *se = &curr->se, *pse = &p->se;
4608 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4609 int scale = cfs_rq->nr_running >= sched_nr_latency;
4610 int next_buddy_marked = 0;
4611
4612 if (unlikely(se == pse))
4613 return;
4614
4615
4616
4617
4618
4619
4620
4621 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
4622 return;
4623
4624 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
4625 set_next_buddy(pse);
4626 next_buddy_marked = 1;
4627 }
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639 if (test_tsk_need_resched(curr))
4640 return;
4641
4642
4643 if (unlikely(curr->policy == SCHED_IDLE) &&
4644 likely(p->policy != SCHED_IDLE))
4645 goto preempt;
4646
4647
4648
4649
4650
4651 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
4652 return;
4653
4654 find_matching_se(&se, &pse);
4655 update_curr(cfs_rq_of(se));
4656 BUG_ON(!pse);
4657 if (wakeup_preempt_entity(se, pse) == 1) {
4658
4659
4660
4661
4662 if (!next_buddy_marked)
4663 set_next_buddy(pse);
4664 goto preempt;
4665 }
4666
4667 return;
4668
4669preempt:
4670 resched_task(curr);
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680 if (unlikely(!se->on_rq || curr == rq->idle))
4681 return;
4682
4683 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
4684 set_last_buddy(se);
4685}
4686
4687static struct task_struct *
4688pick_next_task_fair(struct rq *rq, struct task_struct *prev)
4689{
4690 struct cfs_rq *cfs_rq = &rq->cfs;
4691 struct sched_entity *se;
4692 struct task_struct *p;
4693 int new_tasks;
4694
4695again:
4696#ifdef CONFIG_FAIR_GROUP_SCHED
4697 if (!cfs_rq->nr_running)
4698 goto idle;
4699
4700 if (prev->sched_class != &fair_sched_class)
4701 goto simple;
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711 do {
4712 struct sched_entity *curr = cfs_rq->curr;
4713
4714
4715
4716
4717
4718
4719
4720 if (curr && curr->on_rq)
4721 update_curr(cfs_rq);
4722 else
4723 curr = NULL;
4724
4725
4726
4727
4728
4729
4730 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
4731 goto simple;
4732
4733 se = pick_next_entity(cfs_rq, curr);
4734 cfs_rq = group_cfs_rq(se);
4735 } while (cfs_rq);
4736
4737 p = task_of(se);
4738
4739
4740
4741
4742
4743
4744 if (prev != p) {
4745 struct sched_entity *pse = &prev->se;
4746
4747 while (!(cfs_rq = is_same_group(se, pse))) {
4748 int se_depth = se->depth;
4749 int pse_depth = pse->depth;
4750
4751 if (se_depth <= pse_depth) {
4752 put_prev_entity(cfs_rq_of(pse), pse);
4753 pse = parent_entity(pse);
4754 }
4755 if (se_depth >= pse_depth) {
4756 set_next_entity(cfs_rq_of(se), se);
4757 se = parent_entity(se);
4758 }
4759 }
4760
4761 put_prev_entity(cfs_rq, pse);
4762 set_next_entity(cfs_rq, se);
4763 }
4764
4765 if (hrtick_enabled(rq))
4766 hrtick_start_fair(rq, p);
4767
4768 return p;
4769simple:
4770 cfs_rq = &rq->cfs;
4771#endif
4772
4773 if (!cfs_rq->nr_running)
4774 goto idle;
4775
4776 put_prev_task(rq, prev);
4777
4778 do {
4779 se = pick_next_entity(cfs_rq, NULL);
4780 set_next_entity(cfs_rq, se);
4781 cfs_rq = group_cfs_rq(se);
4782 } while (cfs_rq);
4783
4784 p = task_of(se);
4785
4786 if (hrtick_enabled(rq))
4787 hrtick_start_fair(rq, p);
4788
4789 return p;
4790
4791idle:
4792 new_tasks = idle_balance(rq);
4793
4794
4795
4796
4797
4798 if (new_tasks < 0)
4799 return RETRY_TASK;
4800
4801 if (new_tasks > 0)
4802 goto again;
4803
4804 return NULL;
4805}
4806
4807
4808
4809
4810static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
4811{
4812 struct sched_entity *se = &prev->se;
4813 struct cfs_rq *cfs_rq;
4814
4815 for_each_sched_entity(se) {
4816 cfs_rq = cfs_rq_of(se);
4817 put_prev_entity(cfs_rq, se);
4818 }
4819}
4820
4821
4822
4823
4824
4825
4826static void yield_task_fair(struct rq *rq)
4827{
4828 struct task_struct *curr = rq->curr;
4829 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4830 struct sched_entity *se = &curr->se;
4831
4832
4833
4834
4835 if (unlikely(rq->nr_running == 1))
4836 return;
4837
4838 clear_buddies(cfs_rq, se);
4839
4840 if (curr->policy != SCHED_BATCH) {
4841 update_rq_clock(rq);
4842
4843
4844
4845 update_curr(cfs_rq);
4846
4847
4848
4849
4850
4851 rq->skip_clock_update = 1;
4852 }
4853
4854 set_skip_buddy(se);
4855}
4856
4857static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
4858{
4859 struct sched_entity *se = &p->se;
4860
4861
4862 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
4863 return false;
4864
4865
4866 set_next_buddy(se);
4867
4868 yield_task_fair(rq);
4869
4870 return true;
4871}
4872
4873#ifdef CONFIG_SMP
4874
4875
4990
4991
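/*
 * Fair-class load balancing: the code below moves tasks between runqueues so
 * that load is spread across the CPUs of a sched_domain.  It comprises the
 * task-migration helpers (move_task(), can_migrate_task(), move_tasks()) and
 * the per-group/per-domain statistics used to find the busiest group.
 */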
4992static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4993
4994enum fbq_type { regular, remote, all };
4995
4996#define LBF_ALL_PINNED 0x01
4997#define LBF_NEED_BREAK 0x02
4998#define LBF_DST_PINNED 0x04
4999#define LBF_SOME_PINNED 0x08
5000
5001struct lb_env {
5002 struct sched_domain *sd;
5003
5004 struct rq *src_rq;
5005 int src_cpu;
5006
5007 int dst_cpu;
5008 struct rq *dst_rq;
5009
5010 struct cpumask *dst_grpmask;
5011 int new_dst_cpu;
5012 enum cpu_idle_type idle;
5013 long imbalance;
5014
5015 struct cpumask *cpus;
5016
5017 unsigned int flags;
5018
5019 unsigned int loop;
5020 unsigned int loop_break;
5021 unsigned int loop_max;
5022
5023 enum fbq_type fbq_type;
5024};
5025
5026
5027
5028
5029
5030static void move_task(struct task_struct *p, struct lb_env *env)
5031{
5032 deactivate_task(env->src_rq, p, 0);
5033 set_task_cpu(p, env->dst_cpu);
5034 activate_task(env->dst_rq, p, 0);
5035 check_preempt_curr(env->dst_rq, p, 0);
5036}
5037
5038
5039
5040
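/*
 * Is this task likely cache-hot on its current CPU?
 */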
5041static int
5042task_hot(struct task_struct *p, u64 now)
5043{
5044 s64 delta;
5045
5046 if (p->sched_class != &fair_sched_class)
5047 return 0;
5048
5049 if (unlikely(p->policy == SCHED_IDLE))
5050 return 0;
5051
5052
5053
5054
5055 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
5056 (&p->se == cfs_rq_of(&p->se)->next ||
5057 &p->se == cfs_rq_of(&p->se)->last))
5058 return 1;
5059
5060 if (sysctl_sched_migration_cost == -1)
5061 return 1;
5062 if (sysctl_sched_migration_cost == 0)
5063 return 0;
5064
5065 delta = now - p->se.exec_start;
5066
5067 return delta < (s64)sysctl_sched_migration_cost;
5068}
5069
5070#ifdef CONFIG_NUMA_BALANCING
5071
5072static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
5073{
5074 int src_nid, dst_nid;
5075
5076 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
5077 !(env->sd->flags & SD_NUMA)) {
5078 return false;
5079 }
5080
5081 src_nid = cpu_to_node(env->src_cpu);
5082 dst_nid = cpu_to_node(env->dst_cpu);
5083
5084 if (src_nid == dst_nid)
5085 return false;
5086
5087
5088 if (dst_nid == p->numa_preferred_nid)
5089 return true;
5090
5091
5092 if (task_weight(p, dst_nid) > task_weight(p, src_nid) &&
5093 group_weight(p, dst_nid) > group_weight(p, src_nid))
5094 return true;
5095
5096 return false;
5097}
5098
5099
5100static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
5101{
5102 int src_nid, dst_nid;
5103
5104 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
5105 return false;
5106
5107 if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA))
5108 return false;
5109
5110 src_nid = cpu_to_node(env->src_cpu);
5111 dst_nid = cpu_to_node(env->dst_cpu);
5112
5113 if (src_nid == dst_nid)
5114 return false;
5115
5116
5117 if (src_nid == p->numa_preferred_nid)
5118 return true;
5119
5120
5121 if (task_weight(p, dst_nid) < task_weight(p, src_nid) ||
5122 group_weight(p, dst_nid) < group_weight(p, src_nid))
5123 return true;
5124
5125 return false;
5126}
5127
5128#else
5129static inline bool migrate_improves_locality(struct task_struct *p,
5130 struct lb_env *env)
5131{
5132 return false;
5133}
5134
5135static inline bool migrate_degrades_locality(struct task_struct *p,
5136 struct lb_env *env)
5137{
5138 return false;
5139}
5140#endif
5141
5142
5143
5144
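/*
 * can_migrate_task - may task @p be migrated from env->src_cpu to
 * env->dst_cpu?  Rejects throttled source/destination pairs, CPUs excluded
 * by the task's affinity mask, currently running tasks and, usually,
 * cache-hot tasks.
 */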
5145static
5146int can_migrate_task(struct task_struct *p, struct lb_env *env)
5147{
5148 int tsk_cache_hot = 0;
5149
5150
5151
5152
5153
5154
5155
5156 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
5157 return 0;
5158
5159 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
5160 int cpu;
5161
5162 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
5163
5164 env->flags |= LBF_SOME_PINNED;
5165
5166
5167
5168
5169
5170
5171
5172
5173
5174 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
5175 return 0;
5176
5177
5178 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
5179 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
5180 env->flags |= LBF_DST_PINNED;
5181 env->new_dst_cpu = cpu;
5182 break;
5183 }
5184 }
5185
5186 return 0;
5187 }
5188
5189
5190 env->flags &= ~LBF_ALL_PINNED;
5191
5192 if (task_running(env->src_rq, p)) {
5193 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
5194 return 0;
5195 }
5196
5197
5198
5199
5200
5201
5202
5203 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq));
5204 if (!tsk_cache_hot)
5205 tsk_cache_hot = migrate_degrades_locality(p, env);
5206
5207 if (migrate_improves_locality(p, env)) {
5208#ifdef CONFIG_SCHEDSTATS
5209 if (tsk_cache_hot) {
5210 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5211 schedstat_inc(p, se.statistics.nr_forced_migrations);
5212 }
5213#endif
5214 return 1;
5215 }
5216
5217 if (!tsk_cache_hot ||
5218 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
5219
5220 if (tsk_cache_hot) {
5221 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5222 schedstat_inc(p, se.statistics.nr_forced_migrations);
5223 }
5224
5225 return 1;
5226 }
5227
5228 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
5229 return 0;
5230}
5231
5232
5233
5234
5235
5236
5237
5238
5239static int move_one_task(struct lb_env *env)
5240{
5241 struct task_struct *p, *n;
5242
5243 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
5244 if (!can_migrate_task(p, env))
5245 continue;
5246
5247 move_task(p, env);
5248
5249
5250
5251
5252
5253 schedstat_inc(env->sd, lb_gained[env->idle]);
5254 return 1;
5255 }
5256 return 0;
5257}
5258
5259static const unsigned int sched_nr_migrate_break = 32;
5260
5261
5262
5263
5264
5265
5266
5267
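/*
 * move_tasks() tries to move up to env->imbalance weighted load from
 * env->src_rq to env->dst_rq as part of a balance operation within a
 * sched_domain.  Returns the number of tasks moved.
 */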
5268static int move_tasks(struct lb_env *env)
5269{
5270 struct list_head *tasks = &env->src_rq->cfs_tasks;
5271 struct task_struct *p;
5272 unsigned long load;
5273 int pulled = 0;
5274
5275 if (env->imbalance <= 0)
5276 return 0;
5277
5278 while (!list_empty(tasks)) {
5279 p = list_first_entry(tasks, struct task_struct, se.group_node);
5280
5281 env->loop++;
5282
5283 if (env->loop > env->loop_max)
5284 break;
5285
5286
5287 if (env->loop > env->loop_break) {
5288 env->loop_break += sched_nr_migrate_break;
5289 env->flags |= LBF_NEED_BREAK;
5290 break;
5291 }
5292
5293 if (!can_migrate_task(p, env))
5294 goto next;
5295
5296 load = task_h_load(p);
5297
5298 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
5299 goto next;
5300
5301 if ((load / 2) > env->imbalance)
5302 goto next;
5303
5304 move_task(p, env);
5305 pulled++;
5306 env->imbalance -= load;
5307
5308#ifdef CONFIG_PREEMPT
5309
5310
5311
5312
5313
5314 if (env->idle == CPU_NEWLY_IDLE)
5315 break;
5316#endif
5317
5318
5319
5320
5321
5322 if (env->imbalance <= 0)
5323 break;
5324
5325 continue;
5326next:
5327 list_move_tail(&p->se.group_node, tasks);
5328 }
5329
5330
5331
5332
5333
5334
5335 schedstat_add(env->sd, lb_gained[env->idle], pulled);
5336
5337 return pulled;
5338}
5339
5340#ifdef CONFIG_FAIR_GROUP_SCHED
5341
5342
5343
5344static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
5345{
5346 struct sched_entity *se = tg->se[cpu];
5347 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
5348
5349
5350 if (throttled_hierarchy(cfs_rq))
5351 return;
5352
5353 update_cfs_rq_blocked_load(cfs_rq, 1);
5354
5355 if (se) {
5356 update_entity_load_avg(se, 1);
5357
5358
5359
5360
5361
5362
5363
5364
5365
5366 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
5367 list_del_leaf_cfs_rq(cfs_rq);
5368 } else {
5369 struct rq *rq = rq_of(cfs_rq);
5370 update_rq_runnable_avg(rq, rq->nr_running);
5371 }
5372}
5373
5374static void update_blocked_averages(int cpu)
5375{
5376 struct rq *rq = cpu_rq(cpu);
5377 struct cfs_rq *cfs_rq;
5378 unsigned long flags;
5379
5380 raw_spin_lock_irqsave(&rq->lock, flags);
5381 update_rq_clock(rq);
5382
5383
5384
5385
5386 for_each_leaf_cfs_rq(rq, cfs_rq) {
5387
5388
5389
5390
5391
5392 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
5393 }
5394
5395 raw_spin_unlock_irqrestore(&rq->lock, flags);
5396}
5397
5398
5399
5400
5401
5402
5403static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
5404{
5405 struct rq *rq = rq_of(cfs_rq);
5406 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
5407 unsigned long now = jiffies;
5408 unsigned long load;
5409
5410 if (cfs_rq->last_h_load_update == now)
5411 return;
5412
5413 cfs_rq->h_load_next = NULL;
5414 for_each_sched_entity(se) {
5415 cfs_rq = cfs_rq_of(se);
5416 cfs_rq->h_load_next = se;
5417 if (cfs_rq->last_h_load_update == now)
5418 break;
5419 }
5420
5421 if (!se) {
5422 cfs_rq->h_load = cfs_rq->runnable_load_avg;
5423 cfs_rq->last_h_load_update = now;
5424 }
5425
5426 while ((se = cfs_rq->h_load_next) != NULL) {
5427 load = cfs_rq->h_load;
5428 load = div64_ul(load * se->avg.load_avg_contrib,
5429 cfs_rq->runnable_load_avg + 1);
5430 cfs_rq = group_cfs_rq(se);
5431 cfs_rq->h_load = load;
5432 cfs_rq->last_h_load_update = now;
5433 }
5434}
5435
5436static unsigned long task_h_load(struct task_struct *p)
5437{
5438 struct cfs_rq *cfs_rq = task_cfs_rq(p);
5439
5440 update_cfs_rq_h_load(cfs_rq);
5441 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
5442 cfs_rq->runnable_load_avg + 1);
5443}
5444#else
5445static inline void update_blocked_averages(int cpu)
5446{
5447}
5448
5449static unsigned long task_h_load(struct task_struct *p)
5450{
5451 return p->se.avg.load_avg_contrib;
5452}
5453#endif
5454
5455
5456
5457
5458
5459struct sg_lb_stats {
5460 unsigned long avg_load;
5461 unsigned long group_load;
5462 unsigned long sum_weighted_load;
5463 unsigned long load_per_task;
5464 unsigned long group_power;
5465 unsigned int sum_nr_running;
5466 unsigned int group_capacity;
5467 unsigned int idle_cpus;
5468 unsigned int group_weight;
5469 int group_imb;
5470 int group_has_capacity;
5471#ifdef CONFIG_NUMA_BALANCING
5472 unsigned int nr_numa_running;
5473 unsigned int nr_preferred_running;
5474#endif
5475};
5476
5477
5478
5479
5480
5481struct sd_lb_stats {
5482 struct sched_group *busiest;
5483 struct sched_group *local;
5484 unsigned long total_load;
5485 unsigned long total_pwr;
5486 unsigned long avg_load;
5487
5488 struct sg_lb_stats busiest_stat;
5489 struct sg_lb_stats local_stat;
5490};
5491
5492static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
5493{
5494
5495
5496
5497
5498
5499
5500 *sds = (struct sd_lb_stats){
5501 .busiest = NULL,
5502 .local = NULL,
5503 .total_load = 0UL,
5504 .total_pwr = 0UL,
5505 .busiest_stat = {
5506 .avg_load = 0UL,
5507 },
5508 };
5509}
5510
5511
5512
5513
5514
5515
5516
5517
5518static inline int get_sd_load_idx(struct sched_domain *sd,
5519 enum cpu_idle_type idle)
5520{
5521 int load_idx;
5522
5523 switch (idle) {
5524 case CPU_NOT_IDLE:
5525 load_idx = sd->busy_idx;
5526 break;
5527
5528 case CPU_NEWLY_IDLE:
5529 load_idx = sd->newidle_idx;
5530 break;
5531 default:
5532 load_idx = sd->idle_idx;
5533 break;
5534 }
5535
5536 return load_idx;
5537}
5538
5539static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
5540{
5541 return SCHED_POWER_SCALE;
5542}
5543
5544unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
5545{
5546 return default_scale_freq_power(sd, cpu);
5547}
5548
5549static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
5550{
5551 unsigned long weight = sd->span_weight;
5552 unsigned long smt_gain = sd->smt_gain;
5553
5554 smt_gain /= weight;
5555
5556 return smt_gain;
5557}
5558
5559unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
5560{
5561 return default_scale_smt_power(sd, cpu);
5562}
5563
5564static unsigned long scale_rt_power(int cpu)
5565{
5566 struct rq *rq = cpu_rq(cpu);
5567 u64 total, available, age_stamp, avg;
5568
5569
5570
5571
5572
5573 age_stamp = ACCESS_ONCE(rq->age_stamp);
5574 avg = ACCESS_ONCE(rq->rt_avg);
5575
5576 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
5577
5578 if (unlikely(total < avg)) {
5579
5580 available = 0;
5581 } else {
5582 available = total - avg;
5583 }
5584
5585 if (unlikely((s64)total < SCHED_POWER_SCALE))
5586 total = SCHED_POWER_SCALE;
5587
5588 total >>= SCHED_POWER_SHIFT;
5589
5590 return div_u64(available, total);
5591}
5592
5593static void update_cpu_power(struct sched_domain *sd, int cpu)
5594{
5595 unsigned long weight = sd->span_weight;
5596 unsigned long power = SCHED_POWER_SCALE;
5597 struct sched_group *sdg = sd->groups;
5598
5599 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
5600 if (sched_feat(ARCH_POWER))
5601 power *= arch_scale_smt_power(sd, cpu);
5602 else
5603 power *= default_scale_smt_power(sd, cpu);
5604
5605 power >>= SCHED_POWER_SHIFT;
5606 }
5607
5608 sdg->sgp->power_orig = power;
5609
5610 if (sched_feat(ARCH_POWER))
5611 power *= arch_scale_freq_power(sd, cpu);
5612 else
5613 power *= default_scale_freq_power(sd, cpu);
5614
5615 power >>= SCHED_POWER_SHIFT;
5616
5617 power *= scale_rt_power(cpu);
5618 power >>= SCHED_POWER_SHIFT;
5619
5620 if (!power)
5621 power = 1;
5622
5623 cpu_rq(cpu)->cpu_power = power;
5624 sdg->sgp->power = power;
5625}
5626
5627void update_group_power(struct sched_domain *sd, int cpu)
5628{
5629 struct sched_domain *child = sd->child;
5630 struct sched_group *group, *sdg = sd->groups;
5631 unsigned long power, power_orig;
5632 unsigned long interval;
5633
5634 interval = msecs_to_jiffies(sd->balance_interval);
5635 interval = clamp(interval, 1UL, max_load_balance_interval);
5636 sdg->sgp->next_update = jiffies + interval;
5637
5638 if (!child) {
5639 update_cpu_power(sd, cpu);
5640 return;
5641 }
5642
5643 power_orig = power = 0;
5644
5645 if (child->flags & SD_OVERLAP) {
5646
5647
5648
5649
5650
5651 for_each_cpu(cpu, sched_group_cpus(sdg)) {
5652 struct sched_group_power *sgp;
5653 struct rq *rq = cpu_rq(cpu);
5654
			/*
			 * build_sched_domains() -> init_sched_groups_power()
			 * gets here before we've attached the domains to the
			 * runqueues.
			 *
			 * Use power_of(), which is set irrespective of domains
			 * in update_cpu_power().
			 *
			 * This avoids power/power_orig being 0 and causing
			 * divide-by-zero issues on boot; runtime updates will
			 * correct power_orig later.
			 */
5668 if (unlikely(!rq->sd)) {
5669 power_orig += power_of(cpu);
5670 power += power_of(cpu);
5671 continue;
5672 }
5673
5674 sgp = rq->sd->groups->sgp;
5675 power_orig += sgp->power_orig;
5676 power += sgp->power;
5677 }
5678 } else {
5679
5680
5681
5682
5683
5684 group = child->groups;
5685 do {
5686 power_orig += group->sgp->power_orig;
5687 power += group->sgp->power;
5688 group = group->next;
5689 } while (group != child->groups);
5690 }
5691
5692 sdg->sgp->power_orig = power_orig;
5693 sdg->sgp->power = power;
5694}
5695
5696
5697
5698
5699
5700
5701
5702
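/*
 * Capacity of an SMT sibling can round down to zero after RT/IRQ scaling.
 * If the sibling still has most (~90%) of its original power, report a
 * capacity of one so it can still be picked as busiest.
 */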
5703static inline int
5704fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
5705{
5706
5707
5708
5709 if (!(sd->flags & SD_SHARE_CPUPOWER))
5710 return 0;
5711
5712
5713
5714
5715 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
5716 return 1;
5717
5718 return 0;
5719}
5720
/*
 * Group imbalance indicates (and tries to solve) the problem where balancing
 * groups is inadequate due to tsk_cpus_allowed() constraints.
 *
 * Imagine two groups of 4 cpus each and 4 tasks whose cpumask covers 1 cpu of
 * the first group and 3 cpus of the second group. Balancing group-wise would
 * place two tasks in each group, overloading the single allowed cpu in the
 * first group while leaving a cpu in the second group unused.
 *
 * The lower domain detects this skew: it fails to reach balance because task
 * affinities prevent it from moving tasks, and records that in
 * sched_group_power::imbalance. Such a group then becomes a candidate for
 * busiest (see update_sd_pick_busiest()), and calculate_imbalance() /
 * find_busiest_group() relax some of their usual conditions for it.
 */
5750static inline int sg_imbalanced(struct sched_group *group)
5751{
5752 return group->sgp->imbalance;
5753}
5754
5755
5756
5757
5758
5759
5760
5761
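/*
 * Compute the group capacity.
 *
 * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
 * first dividing out the smt factor to get the actual number of cores, and
 * limit the per-unit capacity with that.
 */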
5762static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
5763{
5764 unsigned int capacity, smt, cpus;
5765 unsigned int power, power_orig;
5766
5767 power = group->sgp->power;
5768 power_orig = group->sgp->power_orig;
5769 cpus = group->group_weight;
5770
5771
5772 smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
5773 capacity = cpus / smt;
5774
5775 capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
5776 if (!capacity)
5777 capacity = fix_small_capacity(env->sd, group);
5778
5779 return capacity;
5780}
5781
5782
5783
5784
5785
5786
5787
5788
5789
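/**
 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
 * @env: The load balancing environment.
 * @group: sched_group whose statistics are to be updated.
 * @load_idx: Load index of sched_domain of this_cpu for load calc.
 * @local_group: Does group contain this_cpu.
 * @sgs: variable to hold the statistics for this group.
 */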
5790static inline void update_sg_lb_stats(struct lb_env *env,
5791 struct sched_group *group, int load_idx,
5792 int local_group, struct sg_lb_stats *sgs)
5793{
5794 unsigned long load;
5795 int i;
5796
5797 memset(sgs, 0, sizeof(*sgs));
5798
5799 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
5800 struct rq *rq = cpu_rq(i);
5801
5802
5803 if (local_group)
5804 load = target_load(i, load_idx);
5805 else
5806 load = source_load(i, load_idx);
5807
5808 sgs->group_load += load;
5809 sgs->sum_nr_running += rq->nr_running;
5810#ifdef CONFIG_NUMA_BALANCING
5811 sgs->nr_numa_running += rq->nr_numa_running;
5812 sgs->nr_preferred_running += rq->nr_preferred_running;
5813#endif
5814 sgs->sum_weighted_load += weighted_cpuload(i);
5815 if (idle_cpu(i))
5816 sgs->idle_cpus++;
5817 }
5818
5819
5820 sgs->group_power = group->sgp->power;
5821 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
5822
5823 if (sgs->sum_nr_running)
5824 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
5825
5826 sgs->group_weight = group->group_weight;
5827
5828 sgs->group_imb = sg_imbalanced(group);
5829 sgs->group_capacity = sg_capacity(env, group);
5830
5831 if (sgs->group_capacity > sgs->sum_nr_running)
5832 sgs->group_has_capacity = 1;
5833}
5834
5835
5836
5837
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
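/**
 * update_sd_pick_busiest - return 1 on busiest group
 * @env: The load balancing environment.
 * @sds: sched_domain statistics
 * @sg: sched_group candidate to be checked for being the busiest
 * @sgs: sched_group statistics
 *
 * Determine if @sg is a busier group than the previously selected
 * busiest group.
 *
 * Return: %true if @sg is a busier group than the previously selected
 * busiest group, %false otherwise.
 */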
5848static bool update_sd_pick_busiest(struct lb_env *env,
5849 struct sd_lb_stats *sds,
5850 struct sched_group *sg,
5851 struct sg_lb_stats *sgs)
5852{
5853 if (sgs->avg_load <= sds->busiest_stat.avg_load)
5854 return false;
5855
5856 if (sgs->sum_nr_running > sgs->group_capacity)
5857 return true;
5858
5859 if (sgs->group_imb)
5860 return true;
5861
5862
5863
5864
5865
5866
5867 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
5868 env->dst_cpu < group_first_cpu(sg)) {
5869 if (!sds->busiest)
5870 return true;
5871
5872 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
5873 return true;
5874 }
5875
5876 return false;
5877}
5878
5879#ifdef CONFIG_NUMA_BALANCING
5880static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5881{
5882 if (sgs->sum_nr_running > sgs->nr_numa_running)
5883 return regular;
5884 if (sgs->sum_nr_running > sgs->nr_preferred_running)
5885 return remote;
5886 return all;
5887}
5888
5889static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5890{
5891 if (rq->nr_running > rq->nr_numa_running)
5892 return regular;
5893 if (rq->nr_running > rq->nr_preferred_running)
5894 return remote;
5895 return all;
5896}
5897#else
5898static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5899{
5900 return all;
5901}
5902
5903static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5904{
5905 return regular;
5906}
5907#endif
5908
5909
5910
5911
5912
5913
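/**
 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
 * @env: The load balancing environment.
 * @sds: variable to hold the statistics for this sched_domain.
 */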
5914static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
5915{
5916 struct sched_domain *child = env->sd->child;
5917 struct sched_group *sg = env->sd->groups;
5918 struct sg_lb_stats tmp_sgs;
5919 int load_idx, prefer_sibling = 0;
5920
5921 if (child && child->flags & SD_PREFER_SIBLING)
5922 prefer_sibling = 1;
5923
5924 load_idx = get_sd_load_idx(env->sd, env->idle);
5925
5926 do {
5927 struct sg_lb_stats *sgs = &tmp_sgs;
5928 int local_group;
5929
5930 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
5931 if (local_group) {
5932 sds->local = sg;
5933 sgs = &sds->local_stat;
5934
5935 if (env->idle != CPU_NEWLY_IDLE ||
5936 time_after_eq(jiffies, sg->sgp->next_update))
5937 update_group_power(env->sd, env->dst_cpu);
5938 }
5939
5940 update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
5941
5942 if (local_group)
5943 goto next_group;
5944
5945
5946
5947
5948
5949
5950
5951
5952
5953
5954
5955 if (prefer_sibling && sds->local &&
5956 sds->local_stat.group_has_capacity)
5957 sgs->group_capacity = min(sgs->group_capacity, 1U);
5958
5959 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
5960 sds->busiest = sg;
5961 sds->busiest_stat = *sgs;
5962 }
5963
5964next_group:
5965
5966 sds->total_load += sgs->group_load;
5967 sds->total_pwr += sgs->group_power;
5968
5969 sg = sg->next;
5970 } while (sg != env->sd->groups);
5971
5972 if (env->sd->flags & SD_NUMA)
5973 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
5974}
5975
/**
 * check_asym_packing - Check to see if the group is packed into the
 *			sched domain.
 *
 * Used with SD_ASYM_PACKING (e.g. at the SMT sibling level) where lower
 * numbered CPUs should be kept busy in preference to higher ones.  It
 * checks whether the busiest CPU in this domain has a higher CPU number
 * than the CPU the packing check is being run on.
 *
 * Return: 1 when packing is required and a task should be moved to
 * this CPU.  The amount of the imbalance is returned in env->imbalance.
 *
 * @env: The load balancing environment.
 * @sds: Statistics of the sched_domain which is to be packed
 */
5999static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
6000{
6001 int busiest_cpu;
6002
6003 if (!(env->sd->flags & SD_ASYM_PACKING))
6004 return 0;
6005
6006 if (!sds->busiest)
6007 return 0;
6008
6009 busiest_cpu = group_first_cpu(sds->busiest);
6010 if (env->dst_cpu > busiest_cpu)
6011 return 0;
6012
6013 env->imbalance = DIV_ROUND_CLOSEST(
6014 sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
6015 SCHED_POWER_SCALE);
6016
6017 return 1;
6018}
6019
6020
6021
6022
6023
6024
6025
6026
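/**
 * fix_small_imbalance - Calculate the minor imbalance that exists
 *			amongst the groups of a sched_domain, during
 *			load balancing.
 * @env: The load balancing environment.
 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
 */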
6027static inline
6028void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6029{
6030 unsigned long tmp, pwr_now = 0, pwr_move = 0;
6031 unsigned int imbn = 2;
6032 unsigned long scaled_busy_load_per_task;
6033 struct sg_lb_stats *local, *busiest;
6034
6035 local = &sds->local_stat;
6036 busiest = &sds->busiest_stat;
6037
6038 if (!local->sum_nr_running)
6039 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
6040 else if (busiest->load_per_task > local->load_per_task)
6041 imbn = 1;
6042
6043 scaled_busy_load_per_task =
6044 (busiest->load_per_task * SCHED_POWER_SCALE) /
6045 busiest->group_power;
6046
6047 if (busiest->avg_load + scaled_busy_load_per_task >=
6048 local->avg_load + (scaled_busy_load_per_task * imbn)) {
6049 env->imbalance = busiest->load_per_task;
6050 return;
6051 }
6052
6053
6054
6055
6056
6057
6058
6059 pwr_now += busiest->group_power *
6060 min(busiest->load_per_task, busiest->avg_load);
6061 pwr_now += local->group_power *
6062 min(local->load_per_task, local->avg_load);
6063 pwr_now /= SCHED_POWER_SCALE;
6064
6065
6066 if (busiest->avg_load > scaled_busy_load_per_task) {
6067 pwr_move += busiest->group_power *
6068 min(busiest->load_per_task,
6069 busiest->avg_load - scaled_busy_load_per_task);
6070 }
6071
6072
6073 if (busiest->avg_load * busiest->group_power <
6074 busiest->load_per_task * SCHED_POWER_SCALE) {
6075 tmp = (busiest->avg_load * busiest->group_power) /
6076 local->group_power;
6077 } else {
6078 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
6079 local->group_power;
6080 }
6081 pwr_move += local->group_power *
6082 min(local->load_per_task, local->avg_load + tmp);
6083 pwr_move /= SCHED_POWER_SCALE;
6084
6085
6086 if (pwr_move > pwr_now)
6087 env->imbalance = busiest->load_per_task;
6088}
6089
6090
6091
6092
6093
6094
6095
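/**
 * calculate_imbalance - Calculate the amount of imbalance present within the
 *			 groups of a given sched_domain during load balance.
 * @env: load balance environment
 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
 */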
6096static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6097{
6098 unsigned long max_pull, load_above_capacity = ~0UL;
6099 struct sg_lb_stats *local, *busiest;
6100
6101 local = &sds->local_stat;
6102 busiest = &sds->busiest_stat;
6103
6104 if (busiest->group_imb) {
6105
6106
6107
6108
6109 busiest->load_per_task =
6110 min(busiest->load_per_task, sds->avg_load);
6111 }
6112
6113
6114
6115
6116
6117
6118 if (busiest->avg_load <= sds->avg_load ||
6119 local->avg_load >= sds->avg_load) {
6120 env->imbalance = 0;
6121 return fix_small_imbalance(env, sds);
6122 }
6123
6124 if (!busiest->group_imb) {
6125
6126
6127
6128
6129
6130 load_above_capacity =
6131 (busiest->sum_nr_running - busiest->group_capacity);
6132
6133 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
6134 load_above_capacity /= busiest->group_power;
6135 }
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
6146
6147
6148 env->imbalance = min(
6149 max_pull * busiest->group_power,
6150 (sds->avg_load - local->avg_load) * local->group_power
6151 ) / SCHED_POWER_SCALE;
6152
6153
6154
6155
6156
6157
6158
6159 if (env->imbalance < busiest->load_per_task)
6160 return fix_small_imbalance(env, sds);
6161}
6162
/**
 * find_busiest_group - Returns the busiest group within the sched_domain
 * if there is an imbalance.
 *
 * Also calculates the amount of weighted load which should be moved to
 * restore balance, in env->imbalance.
 *
 * @env: The load balancing environment.
 *
 * Return: The busiest group if an imbalance exists, NULL otherwise.
 */
6182static struct sched_group *find_busiest_group(struct lb_env *env)
6183{
6184 struct sg_lb_stats *local, *busiest;
6185 struct sd_lb_stats sds;
6186
6187 init_sd_lb_stats(&sds);
6188
6189
6190
6191
6192
6193 update_sd_lb_stats(env, &sds);
6194 local = &sds.local_stat;
6195 busiest = &sds.busiest_stat;
6196
6197 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
6198 check_asym_packing(env, &sds))
6199 return sds.busiest;
6200
6201
6202 if (!sds.busiest || busiest->sum_nr_running == 0)
6203 goto out_balanced;
6204
6205 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
6206
6207
6208
6209
6210
6211
6212 if (busiest->group_imb)
6213 goto force_balance;
6214
6215
6216 if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
6217 !busiest->group_has_capacity)
6218 goto force_balance;
6219
6220
6221
6222
6223
6224 if (local->avg_load >= busiest->avg_load)
6225 goto out_balanced;
6226
6227
6228
6229
6230
6231 if (local->avg_load >= sds.avg_load)
6232 goto out_balanced;
6233
6234 if (env->idle == CPU_IDLE) {
6235
6236
6237
6238
6239
6240
6241 if ((local->idle_cpus < busiest->idle_cpus) &&
6242 busiest->sum_nr_running <= busiest->group_weight)
6243 goto out_balanced;
6244 } else {
6245
6246
6247
6248
6249 if (100 * busiest->avg_load <=
6250 env->sd->imbalance_pct * local->avg_load)
6251 goto out_balanced;
6252 }
6253
6254force_balance:
6255
6256 calculate_imbalance(env, &sds);
6257 return sds.busiest;
6258
6259out_balanced:
6260 env->imbalance = 0;
6261 return NULL;
6262}
6263
6264
6265
6266
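/*
 * find_busiest_queue - find the busiest runqueue among the cpus in the group.
 */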
6267static struct rq *find_busiest_queue(struct lb_env *env,
6268 struct sched_group *group)
6269{
6270 struct rq *busiest = NULL, *rq;
6271 unsigned long busiest_load = 0, busiest_power = 1;
6272 int i;
6273
6274 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6275 unsigned long power, capacity, wl;
6276 enum fbq_type rt;
6277
6278 rq = cpu_rq(i);
6279 rt = fbq_classify_rq(rq);
6280
		/*
		 * We classify groups/runqueues into three groups:
		 *  - regular: there are !numa tasks
		 *  - remote:  there are numa tasks that run on the 'wrong' node
		 *  - all:     there is no distinction
		 *
		 * In order to avoid migrating ideally placed numa tasks,
		 * ignore those when there's better options.
		 *
		 * If we ignore the actual busiest queue to migrate another
		 * task, the next balance pass can still reduce the busiest
		 * queue by moving tasks around inside the node.
		 */
6300 if (rt > env->fbq_type)
6301 continue;
6302
6303 power = power_of(i);
6304 capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
6305 if (!capacity)
6306 capacity = fix_small_capacity(env->sd, group);
6307
6308 wl = weighted_cpuload(i);
6309
6310
6311
6312
6313
6314 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
6315 continue;
6316
6317
6318
6319
6320
6321
6322
6323
6324
6325
6326
6327
6328 if (wl * busiest_power > busiest_load * power) {
6329 busiest_load = wl;
6330 busiest_power = power;
6331 busiest = rq;
6332 }
6333 }
6334
6335 return busiest;
6336}
6337
6338
6339
6340
6341
6342#define MAX_PINNED_INTERVAL 512
6343
6344
6345DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
6346
6347static int need_active_balance(struct lb_env *env)
6348{
6349 struct sched_domain *sd = env->sd;
6350
6351 if (env->idle == CPU_NEWLY_IDLE) {
6352
6353
6354
6355
6356
6357
6358 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
6359 return 1;
6360 }
6361
6362 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
6363}
6364
6365static int active_load_balance_cpu_stop(void *data);
6366
6367static int should_we_balance(struct lb_env *env)
6368{
6369 struct sched_group *sg = env->sd->groups;
6370 struct cpumask *sg_cpus, *sg_mask;
6371 int cpu, balance_cpu = -1;
6372
6373
6374
6375
6376
6377 if (env->idle == CPU_NEWLY_IDLE)
6378 return 1;
6379
6380 sg_cpus = sched_group_cpus(sg);
6381 sg_mask = sched_group_mask(sg);
6382
6383 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
6384 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
6385 continue;
6386
6387 balance_cpu = cpu;
6388 break;
6389 }
6390
6391 if (balance_cpu == -1)
6392 balance_cpu = group_balance_cpu(sg);
6393
6394
6395
6396
6397
6398 return balance_cpu == env->dst_cpu;
6399}
6400
6401
6402
6403
6404
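/*
 * Check this_cpu to ensure it is balanced within its domain. Attempt to move
 * tasks if there is an imbalance.
 */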
6405static int load_balance(int this_cpu, struct rq *this_rq,
6406 struct sched_domain *sd, enum cpu_idle_type idle,
6407 int *continue_balancing)
6408{
6409 int ld_moved, cur_ld_moved, active_balance = 0;
6410 struct sched_domain *sd_parent = sd->parent;
6411 struct sched_group *group;
6412 struct rq *busiest;
6413 unsigned long flags;
6414 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
6415
6416 struct lb_env env = {
6417 .sd = sd,
6418 .dst_cpu = this_cpu,
6419 .dst_rq = this_rq,
6420 .dst_grpmask = sched_group_cpus(sd->groups),
6421 .idle = idle,
6422 .loop_break = sched_nr_migrate_break,
6423 .cpus = cpus,
6424 .fbq_type = all,
6425 };
6426
6427
6428
6429
6430
6431 if (idle == CPU_NEWLY_IDLE)
6432 env.dst_grpmask = NULL;
6433
6434 cpumask_copy(cpus, cpu_active_mask);
6435
6436 schedstat_inc(sd, lb_count[idle]);
6437
6438redo:
6439 if (!should_we_balance(&env)) {
6440 *continue_balancing = 0;
6441 goto out_balanced;
6442 }
6443
6444 group = find_busiest_group(&env);
6445 if (!group) {
6446 schedstat_inc(sd, lb_nobusyg[idle]);
6447 goto out_balanced;
6448 }
6449
6450 busiest = find_busiest_queue(&env, group);
6451 if (!busiest) {
6452 schedstat_inc(sd, lb_nobusyq[idle]);
6453 goto out_balanced;
6454 }
6455
6456 BUG_ON(busiest == env.dst_rq);
6457
6458 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
6459
6460 ld_moved = 0;
6461 if (busiest->nr_running > 1) {
6462
6463
6464
6465
6466
6467
6468 env.flags |= LBF_ALL_PINNED;
6469 env.src_cpu = busiest->cpu;
6470 env.src_rq = busiest;
6471 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
6472
6473more_balance:
6474 local_irq_save(flags);
6475 double_rq_lock(env.dst_rq, busiest);
6476
6477
6478
6479
6480
6481 cur_ld_moved = move_tasks(&env);
6482 ld_moved += cur_ld_moved;
6483 double_rq_unlock(env.dst_rq, busiest);
6484 local_irq_restore(flags);
6485
6486
6487
6488
6489 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
6490 resched_cpu(env.dst_cpu);
6491
6492 if (env.flags & LBF_NEED_BREAK) {
6493 env.flags &= ~LBF_NEED_BREAK;
6494 goto more_balance;
6495 }
6496
		/*
		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
		 * us and move them to an alternate dst_cpu in our sched_group
		 * where they can run. The upper limit on how many times we
		 * iterate on the same src_cpu depends on the number of cpus in
		 * our sched_group.
		 *
		 * This changes load balance semantics a bit: in addition to
		 * dst_cpu itself (or an ilb_cpu acting on its behalf when
		 * dst_cpu is nohz-idle), balance_cpu is now also in a position
		 * to move load to dst_cpu. The two may occasionally decide
		 * independently and at different times, but such conflicts
		 * should be rare.
		 */
6516 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
6517
6518
6519 cpumask_clear_cpu(env.dst_cpu, env.cpus);
6520
6521 env.dst_rq = cpu_rq(env.new_dst_cpu);
6522 env.dst_cpu = env.new_dst_cpu;
6523 env.flags &= ~LBF_DST_PINNED;
6524 env.loop = 0;
6525 env.loop_break = sched_nr_migrate_break;
6526
6527
6528
6529
6530
6531 goto more_balance;
6532 }
6533
6534
6535
6536
6537 if (sd_parent) {
6538 int *group_imbalance = &sd_parent->groups->sgp->imbalance;
6539
6540 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
6541 *group_imbalance = 1;
6542 } else if (*group_imbalance)
6543 *group_imbalance = 0;
6544 }
6545
6546
6547 if (unlikely(env.flags & LBF_ALL_PINNED)) {
6548 cpumask_clear_cpu(cpu_of(busiest), cpus);
6549 if (!cpumask_empty(cpus)) {
6550 env.loop = 0;
6551 env.loop_break = sched_nr_migrate_break;
6552 goto redo;
6553 }
6554 goto out_balanced;
6555 }
6556 }
6557
6558 if (!ld_moved) {
6559 schedstat_inc(sd, lb_failed[idle]);
6560
6561
6562
6563
6564
6565
6566 if (idle != CPU_NEWLY_IDLE)
6567 sd->nr_balance_failed++;
6568
6569 if (need_active_balance(&env)) {
6570 raw_spin_lock_irqsave(&busiest->lock, flags);
6571
6572
6573
6574
6575
6576 if (!cpumask_test_cpu(this_cpu,
6577 tsk_cpus_allowed(busiest->curr))) {
6578 raw_spin_unlock_irqrestore(&busiest->lock,
6579 flags);
6580 env.flags |= LBF_ALL_PINNED;
6581 goto out_one_pinned;
6582 }
6583
6584
6585
6586
6587
6588
6589 if (!busiest->active_balance) {
6590 busiest->active_balance = 1;
6591 busiest->push_cpu = this_cpu;
6592 active_balance = 1;
6593 }
6594 raw_spin_unlock_irqrestore(&busiest->lock, flags);
6595
6596 if (active_balance) {
6597 stop_one_cpu_nowait(cpu_of(busiest),
6598 active_load_balance_cpu_stop, busiest,
6599 &busiest->active_balance_work);
6600 }
6601
6602
6603
6604
6605
6606 sd->nr_balance_failed = sd->cache_nice_tries+1;
6607 }
6608 } else
6609 sd->nr_balance_failed = 0;
6610
6611 if (likely(!active_balance)) {
6612
6613 sd->balance_interval = sd->min_interval;
6614 } else {
6615
6616
6617
6618
6619
6620
6621 if (sd->balance_interval < sd->max_interval)
6622 sd->balance_interval *= 2;
6623 }
6624
6625 goto out;
6626
6627out_balanced:
6628 schedstat_inc(sd, lb_balanced[idle]);
6629
6630 sd->nr_balance_failed = 0;
6631
6632out_one_pinned:
6633
6634 if (((env.flags & LBF_ALL_PINNED) &&
6635 sd->balance_interval < MAX_PINNED_INTERVAL) ||
6636 (sd->balance_interval < sd->max_interval))
6637 sd->balance_interval *= 2;
6638
6639 ld_moved = 0;
6640out:
6641 return ld_moved;
6642}
6643
6644
6645
6646
6647
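/*
 * idle_balance is called when this CPU is about to go idle. It attempts to
 * pull tasks from other CPUs before committing to idle.
 */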
6648static int idle_balance(struct rq *this_rq)
6649{
6650 struct sched_domain *sd;
6651 int pulled_task = 0;
6652 unsigned long next_balance = jiffies + HZ;
6653 u64 curr_cost = 0;
6654 int this_cpu = this_rq->cpu;
6655
6656 idle_enter_fair(this_rq);
6657
6658
6659
6660
6661
6662 this_rq->idle_stamp = rq_clock(this_rq);
6663
6664 if (this_rq->avg_idle < sysctl_sched_migration_cost)
6665 goto out;
6666
6667
6668
6669
6670 raw_spin_unlock(&this_rq->lock);
6671
6672 update_blocked_averages(this_cpu);
6673 rcu_read_lock();
6674 for_each_domain(this_cpu, sd) {
6675 unsigned long interval;
6676 int continue_balancing = 1;
6677 u64 t0, domain_cost;
6678
6679 if (!(sd->flags & SD_LOAD_BALANCE))
6680 continue;
6681
6682 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
6683 break;
6684
6685 if (sd->flags & SD_BALANCE_NEWIDLE) {
6686 t0 = sched_clock_cpu(this_cpu);
6687
6688
6689 pulled_task = load_balance(this_cpu, this_rq,
6690 sd, CPU_NEWLY_IDLE,
6691 &continue_balancing);
6692
6693 domain_cost = sched_clock_cpu(this_cpu) - t0;
6694 if (domain_cost > sd->max_newidle_lb_cost)
6695 sd->max_newidle_lb_cost = domain_cost;
6696
6697 curr_cost += domain_cost;
6698 }
6699
6700 interval = msecs_to_jiffies(sd->balance_interval);
6701 if (time_after(next_balance, sd->last_balance + interval))
6702 next_balance = sd->last_balance + interval;
6703 if (pulled_task)
6704 break;
6705 }
6706 rcu_read_unlock();
6707
6708 raw_spin_lock(&this_rq->lock);
6709
6710 if (curr_cost > this_rq->max_idle_balance_cost)
6711 this_rq->max_idle_balance_cost = curr_cost;
6712
6713
6714
6715
6716
6717
6718 if (this_rq->cfs.h_nr_running && !pulled_task)
6719 pulled_task = 1;
6720
6721 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
6722
6723
6724
6725
6726 this_rq->next_balance = next_balance;
6727 }
6728
6729out:
6730
6731 if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
6732 ((this_rq->stop && this_rq->stop->on_rq) ||
6733 this_rq->dl.dl_nr_running ||
6734 (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
6735 pulled_task = -1;
6736
6737 if (pulled_task) {
6738 idle_exit_fair(this_rq);
6739 this_rq->idle_stamp = 0;
6740 }
6741
6742 return pulled_task;
6743}
6744
6745
6746
6747
6748
6749
6750
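/*
 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
 * a running task from the busiest CPU over to the designated push_cpu,
 * moving at most one task.
 */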
6751static int active_load_balance_cpu_stop(void *data)
6752{
6753 struct rq *busiest_rq = data;
6754 int busiest_cpu = cpu_of(busiest_rq);
6755 int target_cpu = busiest_rq->push_cpu;
6756 struct rq *target_rq = cpu_rq(target_cpu);
6757 struct sched_domain *sd;
6758
6759 raw_spin_lock_irq(&busiest_rq->lock);
6760
6761
6762 if (unlikely(busiest_cpu != smp_processor_id() ||
6763 !busiest_rq->active_balance))
6764 goto out_unlock;
6765
6766
6767 if (busiest_rq->nr_running <= 1)
6768 goto out_unlock;
6769
6770
6771
6772
6773
6774
6775 BUG_ON(busiest_rq == target_rq);
6776
6777
6778 double_lock_balance(busiest_rq, target_rq);
6779
6780
6781 rcu_read_lock();
6782 for_each_domain(target_cpu, sd) {
6783 if ((sd->flags & SD_LOAD_BALANCE) &&
6784 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
6785 break;
6786 }
6787
6788 if (likely(sd)) {
6789 struct lb_env env = {
6790 .sd = sd,
6791 .dst_cpu = target_cpu,
6792 .dst_rq = target_rq,
6793 .src_cpu = busiest_rq->cpu,
6794 .src_rq = busiest_rq,
6795 .idle = CPU_IDLE,
6796 };
6797
6798 schedstat_inc(sd, alb_count);
6799
6800 if (move_one_task(&env))
6801 schedstat_inc(sd, alb_pushed);
6802 else
6803 schedstat_inc(sd, alb_failed);
6804 }
6805 rcu_read_unlock();
6806 double_unlock_balance(busiest_rq, target_rq);
6807out_unlock:
6808 busiest_rq->active_balance = 0;
6809 raw_spin_unlock_irq(&busiest_rq->lock);
6810 return 0;
6811}
6812
6813static inline int on_null_domain(struct rq *rq)
6814{
6815 return unlikely(!rcu_dereference_sched(rq->sd));
6816}
6817
6818#ifdef CONFIG_NO_HZ_COMMON
6819
6820
6821
6822
6823
6824
6825static struct {
6826 cpumask_var_t idle_cpus_mask;
6827 atomic_t nr_cpus;
6828 unsigned long next_balance;
6829} nohz ____cacheline_aligned;
6830
6831static inline int find_new_ilb(void)
6832{
6833 int ilb = cpumask_first(nohz.idle_cpus_mask);
6834
6835 if (ilb < nr_cpu_ids && idle_cpu(ilb))
6836 return ilb;
6837
6838 return nr_cpu_ids;
6839}
6840
6841
6842
6843
6844
6845
6846static void nohz_balancer_kick(void)
6847{
6848 int ilb_cpu;
6849
6850 nohz.next_balance++;
6851
6852 ilb_cpu = find_new_ilb();
6853
6854 if (ilb_cpu >= nr_cpu_ids)
6855 return;
6856
6857 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
6858 return;
6859
6860
6861
6862
6863
6864
6865 smp_send_reschedule(ilb_cpu);
6866 return;
6867}
6868
6869static inline void nohz_balance_exit_idle(int cpu)
6870{
6871 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
6872
6873
6874
6875 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
6876 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
6877 atomic_dec(&nohz.nr_cpus);
6878 }
6879 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
6880 }
6881}
6882
6883static inline void set_cpu_sd_state_busy(void)
6884{
6885 struct sched_domain *sd;
6886 int cpu = smp_processor_id();
6887
6888 rcu_read_lock();
6889 sd = rcu_dereference(per_cpu(sd_busy, cpu));
6890
6891 if (!sd || !sd->nohz_idle)
6892 goto unlock;
6893 sd->nohz_idle = 0;
6894
6895 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
6896unlock:
6897 rcu_read_unlock();
6898}
6899
6900void set_cpu_sd_state_idle(void)
6901{
6902 struct sched_domain *sd;
6903 int cpu = smp_processor_id();
6904
6905 rcu_read_lock();
6906 sd = rcu_dereference(per_cpu(sd_busy, cpu));
6907
6908 if (!sd || sd->nohz_idle)
6909 goto unlock;
6910 sd->nohz_idle = 1;
6911
6912 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
6913unlock:
6914 rcu_read_unlock();
6915}
6916
6917
6918
6919
6920
6921void nohz_balance_enter_idle(int cpu)
6922{
6923
6924
6925
6926 if (!cpu_active(cpu))
6927 return;
6928
6929 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
6930 return;
6931
6932
6933
6934
6935 if (on_null_domain(cpu_rq(cpu)))
6936 return;
6937
6938 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
6939 atomic_inc(&nohz.nr_cpus);
6940 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
6941}
6942
6943static int sched_ilb_notifier(struct notifier_block *nfb,
6944 unsigned long action, void *hcpu)
6945{
6946 switch (action & ~CPU_TASKS_FROZEN) {
6947 case CPU_DYING:
6948 nohz_balance_exit_idle(smp_processor_id());
6949 return NOTIFY_OK;
6950 default:
6951 return NOTIFY_DONE;
6952 }
6953}
6954#endif
6955
6956static DEFINE_SPINLOCK(balancing);
6957
6958
6959
6960
6961
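/*
 * Scale the max load_balance interval with the number of online CPUs. This
 * trades load-balance latency on larger machines for less cross-core
 * contention.
 */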
6962void update_max_interval(void)
6963{
6964 max_load_balance_interval = HZ*num_online_cpus()/10;
6965}
6966
6967
6968
6969
6970
6971
6972
6973static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
6974{
6975 int continue_balancing = 1;
6976 int cpu = rq->cpu;
6977 unsigned long interval;
6978 struct sched_domain *sd;
6979
6980 unsigned long next_balance = jiffies + 60*HZ;
6981 int update_next_balance = 0;
6982 int need_serialize, need_decay = 0;
6983 u64 max_cost = 0;
6984
6985 update_blocked_averages(cpu);
6986
6987 rcu_read_lock();
6988 for_each_domain(cpu, sd) {
6989
6990
6991
6992
6993 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
6994 sd->max_newidle_lb_cost =
6995 (sd->max_newidle_lb_cost * 253) / 256;
6996 sd->next_decay_max_lb_cost = jiffies + HZ;
6997 need_decay = 1;
6998 }
6999 max_cost += sd->max_newidle_lb_cost;
7000
7001 if (!(sd->flags & SD_LOAD_BALANCE))
7002 continue;
7003
7004
7005
7006
7007
7008
7009 if (!continue_balancing) {
7010 if (need_decay)
7011 continue;
7012 break;
7013 }
7014
7015 interval = sd->balance_interval;
7016 if (idle != CPU_IDLE)
7017 interval *= sd->busy_factor;
7018
7019
7020 interval = msecs_to_jiffies(interval);
7021 interval = clamp(interval, 1UL, max_load_balance_interval);
7022
7023 need_serialize = sd->flags & SD_SERIALIZE;
7024
7025 if (need_serialize) {
7026 if (!spin_trylock(&balancing))
7027 goto out;
7028 }
7029
7030 if (time_after_eq(jiffies, sd->last_balance + interval)) {
7031 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
7032
7033
7034
7035
7036
7037 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
7038 }
7039 sd->last_balance = jiffies;
7040 }
7041 if (need_serialize)
7042 spin_unlock(&balancing);
7043out:
7044 if (time_after(next_balance, sd->last_balance + interval)) {
7045 next_balance = sd->last_balance + interval;
7046 update_next_balance = 1;
7047 }
7048 }
7049 if (need_decay) {
7050
7051
7052
7053
7054 rq->max_idle_balance_cost =
7055 max((u64)sysctl_sched_migration_cost, max_cost);
7056 }
7057 rcu_read_unlock();
7058
7059
7060
7061
7062
7063
7064 if (likely(update_next_balance))
7065 rq->next_balance = next_balance;
7066}
7067
7068#ifdef CONFIG_NO_HZ_COMMON
7069
7070
7071
7072
7073static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
7074{
7075 int this_cpu = this_rq->cpu;
7076 struct rq *rq;
7077 int balance_cpu;
7078
7079 if (idle != CPU_IDLE ||
7080 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
7081 goto end;
7082
7083 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
7084 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
7085 continue;
7086
7087
7088
7089
7090
7091
7092 if (need_resched())
7093 break;
7094
7095 rq = cpu_rq(balance_cpu);
7096
7097 raw_spin_lock_irq(&rq->lock);
7098 update_rq_clock(rq);
7099 update_idle_cpu_load(rq);
7100 raw_spin_unlock_irq(&rq->lock);
7101
7102 rebalance_domains(rq, CPU_IDLE);
7103
7104 if (time_after(this_rq->next_balance, rq->next_balance))
7105 this_rq->next_balance = rq->next_balance;
7106 }
7107 nohz.next_balance = this_rq->next_balance;
7108end:
7109 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
7110}
7111
7112
7113
7114
7115
7116
7117
7118
7119
7120
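/*
 * Current heuristic for kicking the idle load balancer in the presence
 * of an idle cpu in the system:
 *   - this rq has more than one runnable task;
 *   - at the sd_busy level, this cpu's sched group has more than one
 *     busy cpu;
 *   - for SD_ASYM_PACKING (sd_asym), a lower-numbered cpu in the domain
 *     span is idle.
 */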
7121static inline int nohz_kick_needed(struct rq *rq)
7122{
7123 unsigned long now = jiffies;
7124 struct sched_domain *sd;
7125 struct sched_group_power *sgp;
7126 int nr_busy, cpu = rq->cpu;
7127
7128 if (unlikely(rq->idle_balance))
7129 return 0;
7130
7131
7132
7133
7134
7135 set_cpu_sd_state_busy();
7136 nohz_balance_exit_idle(cpu);
7137
7138
7139
7140
7141
7142 if (likely(!atomic_read(&nohz.nr_cpus)))
7143 return 0;
7144
7145 if (time_before(now, nohz.next_balance))
7146 return 0;
7147
7148 if (rq->nr_running >= 2)
7149 goto need_kick;
7150
7151 rcu_read_lock();
7152 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7153
7154 if (sd) {
7155 sgp = sd->groups->sgp;
7156 nr_busy = atomic_read(&sgp->nr_busy_cpus);
7157
7158 if (nr_busy > 1)
7159 goto need_kick_unlock;
7160 }
7161
7162 sd = rcu_dereference(per_cpu(sd_asym, cpu));
7163
7164 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
7165 sched_domain_span(sd)) < cpu))
7166 goto need_kick_unlock;
7167
7168 rcu_read_unlock();
7169 return 0;
7170
7171need_kick_unlock:
7172 rcu_read_unlock();
7173need_kick:
7174 return 1;
7175}
7176#else
7177static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
7178#endif
7179
7180
7181
7182
7183
7184static void run_rebalance_domains(struct softirq_action *h)
7185{
7186 struct rq *this_rq = this_rq();
7187 enum cpu_idle_type idle = this_rq->idle_balance ?
7188 CPU_IDLE : CPU_NOT_IDLE;
7189
7190 rebalance_domains(this_rq, idle);
7191
7192
7193
7194
7195
7196
7197 nohz_idle_balance(this_rq, idle);
7198}
7199
7200
7201
7202
7203void trigger_load_balance(struct rq *rq)
7204{
7205
7206 if (unlikely(on_null_domain(rq)))
7207 return;
7208
7209 if (time_after_eq(jiffies, rq->next_balance))
7210 raise_softirq(SCHED_SOFTIRQ);
7211#ifdef CONFIG_NO_HZ_COMMON
7212 if (nohz_kick_needed(rq))
7213 nohz_balancer_kick();
7214#endif
7215}
7216
7217static void rq_online_fair(struct rq *rq)
7218{
7219 update_sysctl();
7220}
7221
7222static void rq_offline_fair(struct rq *rq)
7223{
7224 update_sysctl();
7225
7226
7227 unthrottle_offline_cfs_rqs(rq);
7228}
7229
7230#endif
7231
7232
7233
7234
7235static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
7236{
7237 struct cfs_rq *cfs_rq;
7238 struct sched_entity *se = &curr->se;
7239
7240 for_each_sched_entity(se) {
7241 cfs_rq = cfs_rq_of(se);
7242 entity_tick(cfs_rq, se, queued);
7243 }
7244
7245 if (numabalancing_enabled)
7246 task_tick_numa(rq, curr);
7247
7248 update_rq_runnable_avg(rq, 1);
7249}
7250
7251
7252
7253
7254
7255
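/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */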
7256static void task_fork_fair(struct task_struct *p)
7257{
7258 struct cfs_rq *cfs_rq;
7259 struct sched_entity *se = &p->se, *curr;
7260 int this_cpu = smp_processor_id();
7261 struct rq *rq = this_rq();
7262 unsigned long flags;
7263
7264 raw_spin_lock_irqsave(&rq->lock, flags);
7265
7266 update_rq_clock(rq);
7267
7268 cfs_rq = task_cfs_rq(current);
7269 curr = cfs_rq->curr;
7270
7271
7272
7273
7274
7275
7276
7277 rcu_read_lock();
7278 __set_task_cpu(p, this_cpu);
7279 rcu_read_unlock();
7280
7281 update_curr(cfs_rq);
7282
7283 if (curr)
7284 se->vruntime = curr->vruntime;
7285 place_entity(cfs_rq, se, 1);
7286
7287 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
7288
7289
7290
7291
7292 swap(curr->vruntime, se->vruntime);
7293 resched_task(rq->curr);
7294 }
7295
7296 se->vruntime -= cfs_rq->min_vruntime;
7297
7298 raw_spin_unlock_irqrestore(&rq->lock, flags);
7299}
7300
7301
7302
7303
7304
7305static void
7306prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
7307{
7308 if (!p->se.on_rq)
7309 return;
7310
7311
7312
7313
7314
7315
7316 if (rq->curr == p) {
7317 if (p->prio > oldprio)
7318 resched_task(rq->curr);
7319 } else
7320 check_preempt_curr(rq, p, 0);
7321}
7322
7323static void switched_from_fair(struct rq *rq, struct task_struct *p)
7324{
7325 struct sched_entity *se = &p->se;
7326 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7327
7328
7329
7330
7331
7332
7333
7334
7335
7336
7337 if (!p->on_rq && p->state != TASK_RUNNING) {
7338
7339
7340
7341
7342 place_entity(cfs_rq, se, 0);
7343 se->vruntime -= cfs_rq->min_vruntime;
7344 }
7345
7346#ifdef CONFIG_SMP
7347
7348
7349
7350
7351
7352 if (se->avg.decay_count) {
7353 __synchronize_entity_decay(se);
7354 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
7355 }
7356#endif
7357}
7358
7359
7360
7361
7362static void switched_to_fair(struct rq *rq, struct task_struct *p)
7363{
7364 struct sched_entity *se = &p->se;
7365#ifdef CONFIG_FAIR_GROUP_SCHED
7366
7367
7368
7369
7370 se->depth = se->parent ? se->parent->depth + 1 : 0;
7371#endif
7372 if (!se->on_rq)
7373 return;
7374
7375
7376
7377
7378
7379
7380 if (rq->curr == p)
7381 resched_task(rq->curr);
7382 else
7383 check_preempt_curr(rq, p, 0);
7384}
7385
7386
7387
7388
7389
7390
7391static void set_curr_task_fair(struct rq *rq)
7392{
7393 struct sched_entity *se = &rq->curr->se;
7394
7395 for_each_sched_entity(se) {
7396 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7397
7398 set_next_entity(cfs_rq, se);
7399
7400 account_cfs_rq_runtime(cfs_rq, 0);
7401 }
7402}
7403
7404void init_cfs_rq(struct cfs_rq *cfs_rq)
7405{
7406 cfs_rq->tasks_timeline = RB_ROOT;
7407 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
7408#ifndef CONFIG_64BIT
7409 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
7410#endif
7411#ifdef CONFIG_SMP
7412 atomic64_set(&cfs_rq->decay_counter, 1);
7413 atomic_long_set(&cfs_rq->removed_load, 0);
7414#endif
7415}
7416
7417#ifdef CONFIG_FAIR_GROUP_SCHED
7418static void task_move_group_fair(struct task_struct *p, int on_rq)
7419{
7420 struct sched_entity *se = &p->se;
7421 struct cfs_rq *cfs_rq;
	/*
	 * A task that is not on the rq at the time of this cgroup move has
	 * been dequeued, and (except for the cases below) its ->vruntime
	 * still carries the old cfs_rq's min_vruntime, so it has to be
	 * re-based: subtract the old min_vruntime before switching runqueues
	 * and add the new one afterwards.
	 *
	 * Two !on_rq cases must be excluded: a freshly forked task that has
	 * never run (!se->sum_exec_runtime) and a task in TASK_WAKING, whose
	 * vruntime has already been normalized on the wakeup path.  Treat
	 * those as on_rq so their vruntime is left alone.
	 */
7448 if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING))
7449 on_rq = 1;
7450
7451 if (!on_rq)
7452 se->vruntime -= cfs_rq_of(se)->min_vruntime;
7453 set_task_rq(p, task_cpu(p));
7454 se->depth = se->parent ? se->parent->depth + 1 : 0;
7455 if (!on_rq) {
7456 cfs_rq = cfs_rq_of(se);
7457 se->vruntime += cfs_rq->min_vruntime;
7458#ifdef CONFIG_SMP
7459
7460
7461
7462
7463
7464 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
7465 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
7466#endif
7467 }
7468}
7469
7470void free_fair_sched_group(struct task_group *tg)
7471{
7472 int i;
7473
7474 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
7475
7476 for_each_possible_cpu(i) {
7477 if (tg->cfs_rq)
7478 kfree(tg->cfs_rq[i]);
7479 if (tg->se)
7480 kfree(tg->se[i]);
7481 }
7482
7483 kfree(tg->cfs_rq);
7484 kfree(tg->se);
7485}
7486
7487int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7488{
7489 struct cfs_rq *cfs_rq;
7490 struct sched_entity *se;
7491 int i;
7492
7493 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
7494 if (!tg->cfs_rq)
7495 goto err;
7496 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
7497 if (!tg->se)
7498 goto err;
7499
7500 tg->shares = NICE_0_LOAD;
7501
7502 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
7503
7504 for_each_possible_cpu(i) {
7505 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
7506 GFP_KERNEL, cpu_to_node(i));
7507 if (!cfs_rq)
7508 goto err;
7509
7510 se = kzalloc_node(sizeof(struct sched_entity),
7511 GFP_KERNEL, cpu_to_node(i));
7512 if (!se)
7513 goto err_free_rq;
7514
7515 init_cfs_rq(cfs_rq);
7516 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
7517 }
7518
7519 return 1;
7520
7521err_free_rq:
7522 kfree(cfs_rq);
7523err:
7524 return 0;
7525}
7526
7527void unregister_fair_sched_group(struct task_group *tg, int cpu)
7528{
7529 struct rq *rq = cpu_rq(cpu);
7530 unsigned long flags;
7531
7532
7533
7534
7535
7536 if (!tg->cfs_rq[cpu]->on_list)
7537 return;
7538
7539 raw_spin_lock_irqsave(&rq->lock, flags);
7540 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
7541 raw_spin_unlock_irqrestore(&rq->lock, flags);
7542}
7543
7544void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
7545 struct sched_entity *se, int cpu,
7546 struct sched_entity *parent)
7547{
7548 struct rq *rq = cpu_rq(cpu);
7549
7550 cfs_rq->tg = tg;
7551 cfs_rq->rq = rq;
7552 init_cfs_rq_runtime(cfs_rq);
7553
7554 tg->cfs_rq[cpu] = cfs_rq;
7555 tg->se[cpu] = se;
7556
7557
7558 if (!se)
7559 return;
7560
7561 if (!parent) {
7562 se->cfs_rq = &rq->cfs;
7563 se->depth = 0;
7564 } else {
7565 se->cfs_rq = parent->my_q;
7566 se->depth = parent->depth + 1;
7567 }
7568
7569 se->my_q = cfs_rq;
7570
7571 update_load_set(&se->load, NICE_0_LOAD);
7572 se->parent = parent;
7573}
7574
7575static DEFINE_MUTEX(shares_mutex);
7576
7577int sched_group_set_shares(struct task_group *tg, unsigned long shares)
7578{
7579 int i;
7580 unsigned long flags;
7581
7582
7583
7584
7585 if (!tg->se[0])
7586 return -EINVAL;
7587
7588 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
7589
7590 mutex_lock(&shares_mutex);
7591 if (tg->shares == shares)
7592 goto done;
7593
7594 tg->shares = shares;
7595 for_each_possible_cpu(i) {
7596 struct rq *rq = cpu_rq(i);
7597 struct sched_entity *se;
7598
7599 se = tg->se[i];
7600
7601 raw_spin_lock_irqsave(&rq->lock, flags);
7602
7603
7604 update_rq_clock(rq);
7605 for_each_sched_entity(se)
7606 update_cfs_shares(group_cfs_rq(se));
7607 raw_spin_unlock_irqrestore(&rq->lock, flags);
7608 }
7609
7610done:
7611 mutex_unlock(&shares_mutex);
7612 return 0;
7613}
7614#else
7615
7616void free_fair_sched_group(struct task_group *tg) { }
7617
7618int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7619{
7620 return 1;
7621}
7622
7623void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
7624
7625#endif
7626
7627
7628static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
7629{
7630 struct sched_entity *se = &task->se;
7631 unsigned int rr_interval = 0;
7632
7633
7634
7635
7636
7637 if (rq->cfs.load.weight)
7638 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
7639
7640 return rr_interval;
7641}
7642
7643
7644
7645
7646const struct sched_class fair_sched_class = {
7647 .next = &idle_sched_class,
7648 .enqueue_task = enqueue_task_fair,
7649 .dequeue_task = dequeue_task_fair,
7650 .yield_task = yield_task_fair,
7651 .yield_to_task = yield_to_task_fair,
7652
7653 .check_preempt_curr = check_preempt_wakeup,
7654
7655 .pick_next_task = pick_next_task_fair,
7656 .put_prev_task = put_prev_task_fair,
7657
7658#ifdef CONFIG_SMP
7659 .select_task_rq = select_task_rq_fair,
7660 .migrate_task_rq = migrate_task_rq_fair,
7661
7662 .rq_online = rq_online_fair,
7663 .rq_offline = rq_offline_fair,
7664
7665 .task_waking = task_waking_fair,
7666#endif
7667
7668 .set_curr_task = set_curr_task_fair,
7669 .task_tick = task_tick_fair,
7670 .task_fork = task_fork_fair,
7671
7672 .prio_changed = prio_changed_fair,
7673 .switched_from = switched_from_fair,
7674 .switched_to = switched_to_fair,
7675
7676 .get_rr_interval = get_rr_interval_fair,
7677
7678#ifdef CONFIG_FAIR_GROUP_SCHED
7679 .task_move_group = task_move_group_fair,
7680#endif
7681};
7682
7683#ifdef CONFIG_SCHED_DEBUG
7684void print_cfs_stats(struct seq_file *m, int cpu)
7685{
7686 struct cfs_rq *cfs_rq;
7687
7688 rcu_read_lock();
7689 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
7690 print_cfs_rq(m, cpu, cfs_rq);
7691 rcu_read_unlock();
7692}
7693#endif
7694
7695__init void init_sched_fair_class(void)
7696{
7697#ifdef CONFIG_SMP
7698 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
7699
7700#ifdef CONFIG_NO_HZ_COMMON
7701 nohz.next_balance = jiffies;
7702 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
7703 cpu_notifier(sched_ilb_notifier, 0);
7704#endif
7705#endif
7706
7707}
7708