/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/latencytop.h>
#include <linux/cpumask.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *(1+ilog(ncpus))
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from the global (tg) to the local
 * (per-cfs_rq) pool each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * (default: 5 msec, units: microseconds)
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 */
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}

#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static void __update_inv_weight(struct load_weight *lw)
{
	unsigned long w;

	if (likely(lw->inv_weight))
		return;

	w = scale_load_down(lw->weight);

	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
		lw->inv_weight = 1;
	else if (unlikely(!w))
		lw->inv_weight = WMULT_CONST;
	else
		lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD, in which case the 32-bit inv_weight plus
 * the 10 bits of NICE_0_LOAD guarantee that the shift stays positive; or
 * weight <= lw.weight (lw.weight is the runqueue weight), in which case
 * weight/lw.weight <= 1 and the shifted result cannot overflow.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	int shift = WMULT_SHIFT;

	__update_inv_weight(lw);

	if (unlikely(fact >> 32)) {
		while (fact >> 32) {
			fact >>= 1;
			shift--;
		}
	}

	/* hint to use a 32x32->64 bit multiply */
	fact = (u64)(u32)fact * lw->inv_weight;

	while (fact >> 32) {
		fact >>= 1;
		shift--;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}

const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */
246#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
249static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
250{
251 return cfs_rq->rq;
252}

/* An entity is a task if it doesn't "own" a runqueue */
255#define entity_is_task(se) (!se->my_q)
256
257static inline struct task_struct *task_of(struct sched_entity *se)
258{
259#ifdef CONFIG_SCHED_DEBUG
260 WARN_ON_ONCE(!entity_is_task(se));
261#endif
262 return container_of(se, struct task_struct, se);
263}

/* Walk up scheduling entities hierarchy */
266#define for_each_sched_entity(se) \
267 for (; se; se = se->parent)
268
269static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
270{
271 return p->se.cfs_rq;
272}

/* runqueue on which this entity is (to be) queued */
275static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
276{
277 return se->cfs_rq;
278}

/* runqueue "owned" by this group */
281static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
282{
283 return grp->my_q;
284}
285
286static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
287{
288 if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
295 if (cfs_rq->tg->parent &&
296 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
297 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
298 &rq_of(cfs_rq)->leaf_cfs_rq_list);
299 } else {
300 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
301 &rq_of(cfs_rq)->leaf_cfs_rq_list);
302 }
303
304 cfs_rq->on_list = 1;
305 }
306}
307
308static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
309{
310 if (cfs_rq->on_list) {
311 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
312 cfs_rq->on_list = 0;
313 }
314}

/* Iterate through all leaf cfs_rq's on a runqueue */
317#define for_each_leaf_cfs_rq(rq, cfs_rq) \
318 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
321static inline struct cfs_rq *
322is_same_group(struct sched_entity *se, struct sched_entity *pse)
323{
324 if (se->cfs_rq == pse->cfs_rq)
325 return se->cfs_rq;
326
327 return NULL;
328}
329
330static inline struct sched_entity *parent_entity(struct sched_entity *se)
331{
332 return se->parent;
333}
334
335static void
336find_matching_se(struct sched_entity **se, struct sched_entity **pse)
337{
338 int se_depth, pse_depth;

	/*
	 * A preemption test can only be made between sibling entities that
	 * are in the same cfs_rq, i.e. that have a common parent. Walk up
	 * the hierarchy of both tasks until we find ancestors that are
	 * siblings of a common parent.
	 */

	/* First walk up until both entities are at the same depth */
348 se_depth = (*se)->depth;
349 pse_depth = (*pse)->depth;
350
351 while (se_depth > pse_depth) {
352 se_depth--;
353 *se = parent_entity(*se);
354 }
355
356 while (pse_depth > se_depth) {
357 pse_depth--;
358 *pse = parent_entity(*pse);
359 }
360
361 while (!is_same_group(*se, *pse)) {
362 *se = parent_entity(*se);
363 *pse = parent_entity(*pse);
364 }
365}
366
367#else
368
369static inline struct task_struct *task_of(struct sched_entity *se)
370{
371 return container_of(se, struct task_struct, se);
372}
373
374static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
375{
376 return container_of(cfs_rq, struct rq, cfs);
377}
378
379#define entity_is_task(se) 1
380
381#define for_each_sched_entity(se) \
382 for (; se; se = NULL)
383
384static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
385{
386 return &task_rq(p)->cfs;
387}
388
389static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
390{
391 struct task_struct *p = task_of(se);
392 struct rq *rq = task_rq(p);
393
394 return &rq->cfs;
395}
396
397
398static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
399{
400 return NULL;
401}
402
403static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
404{
405}
406
407static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
408{
409}
410
411#define for_each_leaf_cfs_rq(rq, cfs_rq) \
412 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
413
414static inline struct sched_entity *parent_entity(struct sched_entity *se)
415{
416 return NULL;
417}
418
419static inline void
420find_matching_se(struct sched_entity **se, struct sched_entity **pse)
421{
422}
423
424#endif
425
426static __always_inline
427void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

433static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
434{
435 s64 delta = (s64)(vruntime - max_vruntime);
436 if (delta > 0)
437 max_vruntime = vruntime;
438
439 return max_vruntime;
440}
441
442static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
443{
444 s64 delta = (s64)(vruntime - min_vruntime);
445 if (delta < 0)
446 min_vruntime = vruntime;
447
448 return min_vruntime;
449}
450
451static inline int entity_before(struct sched_entity *a,
452 struct sched_entity *b)
453{
454 return (s64)(a->vruntime - b->vruntime) < 0;
455}
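
/*
 * Note (added): the (s64) casts in the helpers above make the comparisons
 * safe across u64 wrap-around of vruntime - only the signed difference
 * matters, much like time_after().
 */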
456
457static void update_min_vruntime(struct cfs_rq *cfs_rq)
458{
459 u64 vruntime = cfs_rq->min_vruntime;
460
461 if (cfs_rq->curr)
462 vruntime = cfs_rq->curr->vruntime;
463
464 if (cfs_rq->rb_leftmost) {
465 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
466 struct sched_entity,
467 run_node);
468
469 if (!cfs_rq->curr)
470 vruntime = se->vruntime;
471 else
472 vruntime = min_vruntime(vruntime, se->vruntime);
473 }

	/* ensure we never gain time by being placed backwards. */
476 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
477#ifndef CONFIG_64BIT
478 smp_wmb();
479 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
480#endif
481}

/*
 * Enqueue an entity into the rb-tree:
 */
486static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
487{
488 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
489 struct rb_node *parent = NULL;
490 struct sched_entity *entry;
491 int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
496 while (*link) {
497 parent = *link;
498 entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We dont care about collisions. Nodes with
		 * the same key stay together.
		 */
503 if (entity_before(se, entry)) {
504 link = &parent->rb_left;
505 } else {
506 link = &parent->rb_right;
507 leftmost = 0;
508 }
509 }

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
515 if (leftmost)
516 cfs_rq->rb_leftmost = &se->run_node;
517
518 rb_link_node(&se->run_node, parent, link);
519 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
520}
521
522static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
523{
524 if (cfs_rq->rb_leftmost == &se->run_node) {
525 struct rb_node *next_node;
526
527 next_node = rb_next(&se->run_node);
528 cfs_rq->rb_leftmost = next_node;
529 }
530
531 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
532}
533
534struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
535{
536 struct rb_node *left = cfs_rq->rb_leftmost;
537
538 if (!left)
539 return NULL;
540
541 return rb_entry(left, struct sched_entity, run_node);
542}
543
544static struct sched_entity *__pick_next_entity(struct sched_entity *se)
545{
546 struct rb_node *next = rb_next(&se->run_node);
547
548 if (!next)
549 return NULL;
550
551 return rb_entry(next, struct sched_entity, run_node);
552}
553
554#ifdef CONFIG_SCHED_DEBUG
555struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
556{
557 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
558
559 if (!last)
560 return NULL;
561
562 return rb_entry(last, struct sched_entity, run_node);
563}

/**************************************************************
 * Scheduling class statistics methods:
 */

569int sched_proc_update_handler(struct ctl_table *table, int write,
570 void __user *buffer, size_t *lenp,
571 loff_t *ppos)
572{
573 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
574 unsigned int factor = get_update_sysctl_factor();
575
576 if (ret || !write)
577 return ret;
578
579 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
580 sysctl_sched_min_granularity);
581
582#define WRT_SYSCTL(name) \
583 (normalized_sysctl_##name = sysctl_##name / (factor))
584 WRT_SYSCTL(sched_min_granularity);
585 WRT_SYSCTL(sched_latency);
586 WRT_SYSCTL(sched_wakeup_granularity);
587#undef WRT_SYSCTL
588
589 return 0;
590}
591#endif

/*
 * delta /= w
 */
596static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
597{
598 if (unlikely(se->load.weight != NICE_0_LOAD))
599 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
600
601 return delta;
602}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
612static u64 __sched_period(unsigned long nr_running)
613{
614 if (unlikely(nr_running > sched_nr_latency))
615 return nr_running * sysctl_sched_min_granularity;
616 else
617 return sysctl_sched_latency;
618}
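
/*
 * Example (added): with the default, unscaled tunables latency = 6ms and
 * min_granularity = 0.75ms, sched_nr_latency = 8. With up to 8 runnable
 * tasks the period stays at 6ms; with e.g. 10 tasks it is stretched to
 * 10 * 0.75ms = 7.5ms so every task still gets the minimum granularity.
 */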

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
626static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
627{
628 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
629
630 for_each_sched_entity(se) {
631 struct load_weight *load;
632 struct load_weight lw;
633
634 cfs_rq = cfs_rq_of(se);
635 load = &cfs_rq->load;
636
637 if (unlikely(!se->on_rq)) {
638 lw = cfs_rq->load;
639
640 update_load_add(&lw, se->load.weight);
641 load = &lw;
642 }
643 slice = __calc_delta(slice, se->load.weight, load);
644 }
645 return slice;
646}
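
/*
 * Example (added): two runnable nice-0 tasks on an otherwise idle runqueue
 * share the unscaled 6ms default period equally, so sched_slice() gives
 * each of them a 3ms slice.
 */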

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
653static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
654{
655 return calc_delta_fair(sched_slice(cfs_rq, se), se);
656}
657
658#ifdef CONFIG_SMP
659static int select_idle_sibling(struct task_struct *p, int cpu);
660static unsigned long task_h_load(struct task_struct *p);

/*
 * We choose a half-life of about one scheduling period (32ms):
 * the tables runnable_avg_yN_inv[] and runnable_avg_yN_sum[] below
 * depend on this value.
 */
667#define LOAD_AVG_PERIOD 32
668#define LOAD_AVG_MAX 47742
669#define LOAD_AVG_MAX_N 345

/* Give a new sched_entity its initial (maximal) load average values */
672void init_entity_runnable_average(struct sched_entity *se)
673{
674 struct sched_avg *sa = &se->avg;
675
676 sa->last_update_time = 0;
	/*
	 * sched_avg's period_contrib should be strictly less than 1024, so
	 * give it 1023 to make sure it is almost a full period (1024us) and
	 * will definitely be updated once the entity is enqueued.
	 */
682 sa->period_contrib = 1023;
683 sa->load_avg = scale_load_down(se->load.weight);
684 sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
685 sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
686 sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
687
688}
689
690static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
691static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
692#else
693void init_entity_runnable_average(struct sched_entity *se)
694{
695}
696#endif

/*
 * Update the current task's runtime statistics.
 */
701static void update_curr(struct cfs_rq *cfs_rq)
702{
703 struct sched_entity *curr = cfs_rq->curr;
704 u64 now = rq_clock_task(rq_of(cfs_rq));
705 u64 delta_exec;
706
707 if (unlikely(!curr))
708 return;
709
710 delta_exec = now - curr->exec_start;
711 if (unlikely((s64)delta_exec <= 0))
712 return;
713
714 curr->exec_start = now;
715
716 schedstat_set(curr->statistics.exec_max,
717 max(delta_exec, curr->statistics.exec_max));
718
719 curr->sum_exec_runtime += delta_exec;
720 schedstat_add(cfs_rq, exec_clock, delta_exec);
721
722 curr->vruntime += calc_delta_fair(delta_exec, curr);
723 update_min_vruntime(cfs_rq);
724
725 if (entity_is_task(curr)) {
726 struct task_struct *curtask = task_of(curr);
727
728 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
729 cpuacct_charge(curtask, delta_exec);
730 account_group_exec_runtime(curtask, delta_exec);
731 }
732
733 account_cfs_rq_runtime(cfs_rq, delta_exec);
734}
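
/*
 * Note (added): calc_delta_fair() scales the executed time by
 * NICE_0_LOAD/weight, so a heavier (lower nice) task accumulates vruntime
 * more slowly and therefore runs longer before the rbtree order lets
 * other entities in.
 */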
735
736static void update_curr_fair(struct rq *rq)
737{
738 update_curr(cfs_rq_of(&rq->curr->se));
739}
740
741#ifdef CONFIG_SCHEDSTATS
742static inline void
743update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
744{
745 u64 wait_start = rq_clock(rq_of(cfs_rq));
746
747 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
748 likely(wait_start > se->statistics.wait_start))
749 wait_start -= se->statistics.wait_start;
750
751 se->statistics.wait_start = wait_start;
752}
753
754static void
755update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
756{
757 struct task_struct *p;
758 u64 delta;
759
760 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
761
762 if (entity_is_task(se)) {
763 p = task_of(se);
764 if (task_on_rq_migrating(p)) {
			/*
			 * Preserve a migrating task's wait time so the
			 * wait_start time stamp can be adjusted to
			 * accumulate wait time prior to migration.
			 */
770 se->statistics.wait_start = delta;
771 return;
772 }
773 trace_sched_stat_wait(p, delta);
774 }
775
776 se->statistics.wait_max = max(se->statistics.wait_max, delta);
777 se->statistics.wait_count++;
778 se->statistics.wait_sum += delta;
779 se->statistics.wait_start = 0;
780}

/*
 * Task is being enqueued - update stats:
 */
785static inline void
786update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
787{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * we don't do the wait, because we are the one running)
	 */
792 if (se != cfs_rq->curr)
793 update_stats_wait_start(cfs_rq, se);
794}
795
796static inline void
797update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
798{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
803 if (se != cfs_rq->curr)
804 update_stats_wait_end(cfs_rq, se);
805
806 if (flags & DEQUEUE_SLEEP) {
807 if (entity_is_task(se)) {
808 struct task_struct *tsk = task_of(se);
809
810 if (tsk->state & TASK_INTERRUPTIBLE)
811 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
812 if (tsk->state & TASK_UNINTERRUPTIBLE)
813 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
814 }
815 }
816
817}
818#else
819static inline void
820update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
821{
822}
823
824static inline void
825update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
826{
827}
828
829static inline void
830update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
831{
832}
833
834static inline void
835update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
836{
837}
838#endif

/*
 * We are picking a new current task - update its stats:
 */
843static inline void
844update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
845{
	/*
	 * We are starting a new run period:
	 */
849 se->exec_start = rq_clock_task(rq_of(cfs_rq));
850}

/**************************************************
 * Scheduling class queueing methods:
 */

856#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the tasks virtual memory size and
 * numa_balancing_scan_size.
 */
862unsigned int sysctl_numa_balancing_scan_period_min = 1000;
863unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
866unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
869unsigned int sysctl_numa_balancing_scan_delay = 1000;
870
871static unsigned int task_nr_scan_windows(struct task_struct *p)
872{
873 unsigned long rss = 0;
874 unsigned long nr_scan_pages;

	/*
	 * Calculations are based on RSS, as non-present and empty pages are
	 * skipped by the PTE scanner and NUMA hinting faults are not created
	 * for them. The scan rate is therefore effectively a function of the
	 * amount of memory the process actually uses, not its virtual size.
	 */
881 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
882 rss = get_mm_rss(p->mm);
883 if (!rss)
884 rss = nr_scan_pages;
885
886 rss = round_up(rss, nr_scan_pages);
887 return rss / nr_scan_pages;
888}

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
891#define MAX_SCAN_WINDOW 2560
892
893static unsigned int task_scan_min(struct task_struct *p)
894{
895 unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
896 unsigned int scan, floor;
897 unsigned int windows = 1;
898
899 if (scan_size < MAX_SCAN_WINDOW)
900 windows = MAX_SCAN_WINDOW / scan_size;
901 floor = 1000 / windows;
902
903 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
904 return max_t(unsigned int, floor, scan);
905}
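
/*
 * Example (added): with the default 256MB scan size the floor above works
 * out to 1000 / (2560/256) = 100ms between scans.
 */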
906
907static unsigned int task_scan_max(struct task_struct *p)
908{
909 unsigned int smin = task_scan_min(p);
910 unsigned int smax;

	/* Watch for min being lower than max due to floor calculations */
913 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
914 return max(smin, smax);
915}
916
917static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
918{
919 rq->nr_numa_running += (p->numa_preferred_nid != -1);
920 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
921}
922
923static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
924{
925 rq->nr_numa_running -= (p->numa_preferred_nid != -1);
926 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
927}
928
929struct numa_group {
930 atomic_t refcount;
931
932 spinlock_t lock;
933 int nr_tasks;
934 pid_t gid;
935 int active_nodes;
936
937 struct rcu_head rcu;
938 unsigned long total_faults;
939 unsigned long max_faults_cpu;
	/*
	 * faults_cpu is used to decide whether memory should move
	 * towards the CPU. As a consequence, these stats are weighted
	 * more by CPU use than by memory faults.
	 */
945 unsigned long *faults_cpu;
946 unsigned long faults[0];
947};
948
949
950#define NR_NUMA_HINT_FAULT_TYPES 2
951
952
953#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
954
955
956#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
957
958pid_t task_numa_group_id(struct task_struct *p)
959{
960 return p->numa_group ? p->numa_group->gid : 0;
961}

/*
 * The averaged statistics, shared & private, memory & cpu,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
969static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
970{
971 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
972}
973
974static inline unsigned long task_faults(struct task_struct *p, int nid)
975{
976 if (!p->numa_faults)
977 return 0;
978
979 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
980 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
981}
982
983static inline unsigned long group_faults(struct task_struct *p, int nid)
984{
985 if (!p->numa_group)
986 return 0;
987
988 return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
989 p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
990}
991
992static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
993{
994 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
995 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
996}
997

/*
 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 * considered part of a numa group's pseudo-interleaving set. Migrations
 * between these nodes are slowed down, to allow things to settle down.
 */
1003#define ACTIVE_NODE_FRACTION 3
1004
1005static bool numa_is_active_node(int nid, struct numa_group *ng)
1006{
1007 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1008}

/* Handle placement on systems where not all nodes are directly connected. */
1011static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
1012 int maxdist, bool task)
1013{
1014 unsigned long score = 0;
1015 int node;
1016
1017
1018
1019
1020
1021 if (sched_numa_topology_type == NUMA_DIRECT)
1022 return 0;
1023
1024
1025
1026
1027
1028 for_each_online_node(node) {
1029 unsigned long faults;
1030 int dist = node_distance(nid, node);
1031
1032
1033
1034
1035
1036 if (dist == sched_max_numa_distance || node == nid)
1037 continue;
1038
1039
1040
1041
1042
1043
1044
1045
1046 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1047 dist > maxdist)
1048 continue;
1049
1050
1051 if (task)
1052 faults = task_faults(p, node);
1053 else
1054 faults = group_faults(p, node);
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1065 faults *= (sched_max_numa_distance - dist);
1066 faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
1067 }
1068
1069 score += faults;
1070 }
1071
1072 return score;
1073}
1074

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group together tasks that are almost
 * evenly spread out between numa nodes.
 */
1081static inline unsigned long task_weight(struct task_struct *p, int nid,
1082 int dist)
1083{
1084 unsigned long faults, total_faults;
1085
1086 if (!p->numa_faults)
1087 return 0;
1088
1089 total_faults = p->total_numa_faults;
1090
1091 if (!total_faults)
1092 return 0;
1093
1094 faults = task_faults(p, nid);
1095 faults += score_nearby_nodes(p, nid, dist, true);
1096
1097 return 1000 * faults / total_faults;
1098}
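
/*
 * Note (added): task_weight() above and group_weight() below both return a
 * per-mille style score - roughly the share (out of 1000) of the recorded
 * NUMA faults that occurred on @nid, with extra credit for nearby nodes on
 * non-direct topologies.
 */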
1099
1100static inline unsigned long group_weight(struct task_struct *p, int nid,
1101 int dist)
1102{
1103 unsigned long faults, total_faults;
1104
1105 if (!p->numa_group)
1106 return 0;
1107
1108 total_faults = p->numa_group->total_faults;
1109
1110 if (!total_faults)
1111 return 0;
1112
1113 faults = group_faults(p, nid);
1114 faults += score_nearby_nodes(p, nid, dist, false);
1115
1116 return 1000 * faults / total_faults;
1117}
1118
1119bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1120 int src_nid, int dst_cpu)
1121{
1122 struct numa_group *ng = p->numa_group;
1123 int dst_nid = cpu_to_node(dst_cpu);
1124 int last_cpupid, this_cpupid;
1125
1126 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);

	/*
	 * Multi-stage node selection is used in conjunction with a periodic
	 * migration fault to build a temporal task<->page relation. By using
	 * a two-stage filter we remove short/unlikely relations.
	 *
	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
	 * a task's usage of a particular page (n_p) per total usage of this
	 * page (n_t) (in a given time-span) to a probability.
	 *
	 * Our periodic faults will sample this probability and getting the
	 * same result twice in a row, given these samples are fully
	 * independent, is then given by P(n)^2, provided our sample period
	 * is sufficiently short compared to the usage pattern.
	 *
	 * This quadric squishes small probabilities, making it less likely we
	 * act on an unlikely task<->page relation.
	 */
1145 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1146 if (!cpupid_pid_unset(last_cpupid) &&
1147 cpupid_to_nid(last_cpupid) != dst_nid)
1148 return false;

	/* Always allow migrate on private faults */
1151 if (cpupid_match_pid(p, last_cpupid))
1152 return true;

	/* A shared fault, but p->numa_group has not been set up yet? */
1155 if (!ng)
1156 return true;

	/*
	 * Destination node is much more heavily used than the source
	 * node? Allow migration.
	 */
1162 if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1163 ACTIVE_NODE_FRACTION)
1164 return true;

	/*
	 * Distribute memory according to CPU & memory use on each node,
	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
	 *
	 * faults_cpu(dst)   3   faults_cpu(src)
	 * --------------- * - > ---------------
	 * faults_mem(dst)   4   faults_mem(src)
	 */
1174 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1175 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
1176}
1177
1178static unsigned long weighted_cpuload(const int cpu);
1179static unsigned long source_load(int cpu, int type);
1180static unsigned long target_load(int cpu, int type);
1181static unsigned long capacity_of(int cpu);
1182static long effective_load(struct task_group *tg, int cpu, long wl, long wg);

/* Cached statistics for all CPUs within a node */
1185struct numa_stats {
1186 unsigned long nr_running;
1187 unsigned long load;
1188
1189
1190 unsigned long compute_capacity;
1191
1192
1193 unsigned long task_capacity;
1194 int has_free_capacity;
1195};

/*
 * XXX borrowed from update_sg_lb_stats
 */
1200static void update_numa_stats(struct numa_stats *ns, int nid)
1201{
1202 int smt, cpu, cpus = 0;
1203 unsigned long capacity;
1204
1205 memset(ns, 0, sizeof(*ns));
1206 for_each_cpu(cpu, cpumask_of_node(nid)) {
1207 struct rq *rq = cpu_rq(cpu);
1208
1209 ns->nr_running += rq->nr_running;
1210 ns->load += weighted_cpuload(cpu);
1211 ns->compute_capacity += capacity_of(cpu);
1212
1213 cpus++;
1214 }

	/*
	 * If we raced with hotplug and there are no CPUs left in our mask
	 * the @ns structure is NULL'ed and task_numa_compare() will
	 * not find this node attractive.
	 *
	 * We'll either bail at !has_free_capacity, or we'll detect a huge
	 * imbalance and bail there.
	 */
1224 if (!cpus)
1225 return;
1226
1227
1228 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1229 capacity = cpus / smt;
1230
1231 ns->task_capacity = min_t(unsigned, capacity,
1232 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
1233 ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1234}
1235
1236struct task_numa_env {
1237 struct task_struct *p;
1238
1239 int src_cpu, src_nid;
1240 int dst_cpu, dst_nid;
1241
1242 struct numa_stats src_stats, dst_stats;
1243
1244 int imbalance_pct;
1245 int dist;
1246
1247 struct task_struct *best_task;
1248 long best_imp;
1249 int best_cpu;
1250};
1251
1252static void task_numa_assign(struct task_numa_env *env,
1253 struct task_struct *p, long imp)
1254{
1255 if (env->best_task)
1256 put_task_struct(env->best_task);
1257
1258 env->best_task = p;
1259 env->best_imp = imp;
1260 env->best_cpu = env->dst_cpu;
1261}
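
/*
 * Note (added): task_numa_compare() takes a reference on the candidate task
 * before passing it here (or passes NULL); the reference of any previously
 * cached best_task is dropped above.
 */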
1262
1263static bool load_too_imbalanced(long src_load, long dst_load,
1264 struct task_numa_env *env)
1265{
1266 long imb, old_imb;
1267 long orig_src_load, orig_dst_load;
1268 long src_capacity, dst_capacity;

	/*
	 * The load is corrected for the CPU capacity available on each node.
	 *
	 * src_load        dst_load
	 * ------------ vs ---------
	 * src_capacity    dst_capacity
	 */
1277 src_capacity = env->src_stats.compute_capacity;
1278 dst_capacity = env->dst_stats.compute_capacity;
1279
1280
1281 if (dst_load < src_load)
1282 swap(dst_load, src_load);
1283
1284
1285 imb = dst_load * src_capacity * 100 -
1286 src_load * dst_capacity * env->imbalance_pct;
1287 if (imb <= 0)
1288 return false;
1289
1290
1291
1292
1293
1294 orig_src_load = env->src_stats.load;
1295 orig_dst_load = env->dst_stats.load;
1296
1297 if (orig_dst_load < orig_src_load)
1298 swap(orig_dst_load, orig_src_load);
1299
1300 old_imb = orig_dst_load * src_capacity * 100 -
1301 orig_src_load * dst_capacity * env->imbalance_pct;
1302
1303
1304 return (imb > old_imb);
1305}

/*
 * This checks if the overall compute and NUMA accesses of the system would
 * be improved if the source task was migrated to the target dst_cpu, taking
 * into account that it might be best if the task running on the dst_cpu is
 * exchanged with the source task.
 */
1313static void task_numa_compare(struct task_numa_env *env,
1314 long taskimp, long groupimp)
1315{
1316 struct rq *src_rq = cpu_rq(env->src_cpu);
1317 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1318 struct task_struct *cur;
1319 long src_load, dst_load;
1320 long load;
1321 long imp = env->p->numa_group ? groupimp : taskimp;
1322 long moveimp = imp;
1323 int dist = env->dist;
1324 bool assigned = false;
1325
1326 rcu_read_lock();
1327
1328 raw_spin_lock_irq(&dst_rq->lock);
1329 cur = dst_rq->curr;
1330
1331
1332
1333 if ((cur->flags & PF_EXITING) || is_idle_task(cur))
1334 cur = NULL;
1335 else {
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345 get_task_struct(cur);
1346 }
1347
1348 raw_spin_unlock_irq(&dst_rq->lock);
1349
1350
1351
1352
1353
1354 if (cur == env->p)
1355 goto unlock;
1356
1357
1358
1359
1360
1361
1362
1363
1364 if (cur) {
1365
1366 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1367 goto unlock;
1368
1369
1370
1371
1372
1373 if (cur->numa_group == env->p->numa_group) {
1374 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1375 task_weight(cur, env->dst_nid, dist);
1376
1377
1378
1379
1380 if (cur->numa_group)
1381 imp -= imp/16;
1382 } else {
1383
1384
1385
1386
1387
1388 if (cur->numa_group)
1389 imp += group_weight(cur, env->src_nid, dist) -
1390 group_weight(cur, env->dst_nid, dist);
1391 else
1392 imp += task_weight(cur, env->src_nid, dist) -
1393 task_weight(cur, env->dst_nid, dist);
1394 }
1395 }
1396
1397 if (imp <= env->best_imp && moveimp <= env->best_imp)
1398 goto unlock;
1399
1400 if (!cur) {
1401
1402 if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
1403 !env->dst_stats.has_free_capacity)
1404 goto unlock;
1405
1406 goto balance;
1407 }
1408
1409
1410 if (imp > env->best_imp && src_rq->nr_running == 1 &&
1411 dst_rq->nr_running == 1)
1412 goto assign;
1413
1414
1415
1416
1417balance:
1418 load = task_h_load(env->p);
1419 dst_load = env->dst_stats.load + load;
1420 src_load = env->src_stats.load - load;
1421
1422 if (moveimp > imp && moveimp > env->best_imp) {
1423
1424
1425
1426
1427
1428
1429 if (!load_too_imbalanced(src_load, dst_load, env)) {
1430 imp = moveimp - 1;
1431 put_task_struct(cur);
1432 cur = NULL;
1433 goto assign;
1434 }
1435 }
1436
1437 if (imp <= env->best_imp)
1438 goto unlock;
1439
1440 if (cur) {
1441 load = task_h_load(cur);
1442 dst_load -= load;
1443 src_load += load;
1444 }
1445
1446 if (load_too_imbalanced(src_load, dst_load, env))
1447 goto unlock;
1448
1449
1450
1451
1452
1453 if (!cur)
1454 env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
1455
1456assign:
1457 assigned = true;
1458 task_numa_assign(env, cur, imp);
1459unlock:
1460 rcu_read_unlock();
1461
1462
1463
1464
1465 if (cur && !assigned)
1466 put_task_struct(cur);
1467}
1468
1469static void task_numa_find_cpu(struct task_numa_env *env,
1470 long taskimp, long groupimp)
1471{
1472 int cpu;
1473
1474 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1475
1476 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1477 continue;
1478
1479 env->dst_cpu = cpu;
1480 task_numa_compare(env, taskimp, groupimp);
1481 }
1482}

/* Only move tasks to a NUMA node less busy than the current node. */
1485static bool numa_has_capacity(struct task_numa_env *env)
1486{
1487 struct numa_stats *src = &env->src_stats;
1488 struct numa_stats *dst = &env->dst_stats;
1489
1490 if (src->has_free_capacity && !dst->has_free_capacity)
1491 return false;

	/*
	 * Only consider a task move if the source has a higher load
	 * than the destination, corrected for CPU capacity on each node.
	 *
	 *      src->load                dst->load
	 * --------------------- vs ---------------------
	 * src->compute_capacity    dst->compute_capacity
	 */
1501 if (src->load * dst->compute_capacity * env->imbalance_pct >
1502
1503 dst->load * src->compute_capacity * 100)
1504 return true;
1505
1506 return false;
1507}
1508
1509static int task_numa_migrate(struct task_struct *p)
1510{
1511 struct task_numa_env env = {
1512 .p = p,
1513
1514 .src_cpu = task_cpu(p),
1515 .src_nid = task_node(p),
1516
1517 .imbalance_pct = 112,
1518
1519 .best_task = NULL,
1520 .best_imp = 0,
1521 .best_cpu = -1,
1522 };
1523 struct sched_domain *sd;
1524 unsigned long taskweight, groupweight;
1525 int nid, ret, dist;
1526 long taskimp, groupimp;
1527

	/*
	 * Pick the lowest SD_NUMA domain, as that would have the smallest
	 * imbalance and would be the first to start moving tasks about.
	 *
	 * And we want to avoid any moving of tasks about, as that would create
	 * random movement of tasks -- counter the numa conditions we're trying
	 * to satisfy here.
	 */
1536 rcu_read_lock();
1537 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1538 if (sd)
1539 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1540 rcu_read_unlock();

	/*
	 * Cpusets can break the scheduler domain tree into smaller
	 * balance domains, some of which do not cross NUMA boundaries.
	 * Tasks that are "trapped" in such domains cannot be migrated
	 * elsewhere, so there is no point in (re)trying.
	 */
1548 if (unlikely(!sd)) {
1549 p->numa_preferred_nid = task_node(p);
1550 return -EINVAL;
1551 }
1552
1553 env.dst_nid = p->numa_preferred_nid;
1554 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1555 taskweight = task_weight(p, env.src_nid, dist);
1556 groupweight = group_weight(p, env.src_nid, dist);
1557 update_numa_stats(&env.src_stats, env.src_nid);
1558 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1559 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
1560 update_numa_stats(&env.dst_stats, env.dst_nid);
1561
1562
1563 if (numa_has_capacity(&env))
1564 task_numa_find_cpu(&env, taskimp, groupimp);

	/*
	 * Look at other nodes in these cases:
	 * - there is no space available on the preferred_nid
	 * - the task is part of a numa_group that is interleaved across
	 *   multiple NUMA nodes; in order to better consolidate the group,
	 *   we need to check other locations.
	 */
1573 if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
1574 for_each_online_node(nid) {
1575 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1576 continue;
1577
1578 dist = node_distance(env.src_nid, env.dst_nid);
1579 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1580 dist != env.dist) {
1581 taskweight = task_weight(p, env.src_nid, dist);
1582 groupweight = group_weight(p, env.src_nid, dist);
1583 }
1584
1585
1586 taskimp = task_weight(p, nid, dist) - taskweight;
1587 groupimp = group_weight(p, nid, dist) - groupweight;
1588 if (taskimp < 0 && groupimp < 0)
1589 continue;
1590
1591 env.dist = dist;
1592 env.dst_nid = nid;
1593 update_numa_stats(&env.dst_stats, env.dst_nid);
1594 if (numa_has_capacity(&env))
1595 task_numa_find_cpu(&env, taskimp, groupimp);
1596 }
1597 }

	/*
	 * If the task is part of a workload that spans multiple NUMA nodes,
	 * and is migrating into one of the workload's active nodes, remember
	 * this node as the task's preferred numa node, so the workload can
	 * settle down.
	 * A task that migrated to a second choice node will be better off
	 * trying for a better one later. Do not set the preferred node here.
	 */
1607 if (p->numa_group) {
1608 struct numa_group *ng = p->numa_group;
1609
1610 if (env.best_cpu == -1)
1611 nid = env.src_nid;
1612 else
1613 nid = env.dst_nid;
1614
1615 if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
1616 sched_setnuma(p, env.dst_nid);
1617 }
1618
1619
1620 if (env.best_cpu == -1)
1621 return -EAGAIN;

	/*
	 * Reset the scan period if the task is being rescheduled on an
	 * alternative node to recheck if the task is now properly placed.
	 */
1627 p->numa_scan_period = task_scan_min(p);
1628
1629 if (env.best_task == NULL) {
1630 ret = migrate_task_to(p, env.best_cpu);
1631 if (ret != 0)
1632 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1633 return ret;
1634 }
1635
1636 ret = migrate_swap(p, env.best_task);
1637 if (ret != 0)
1638 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1639 put_task_struct(env.best_task);
1640 return ret;
1641}

/* Attempt to migrate a task to a CPU on the preferred node. */
1644static void numa_migrate_preferred(struct task_struct *p)
1645{
1646 unsigned long interval = HZ;
1647
1648
1649 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
1650 return;
1651
1652
1653 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1654 p->numa_migrate_retry = jiffies + interval;
1655
1656
1657 if (task_node(p) == p->numa_preferred_nid)
1658 return;
1659
1660
1661 task_numa_migrate(p);
1662}

/*
 * Find out how many nodes the workload is actively running on. Do this by
 * tracking the nodes from which NUMA hinting faults are triggered. This can
 * be different from the set of nodes where the workload's memory is currently
 * located.
 */
1670static void numa_group_count_active_nodes(struct numa_group *numa_group)
1671{
1672 unsigned long faults, max_faults = 0;
1673 int nid, active_nodes = 0;
1674
1675 for_each_online_node(nid) {
1676 faults = group_faults_cpu(numa_group, nid);
1677 if (faults > max_faults)
1678 max_faults = faults;
1679 }
1680
1681 for_each_online_node(nid) {
1682 faults = group_faults_cpu(numa_group, nid);
1683 if (faults * ACTIVE_NODE_FRACTION > max_faults)
1684 active_nodes++;
1685 }
1686
1687 numa_group->max_faults_cpu = max_faults;
1688 numa_group->active_nodes = active_nodes;
1689}
1690

/*
 * The scan period is adapted in units of 1/NUMA_PERIOD_SLOTS of the
 * current period. NUMA_PERIOD_THRESHOLD is the local/remote fault ratio
 * (in slots) above which scanning is slowed down and below which it is
 * sped up; see update_task_scan_period() below.
 */
1698#define NUMA_PERIOD_SLOTS 10
1699#define NUMA_PERIOD_THRESHOLD 7

/*
 * Increase the scan period (slow down scanning) if the majority of
 * our memory is already on our local node, or if the majority of
 * the page accesses are shared with other processes.
 * Otherwise, decrease the scan period.
 */
1707static void update_task_scan_period(struct task_struct *p,
1708 unsigned long shared, unsigned long private)
1709{
1710 unsigned int period_slot;
1711 int ratio;
1712 int diff;
1713
1714 unsigned long remote = p->numa_faults_locality[0];
1715 unsigned long local = p->numa_faults_locality[1];

	/*
	 * If there were no record hinting faults then either the task is
	 * completely idle or all activity is in areas that are not of interest
	 * to automatic numa balancing. Related to that, if there were failed
	 * migrations then it implies we are migrating too quickly or the local
	 * node is overloaded. In either case, scan slower.
	 */
1724 if (local + shared == 0 || p->numa_faults_locality[2]) {
1725 p->numa_scan_period = min(p->numa_scan_period_max,
1726 p->numa_scan_period << 1);
1727
1728 p->mm->numa_next_scan = jiffies +
1729 msecs_to_jiffies(p->numa_scan_period);
1730
1731 return;
1732 }

	/*
	 * Prepare to scale the scan period relative to the current period:
	 * ratio == NUMA_PERIOD_THRESHOLD - scan period stays the same
	 * ratio <  NUMA_PERIOD_THRESHOLD - scan period decreases (scan faster)
	 * ratio >  NUMA_PERIOD_THRESHOLD - scan period increases (scan slower)
	 */
1740 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1741 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1742 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1743 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1744 if (!slot)
1745 slot = 1;
1746 diff = slot * period_slot;
1747 } else {
1748 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;

		/*
		 * Scale the decrease in scan period by the fraction of faults
		 * that were private: mostly-private access patterns benefit
		 * from faster scanning, while heavy sharing does not.
		 */
1758 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
1759 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1760 }
1761
1762 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1763 task_scan_min(p), task_scan_max(p));
1764 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1765}

/*
 * Get the fraction of time the task has been running since the last
 * NUMA placement cycle. The scheduler keeps similar statistics, but
 * decays those on a 32ms period, which is orders of magnitude off
 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
 * stats only if the task is so new there are no NUMA statistics yet.
 */
1774static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1775{
1776 u64 runtime, delta, now;
1777
1778 now = p->se.exec_start;
1779 runtime = p->se.sum_exec_runtime;
1780
1781 if (p->last_task_numa_placement) {
1782 delta = runtime - p->last_sum_exec_runtime;
1783 *period = now - p->last_task_numa_placement;
1784 } else {
1785 delta = p->se.avg.load_sum / p->se.load.weight;
1786 *period = LOAD_AVG_MAX;
1787 }
1788
1789 p->last_sum_exec_runtime = runtime;
1790 p->last_task_numa_placement = now;
1791
1792 return delta;
1793}

/*
 * Determine the preferred nid for a task in a numa_group. This needs to
 * be done in a way that produces consistent results with group_weight,
 * otherwise workloads might not converge.
 */
1800static int preferred_group_nid(struct task_struct *p, int nid)
1801{
1802 nodemask_t nodes;
1803 int dist;
1804
1805
1806 if (sched_numa_topology_type == NUMA_DIRECT)
1807 return nid;
1808
1809
1810
1811
1812
1813
1814 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1815 unsigned long score, max_score = 0;
1816 int node, max_node = nid;
1817
1818 dist = sched_max_numa_distance;
1819
1820 for_each_online_node(node) {
1821 score = group_weight(p, node, dist);
1822 if (score > max_score) {
1823 max_score = score;
1824 max_node = node;
1825 }
1826 }
1827 return max_node;
1828 }
1829

	/*
	 * Finding the preferred nid in a system with NUMA backplane
	 * interconnect topology needs to be done in a different way.
	 *
	 * Start with all online nodes and iteratively restrict the mask to
	 * ever smaller groups of nodes at ever shorter distances, keeping
	 * the group with the most group faults at every step.
	 */
1839 nodes = node_online_map;
1840 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
1841 unsigned long max_faults = 0;
1842 nodemask_t max_group = NODE_MASK_NONE;
1843 int a, b;
1844
1845
1846 if (!find_numa_distance(dist))
1847 continue;
1848
1849 for_each_node_mask(a, nodes) {
1850 unsigned long faults = 0;
1851 nodemask_t this_group;
1852 nodes_clear(this_group);
1853
1854
1855 for_each_node_mask(b, nodes) {
1856 if (node_distance(a, b) < dist) {
1857 faults += group_faults(p, b);
1858 node_set(b, this_group);
1859 node_clear(b, nodes);
1860 }
1861 }
1862
1863
1864 if (faults > max_faults) {
1865 max_faults = faults;
1866 max_group = this_group;
1867
1868
1869
1870
1871
1872 nid = a;
1873 }
1874 }
1875
1876 if (!max_faults)
1877 break;
1878 nodes = max_group;
1879 }
1880 return nid;
1881}
1882
1883static void task_numa_placement(struct task_struct *p)
1884{
1885 int seq, nid, max_nid = -1, max_group_nid = -1;
1886 unsigned long max_faults = 0, max_group_faults = 0;
1887 unsigned long fault_types[2] = { 0, 0 };
1888 unsigned long total_faults;
1889 u64 runtime, period;
1890 spinlock_t *group_lock = NULL;

	/*
	 * The p->mm->numa_scan_seq field gets updated without
	 * exclusive access. Use READ_ONCE() here to ensure
	 * that the field is read in a single access:
	 */
1897 seq = READ_ONCE(p->mm->numa_scan_seq);
1898 if (p->numa_scan_seq == seq)
1899 return;
1900 p->numa_scan_seq = seq;
1901 p->numa_scan_period_max = task_scan_max(p);
1902
1903 total_faults = p->numa_faults_locality[0] +
1904 p->numa_faults_locality[1];
1905 runtime = numa_get_avg_runtime(p, &period);
1906
1907
1908 if (p->numa_group) {
1909 group_lock = &p->numa_group->lock;
1910 spin_lock_irq(group_lock);
1911 }
1912
1913
1914 for_each_online_node(nid) {
1915
1916 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
1917 unsigned long faults = 0, group_faults = 0;
1918 int priv;
1919
1920 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
1921 long diff, f_diff, f_weight;
1922
1923 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
1924 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
1925 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
1926 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
1927
1928
1929 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
1930 fault_types[priv] += p->numa_faults[membuf_idx];
1931 p->numa_faults[membuf_idx] = 0;

			/*
			 * Normalize the CPU faults, so all tasks in a group
			 * count according to CPU use, instead of by the raw
			 * number of faults. Tasks with little runtime have
			 * little over-all impact on throughput, and thus their
			 * faults are less important.
			 */
1940 f_weight = div64_u64(runtime << 16, period + 1);
1941 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
1942 (total_faults + 1);
1943 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
1944 p->numa_faults[cpubuf_idx] = 0;
1945
1946 p->numa_faults[mem_idx] += diff;
1947 p->numa_faults[cpu_idx] += f_diff;
1948 faults += p->numa_faults[mem_idx];
1949 p->total_numa_faults += diff;
1950 if (p->numa_group) {
1951
1952
1953
1954
1955
1956
1957
1958 p->numa_group->faults[mem_idx] += diff;
1959 p->numa_group->faults_cpu[mem_idx] += f_diff;
1960 p->numa_group->total_faults += diff;
1961 group_faults += p->numa_group->faults[mem_idx];
1962 }
1963 }
1964
1965 if (faults > max_faults) {
1966 max_faults = faults;
1967 max_nid = nid;
1968 }
1969
1970 if (group_faults > max_group_faults) {
1971 max_group_faults = group_faults;
1972 max_group_nid = nid;
1973 }
1974 }
1975
1976 update_task_scan_period(p, fault_types[0], fault_types[1]);
1977
1978 if (p->numa_group) {
1979 numa_group_count_active_nodes(p->numa_group);
1980 spin_unlock_irq(group_lock);
1981 max_nid = preferred_group_nid(p, max_group_nid);
1982 }
1983
1984 if (max_faults) {
1985
1986 if (max_nid != p->numa_preferred_nid)
1987 sched_setnuma(p, max_nid);
1988
1989 if (task_node(p) != p->numa_preferred_nid)
1990 numa_migrate_preferred(p);
1991 }
1992}
1993
1994static inline int get_numa_group(struct numa_group *grp)
1995{
1996 return atomic_inc_not_zero(&grp->refcount);
1997}
1998
1999static inline void put_numa_group(struct numa_group *grp)
2000{
2001 if (atomic_dec_and_test(&grp->refcount))
2002 kfree_rcu(grp, rcu);
2003}
2004
2005static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2006 int *priv)
2007{
2008 struct numa_group *grp, *my_grp;
2009 struct task_struct *tsk;
2010 bool join = false;
2011 int cpu = cpupid_to_cpu(cpupid);
2012 int i;
2013
2014 if (unlikely(!p->numa_group)) {
2015 unsigned int size = sizeof(struct numa_group) +
2016 4*nr_node_ids*sizeof(unsigned long);
2017
2018 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2019 if (!grp)
2020 return;
2021
2022 atomic_set(&grp->refcount, 1);
2023 grp->active_nodes = 1;
2024 grp->max_faults_cpu = 0;
2025 spin_lock_init(&grp->lock);
2026 grp->gid = p->pid;
2027
2028 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2029 nr_node_ids;
2030
2031 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2032 grp->faults[i] = p->numa_faults[i];
2033
2034 grp->total_faults = p->total_numa_faults;
2035
2036 grp->nr_tasks++;
2037 rcu_assign_pointer(p->numa_group, grp);
2038 }
2039
2040 rcu_read_lock();
2041 tsk = READ_ONCE(cpu_rq(cpu)->curr);
2042
2043 if (!cpupid_match_pid(tsk, cpupid))
2044 goto no_join;
2045
2046 grp = rcu_dereference(tsk->numa_group);
2047 if (!grp)
2048 goto no_join;
2049
2050 my_grp = p->numa_group;
2051 if (grp == my_grp)
2052 goto no_join;

	/*
	 * Only join the other group if it is bigger; if we're the bigger
	 * group, the other task will join us.
	 */
2058 if (my_grp->nr_tasks > grp->nr_tasks)
2059 goto no_join;
2060
2061
2062
2063
2064 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
2065 goto no_join;
2066
2067
2068 if (tsk->mm == current->mm)
2069 join = true;
2070
2071
2072 if (flags & TNF_SHARED)
2073 join = true;
2074
2075
2076 *priv = !join;
2077
2078 if (join && !get_numa_group(grp))
2079 goto no_join;
2080
2081 rcu_read_unlock();
2082
2083 if (!join)
2084 return;
2085
2086 BUG_ON(irqs_disabled());
2087 double_lock_irq(&my_grp->lock, &grp->lock);
2088
2089 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2090 my_grp->faults[i] -= p->numa_faults[i];
2091 grp->faults[i] += p->numa_faults[i];
2092 }
2093 my_grp->total_faults -= p->total_numa_faults;
2094 grp->total_faults += p->total_numa_faults;
2095
2096 my_grp->nr_tasks--;
2097 grp->nr_tasks++;
2098
2099 spin_unlock(&my_grp->lock);
2100 spin_unlock_irq(&grp->lock);
2101
2102 rcu_assign_pointer(p->numa_group, grp);
2103
2104 put_numa_group(my_grp);
2105 return;
2106
2107no_join:
2108 rcu_read_unlock();
2109 return;
2110}
2111
2112void task_numa_free(struct task_struct *p)
2113{
2114 struct numa_group *grp = p->numa_group;
2115 void *numa_faults = p->numa_faults;
2116 unsigned long flags;
2117 int i;
2118
2119 if (grp) {
2120 spin_lock_irqsave(&grp->lock, flags);
2121 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2122 grp->faults[i] -= p->numa_faults[i];
2123 grp->total_faults -= p->total_numa_faults;
2124
2125 grp->nr_tasks--;
2126 spin_unlock_irqrestore(&grp->lock, flags);
2127 RCU_INIT_POINTER(p->numa_group, NULL);
2128 put_numa_group(grp);
2129 }
2130
2131 p->numa_faults = NULL;
2132 kfree(numa_faults);
2133}
2134

/*
 * Got a PROT_NONE fault for a page on @node.
 */
2138void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2139{
2140 struct task_struct *p = current;
2141 bool migrated = flags & TNF_MIGRATED;
2142 int cpu_node = task_node(current);
2143 int local = !!(flags & TNF_FAULT_LOCAL);
2144 struct numa_group *ng;
2145 int priv;
2146
2147 if (!static_branch_likely(&sched_numa_balancing))
2148 return;
2149
2150
2151 if (!p->mm)
2152 return;
2153
2154
2155 if (unlikely(!p->numa_faults)) {
2156 int size = sizeof(*p->numa_faults) *
2157 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2158
2159 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2160 if (!p->numa_faults)
2161 return;
2162
2163 p->total_numa_faults = 0;
2164 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2165 }

	/*
	 * First accesses are treated as private, otherwise consider accesses
	 * to be private if the accessing pid has not changed
	 */
2171 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2172 priv = 1;
2173 } else {
2174 priv = cpupid_match_pid(p, last_cpupid);
2175 if (!priv && !(flags & TNF_NO_GROUP))
2176 task_numa_group(p, last_cpupid, flags, &priv);
2177 }

	/*
	 * If a workload spans multiple NUMA nodes, a shared fault that
	 * occurs wholly within the set of nodes that the workload is
	 * actively using should be counted as local. This allows the
	 * scan rate to slow down when a workload has settled down.
	 */
2185 ng = p->numa_group;
2186 if (!priv && !local && ng && ng->active_nodes > 1 &&
2187 numa_is_active_node(cpu_node, ng) &&
2188 numa_is_active_node(mem_node, ng))
2189 local = 1;
2190
2191 task_numa_placement(p);

	/*
	 * Retry task to preferred node migration periodically, in case it
	 * previously failed, or the scheduler moved us.
	 */
2197 if (time_after(jiffies, p->numa_migrate_retry))
2198 numa_migrate_preferred(p);
2199
2200 if (migrated)
2201 p->numa_pages_migrated += pages;
2202 if (flags & TNF_MIGRATE_FAIL)
2203 p->numa_faults_locality[2] += pages;
2204
2205 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2206 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2207 p->numa_faults_locality[local] += pages;
2208}
2209
2210static void reset_ptenuma_scan(struct task_struct *p)
2211{
	/*
	 * We only did a read acquisition of the mmap sem, so
	 * p->mm->numa_scan_seq is written to without exclusive access
	 * and the update is not guaranteed to be atomic. That's not
	 * much of an issue though, since this is just used for
	 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
	 * expensive, to avoid any form of compiler optimizations:
	 */
2220 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2221 p->mm->numa_scan_offset = 0;
2222}

/*
 * The expensive part of numa migration is done from task_work context.
 * Triggered from task_tick_numa().
 */
2228void task_numa_work(struct callback_head *work)
2229{
2230 unsigned long migrate, next_scan, now = jiffies;
2231 struct task_struct *p = current;
2232 struct mm_struct *mm = p->mm;
2233 u64 runtime = p->se.sum_exec_runtime;
2234 struct vm_area_struct *vma;
2235 unsigned long start, end;
2236 unsigned long nr_pte_updates = 0;
2237 long pages, virtpages;
2238
2239 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
2240
2241 work->next = work;

	/*
	 * Who cares about NUMA placement when they're dying.
	 *
	 * NOTE: make sure not to dereference p->mm before this check,
	 * exit_task_work() happens _after_ exit_mm() so we could be called
	 * without p->mm even though we still had it when we enqueued this
	 * work.
	 */
2250 if (p->flags & PF_EXITING)
2251 return;
2252
2253 if (!mm->numa_next_scan) {
2254 mm->numa_next_scan = now +
2255 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2256 }
2257
2258
2259
2260
2261 migrate = mm->numa_next_scan;
2262 if (time_before(now, migrate))
2263 return;
2264
2265 if (p->numa_scan_period == 0) {
2266 p->numa_scan_period_max = task_scan_max(p);
2267 p->numa_scan_period = task_scan_min(p);
2268 }
2269
2270 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2271 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2272 return;

	/*
	 * Delay this task enough that another task of this mm will likely win
	 * the next time around.
	 */
2278 p->node_stamp += 2 * TICK_NSEC;
2279
2280 start = mm->numa_scan_offset;
2281 pages = sysctl_numa_balancing_scan_size;
2282 pages <<= 20 - PAGE_SHIFT;
2283 virtpages = pages * 8;
2284 if (!pages)
2285 return;
2286
2287
2288 down_read(&mm->mmap_sem);
2289 vma = find_vma(mm, start);
2290 if (!vma) {
2291 reset_ptenuma_scan(p);
2292 start = 0;
2293 vma = mm->mmap;
2294 }
2295 for (; vma; vma = vma->vm_next) {
2296 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
2297 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2298 continue;
2299 }

		/*
		 * Shared library pages mapped by multiple processes are not
		 * migrated as it is expected they are cache replicated. Avoid
		 * hinting faults in read-only file-backed mappings or the vdso
		 * as migrating the pages will be of marginal benefit.
		 */
2307 if (!vma->vm_mm ||
2308 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2309 continue;
2310
2311
2312
2313
2314
2315 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2316 continue;
2317
2318 do {
2319 start = max(start, vma->vm_start);
2320 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2321 end = min(end, vma->vm_end);
2322 nr_pte_updates = change_prot_numa(vma, start, end);

			/*
			 * Try to scan sysctl_numa_balancing_scan_size worth of
			 * hpages that have at least one present PTE that
			 * is not already pte-numa. If the VMA contains
			 * areas that are unused or already full of prot_numa
			 * PTEs, scan up to virtpages, to skip through those
			 * AREAs faster.
			 */
2332 if (nr_pte_updates)
2333 pages -= (end - start) >> PAGE_SHIFT;
2334 virtpages -= (end - start) >> PAGE_SHIFT;
2335
2336 start = end;
2337 if (pages <= 0 || virtpages <= 0)
2338 goto out;
2339
2340 cond_resched();
2341 } while (end != vma->vm_end);
2342 }
2343
2344out:
	/*
	 * It is possible to reach the end of the VMA list but the last few
	 * VMAs are not guaranteed to be vma_migratable. If they are not, we
	 * would find the !migratable VMA on the next scan but not reset the
	 * scanner to the start so check it now.
	 */
2351 if (vma)
2352 mm->numa_scan_offset = start;
2353 else
2354 reset_ptenuma_scan(p);
2355 up_read(&mm->mmap_sem);

	/*
	 * Make sure tasks use at least 32x as much time to run other code
	 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
	 * Usually update_task_scan_period slows down scanning enough; on an
	 * overloaded system we need to limit overhead on a per task basis.
	 */
2363 if (unlikely(p->se.sum_exec_runtime != runtime)) {
2364 u64 diff = p->se.sum_exec_runtime - runtime;
2365 p->node_stamp += 32 * diff;
2366 }
2367}

/*
 * Drive the periodic memory faults..
 */
2372void task_tick_numa(struct rq *rq, struct task_struct *curr)
2373{
2374 struct callback_head *work = &curr->numa_work;
2375 u64 period, now;
2376
2377
2378
2379
2380 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2381 return;

	/*
	 * Using runtime rather than walltime has the dual advantage that
	 * we (mostly) drive the selection from busy threads and that the
	 * task needs to have done some actual work before we bother with
	 * NUMA placement.
	 */
2389 now = curr->se.sum_exec_runtime;
2390 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2391
2392 if (now > curr->node_stamp + period) {
2393 if (!curr->node_stamp)
2394 curr->numa_scan_period = task_scan_min(curr);
2395 curr->node_stamp += period;
2396
2397 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2398 init_task_work(work, task_numa_work);
2399 task_work_add(curr, work, true);
2400 }
2401 }
2402}
2403#else
2404static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2405{
2406}
2407
2408static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2409{
2410}
2411
2412static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2413{
2414}
2415#endif
2416
2417static void
2418account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2419{
2420 update_load_add(&cfs_rq->load, se->load.weight);
2421 if (!parent_entity(se))
2422 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2423#ifdef CONFIG_SMP
2424 if (entity_is_task(se)) {
2425 struct rq *rq = rq_of(cfs_rq);
2426
2427 account_numa_enqueue(rq, task_of(se));
2428 list_add(&se->group_node, &rq->cfs_tasks);
2429 }
2430#endif
2431 cfs_rq->nr_running++;
2432}
2433
2434static void
2435account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2436{
2437 update_load_sub(&cfs_rq->load, se->load.weight);
2438 if (!parent_entity(se))
2439 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2440 if (entity_is_task(se)) {
2441 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2442 list_del_init(&se->group_node);
2443 }
2444 cfs_rq->nr_running--;
2445}
2446
2447#ifdef CONFIG_FAIR_GROUP_SCHED
2448# ifdef CONFIG_SMP
2449static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2450{
2451 long tg_weight;

	/*
	 * Use this CPU's real-time load instead of the last load contribution
	 * as the updating of the contribution is delayed, and we will use the
	 * real-time load to calc the share. See update_tg_load_avg().
	 */
2458 tg_weight = atomic_long_read(&tg->load_avg);
2459 tg_weight -= cfs_rq->tg_load_avg_contrib;
2460 tg_weight += cfs_rq->load.weight;
2461
2462 return tg_weight;
2463}
2464
2465static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2466{
2467 long tg_weight, load, shares;
2468
2469 tg_weight = calc_tg_weight(tg, cfs_rq);
2470 load = cfs_rq->load.weight;
2471
2472 shares = (tg->shares * load);
2473 if (tg_weight)
2474 shares /= tg_weight;
2475
2476 if (shares < MIN_SHARES)
2477 shares = MIN_SHARES;
2478 if (shares > tg->shares)
2479 shares = tg->shares;
2480
2481 return shares;
2482}
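
/*
 * Example (added): a group with tg->shares = 1024 whose runnable load lives
 * entirely on this cpu gets the full 1024 here; if another cpu carries an
 * equal amount of the group's load, this cpu's share drops toward 512.
 */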
2483# else
2484static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2485{
2486 return tg->shares;
2487}
2488# endif
2489static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2490 unsigned long weight)
2491{
2492 if (se->on_rq) {
2493
2494 if (cfs_rq->curr == se)
2495 update_curr(cfs_rq);
2496 account_entity_dequeue(cfs_rq, se);
2497 }
2498
2499 update_load_set(&se->load, weight);
2500
2501 if (se->on_rq)
2502 account_entity_enqueue(cfs_rq, se);
2503}
2504
2505static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2506
2507static void update_cfs_shares(struct cfs_rq *cfs_rq)
2508{
2509 struct task_group *tg;
2510 struct sched_entity *se;
2511 long shares;
2512
2513 tg = cfs_rq->tg;
2514 se = tg->se[cpu_of(rq_of(cfs_rq))];
2515 if (!se || throttled_hierarchy(cfs_rq))
2516 return;
2517#ifndef CONFIG_SMP
2518 if (likely(se->load.weight == tg->shares))
2519 return;
2520#endif
2521 shares = calc_cfs_shares(cfs_rq, tg);
2522
2523 reweight_entity(cfs_rq_of(se), se, shares);
2524}
2525#else
2526static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2527{
2528}
2529#endif
2530
2531#ifdef CONFIG_SMP
2532
2533static const u32 runnable_avg_yN_inv[] = {
2534 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2535 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2536 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2537 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2538 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2539 0x85aac367, 0x82cd8698,
2540};

/*
 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
 * over-estimates when re-combining.
 */
2546static const u32 runnable_avg_yN_sum[] = {
2547 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2548 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2549 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2550};

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
2556static __always_inline u64 decay_load(u64 val, u64 n)
2557{
2558 unsigned int local_n;
2559
2560 if (!n)
2561 return val;
2562 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2563 return 0;
2564
2565
2566 local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * with a look-up table which covers y^n (n < PERIOD),
	 * to achieve a constant-time decay_load.
	 */
2575 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2576 val >>= local_n / LOAD_AVG_PERIOD;
2577 local_n %= LOAD_AVG_PERIOD;
2578 }
2579
2580 val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
2581 return val;
2582}
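
/*
 * Example (added): decay_load(val, 32) returns roughly val/2, since y was
 * chosen so that y^LOAD_AVG_PERIOD = y^32 = 1/2.
 */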

/*
 * For updates fully spanning n periods, the contribution to runnable
 * average will be: \Sum 1024*y^n
 *
 * We can compute this reasonably efficiently by combining:
 *   y^PERIOD = 1/2 with the precomputed \Sum 1024*y^n {for n < PERIOD}
 */
2591static u32 __compute_runnable_contrib(u64 n)
2592{
2593 u32 contrib = 0;
2594
2595 if (likely(n <= LOAD_AVG_PERIOD))
2596 return runnable_avg_yN_sum[n];
2597 else if (unlikely(n >= LOAD_AVG_MAX_N))
2598 return LOAD_AVG_MAX;
2599
2600
2601 do {
2602 contrib /= 2;
2603 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2604
2605 n -= LOAD_AVG_PERIOD;
2606 } while (n > LOAD_AVG_PERIOD);
2607
2608 contrib = decay_load(contrib, n);
2609 return contrib + runnable_avg_yN_sum[n];
2610}
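
/*
 * Example (added): __compute_runnable_contrib(64) works out to
 * runnable_avg_yN_sum[32] + runnable_avg_yN_sum[32]/2 ~= 35056, on its way
 * toward the LOAD_AVG_MAX (47742) limit as n grows.
 */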
2611
2612#if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
2613#error "load tracking assumes 2^10 as unit"
2614#endif
2615
2616#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
2646static __always_inline int
2647__update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2648 unsigned long weight, int running, struct cfs_rq *cfs_rq)
2649{
2650 u64 delta, scaled_delta, periods;
2651 u32 contrib;
2652 unsigned int delta_w, scaled_delta_w, decayed = 0;
2653 unsigned long scale_freq, scale_cpu;
2654
2655 delta = now - sa->last_update_time;
2656
2657
2658
2659
2660 if ((s64)delta < 0) {
2661 sa->last_update_time = now;
2662 return 0;
2663 }

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
2669 delta >>= 10;
2670 if (!delta)
2671 return 0;
2672 sa->last_update_time = now;
2673
2674 scale_freq = arch_scale_freq_capacity(NULL, cpu);
2675 scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
2676
2677
2678 delta_w = sa->period_contrib;
2679 if (delta + delta_w >= 1024) {
2680 decayed = 1;
2681
2682
2683 sa->period_contrib = 0;
2684
2685
2686
2687
2688
2689
2690 delta_w = 1024 - delta_w;
2691 scaled_delta_w = cap_scale(delta_w, scale_freq);
2692 if (weight) {
2693 sa->load_sum += weight * scaled_delta_w;
2694 if (cfs_rq) {
2695 cfs_rq->runnable_load_sum +=
2696 weight * scaled_delta_w;
2697 }
2698 }
2699 if (running)
2700 sa->util_sum += scaled_delta_w * scale_cpu;
2701
2702 delta -= delta_w;
2703
2704
2705 periods = delta / 1024;
2706 delta %= 1024;
2707
2708 sa->load_sum = decay_load(sa->load_sum, periods + 1);
2709 if (cfs_rq) {
2710 cfs_rq->runnable_load_sum =
2711 decay_load(cfs_rq->runnable_load_sum, periods + 1);
2712 }
2713 sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);
2714
2715
2716 contrib = __compute_runnable_contrib(periods);
2717 contrib = cap_scale(contrib, scale_freq);
2718 if (weight) {
2719 sa->load_sum += weight * contrib;
2720 if (cfs_rq)
2721 cfs_rq->runnable_load_sum += weight * contrib;
2722 }
2723 if (running)
2724 sa->util_sum += contrib * scale_cpu;
2725 }
2726
2727
2728 scaled_delta = cap_scale(delta, scale_freq);
2729 if (weight) {
2730 sa->load_sum += weight * scaled_delta;
2731 if (cfs_rq)
2732 cfs_rq->runnable_load_sum += weight * scaled_delta;
2733 }
2734 if (running)
2735 sa->util_sum += scaled_delta * scale_cpu;
2736
2737 sa->period_contrib += delta;
2738
2739 if (decayed) {
2740 sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
2741 if (cfs_rq) {
2742 cfs_rq->runnable_load_avg =
2743 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
2744 }
2745 sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
2746 }
2747
2748 return decayed;
2749}
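
/*
 * Example (added): for an entity that runs continuously on a cpu at full
 * capacity, util_sum converges toward LOAD_AVG_MAX * 1024, i.e. util_avg
 * approaches 1024 - the same values init_entity_runnable_average() seeds a
 * new entity with.
 */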
2750
2751#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Updating tg's load_avg is necessary before update_cfs_shares() (which is
 * done) and effective_load() (which is not done because it is too costly).
 */
2756static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
2757{
2758 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
2759
	/*
	 * The root task group's load_avg is never consumed, so skip the
	 * atomic update for it.
	 */
2763 if (cfs_rq->tg == &root_task_group)
2764 return;
2765
2766 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
2767 atomic_long_add(delta, &cfs_rq->tg->load_avg);
2768 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
2769 }
2770}
2771
/*
 * Called when a sched_entity is moved to a new cfs_rq (e.g. on a task-group
 * change): age the entity's load on the previous cfs_rq up to that cfs_rq's
 * last update time, then stamp it with the next cfs_rq's clock so the load
 * is carried over without being decayed twice.
 */
2777void set_task_rq_fair(struct sched_entity *se,
2778 struct cfs_rq *prev, struct cfs_rq *next)
2779{
2780 if (!sched_feat(ATTACH_AGE_LOAD))
2781 return;
2782
	/*
	 * Read prev's and next's last_update_time; on !CONFIG_64BIT the
	 * 64-bit values cannot be loaded atomically, so pair them with
	 * their _copy fields and retry until both reads are consistent.
	 */
2790 if (se->avg.last_update_time && prev) {
2791 u64 p_last_update_time;
2792 u64 n_last_update_time;
2793
2794#ifndef CONFIG_64BIT
2795 u64 p_last_update_time_copy;
2796 u64 n_last_update_time_copy;
2797
2798 do {
2799 p_last_update_time_copy = prev->load_last_update_time_copy;
2800 n_last_update_time_copy = next->load_last_update_time_copy;
2801
2802 smp_rmb();
2803
2804 p_last_update_time = prev->avg.last_update_time;
2805 n_last_update_time = next->avg.last_update_time;
2806
2807 } while (p_last_update_time != p_last_update_time_copy ||
2808 n_last_update_time != n_last_update_time_copy);
2809#else
2810 p_last_update_time = prev->avg.last_update_time;
2811 n_last_update_time = next->avg.last_update_time;
2812#endif
2813 __update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
2814 &se->avg, 0, 0, NULL);
2815 se->avg.last_update_time = n_last_update_time;
2816 }
2817}
2818#else
2819static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
2820#endif
2821
2822static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2823
/*
 * Fold in any load/util removed by departed tasks and decay this cfs_rq's
 * averages up to @now.  Returns true if the averages changed, in which case
 * the task group contribution may need updating.
 */
2825static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
2826{
2827 struct sched_avg *sa = &cfs_rq->avg;
2828 int decayed, removed = 0;
2829
2830 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
2831 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
2832 sa->load_avg = max_t(long, sa->load_avg - r, 0);
2833 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
2834 removed = 1;
2835 }
2836
2837 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
2838 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
2839 sa->util_avg = max_t(long, sa->util_avg - r, 0);
2840 sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
2841 }
2842
2843 decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
2844 scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
2845
2846#ifndef CONFIG_64BIT
2847 smp_wmb();
2848 cfs_rq->load_last_update_time_copy = sa->last_update_time;
2849#endif
2850
2851 return decayed || removed;
2852}
2853
/* Update the task's load average and that of its cfs_rq */
2855static inline void update_load_avg(struct sched_entity *se, int update_tg)
2856{
2857 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2858 u64 now = cfs_rq_clock_task(cfs_rq);
2859 struct rq *rq = rq_of(cfs_rq);
2860 int cpu = cpu_of(rq);
2861
	/*
	 * Track task load average for carrying it to a new CPU after
	 * migration, and track group sched_entity load average for the
	 * task_h_load calculation used during migration.
	 */
2866 __update_load_avg(now, cpu, &se->avg,
2867 se->on_rq * scale_load_down(se->load.weight),
2868 cfs_rq->curr == se, NULL);
2869
2870 if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
2871 update_tg_load_avg(cfs_rq, 0);
2872
2873 if (cpu == smp_processor_id() && &rq->cfs == cfs_rq) {
2874 unsigned long max = rq->cpu_capacity_orig;
2875
		/*
		 * Tell the cpufreq governor how much capacity the root
		 * cfs_rq of this CPU is currently using, so it can pick a
		 * frequency.  Only the local CPU's root cfs_rq is reported;
		 * remote enqueues and the idle path are picked up by the
		 * next tick or schedule on that CPU.
		 */
2892 cpufreq_update_util(rq_clock(rq),
2893 min(cfs_rq->avg.util_avg, max), max);
2894 }
2895}
2896
2897static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2898{
2899 if (!sched_feat(ATTACH_AGE_LOAD))
2900 goto skip_aging;
2901
	/*
	 * If the entity carries load from a previous cfs_rq, age (decay) it
	 * up to this cfs_rq's last update time before adding it in, so stale
	 * activity is not over-counted.
	 */
2906 if (se->avg.last_update_time) {
2907 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
2908 &se->avg, 0, 0, NULL);
2909
2910
2911
2912
2913
2914 }
2915
2916skip_aging:
2917 se->avg.last_update_time = cfs_rq->avg.last_update_time;
2918 cfs_rq->avg.load_avg += se->avg.load_avg;
2919 cfs_rq->avg.load_sum += se->avg.load_sum;
2920 cfs_rq->avg.util_avg += se->avg.util_avg;
2921 cfs_rq->avg.util_sum += se->avg.util_sum;
2922}
2923
2924static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2925{
2926 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
2927 &se->avg, se->on_rq * scale_load_down(se->load.weight),
2928 cfs_rq->curr == se, NULL);
2929
2930 cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
2931 cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
2932 cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
2933 cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
2934}
2935
/* Add the load generated by se into cfs_rq's load average */
2937static inline void
2938enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2939{
2940 struct sched_avg *sa = &se->avg;
2941 u64 now = cfs_rq_clock_task(cfs_rq);
2942 int migrated, decayed;
2943
2944 migrated = !sa->last_update_time;
2945 if (!migrated) {
2946 __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
2947 se->on_rq * scale_load_down(se->load.weight),
2948 cfs_rq->curr == se, NULL);
2949 }
2950
2951 decayed = update_cfs_rq_load_avg(now, cfs_rq);
2952
2953 cfs_rq->runnable_load_avg += sa->load_avg;
2954 cfs_rq->runnable_load_sum += sa->load_sum;
2955
2956 if (migrated)
2957 attach_entity_load_avg(cfs_rq, se);
2958
2959 if (decayed || migrated)
2960 update_tg_load_avg(cfs_rq, 0);
2961}
2962
/* Remove the runnable load generated by se from cfs_rq's runnable load average */
2964static inline void
2965dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2966{
2967 update_load_avg(se, 1);
2968
2969 cfs_rq->runnable_load_avg =
2970 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
2971 cfs_rq->runnable_load_sum =
2972 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
2973}
2974
2975#ifndef CONFIG_64BIT
2976static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
2977{
2978 u64 last_update_time_copy;
2979 u64 last_update_time;
2980
2981 do {
2982 last_update_time_copy = cfs_rq->load_last_update_time_copy;
2983 smp_rmb();
2984 last_update_time = cfs_rq->avg.last_update_time;
2985 } while (last_update_time != last_update_time_copy);
2986
2987 return last_update_time;
2988}
2989#else
2990static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
2991{
2992 return cfs_rq->avg.last_update_time;
2993}
2994#endif
2995
/*
 * Task first catches up with the cfs_rq, and then subtracts
 * itself from it (the task must be off the queue now).
 */
3000void remove_entity_load_avg(struct sched_entity *se)
3001{
3002 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3003 u64 last_update_time;
3004
	/*
	 * A newly forked task or a group entity that has never been attached
	 * has last_update_time == 0 and carries no load worth removing.
	 */
3009 if (se->avg.last_update_time == 0)
3010 return;
3011
3012 last_update_time = cfs_rq_last_update_time(cfs_rq);
3013
3014 __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
3015 atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
3016 atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
3017}
3018
3019static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
3020{
3021 return cfs_rq->runnable_load_avg;
3022}
3023
3024static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3025{
3026 return cfs_rq->avg.load_avg;
3027}
3028
3029static int idle_balance(struct rq *this_rq);
3030
3031#else
3032
3033static inline void update_load_avg(struct sched_entity *se, int not_used)
3034{
3035 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3036 struct rq *rq = rq_of(cfs_rq);
3037
3038 cpufreq_trigger_update(rq_clock(rq));
3039}
3040
3041static inline void
3042enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3043static inline void
3044dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3045static inline void remove_entity_load_avg(struct sched_entity *se) {}
3046
3047static inline void
3048attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3049static inline void
3050detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3051
3052static inline int idle_balance(struct rq *rq)
3053{
3054 return 0;
3055}
3056
3057#endif
3058
3059static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
3060{
3061#ifdef CONFIG_SCHEDSTATS
3062 struct task_struct *tsk = NULL;
3063
3064 if (entity_is_task(se))
3065 tsk = task_of(se);
3066
3067 if (se->statistics.sleep_start) {
3068 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
3069
3070 if ((s64)delta < 0)
3071 delta = 0;
3072
3073 if (unlikely(delta > se->statistics.sleep_max))
3074 se->statistics.sleep_max = delta;
3075
3076 se->statistics.sleep_start = 0;
3077 se->statistics.sum_sleep_runtime += delta;
3078
3079 if (tsk) {
3080 account_scheduler_latency(tsk, delta >> 10, 1);
3081 trace_sched_stat_sleep(tsk, delta);
3082 }
3083 }
3084 if (se->statistics.block_start) {
3085 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
3086
3087 if ((s64)delta < 0)
3088 delta = 0;
3089
3090 if (unlikely(delta > se->statistics.block_max))
3091 se->statistics.block_max = delta;
3092
3093 se->statistics.block_start = 0;
3094 se->statistics.sum_sleep_runtime += delta;
3095
3096 if (tsk) {
3097 if (tsk->in_iowait) {
3098 se->statistics.iowait_sum += delta;
3099 se->statistics.iowait_count++;
3100 trace_sched_stat_iowait(tsk, delta);
3101 }
3102
3103 trace_sched_stat_blocked(tsk, delta);
3104
3105
3106
3107
3108
3109
3110 if (unlikely(prof_on == SLEEP_PROFILING)) {
3111 profile_hits(SLEEP_PROFILING,
3112 (void *)get_wchan(tsk),
3113 delta >> 20);
3114 }
3115 account_scheduler_latency(tsk, delta >> 10, 0);
3116 }
3117 }
3118#endif
3119}
3120
3121static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3122{
3123#ifdef CONFIG_SCHED_DEBUG
3124 s64 d = se->vruntime - cfs_rq->min_vruntime;
3125
3126 if (d < 0)
3127 d = -d;
3128
3129 if (d > 3*sysctl_sched_latency)
3130 schedstat_inc(cfs_rq, nr_spread_over);
3131#endif
3132}
3133
3134static void
3135place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3136{
3137 u64 vruntime = cfs_rq->min_vruntime;
3138
	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little; place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
3145 if (initial && sched_feat(START_DEBIT))
3146 vruntime += sched_vslice(cfs_rq, se);
3147
	/* sleeps up to a single latency don't count. */
3149 if (!initial) {
3150 unsigned long thresh = sysctl_sched_latency;
3151
		/*
		 * Halve the sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
3156 if (sched_feat(GENTLE_FAIR_SLEEPERS))
3157 thresh >>= 1;
3158
3159 vruntime -= thresh;
3160 }
3161
	/* ensure we never gain time by being placed backwards. */
3163 se->vruntime = max_vruntime(se->vruntime, vruntime);
3164}
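
/*
 * Worked example (tunable values assumed): with a 6ms sched_latency and
 * GENTLE_FAIR_SLEEPERS enabled, a waking sleeper is placed at most 3ms of
 * vruntime behind min_vruntime rather than a full 6ms, and the final
 * max_vruntime() keeps a task that slept only briefly from being moved
 * backwards at all.
 */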
3165
3166static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3167
3168static inline void check_schedstat_required(void)
3169{
3170#ifdef CONFIG_SCHEDSTATS
3171 if (schedstat_enabled())
3172 return;
3173
3174
3175 if (trace_sched_stat_wait_enabled() ||
3176 trace_sched_stat_sleep_enabled() ||
3177 trace_sched_stat_iowait_enabled() ||
3178 trace_sched_stat_blocked_enabled() ||
3179 trace_sched_stat_runtime_enabled()) {
3180 pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, "
3181 "stat_blocked and stat_runtime require the "
3182 "kernel parameter schedstats=enabled or "
3183 "kernel.sched_schedstats=1\n");
3184 }
3185#endif
3186}
3187
3188static void
3189enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3190{
	/*
	 * Update the normalized vruntime before updating min_vruntime
	 * through calling update_curr().
	 */
3195 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
3196 se->vruntime += cfs_rq->min_vruntime;
3197
3198
3199
3200
3201 update_curr(cfs_rq);
3202 enqueue_entity_load_avg(cfs_rq, se);
3203 account_entity_enqueue(cfs_rq, se);
3204 update_cfs_shares(cfs_rq);
3205
3206 if (flags & ENQUEUE_WAKEUP) {
3207 place_entity(cfs_rq, se, 0);
3208 if (schedstat_enabled())
3209 enqueue_sleeper(cfs_rq, se);
3210 }
3211
3212 check_schedstat_required();
3213 if (schedstat_enabled()) {
3214 update_stats_enqueue(cfs_rq, se);
3215 check_spread(cfs_rq, se);
3216 }
3217 if (se != cfs_rq->curr)
3218 __enqueue_entity(cfs_rq, se);
3219 se->on_rq = 1;
3220
3221 if (cfs_rq->nr_running == 1) {
3222 list_add_leaf_cfs_rq(cfs_rq);
3223 check_enqueue_throttle(cfs_rq);
3224 }
3225}
3226
3227static void __clear_buddies_last(struct sched_entity *se)
3228{
3229 for_each_sched_entity(se) {
3230 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3231 if (cfs_rq->last != se)
3232 break;
3233
3234 cfs_rq->last = NULL;
3235 }
3236}
3237
3238static void __clear_buddies_next(struct sched_entity *se)
3239{
3240 for_each_sched_entity(se) {
3241 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3242 if (cfs_rq->next != se)
3243 break;
3244
3245 cfs_rq->next = NULL;
3246 }
3247}
3248
3249static void __clear_buddies_skip(struct sched_entity *se)
3250{
3251 for_each_sched_entity(se) {
3252 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3253 if (cfs_rq->skip != se)
3254 break;
3255
3256 cfs_rq->skip = NULL;
3257 }
3258}
3259
3260static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3261{
3262 if (cfs_rq->last == se)
3263 __clear_buddies_last(se);
3264
3265 if (cfs_rq->next == se)
3266 __clear_buddies_next(se);
3267
3268 if (cfs_rq->skip == se)
3269 __clear_buddies_skip(se);
3270}
3271
3272static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3273
3274static void
3275dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3276{
3277
3278
3279
3280 update_curr(cfs_rq);
3281 dequeue_entity_load_avg(cfs_rq, se);
3282
3283 if (schedstat_enabled())
3284 update_stats_dequeue(cfs_rq, se, flags);
3285
3286 clear_buddies(cfs_rq, se);
3287
3288 if (se != cfs_rq->curr)
3289 __dequeue_entity(cfs_rq, se);
3290 se->on_rq = 0;
3291 account_entity_dequeue(cfs_rq, se);
3292
	/*
	 * Tasks that are not going to sleep (e.g. being migrated) have their
	 * vruntime normalized against min_vruntime here; sleepers keep their
	 * absolute vruntime so place_entity() can credit the sleep on wakeup.
	 */
3298 if (!(flags & DEQUEUE_SLEEP))
3299 se->vruntime -= cfs_rq->min_vruntime;
3300
3301
3302 return_cfs_rq_runtime(cfs_rq);
3303
3304 update_min_vruntime(cfs_rq);
3305 update_cfs_shares(cfs_rq);
3306}
3307
/*
 * Preempt the current task with a newly woken task if needed:
 */
3311static void
3312check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3313{
3314 unsigned long ideal_runtime, delta_exec;
3315 struct sched_entity *se;
3316 s64 delta;
3317
3318 ideal_runtime = sched_slice(cfs_rq, curr);
3319 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
3320 if (delta_exec > ideal_runtime) {
3321 resched_curr(rq_of(cfs_rq));
3322
3323
3324
3325
3326 clear_buddies(cfs_rq, curr);
3327 return;
3328 }
3329
	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
3335 if (delta_exec < sysctl_sched_min_granularity)
3336 return;
3337
3338 se = __pick_first_entity(cfs_rq);
3339 delta = curr->vruntime - se->vruntime;
3340
3341 if (delta < 0)
3342 return;
3343
3344 if (delta > ideal_runtime)
3345 resched_curr(rq_of(cfs_rq));
3346}
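
/*
 * Illustrative numbers for the checks above (values assumed): if
 * sched_slice() works out to 3ms and curr has run 4ms since it was last
 * picked, it is rescheduled immediately.  If it has run only 1.5ms, it can
 * still be preempted once its vruntime leads the leftmost entity's by more
 * than that 3ms ideal_runtime, but never before
 * sysctl_sched_min_granularity has elapsed.
 */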
3347
3348static void
3349set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
3350{
3351
3352 if (se->on_rq) {
3353
3354
3355
3356
3357
3358 if (schedstat_enabled())
3359 update_stats_wait_end(cfs_rq, se);
3360 __dequeue_entity(cfs_rq, se);
3361 update_load_avg(se, 1);
3362 }
3363
3364 update_stats_curr_start(cfs_rq, se);
3365 cfs_rq->curr = se;
3366#ifdef CONFIG_SCHEDSTATS
3367
3368
3369
3370
3371
3372 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
3373 se->statistics.slice_max = max(se->statistics.slice_max,
3374 se->sum_exec_runtime - se->prev_sum_exec_runtime);
3375 }
3376#endif
3377 se->prev_sum_exec_runtime = se->sum_exec_runtime;
3378}
3379
3380static int
3381wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3382
/*
 * Pick the next process, keeping these things in mind, in this order:
 * 1) keep things fair between processes/task groups
 * 2) pick the "next" process, since someone really wants that to run
 * 3) pick the "last" process, for cache locality
 * 4) do not run the "skip" process, if something else is available
 */
3390static struct sched_entity *
3391pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3392{
3393 struct sched_entity *left = __pick_first_entity(cfs_rq);
3394 struct sched_entity *se;
3395
3396
3397
3398
3399
3400 if (!left || (curr && entity_before(curr, left)))
3401 left = curr;
3402
3403 se = left;
3404
	/*
	 * Avoid running the skip buddy, if running something else can
	 * be done without getting too unfair.
	 */
3409 if (cfs_rq->skip == se) {
3410 struct sched_entity *second;
3411
3412 if (se == curr) {
3413 second = __pick_first_entity(cfs_rq);
3414 } else {
3415 second = __pick_next_entity(se);
3416 if (!second || (curr && entity_before(curr, second)))
3417 second = curr;
3418 }
3419
3420 if (second && wakeup_preempt_entity(second, left) < 1)
3421 se = second;
3422 }
3423
3424
3425
3426
3427 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3428 se = cfs_rq->last;
3429
3430
3431
3432
3433 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3434 se = cfs_rq->next;
3435
3436 clear_buddies(cfs_rq, se);
3437
3438 return se;
3439}
3440
3441static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3442
3443static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
3444{
3445
3446
3447
3448
3449 if (prev->on_rq)
3450 update_curr(cfs_rq);
3451
3452
3453 check_cfs_rq_runtime(cfs_rq);
3454
3455 if (schedstat_enabled()) {
3456 check_spread(cfs_rq, prev);
3457 if (prev->on_rq)
3458 update_stats_wait_start(cfs_rq, prev);
3459 }
3460
3461 if (prev->on_rq) {
3462
3463 __enqueue_entity(cfs_rq, prev);
3464
3465 update_load_avg(prev, 0);
3466 }
3467 cfs_rq->curr = NULL;
3468}
3469
3470static void
3471entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3472{
3473
3474
3475
3476 update_curr(cfs_rq);
3477
3478
3479
3480
3481 update_load_avg(curr, 1);
3482 update_cfs_shares(cfs_rq);
3483
3484#ifdef CONFIG_SCHED_HRTICK
3485
3486
3487
3488
3489 if (queued) {
3490 resched_curr(rq_of(cfs_rq));
3491 return;
3492 }
3493
3494
3495
3496 if (!sched_feat(DOUBLE_TICK) &&
3497 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3498 return;
3499#endif
3500
3501 if (cfs_rq->nr_running > 1)
3502 check_preempt_tick(cfs_rq, curr);
3503}
3504

/**************************************************
 * CFS bandwidth control machinery
 */
3510#ifdef CONFIG_CFS_BANDWIDTH
3511
3512#ifdef HAVE_JUMP_LABEL
3513static struct static_key __cfs_bandwidth_used;
3514
3515static inline bool cfs_bandwidth_used(void)
3516{
3517 return static_key_false(&__cfs_bandwidth_used);
3518}
3519
3520void cfs_bandwidth_usage_inc(void)
3521{
3522 static_key_slow_inc(&__cfs_bandwidth_used);
3523}
3524
3525void cfs_bandwidth_usage_dec(void)
3526{
3527 static_key_slow_dec(&__cfs_bandwidth_used);
3528}
3529#else
3530static bool cfs_bandwidth_used(void)
3531{
3532 return true;
3533}
3534
3535void cfs_bandwidth_usage_inc(void) {}
3536void cfs_bandwidth_usage_dec(void) {}
3537#endif
3538
3539
/*
 * Default period for cfs group bandwidth: 0.1s, in nanoseconds.
 */
3543static inline u64 default_cfs_period(void)
3544{
3545 return 100000000ULL;
3546}
3547
3548static inline u64 sched_cfs_bandwidth_slice(void)
3549{
3550 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3551}
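
/*
 * Example with the defaults (group quota assumed for illustration): the
 * slice is 5ms and default_cfs_period() is 100ms, so a group limited to,
 * say, 20ms per period pulls its quota from the global pool in 5ms chunks;
 * a cfs_rq comes back to the pool once its local slice runs out.
 */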
3552
/*
 * Replenish runtime according to the assigned quota and update the
 * expiration time.  We use sched_clock_cpu() directly instead of rq->clock
 * to avoid adding extra synchronization around rq->lock.
 *
 * Requires cfs_b->lock.
 */
3560void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
3561{
3562 u64 now;
3563
3564 if (cfs_b->quota == RUNTIME_INF)
3565 return;
3566
3567 now = sched_clock_cpu(smp_processor_id());
3568 cfs_b->runtime = cfs_b->quota;
3569 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3570}
3571
3572static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3573{
3574 return &tg->cfs_bandwidth;
3575}
3576
3577
3578static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3579{
3580 if (unlikely(cfs_rq->throttle_count))
3581 return cfs_rq->throttled_clock_task;
3582
3583 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
3584}
3585
3586
3587static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3588{
3589 struct task_group *tg = cfs_rq->tg;
3590 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
3591 u64 amount = 0, min_amount, expires;
3592
3593
3594 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3595
3596 raw_spin_lock(&cfs_b->lock);
3597 if (cfs_b->quota == RUNTIME_INF)
3598 amount = min_amount;
3599 else {
3600 start_cfs_bandwidth(cfs_b);
3601
3602 if (cfs_b->runtime > 0) {
3603 amount = min(cfs_b->runtime, min_amount);
3604 cfs_b->runtime -= amount;
3605 cfs_b->idle = 0;
3606 }
3607 }
3608 expires = cfs_b->runtime_expires;
3609 raw_spin_unlock(&cfs_b->lock);
3610
3611 cfs_rq->runtime_remaining += amount;
3612
3613
3614
3615
3616
3617 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3618 cfs_rq->runtime_expires = expires;
3619
3620 return cfs_rq->runtime_remaining > 0;
3621}
3622
3623
3624
3625
3626
3627static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3628{
3629 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3630
3631
3632 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
3633 return;
3634
3635 if (cfs_rq->runtime_remaining < 0)
3636 return;
3637
	/*
	 * The local deadline has passed, but the global pool may not truly
	 * have expired if our sched_clock is ahead of the one the runtime
	 * was issued against.  Whether the global deadline has moved on can
	 * be checked by comparing expiration times: only exact equality
	 * matters, so no locking is needed.
	 */
3649 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
3650
3651 cfs_rq->runtime_expires += TICK_NSEC;
3652 } else {
3653
3654 cfs_rq->runtime_remaining = 0;
3655 }
3656}
3657
3658static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3659{
3660
3661 cfs_rq->runtime_remaining -= delta_exec;
3662 expire_cfs_rq_runtime(cfs_rq);
3663
3664 if (likely(cfs_rq->runtime_remaining > 0))
3665 return;
3666
3667
3668
3669
3670
3671 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3672 resched_curr(rq_of(cfs_rq));
3673}
3674
3675static __always_inline
3676void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3677{
3678 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3679 return;
3680
3681 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3682}
3683
3684static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3685{
3686 return cfs_bandwidth_used() && cfs_rq->throttled;
3687}
3688
3689
3690static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3691{
3692 return cfs_bandwidth_used() && cfs_rq->throttle_count;
3693}
3694
3695
3696
3697
3698
3699
3700static inline int throttled_lb_pair(struct task_group *tg,
3701 int src_cpu, int dest_cpu)
3702{
3703 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3704
3705 src_cfs_rq = tg->cfs_rq[src_cpu];
3706 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3707
3708 return throttled_hierarchy(src_cfs_rq) ||
3709 throttled_hierarchy(dest_cfs_rq);
3710}
3711
3712
3713static int tg_unthrottle_up(struct task_group *tg, void *data)
3714{
3715 struct rq *rq = data;
3716 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3717
3718 cfs_rq->throttle_count--;
3719#ifdef CONFIG_SMP
3720 if (!cfs_rq->throttle_count) {
3721
3722 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3723 cfs_rq->throttled_clock_task;
3724 }
3725#endif
3726
3727 return 0;
3728}
3729
3730static int tg_throttle_down(struct task_group *tg, void *data)
3731{
3732 struct rq *rq = data;
3733 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3734
3735
3736 if (!cfs_rq->throttle_count)
3737 cfs_rq->throttled_clock_task = rq_clock_task(rq);
3738 cfs_rq->throttle_count++;
3739
3740 return 0;
3741}
3742
3743static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3744{
3745 struct rq *rq = rq_of(cfs_rq);
3746 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3747 struct sched_entity *se;
3748 long task_delta, dequeue = 1;
3749 bool empty;
3750
3751 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3752
3753
3754 rcu_read_lock();
3755 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3756 rcu_read_unlock();
3757
3758 task_delta = cfs_rq->h_nr_running;
3759 for_each_sched_entity(se) {
3760 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3761
3762 if (!se->on_rq)
3763 break;
3764
3765 if (dequeue)
3766 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3767 qcfs_rq->h_nr_running -= task_delta;
3768
3769 if (qcfs_rq->load.weight)
3770 dequeue = 0;
3771 }
3772
3773 if (!se)
3774 sub_nr_running(rq, task_delta);
3775
3776 cfs_rq->throttled = 1;
3777 cfs_rq->throttled_clock = rq_clock(rq);
3778 raw_spin_lock(&cfs_b->lock);
3779 empty = list_empty(&cfs_b->throttled_cfs_rq);
3780
3781
3782
3783
3784
3785 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3786
3787
3788
3789
3790
3791 if (empty)
3792 start_cfs_bandwidth(cfs_b);
3793
3794 raw_spin_unlock(&cfs_b->lock);
3795}
3796
3797void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3798{
3799 struct rq *rq = rq_of(cfs_rq);
3800 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3801 struct sched_entity *se;
3802 int enqueue = 1;
3803 long task_delta;
3804
3805 se = cfs_rq->tg->se[cpu_of(rq)];
3806
3807 cfs_rq->throttled = 0;
3808
3809 update_rq_clock(rq);
3810
3811 raw_spin_lock(&cfs_b->lock);
3812 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
3813 list_del_rcu(&cfs_rq->throttled_list);
3814 raw_spin_unlock(&cfs_b->lock);
3815
3816
3817 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3818
3819 if (!cfs_rq->load.weight)
3820 return;
3821
3822 task_delta = cfs_rq->h_nr_running;
3823 for_each_sched_entity(se) {
3824 if (se->on_rq)
3825 enqueue = 0;
3826
3827 cfs_rq = cfs_rq_of(se);
3828 if (enqueue)
3829 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3830 cfs_rq->h_nr_running += task_delta;
3831
3832 if (cfs_rq_throttled(cfs_rq))
3833 break;
3834 }
3835
3836 if (!se)
3837 add_nr_running(rq, task_delta);
3838
3839
3840 if (rq->curr == rq->idle && rq->cfs.nr_running)
3841 resched_curr(rq);
3842}
3843
3844static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3845 u64 remaining, u64 expires)
3846{
3847 struct cfs_rq *cfs_rq;
3848 u64 runtime;
3849 u64 starting_runtime = remaining;
3850
3851 rcu_read_lock();
3852 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3853 throttled_list) {
3854 struct rq *rq = rq_of(cfs_rq);
3855
3856 raw_spin_lock(&rq->lock);
3857 if (!cfs_rq_throttled(cfs_rq))
3858 goto next;
3859
3860 runtime = -cfs_rq->runtime_remaining + 1;
3861 if (runtime > remaining)
3862 runtime = remaining;
3863 remaining -= runtime;
3864
3865 cfs_rq->runtime_remaining += runtime;
3866 cfs_rq->runtime_expires = expires;
3867
3868
3869 if (cfs_rq->runtime_remaining > 0)
3870 unthrottle_cfs_rq(cfs_rq);
3871
3872next:
3873 raw_spin_unlock(&rq->lock);
3874
3875 if (!remaining)
3876 break;
3877 }
3878 rcu_read_unlock();
3879
3880 return starting_runtime - remaining;
3881}
3882
3883
3884
3885
3886
3887
3888
3889static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3890{
3891 u64 runtime, runtime_expires;
3892 int throttled;
3893
3894
3895 if (cfs_b->quota == RUNTIME_INF)
3896 goto out_deactivate;
3897
3898 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3899 cfs_b->nr_periods += overrun;
3900
3901
3902
3903
3904
3905 if (cfs_b->idle && !throttled)
3906 goto out_deactivate;
3907
3908 __refill_cfs_bandwidth_runtime(cfs_b);
3909
3910 if (!throttled) {
3911
3912 cfs_b->idle = 1;
3913 return 0;
3914 }
3915
3916
3917 cfs_b->nr_throttled += overrun;
3918
3919 runtime_expires = cfs_b->runtime_expires;
3920
3921
3922
3923
3924
3925
3926
3927
3928 while (throttled && cfs_b->runtime > 0) {
3929 runtime = cfs_b->runtime;
3930 raw_spin_unlock(&cfs_b->lock);
3931
3932 runtime = distribute_cfs_runtime(cfs_b, runtime,
3933 runtime_expires);
3934 raw_spin_lock(&cfs_b->lock);
3935
3936 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3937
3938 cfs_b->runtime -= min(runtime, cfs_b->runtime);
3939 }
3940
3941
3942
3943
3944
3945
3946
3947 cfs_b->idle = 0;
3948
3949 return 0;
3950
3951out_deactivate:
3952 return 1;
3953}
3954
3955
3956static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3957
3958static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3959
3960static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3961
3962
3963
3964
3965
3966
3967
3968
3969static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3970{
3971 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3972 u64 remaining;
3973
3974
3975 if (hrtimer_callback_running(refresh_timer))
3976 return 1;
3977
3978
3979 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3980 if (remaining < min_expire)
3981 return 1;
3982
3983 return 0;
3984}
3985
3986static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3987{
3988 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3989
3990
3991 if (runtime_refresh_within(cfs_b, min_left))
3992 return;
3993
3994 hrtimer_start(&cfs_b->slack_timer,
3995 ns_to_ktime(cfs_bandwidth_slack_period),
3996 HRTIMER_MODE_REL);
3997}
3998
3999
4000static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4001{
4002 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4003 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
4004
4005 if (slack_runtime <= 0)
4006 return;
4007
4008 raw_spin_lock(&cfs_b->lock);
4009 if (cfs_b->quota != RUNTIME_INF &&
4010 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
4011 cfs_b->runtime += slack_runtime;
4012
4013
4014 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
4015 !list_empty(&cfs_b->throttled_cfs_rq))
4016 start_cfs_slack_bandwidth(cfs_b);
4017 }
4018 raw_spin_unlock(&cfs_b->lock);
4019
4020
4021 cfs_rq->runtime_remaining -= slack_runtime;
4022}
4023
4024static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4025{
4026 if (!cfs_bandwidth_used())
4027 return;
4028
4029 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
4030 return;
4031
4032 __return_cfs_rq_runtime(cfs_rq);
4033}
4034
4035
4036
4037
4038
4039static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
4040{
4041 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
4042 u64 expires;
4043
4044
4045 raw_spin_lock(&cfs_b->lock);
4046 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
4047 raw_spin_unlock(&cfs_b->lock);
4048 return;
4049 }
4050
4051 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
4052 runtime = cfs_b->runtime;
4053
4054 expires = cfs_b->runtime_expires;
4055 raw_spin_unlock(&cfs_b->lock);
4056
4057 if (!runtime)
4058 return;
4059
4060 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
4061
4062 raw_spin_lock(&cfs_b->lock);
4063 if (expires == cfs_b->runtime_expires)
4064 cfs_b->runtime -= min(runtime, cfs_b->runtime);
4065 raw_spin_unlock(&cfs_b->lock);
4066}
4067
4068
4069
4070
4071
4072
4073static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4074{
4075 if (!cfs_bandwidth_used())
4076 return;
4077
4078
4079 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4080 return;
4081
4082
4083 if (cfs_rq_throttled(cfs_rq))
4084 return;
4085
4086
4087 account_cfs_rq_runtime(cfs_rq, 0);
4088 if (cfs_rq->runtime_remaining <= 0)
4089 throttle_cfs_rq(cfs_rq);
4090}
4091
4092
4093static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4094{
4095 if (!cfs_bandwidth_used())
4096 return false;
4097
4098 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
4099 return false;
4100
4101
4102
4103
4104
4105 if (cfs_rq_throttled(cfs_rq))
4106 return true;
4107
4108 throttle_cfs_rq(cfs_rq);
4109 return true;
4110}
4111
4112static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
4113{
4114 struct cfs_bandwidth *cfs_b =
4115 container_of(timer, struct cfs_bandwidth, slack_timer);
4116
4117 do_sched_cfs_slack_timer(cfs_b);
4118
4119 return HRTIMER_NORESTART;
4120}
4121
4122static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4123{
4124 struct cfs_bandwidth *cfs_b =
4125 container_of(timer, struct cfs_bandwidth, period_timer);
4126 int overrun;
4127 int idle = 0;
4128
4129 raw_spin_lock(&cfs_b->lock);
4130 for (;;) {
4131 overrun = hrtimer_forward_now(timer, cfs_b->period);
4132 if (!overrun)
4133 break;
4134
4135 idle = do_sched_cfs_period_timer(cfs_b, overrun);
4136 }
4137 if (idle)
4138 cfs_b->period_active = 0;
4139 raw_spin_unlock(&cfs_b->lock);
4140
4141 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4142}
4143
4144void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4145{
4146 raw_spin_lock_init(&cfs_b->lock);
4147 cfs_b->runtime = 0;
4148 cfs_b->quota = RUNTIME_INF;
4149 cfs_b->period = ns_to_ktime(default_cfs_period());
4150
4151 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4152 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
4153 cfs_b->period_timer.function = sched_cfs_period_timer;
4154 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4155 cfs_b->slack_timer.function = sched_cfs_slack_timer;
4156}
4157
4158static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4159{
4160 cfs_rq->runtime_enabled = 0;
4161 INIT_LIST_HEAD(&cfs_rq->throttled_list);
4162}
4163
4164void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4165{
4166 lockdep_assert_held(&cfs_b->lock);
4167
4168 if (!cfs_b->period_active) {
4169 cfs_b->period_active = 1;
4170 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
4171 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
4172 }
4173}
4174
4175static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4176{
4177
4178 if (!cfs_b->throttled_cfs_rq.next)
4179 return;
4180
4181 hrtimer_cancel(&cfs_b->period_timer);
4182 hrtimer_cancel(&cfs_b->slack_timer);
4183}
4184
4185static void __maybe_unused update_runtime_enabled(struct rq *rq)
4186{
4187 struct cfs_rq *cfs_rq;
4188
4189 for_each_leaf_cfs_rq(rq, cfs_rq) {
4190 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4191
4192 raw_spin_lock(&cfs_b->lock);
4193 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4194 raw_spin_unlock(&cfs_b->lock);
4195 }
4196}
4197
4198static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
4199{
4200 struct cfs_rq *cfs_rq;
4201
4202 for_each_leaf_cfs_rq(rq, cfs_rq) {
4203 if (!cfs_rq->runtime_enabled)
4204 continue;
4205
4206
4207
4208
4209
4210 cfs_rq->runtime_remaining = 1;
4211
4212
4213
4214
4215 cfs_rq->runtime_enabled = 0;
4216
4217 if (cfs_rq_throttled(cfs_rq))
4218 unthrottle_cfs_rq(cfs_rq);
4219 }
4220}
4221
4222#else
4223static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4224{
4225 return rq_clock_task(rq_of(cfs_rq));
4226}
4227
4228static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
4229static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
4230static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
4231static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4232
4233static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4234{
4235 return 0;
4236}
4237
4238static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4239{
4240 return 0;
4241}
4242
4243static inline int throttled_lb_pair(struct task_group *tg,
4244 int src_cpu, int dest_cpu)
4245{
4246 return 0;
4247}
4248
4249void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4250
4251#ifdef CONFIG_FAIR_GROUP_SCHED
4252static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4253#endif
4254
4255static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4256{
4257 return NULL;
4258}
4259static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4260static inline void update_runtime_enabled(struct rq *rq) {}
4261static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
4262
4263#endif
4264
4265
4266
4267
4268
4269#ifdef CONFIG_SCHED_HRTICK
4270static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4271{
4272 struct sched_entity *se = &p->se;
4273 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4274
4275 WARN_ON(task_rq(p) != rq);
4276
4277 if (cfs_rq->nr_running > 1) {
4278 u64 slice = sched_slice(cfs_rq, se);
4279 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4280 s64 delta = slice - ran;
4281
4282 if (delta < 0) {
4283 if (rq->curr == p)
4284 resched_curr(rq);
4285 return;
4286 }
4287 hrtick_start(rq, delta);
4288 }
4289}
4290
4291
4292
4293
4294
4295
4296static void hrtick_update(struct rq *rq)
4297{
4298 struct task_struct *curr = rq->curr;
4299
4300 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
4301 return;
4302
4303 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4304 hrtick_start_fair(rq, curr);
4305}
4306#else
4307static inline void
4308hrtick_start_fair(struct rq *rq, struct task_struct *p)
4309{
4310}
4311
4312static inline void hrtick_update(struct rq *rq)
4313{
4314}
4315#endif
4316
4317
4318
4319
4320
4321
4322static void
4323enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4324{
4325 struct cfs_rq *cfs_rq;
4326 struct sched_entity *se = &p->se;
4327
4328 for_each_sched_entity(se) {
4329 if (se->on_rq)
4330 break;
4331 cfs_rq = cfs_rq_of(se);
4332 enqueue_entity(cfs_rq, se, flags);
4333
4334
4335
4336
4337
4338
4339
4340 if (cfs_rq_throttled(cfs_rq))
4341 break;
4342 cfs_rq->h_nr_running++;
4343
4344 flags = ENQUEUE_WAKEUP;
4345 }
4346
4347 for_each_sched_entity(se) {
4348 cfs_rq = cfs_rq_of(se);
4349 cfs_rq->h_nr_running++;
4350
4351 if (cfs_rq_throttled(cfs_rq))
4352 break;
4353
4354 update_load_avg(se, 1);
4355 update_cfs_shares(cfs_rq);
4356 }
4357
4358 if (!se)
4359 add_nr_running(rq, 1);
4360
4361 hrtick_update(rq);
4362}
4363
4364static void set_next_buddy(struct sched_entity *se);
4365
4366
4367
4368
4369
4370
4371static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4372{
4373 struct cfs_rq *cfs_rq;
4374 struct sched_entity *se = &p->se;
4375 int task_sleep = flags & DEQUEUE_SLEEP;
4376
4377 for_each_sched_entity(se) {
4378 cfs_rq = cfs_rq_of(se);
4379 dequeue_entity(cfs_rq, se, flags);
4380
4381
4382
4383
4384
4385
4386
4387 if (cfs_rq_throttled(cfs_rq))
4388 break;
4389 cfs_rq->h_nr_running--;
4390
4391
4392 if (cfs_rq->load.weight) {
4393
4394
4395
4396
4397 if (task_sleep && parent_entity(se))
4398 set_next_buddy(parent_entity(se));
4399
4400
4401 se = parent_entity(se);
4402 break;
4403 }
4404 flags |= DEQUEUE_SLEEP;
4405 }
4406
4407 for_each_sched_entity(se) {
4408 cfs_rq = cfs_rq_of(se);
4409 cfs_rq->h_nr_running--;
4410
4411 if (cfs_rq_throttled(cfs_rq))
4412 break;
4413
4414 update_load_avg(se, 1);
4415 update_cfs_shares(cfs_rq);
4416 }
4417
4418 if (!se)
4419 sub_nr_running(rq, 1);
4420
4421 hrtick_update(rq);
4422}
4423
4424#ifdef CONFIG_SMP
4425
/*
 * Per-rq cpu_load[] tracking used for load-balancing decisions.
 *
 * The exact cpu_load[idx] computed at every tick would be
 *
 *	load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
 *
 * If a CPU misses updates while idle (NO_HZ), catching up would require
 * applying that decay once per missed tick.  Instead, degrade_zero_ticks
 * gives the number of missed ticks after which a given index decays to
 * zero, and degrade_factor[idx][j] holds the combined decay for 2^j missed
 * ticks in DEGRADE_SHIFT fixed point, so an arbitrary number of missed
 * ticks can be applied by walking its set bits.
 */
4452#define DEGRADE_SHIFT 7
4453
4454static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
4455static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
4456 { 0, 0, 0, 0, 0, 0, 0, 0 },
4457 { 64, 32, 8, 0, 0, 0, 0, 0 },
4458 { 96, 72, 40, 12, 1, 0, 0, 0 },
4459 { 112, 98, 75, 43, 15, 1, 0, 0 },
4460 { 120, 112, 98, 76, 45, 16, 2, 0 }
4461};
4462
4463
4464
4465
4466
4467
4468static unsigned long
4469decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4470{
4471 int j = 0;
4472
4473 if (!missed_updates)
4474 return load;
4475
4476 if (missed_updates >= degrade_zero_ticks[idx])
4477 return 0;
4478
4479 if (idx == 1)
4480 return load >> missed_updates;
4481
4482 while (missed_updates) {
4483 if (missed_updates % 2)
4484 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
4485
4486 missed_updates >>= 1;
4487 j++;
4488 }
4489 return load;
4490}
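
/*
 * Worked example (illustrative values): for idx == 2 each tick scales the
 * old load by 3/4, so missing two ticks should scale it by (3/4)^2 = 0.5625.
 * The table encodes exactly that: bit 1 of missed_updates selects
 * degrade_factor[2][1] == 72, and 72 >> DEGRADE_SHIFT == 72/128 == 0.5625.
 * For idx == 1 the per-tick factor is 1/2, which is why a plain shift by
 * missed_updates suffices.
 */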
4491
/*
 * Update rq->cpu_load[] statistics.  @pending_updates is the number of
 * ticks since the last update (more than one when returning from NO_HZ
 * idle); missed ticks are folded in via decay_load_missed() so the result
 * looks as if the CPU had ticked all along.  When the CPU was tickless but
 * busy (@active), @tickless_load (cpu_load[0]) is treated as the load the
 * missed ticks would have sampled, instead of zero, and is decayed and
 * re-added so it is not double counted.
 */
4528static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
4529 unsigned long pending_updates, int active)
4530{
4531 unsigned long tickless_load = active ? this_rq->cpu_load[0] : 0;
4532 int i, scale;
4533
4534 this_rq->nr_load_updates++;
4535
4536
4537 this_rq->cpu_load[0] = this_load;
4538 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
4539 unsigned long old_load, new_load;
4540
4541
4542
4543 old_load = this_rq->cpu_load[i];
4544 old_load = decay_load_missed(old_load, pending_updates - 1, i);
4545 if (tickless_load) {
4546 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
4547
4548
4549
4550
4551
4552 old_load += tickless_load;
4553 }
4554 new_load = this_load;
4555
4556
4557
4558
4559
4560 if (new_load > old_load)
4561 new_load += scale - 1;
4562
4563 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
4564 }
4565
4566 sched_avg_update(this_rq);
4567}
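
/*
 * The inner update above is exponential smoothing per index; in effect
 *
 *	cpu_load[i] = (old_load * (2^i - 1) + new_load) >> i
 *
 * so cpu_load[0] is the instantaneous load, cpu_load[1] moves half way to
 * the new value each tick, cpu_load[2] a quarter of the way, and so on.
 * new_load is rounded up when the load is rising, so the average does not
 * get stuck just below the new value.
 */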
4568
4569
4570static unsigned long weighted_cpuload(const int cpu)
4571{
4572 return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
4573}
4574
4575#ifdef CONFIG_NO_HZ_COMMON
4576static void __update_cpu_load_nohz(struct rq *this_rq,
4577 unsigned long curr_jiffies,
4578 unsigned long load,
4579 int active)
4580{
4581 unsigned long pending_updates;
4582
4583 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
4584 if (pending_updates) {
4585 this_rq->last_load_update_tick = curr_jiffies;
4586
4587
4588
4589
4590
4591 __update_cpu_load(this_rq, load, pending_updates, active);
4592 }
4593}
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612static void update_cpu_load_idle(struct rq *this_rq)
4613{
4614
4615
4616
4617 if (weighted_cpuload(cpu_of(this_rq)))
4618 return;
4619
4620 __update_cpu_load_nohz(this_rq, READ_ONCE(jiffies), 0, 0);
4621}
4622
4623
4624
4625
4626void update_cpu_load_nohz(int active)
4627{
4628 struct rq *this_rq = this_rq();
4629 unsigned long curr_jiffies = READ_ONCE(jiffies);
4630 unsigned long load = active ? weighted_cpuload(cpu_of(this_rq)) : 0;
4631
4632 if (curr_jiffies == this_rq->last_load_update_tick)
4633 return;
4634
4635 raw_spin_lock(&this_rq->lock);
4636 __update_cpu_load_nohz(this_rq, curr_jiffies, load, active);
4637 raw_spin_unlock(&this_rq->lock);
4638}
4639#endif
4640
4641
4642
4643
4644void update_cpu_load_active(struct rq *this_rq)
4645{
4646 unsigned long load = weighted_cpuload(cpu_of(this_rq));
4647
4648
4649
4650 this_rq->last_load_update_tick = jiffies;
4651 __update_cpu_load(this_rq, load, 1, 1);
4652}
4653
4654
/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
4661static unsigned long source_load(int cpu, int type)
4662{
4663 struct rq *rq = cpu_rq(cpu);
4664 unsigned long total = weighted_cpuload(cpu);
4665
4666 if (type == 0 || !sched_feat(LB_BIAS))
4667 return total;
4668
4669 return min(rq->cpu_load[type-1], total);
4670}
4671
/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
4676static unsigned long target_load(int cpu, int type)
4677{
4678 struct rq *rq = cpu_rq(cpu);
4679 unsigned long total = weighted_cpuload(cpu);
4680
4681 if (type == 0 || !sched_feat(LB_BIAS))
4682 return total;
4683
4684 return max(rq->cpu_load[type-1], total);
4685}
4686
4687static unsigned long capacity_of(int cpu)
4688{
4689 return cpu_rq(cpu)->cpu_capacity;
4690}
4691
4692static unsigned long capacity_orig_of(int cpu)
4693{
4694 return cpu_rq(cpu)->cpu_capacity_orig;
4695}
4696
4697static unsigned long cpu_avg_load_per_task(int cpu)
4698{
4699 struct rq *rq = cpu_rq(cpu);
4700 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
4701 unsigned long load_avg = weighted_cpuload(cpu);
4702
4703 if (nr_running)
4704 return load_avg / nr_running;
4705
4706 return 0;
4707}
4708
4709static void record_wakee(struct task_struct *p)
4710{
4711
4712
4713
4714
4715
4716 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
4717 current->wakee_flips >>= 1;
4718 current->wakee_flip_decay_ts = jiffies;
4719 }
4720
4721 if (current->last_wakee != p) {
4722 current->last_wakee = p;
4723 current->wakee_flips++;
4724 }
4725}
4726
4727static void task_waking_fair(struct task_struct *p)
4728{
4729 struct sched_entity *se = &p->se;
4730 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4731 u64 min_vruntime;
4732
4733#ifndef CONFIG_64BIT
4734 u64 min_vruntime_copy;
4735
4736 do {
4737 min_vruntime_copy = cfs_rq->min_vruntime_copy;
4738 smp_rmb();
4739 min_vruntime = cfs_rq->min_vruntime;
4740 } while (min_vruntime != min_vruntime_copy);
4741#else
4742 min_vruntime = cfs_rq->min_vruntime;
4743#endif
4744
4745 se->vruntime -= min_vruntime;
4746 record_wakee(p);
4747}
4748
4749#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() estimates, as seen from the root task group, how the
 * load on @cpu changes if @wl is added to @tg's cfs_rq on that cpu while
 * the group as a whole gains @wg.
 *
 * Adding weight to one cpu of a group does not make the group heavier
 * overall, but it does shift how the group's shares are distributed
 * between cpus.  Starting from the share a group entity would receive,
 *
 *	s_i = shares * rw_i / \Sum_j rw_j
 *
 * the function walks up the hierarchy, at each level recomputing the share
 * the modified runqueue would get and turning that change into the weight
 * delta seen by the parent, until the root is reached.
 */
4800static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4801{
4802 struct sched_entity *se = tg->se[cpu];
4803
4804 if (!tg->parent)
4805 return wl;
4806
4807 for_each_sched_entity(se) {
4808 long w, W;
4809
4810 tg = se->my_q->tg;
4811
4812
4813
4814
4815 W = wg + calc_tg_weight(tg, se->my_q);
4816
4817
4818
4819
4820 w = cfs_rq_load_avg(se->my_q) + wl;
4821
4822
4823
4824
4825 if (W > 0 && w < W)
4826 wl = (w * (long)tg->shares) / W;
4827 else
4828 wl = tg->shares;
4829
4830
4831
4832
4833
4834
4835 if (wl < MIN_SHARES)
4836 wl = MIN_SHARES;
4837
4838
4839
4840
4841 wl -= se->avg.load_avg;
4842
4843
4844
4845
4846
4847
4848
4849
4850 wg = 0;
4851 }
4852
4853 return wl;
4854}
4855#else
4856
4857static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4858{
4859 return wl;
4860}
4861
4862#endif
4863
/*
 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
 * A high flip rate on the waker combined with a sufficiently large flip
 * count on the wakee (relative to the LLC size) indicates a one-to-many
 * waker; pulling the wakee onto the waker's LLC would likely overload it,
 * so the wake-affine path is skipped in that case.
 */
4876static int wake_wide(struct task_struct *p)
4877{
4878 unsigned int master = current->wakee_flips;
4879 unsigned int slave = p->wakee_flips;
4880 int factor = this_cpu_read(sd_llc_size);
4881
4882 if (master < slave)
4883 swap(master, slave);
4884 if (slave < factor || master < slave * factor)
4885 return 0;
4886 return 1;
4887}
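
/*
 * Example with assumed numbers: with an LLC spanning 8 CPUs (factor == 8),
 * a wakee that has itself flipped 8 times and a waker that has flipped 70
 * times gives slave == 8 >= factor and master == 70 >= slave * factor == 64,
 * so wake_wide() returns 1 and the caller skips the wake-affine fast path.
 */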
4888
4889static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4890{
4891 s64 this_load, load;
4892 s64 this_eff_load, prev_eff_load;
4893 int idx, this_cpu, prev_cpu;
4894 struct task_group *tg;
4895 unsigned long weight;
4896 int balanced;
4897
4898 idx = sd->wake_idx;
4899 this_cpu = smp_processor_id();
4900 prev_cpu = task_cpu(p);
4901 load = source_load(prev_cpu, idx);
4902 this_load = target_load(this_cpu, idx);
4903
4904
4905
4906
4907
4908
4909 if (sync) {
4910 tg = task_group(current);
4911 weight = current->se.avg.load_avg;
4912
4913 this_load += effective_load(tg, this_cpu, -weight, -weight);
4914 load += effective_load(tg, prev_cpu, 0, -weight);
4915 }
4916
4917 tg = task_group(p);
4918 weight = p->se.avg.load_avg;
4919
4920
4921
4922
4923
4924
4925
4926
4927
4928
4929 this_eff_load = 100;
4930 this_eff_load *= capacity_of(prev_cpu);
4931
4932 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4933 prev_eff_load *= capacity_of(this_cpu);
4934
4935 if (this_load > 0) {
4936 this_eff_load *= this_load +
4937 effective_load(tg, this_cpu, weight, weight);
4938
4939 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4940 }
4941
4942 balanced = this_eff_load <= prev_eff_load;
4943
4944 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
4945
4946 if (!balanced)
4947 return 0;
4948
4949 schedstat_inc(sd, ttwu_move_affine);
4950 schedstat_inc(p, se.statistics.nr_wakeups_affine);
4951
4952 return 1;
4953}
4954
4955
4956
4957
4958
4959static struct sched_group *
4960find_idlest_group(struct sched_domain *sd, struct task_struct *p,
4961 int this_cpu, int sd_flag)
4962{
4963 struct sched_group *idlest = NULL, *group = sd->groups;
4964 unsigned long min_load = ULONG_MAX, this_load = 0;
4965 int load_idx = sd->forkexec_idx;
4966 int imbalance = 100 + (sd->imbalance_pct-100)/2;
4967
4968 if (sd_flag & SD_BALANCE_WAKE)
4969 load_idx = sd->wake_idx;
4970
4971 do {
4972 unsigned long load, avg_load;
4973 int local_group;
4974 int i;
4975
4976
4977 if (!cpumask_intersects(sched_group_cpus(group),
4978 tsk_cpus_allowed(p)))
4979 continue;
4980
4981 local_group = cpumask_test_cpu(this_cpu,
4982 sched_group_cpus(group));
4983
4984
4985 avg_load = 0;
4986
4987 for_each_cpu(i, sched_group_cpus(group)) {
4988
4989 if (local_group)
4990 load = source_load(i, load_idx);
4991 else
4992 load = target_load(i, load_idx);
4993
4994 avg_load += load;
4995 }
4996
4997
4998 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
4999
5000 if (local_group) {
5001 this_load = avg_load;
5002 } else if (avg_load < min_load) {
5003 min_load = avg_load;
5004 idlest = group;
5005 }
5006 } while (group = group->next, group != sd->groups);
5007
5008 if (!idlest || 100*this_load < imbalance*min_load)
5009 return NULL;
5010 return idlest;
5011}
5012
5013
5014
5015
5016static int
5017find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
5018{
5019 unsigned long load, min_load = ULONG_MAX;
5020 unsigned int min_exit_latency = UINT_MAX;
5021 u64 latest_idle_timestamp = 0;
5022 int least_loaded_cpu = this_cpu;
5023 int shallowest_idle_cpu = -1;
5024 int i;
5025
5026
5027 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
5028 if (idle_cpu(i)) {
5029 struct rq *rq = cpu_rq(i);
5030 struct cpuidle_state *idle = idle_get_state(rq);
5031 if (idle && idle->exit_latency < min_exit_latency) {
5032
5033
5034
5035
5036
5037 min_exit_latency = idle->exit_latency;
5038 latest_idle_timestamp = rq->idle_stamp;
5039 shallowest_idle_cpu = i;
5040 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
5041 rq->idle_stamp > latest_idle_timestamp) {
5042
5043
5044
5045
5046
5047 latest_idle_timestamp = rq->idle_stamp;
5048 shallowest_idle_cpu = i;
5049 }
5050 } else if (shallowest_idle_cpu == -1) {
5051 load = weighted_cpuload(i);
5052 if (load < min_load || (load == min_load && i == this_cpu)) {
5053 min_load = load;
5054 least_loaded_cpu = i;
5055 }
5056 }
5057 }
5058
5059 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
5060}
5061
5062
5063
5064
5065static int select_idle_sibling(struct task_struct *p, int target)
5066{
5067 struct sched_domain *sd;
5068 struct sched_group *sg;
5069 int i = task_cpu(p);
5070
5071 if (idle_cpu(target))
5072 return target;
5073
5074
5075
5076
5077 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
5078 return i;
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095 sd = rcu_dereference(per_cpu(sd_llc, target));
5096 for_each_lower_domain(sd) {
5097 sg = sd->groups;
5098 do {
5099 if (!cpumask_intersects(sched_group_cpus(sg),
5100 tsk_cpus_allowed(p)))
5101 goto next;
5102
5103
5104 for_each_cpu(i, sched_group_cpus(sg)) {
5105 if (i == target || !idle_cpu(i))
5106 goto next;
5107 }
5108
5109
5110
5111
5112
5113 target = cpumask_first_and(sched_group_cpus(sg),
5114 tsk_cpus_allowed(p));
5115 goto done;
5116next:
5117 sg = sg->next;
5118 } while (sg != sd->groups);
5119 }
5120done:
5121 return target;
5122}
5123
/*
 * cpu_util() returns the amount of capacity of a CPU that is used by CFS
 * tasks.  util_avg is the running-time average in capacity units
 * (SCHED_CAPACITY_SCALE at full capacity), already discounted for frequency
 * and cpu scaling, so it can be compared directly against capacity_orig.
 * Because of decay lag after tasks migrate away or block, util_avg can
 * transiently exceed the CPU's capacity, so the result is clamped.
 */
5150static int cpu_util(int cpu)
5151{
5152 unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
5153 unsigned long capacity = capacity_orig_of(cpu);
5154
5155 return (util >= capacity) ? capacity : util;
5156}
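
/*
 * Example (values assumed): a CPU whose root cfs_rq shows util_avg == 300
 * against an original capacity of 1024 is reported as roughly 30% utilized;
 * if decay lag ever leaves util_avg above the capacity, the value is
 * clamped rather than reporting more than 100%.
 */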
5157
5158
/*
 * select_task_rq_fair: Select a target run-queue for the waking task in
 * domains that have the 'sd_flag' flag set (SD_BALANCE_WAKE,
 * SD_BALANCE_FORK or SD_BALANCE_EXEC).
 *
 * Balances load by selecting the idlest cpu in the idlest group, or, under
 * certain conditions, an idle sibling cpu if the domain has SD_WAKE_AFFINE
 * set.
 *
 * Returns the target cpu number.  Preemption must be disabled.
 */
5170static int
5171select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
5172{
5173 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
5174 int cpu = smp_processor_id();
5175 int new_cpu = prev_cpu;
5176 int want_affine = 0;
5177 int sync = wake_flags & WF_SYNC;
5178
5179 if (sd_flag & SD_BALANCE_WAKE)
5180 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
5181
5182 rcu_read_lock();
5183 for_each_domain(cpu, tmp) {
5184 if (!(tmp->flags & SD_LOAD_BALANCE))
5185 break;
5186
5187
5188
5189
5190
5191 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
5192 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
5193 affine_sd = tmp;
5194 break;
5195 }
5196
5197 if (tmp->flags & sd_flag)
5198 sd = tmp;
5199 else if (!want_affine)
5200 break;
5201 }
5202
5203 if (affine_sd) {
5204 sd = NULL;
5205 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
5206 new_cpu = cpu;
5207 }
5208
5209 if (!sd) {
5210 if (sd_flag & SD_BALANCE_WAKE)
5211 new_cpu = select_idle_sibling(p, new_cpu);
5212
5213 } else while (sd) {
5214 struct sched_group *group;
5215 int weight;
5216
5217 if (!(sd->flags & sd_flag)) {
5218 sd = sd->child;
5219 continue;
5220 }
5221
5222 group = find_idlest_group(sd, p, cpu, sd_flag);
5223 if (!group) {
5224 sd = sd->child;
5225 continue;
5226 }
5227
5228 new_cpu = find_idlest_cpu(group, p, cpu);
5229 if (new_cpu == -1 || new_cpu == cpu) {
5230
5231 sd = sd->child;
5232 continue;
5233 }
5234
5235
5236 cpu = new_cpu;
5237 weight = sd->span_weight;
5238 sd = NULL;
5239 for_each_domain(cpu, tmp) {
5240 if (weight <= tmp->span_weight)
5241 break;
5242 if (tmp->flags & sd_flag)
5243 sd = tmp;
5244 }
5245
5246 }
5247 rcu_read_unlock();
5248
5249 return new_cpu;
5250}
5251
5252
5253
5254
5255
5256
5257static void migrate_task_rq_fair(struct task_struct *p)
5258{
5259
5260
5261
5262
5263
5264
5265
5266 remove_entity_load_avg(&p->se);
5267
5268
5269 p->se.avg.last_update_time = 0;
5270
5271
5272 p->se.exec_start = 0;
5273}
5274
5275static void task_dead_fair(struct task_struct *p)
5276{
5277 remove_entity_load_avg(&p->se);
5278}
5279#endif
5280
5281static unsigned long
5282wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
5283{
5284 unsigned long gran = sysctl_sched_wakeup_granularity;
5285
5286
5287
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
5298
5299 return calc_delta_fair(gran, se);
5300}
5301
/*
 * Should 'se' preempt 'curr'?
 *
 * Returns  1 when se's vruntime lags curr's by more than the (scaled)
 * wakeup granularity, 0 when it lags by less than that, and -1 when se
 * is not behind curr at all.
 */
5316static int
5317wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
5318{
5319 s64 gran, vdiff = curr->vruntime - se->vruntime;
5320
5321 if (vdiff <= 0)
5322 return -1;
5323
5324 gran = wakeup_gran(curr, se);
5325 if (vdiff > gran)
5326 return 1;
5327
5328 return 0;
5329}
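
/*
 * Reading the return values with assumed numbers: with a 1ms wakeup
 * granularity (converted to the wakee's virtual time by calc_delta_fair()),
 * a wakee whose vruntime is 2ms behind curr yields 1 (preempt), one only
 * 0.5ms behind yields 0 (no preemption, though a buddy may still be
 * honoured), and one ahead of curr yields -1.
 */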
5330
5331static void set_last_buddy(struct sched_entity *se)
5332{
5333 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5334 return;
5335
5336 for_each_sched_entity(se)
5337 cfs_rq_of(se)->last = se;
5338}
5339
5340static void set_next_buddy(struct sched_entity *se)
5341{
5342 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5343 return;
5344
5345 for_each_sched_entity(se)
5346 cfs_rq_of(se)->next = se;
5347}
5348
5349static void set_skip_buddy(struct sched_entity *se)
5350{
5351 for_each_sched_entity(se)
5352 cfs_rq_of(se)->skip = se;
5353}
5354
/*
 * Preempt the current task with a newly woken task if needed:
 */
5358static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
5359{
5360 struct task_struct *curr = rq->curr;
5361 struct sched_entity *se = &curr->se, *pse = &p->se;
5362 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5363 int scale = cfs_rq->nr_running >= sched_nr_latency;
5364 int next_buddy_marked = 0;
5365
5366 if (unlikely(se == pse))
5367 return;
5368
5369
5370
5371
5372
5373
5374
5375 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
5376 return;
5377
5378 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
5379 set_next_buddy(pse);
5380 next_buddy_marked = 1;
5381 }
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393 if (test_tsk_need_resched(curr))
5394 return;
5395
5396
5397 if (unlikely(curr->policy == SCHED_IDLE) &&
5398 likely(p->policy != SCHED_IDLE))
5399 goto preempt;
5400
5401
5402
5403
5404
5405 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
5406 return;
5407
5408 find_matching_se(&se, &pse);
5409 update_curr(cfs_rq_of(se));
5410 BUG_ON(!pse);
5411 if (wakeup_preempt_entity(se, pse) == 1) {
5412
5413
5414
5415
5416 if (!next_buddy_marked)
5417 set_next_buddy(pse);
5418 goto preempt;
5419 }
5420
5421 return;
5422
5423preempt:
5424 resched_curr(rq);
5425
5426
5427
5428
5429
5430
5431
5432
5433
5434 if (unlikely(!se->on_rq || curr == rq->idle))
5435 return;
5436
5437 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
5438 set_last_buddy(se);
5439}
5440
5441static struct task_struct *
5442pick_next_task_fair(struct rq *rq, struct task_struct *prev)
5443{
5444 struct cfs_rq *cfs_rq = &rq->cfs;
5445 struct sched_entity *se;
5446 struct task_struct *p;
5447 int new_tasks;
5448
5449again:
5450#ifdef CONFIG_FAIR_GROUP_SCHED
5451 if (!cfs_rq->nr_running)
5452 goto idle;
5453
5454 if (prev->sched_class != &fair_sched_class)
5455 goto simple;
5456
5457
5458
5459
5460
5461
5462
5463
5464
5465 do {
5466 struct sched_entity *curr = cfs_rq->curr;
5467
5468
5469
5470
5471
5472
5473
5474 if (curr) {
5475 if (curr->on_rq)
5476 update_curr(cfs_rq);
5477 else
5478 curr = NULL;
5479
5480
5481
5482
5483
5484
5485
5486 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
5487 goto simple;
5488 }
5489
5490 se = pick_next_entity(cfs_rq, curr);
5491 cfs_rq = group_cfs_rq(se);
5492 } while (cfs_rq);
5493
5494 p = task_of(se);
5495
5496
5497
5498
5499
5500
5501 if (prev != p) {
5502 struct sched_entity *pse = &prev->se;
5503
5504 while (!(cfs_rq = is_same_group(se, pse))) {
5505 int se_depth = se->depth;
5506 int pse_depth = pse->depth;
5507
5508 if (se_depth <= pse_depth) {
5509 put_prev_entity(cfs_rq_of(pse), pse);
5510 pse = parent_entity(pse);
5511 }
5512 if (se_depth >= pse_depth) {
5513 set_next_entity(cfs_rq_of(se), se);
5514 se = parent_entity(se);
5515 }
5516 }
5517
5518 put_prev_entity(cfs_rq, pse);
5519 set_next_entity(cfs_rq, se);
5520 }
5521
5522 if (hrtick_enabled(rq))
5523 hrtick_start_fair(rq, p);
5524
5525 return p;
5526simple:
5527 cfs_rq = &rq->cfs;
5528#endif
5529
5530 if (!cfs_rq->nr_running)
5531 goto idle;
5532
5533 put_prev_task(rq, prev);
5534
5535 do {
5536 se = pick_next_entity(cfs_rq, NULL);
5537 set_next_entity(cfs_rq, se);
5538 cfs_rq = group_cfs_rq(se);
5539 } while (cfs_rq);
5540
5541 p = task_of(se);
5542
5543 if (hrtick_enabled(rq))
5544 hrtick_start_fair(rq, p);
5545
5546 return p;
5547
5548idle:
5549
5550
5551
5552
5553
5554
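	/*
	 * No runnable CFS tasks left: try to pull some from other CPUs.
	 * idle_balance() drops and retakes rq->lock, hence the lockdep
	 * unpin/pin dance.  It may also make a higher-priority class
	 * runnable, in which case the pick must be retried from the top.
	 */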
5555 lockdep_unpin_lock(&rq->lock);
5556 new_tasks = idle_balance(rq);
5557 lockdep_pin_lock(&rq->lock);
5558
5559
5560
5561
5562
5563 if (new_tasks < 0)
5564 return RETRY_TASK;
5565
5566 if (new_tasks > 0)
5567 goto again;
5568
5569 return NULL;
5570}
5571
5572
5573
5574
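/*
 * Account for a descheduled task: put every entity in its hierarchy.
 */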
5575static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
5576{
5577 struct sched_entity *se = &prev->se;
5578 struct cfs_rq *cfs_rq;
5579
5580 for_each_sched_entity(se) {
5581 cfs_rq = cfs_rq_of(se);
5582 put_prev_entity(cfs_rq, se);
5583 }
5584}
5585
5586
5587
5588
5589
5590
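/*
 * sched_yield() support: mark the yielding task as the 'skip' buddy so
 * that pick_next_entity() prefers some other entity on the next pick.
 */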
5591static void yield_task_fair(struct rq *rq)
5592{
5593 struct task_struct *curr = rq->curr;
5594 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5595 struct sched_entity *se = &curr->se;
5596
5597
5598
5599
5600 if (unlikely(rq->nr_running == 1))
5601 return;
5602
5603 clear_buddies(cfs_rq, se);
5604
5605 if (curr->policy != SCHED_BATCH) {
5606 update_rq_clock(rq);
5607
5608
5609
5610 update_curr(cfs_rq);
5611
5612
5613
5614
5615
5616 rq_clock_skip_update(rq, true);
5617 }
5618
5619 set_skip_buddy(se);
5620}
5621
5622static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
5623{
5624 struct sched_entity *se = &p->se;
5625
5626
5627 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
5628 return false;
5629
5630
5631 set_next_buddy(se);
5632
5633 yield_task_fair(rq);
5634
5635 return true;
5636}
5637
5638#ifdef CONFIG_SMP
5639
5756
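/*
 * Fair scheduling class load-balancing methods:
 *
 * The code below keeps the per-CPU runqueues balanced within each
 * scheduling domain by periodically (and on newly idle CPUs) pulling
 * tasks from the busiest group/queue towards less loaded CPUs.
 */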
5757static unsigned long __read_mostly max_load_balance_interval = HZ/10;
5758
5759enum fbq_type { regular, remote, all };
5760
5761#define LBF_ALL_PINNED 0x01
5762#define LBF_NEED_BREAK 0x02
5763#define LBF_DST_PINNED 0x04
5764#define LBF_SOME_PINNED 0x08
5765
5766struct lb_env {
5767 struct sched_domain *sd;
5768
5769 struct rq *src_rq;
5770 int src_cpu;
5771
5772 int dst_cpu;
5773 struct rq *dst_rq;
5774
5775 struct cpumask *dst_grpmask;
5776 int new_dst_cpu;
5777 enum cpu_idle_type idle;
5778 long imbalance;
5779
5780 struct cpumask *cpus;
5781
5782 unsigned int flags;
5783
5784 unsigned int loop;
5785 unsigned int loop_break;
5786 unsigned int loop_max;
5787
5788 enum fbq_type fbq_type;
5789 struct list_head tasks;
5790};
5791
5792
5793
5794
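/*
 * Is this task likely cache-hot on its current CPU?
 */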
5795static int task_hot(struct task_struct *p, struct lb_env *env)
5796{
5797 s64 delta;
5798
5799 lockdep_assert_held(&env->src_rq->lock);
5800
5801 if (p->sched_class != &fair_sched_class)
5802 return 0;
5803
5804 if (unlikely(p->policy == SCHED_IDLE))
5805 return 0;
5806
5807
5808
5809
5810 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
5811 (&p->se == cfs_rq_of(&p->se)->next ||
5812 &p->se == cfs_rq_of(&p->se)->last))
5813 return 1;
5814
5815 if (sysctl_sched_migration_cost == -1)
5816 return 1;
5817 if (sysctl_sched_migration_cost == 0)
5818 return 0;
5819
5820 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
5821
5822 return delta < (s64)sysctl_sched_migration_cost;
5823}
5824
5825#ifdef CONFIG_NUMA_BALANCING
5826
5827
5828
5829
5830
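/*
 * Returns 1 if migrating the task degrades NUMA locality, 0 if it
 * improves it (or leaves it no worse), and -1 if it cannot be judged
 * (NUMA balancing disabled, no fault data, or same node).
 */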
5831static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
5832{
5833 struct numa_group *numa_group = rcu_dereference(p->numa_group);
5834 unsigned long src_faults, dst_faults;
5835 int src_nid, dst_nid;
5836
5837 if (!static_branch_likely(&sched_numa_balancing))
5838 return -1;
5839
5840 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
5841 return -1;
5842
5843 src_nid = cpu_to_node(env->src_cpu);
5844 dst_nid = cpu_to_node(env->dst_cpu);
5845
5846 if (src_nid == dst_nid)
5847 return -1;
5848
5849
5850 if (src_nid == p->numa_preferred_nid) {
5851 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
5852 return 1;
5853 else
5854 return -1;
5855 }
5856
5857
5858 if (dst_nid == p->numa_preferred_nid)
5859 return 0;
5860
5861 if (numa_group) {
5862 src_faults = group_faults(p, src_nid);
5863 dst_faults = group_faults(p, dst_nid);
5864 } else {
5865 src_faults = task_faults(p, src_nid);
5866 dst_faults = task_faults(p, dst_nid);
5867 }
5868
5869 return dst_faults < src_faults;
5870}
5871
5872#else
5873static inline int migrate_degrades_locality(struct task_struct *p,
5874 struct lb_env *env)
5875{
5876 return -1;
5877}
5878#endif
5879
5880
5881
5882
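/*
 * can_migrate_task - may task p from env->src_rq be migrated to env->dst_cpu?
 *
 * A task is not migrated if it would be throttled on the destination,
 * is not allowed to run on the destination CPU, is currently running,
 * or is still cache-hot (unless balancing has failed repeatedly).
 */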
5883static
5884int can_migrate_task(struct task_struct *p, struct lb_env *env)
5885{
5886 int tsk_cache_hot;
5887
5888 lockdep_assert_held(&env->src_rq->lock);
5889
5890
5891
5892
5893
5894
5895
5896
5897 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
5898 return 0;
5899
5900 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
5901 int cpu;
5902
5903 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
5904
5905 env->flags |= LBF_SOME_PINNED;
5906
5907
5908
5909
5910
5911
5912
5913
5914
5915 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
5916 return 0;
5917
5918
5919 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
5920 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
5921 env->flags |= LBF_DST_PINNED;
5922 env->new_dst_cpu = cpu;
5923 break;
5924 }
5925 }
5926
5927 return 0;
5928 }
5929
5930
5931 env->flags &= ~LBF_ALL_PINNED;
5932
5933 if (task_running(env->src_rq, p)) {
5934 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
5935 return 0;
5936 }
5937
5938
5939
5940
5941
5942
5943
5944 tsk_cache_hot = migrate_degrades_locality(p, env);
5945 if (tsk_cache_hot == -1)
5946 tsk_cache_hot = task_hot(p, env);
5947
5948 if (tsk_cache_hot <= 0 ||
5949 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
5950 if (tsk_cache_hot == 1) {
5951 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5952 schedstat_inc(p, se.statistics.nr_forced_migrations);
5953 }
5954 return 1;
5955 }
5956
5957 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
5958 return 0;
5959}
5960
5961
5962
5963
5964static void detach_task(struct task_struct *p, struct lb_env *env)
5965{
5966 lockdep_assert_held(&env->src_rq->lock);
5967
5968 p->on_rq = TASK_ON_RQ_MIGRATING;
5969 deactivate_task(env->src_rq, p, 0);
5970 set_task_cpu(p, env->dst_cpu);
5971}
5972
5973
5974
5975
5976
5977
5978
5979static struct task_struct *detach_one_task(struct lb_env *env)
5980{
5981 struct task_struct *p, *n;
5982
5983 lockdep_assert_held(&env->src_rq->lock);
5984
5985 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
5986 if (!can_migrate_task(p, env))
5987 continue;
5988
5989 detach_task(p, env);
5990
5991
5992
5993
5994
5995
5996
5997 schedstat_inc(env->sd, lb_gained[env->idle]);
5998 return p;
5999 }
6000 return NULL;
6001}
6002
6003static const unsigned int sched_nr_migrate_break = 32;
6004
6005
6006
6007
6008
6009
6010
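/*
 * detach_tasks() -- detach up to env->imbalance weighted load from
 * env->src_rq, collecting the detached tasks on env->tasks.
 *
 * Returns the number of detached tasks, or 0 if detaching failed.
 */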
6011static int detach_tasks(struct lb_env *env)
6012{
6013 struct list_head *tasks = &env->src_rq->cfs_tasks;
6014 struct task_struct *p;
6015 unsigned long load;
6016 int detached = 0;
6017
6018 lockdep_assert_held(&env->src_rq->lock);
6019
6020 if (env->imbalance <= 0)
6021 return 0;
6022
6023 while (!list_empty(tasks)) {
6024
6025
6026
6027
6028 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
6029 break;
6030
6031 p = list_first_entry(tasks, struct task_struct, se.group_node);
6032
6033 env->loop++;
6034
6035 if (env->loop > env->loop_max)
6036 break;
6037
6038
6039 if (env->loop > env->loop_break) {
6040 env->loop_break += sched_nr_migrate_break;
6041 env->flags |= LBF_NEED_BREAK;
6042 break;
6043 }
6044
6045 if (!can_migrate_task(p, env))
6046 goto next;
6047
6048 load = task_h_load(p);
6049
6050 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
6051 goto next;
6052
6053 if ((load / 2) > env->imbalance)
6054 goto next;
6055
6056 detach_task(p, env);
6057 list_add(&p->se.group_node, &env->tasks);
6058
6059 detached++;
6060 env->imbalance -= load;
6061
6062#ifdef CONFIG_PREEMPT
6063
6064
6065
6066
6067
6068 if (env->idle == CPU_NEWLY_IDLE)
6069 break;
6070#endif
6071
6072
6073
6074
6075
6076 if (env->imbalance <= 0)
6077 break;
6078
6079 continue;
6080next:
6081 list_move_tail(&p->se.group_node, tasks);
6082 }
6083
6084
6085
6086
6087
6088
6089 schedstat_add(env->sd, lb_gained[env->idle], detached);
6090
6091 return detached;
6092}
6093
6094
6095
6096
6097static void attach_task(struct rq *rq, struct task_struct *p)
6098{
6099 lockdep_assert_held(&rq->lock);
6100
6101 BUG_ON(task_rq(p) != rq);
6102 activate_task(rq, p, 0);
6103 p->on_rq = TASK_ON_RQ_QUEUED;
6104 check_preempt_curr(rq, p, 0);
6105}
6106
6107
6108
6109
6110
6111static void attach_one_task(struct rq *rq, struct task_struct *p)
6112{
6113 raw_spin_lock(&rq->lock);
6114 attach_task(rq, p);
6115 raw_spin_unlock(&rq->lock);
6116}
6117
6118
6119
6120
6121
6122static void attach_tasks(struct lb_env *env)
6123{
6124 struct list_head *tasks = &env->tasks;
6125 struct task_struct *p;
6126
6127 raw_spin_lock(&env->dst_rq->lock);
6128
6129 while (!list_empty(tasks)) {
6130 p = list_first_entry(tasks, struct task_struct, se.group_node);
6131 list_del_init(&p->se.group_node);
6132
6133 attach_task(env->dst_rq, p);
6134 }
6135
6136 raw_spin_unlock(&env->dst_rq->lock);
6137}
6138
6139#ifdef CONFIG_FAIR_GROUP_SCHED
6140static void update_blocked_averages(int cpu)
6141{
6142 struct rq *rq = cpu_rq(cpu);
6143 struct cfs_rq *cfs_rq;
6144 unsigned long flags;
6145
6146 raw_spin_lock_irqsave(&rq->lock, flags);
6147 update_rq_clock(rq);
6148
6149
6150
6151
6152
6153 for_each_leaf_cfs_rq(rq, cfs_rq) {
6154
6155 if (throttled_hierarchy(cfs_rq))
6156 continue;
6157
6158 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
6159 update_tg_load_avg(cfs_rq, 0);
6160 }
6161 raw_spin_unlock_irqrestore(&rq->lock, flags);
6162}
6163
6164
6165
6166
6167
6168
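/*
 * Recompute the hierarchical load (h_load) of a group cfs_rq by walking
 * up to the root (or the last level already computed) and back down,
 * scaling by each level's share of its parent's load.  Results are
 * cached for one jiffy.
 */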
6169static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
6170{
6171 struct rq *rq = rq_of(cfs_rq);
6172 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
6173 unsigned long now = jiffies;
6174 unsigned long load;
6175
6176 if (cfs_rq->last_h_load_update == now)
6177 return;
6178
6179 cfs_rq->h_load_next = NULL;
6180 for_each_sched_entity(se) {
6181 cfs_rq = cfs_rq_of(se);
6182 cfs_rq->h_load_next = se;
6183 if (cfs_rq->last_h_load_update == now)
6184 break;
6185 }
6186
6187 if (!se) {
6188 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
6189 cfs_rq->last_h_load_update = now;
6190 }
6191
6192 while ((se = cfs_rq->h_load_next) != NULL) {
6193 load = cfs_rq->h_load;
6194 load = div64_ul(load * se->avg.load_avg,
6195 cfs_rq_load_avg(cfs_rq) + 1);
6196 cfs_rq = group_cfs_rq(se);
6197 cfs_rq->h_load = load;
6198 cfs_rq->last_h_load_update = now;
6199 }
6200}
6201
6202static unsigned long task_h_load(struct task_struct *p)
6203{
6204 struct cfs_rq *cfs_rq = task_cfs_rq(p);
6205
6206 update_cfs_rq_h_load(cfs_rq);
6207 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
6208 cfs_rq_load_avg(cfs_rq) + 1);
6209}
6210#else
6211static inline void update_blocked_averages(int cpu)
6212{
6213 struct rq *rq = cpu_rq(cpu);
6214 struct cfs_rq *cfs_rq = &rq->cfs;
6215 unsigned long flags;
6216
6217 raw_spin_lock_irqsave(&rq->lock, flags);
6218 update_rq_clock(rq);
6219 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
6220 raw_spin_unlock_irqrestore(&rq->lock, flags);
6221}
6222
6223static unsigned long task_h_load(struct task_struct *p)
6224{
6225 return p->se.avg.load_avg;
6226}
6227#endif
6228
6229
6230
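/*
 * Group classification used by the find_busiest_group() heuristics,
 * ordered from least to most in need of balancing.
 */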
6231enum group_type {
6232 group_other = 0,
6233 group_imbalanced,
6234 group_overloaded,
6235};
6236
6237
6238
6239
6240struct sg_lb_stats {
6241 unsigned long avg_load;
6242 unsigned long group_load;
6243 unsigned long sum_weighted_load;
6244 unsigned long load_per_task;
6245 unsigned long group_capacity;
6246 unsigned long group_util;
6247 unsigned int sum_nr_running;
6248 unsigned int idle_cpus;
6249 unsigned int group_weight;
6250 enum group_type group_type;
6251 int group_no_capacity;
6252#ifdef CONFIG_NUMA_BALANCING
6253 unsigned int nr_numa_running;
6254 unsigned int nr_preferred_running;
6255#endif
6256};
6257
6258
6259
6260
6261
6262struct sd_lb_stats {
6263 struct sched_group *busiest;
6264 struct sched_group *local;
6265 unsigned long total_load;
6266 unsigned long total_capacity;
6267 unsigned long avg_load;
6268
6269 struct sg_lb_stats busiest_stat;
6270 struct sg_lb_stats local_stat;
6271};
6272
6273static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
6274{
6275
6276
6277
6278
6279
6280
6281 *sds = (struct sd_lb_stats){
6282 .busiest = NULL,
6283 .local = NULL,
6284 .total_load = 0UL,
6285 .total_capacity = 0UL,
6286 .busiest_stat = {
6287 .avg_load = 0UL,
6288 .sum_nr_running = 0,
6289 .group_type = group_other,
6290 },
6291 };
6292}
6293
6294
6295
6296
6297
6298
6299
6300
6301static inline int get_sd_load_idx(struct sched_domain *sd,
6302 enum cpu_idle_type idle)
6303{
6304 int load_idx;
6305
6306 switch (idle) {
6307 case CPU_NOT_IDLE:
6308 load_idx = sd->busy_idx;
6309 break;
6310
6311 case CPU_NEWLY_IDLE:
6312 load_idx = sd->newidle_idx;
6313 break;
6314 default:
6315 load_idx = sd->idle_idx;
6316 break;
6317 }
6318
6319 return load_idx;
6320}
6321
6322static unsigned long scale_rt_capacity(int cpu)
6323{
6324 struct rq *rq = cpu_rq(cpu);
6325 u64 total, used, age_stamp, avg;
6326 s64 delta;
6327
6328
6329
6330
6331
6332 age_stamp = READ_ONCE(rq->age_stamp);
6333 avg = READ_ONCE(rq->rt_avg);
6334 delta = __rq_clock_broken(rq) - age_stamp;
6335
6336 if (unlikely(delta < 0))
6337 delta = 0;
6338
6339 total = sched_avg_period() + delta;
6340
6341 used = div_u64(avg, total);
6342
6343 if (likely(used < SCHED_CAPACITY_SCALE))
6344 return SCHED_CAPACITY_SCALE - used;
6345
6346 return 1;
6347}
6348
6349static void update_cpu_capacity(struct sched_domain *sd, int cpu)
6350{
6351 unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
6352 struct sched_group *sdg = sd->groups;
6353
6354 cpu_rq(cpu)->cpu_capacity_orig = capacity;
6355
6356 capacity *= scale_rt_capacity(cpu);
6357 capacity >>= SCHED_CAPACITY_SHIFT;
6358
6359 if (!capacity)
6360 capacity = 1;
6361
6362 cpu_rq(cpu)->cpu_capacity = capacity;
6363 sdg->sgc->capacity = capacity;
6364}
6365
6366void update_group_capacity(struct sched_domain *sd, int cpu)
6367{
6368 struct sched_domain *child = sd->child;
6369 struct sched_group *group, *sdg = sd->groups;
6370 unsigned long capacity;
6371 unsigned long interval;
6372
6373 interval = msecs_to_jiffies(sd->balance_interval);
6374 interval = clamp(interval, 1UL, max_load_balance_interval);
6375 sdg->sgc->next_update = jiffies + interval;
6376
6377 if (!child) {
6378 update_cpu_capacity(sd, cpu);
6379 return;
6380 }
6381
6382 capacity = 0;
6383
6384 if (child->flags & SD_OVERLAP) {
6385
6386
6387
6388
6389
6390 for_each_cpu(cpu, sched_group_cpus(sdg)) {
6391 struct sched_group_capacity *sgc;
6392 struct rq *rq = cpu_rq(cpu);
6393
6394
6395
6396
6397
6398
6399
6400
6401
6402
6403
6404
6405 if (unlikely(!rq->sd)) {
6406 capacity += capacity_of(cpu);
6407 continue;
6408 }
6409
6410 sgc = rq->sd->groups->sgc;
6411 capacity += sgc->capacity;
6412 }
6413 } else {
6414
6415
6416
6417
6418
6419 group = child->groups;
6420 do {
6421 capacity += group->sgc->capacity;
6422 group = group->next;
6423 } while (group != child->groups);
6424 }
6425
6426 sdg->sgc->capacity = capacity;
6427}
6428
6429
6430
6431
6432
6433
6434static inline int
6435check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
6436{
6437 return ((rq->cpu_capacity * sd->imbalance_pct) <
6438 (rq->cpu_capacity_orig * 100));
6439}
6440
6441
6442
6443
6444
6445
6446
6447
6448
6449
6450
6451
6452
6453
6454
6455
6456
6457
6458
6459
6460
6461
6462
6463
6464
6465
6466
6467
6468
6469
6470static inline int sg_imbalanced(struct sched_group *group)
6471{
6472 return group->sgc->imbalance;
6473}
6474
6475
6476
6477
6478
6479
6480
6481
6482
6483
6484
6485
6486
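/*
 * group_has_capacity returns true if the group has spare capacity:
 * fewer running tasks than CPUs, or utilization comfortably below the
 * group's capacity (allowing for the domain's imbalance_pct margin).
 */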
6487static inline bool
6488group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
6489{
6490 if (sgs->sum_nr_running < sgs->group_weight)
6491 return true;
6492
6493 if ((sgs->group_capacity * 100) >
6494 (sgs->group_util * env->sd->imbalance_pct))
6495 return true;
6496
6497 return false;
6498}
6499
6500
6501
6502
6503
6504
6505
6506
6507
6508static inline bool
6509group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
6510{
6511 if (sgs->sum_nr_running <= sgs->group_weight)
6512 return false;
6513
6514 if ((sgs->group_capacity * 100) <
6515 (sgs->group_util * env->sd->imbalance_pct))
6516 return true;
6517
6518 return false;
6519}
6520
6521static inline enum
6522group_type group_classify(struct sched_group *group,
6523 struct sg_lb_stats *sgs)
6524{
6525 if (sgs->group_no_capacity)
6526 return group_overloaded;
6527
6528 if (sg_imbalanced(group))
6529 return group_imbalanced;
6530
6531 return group_other;
6532}
6533
6534
6535
6536
6537
6538
6539
6540
6541
6542
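/*
 * update_sg_lb_stats - gather load, utilization and task counts for
 * every CPU of @group into @sgs and classify the group; *overload is
 * set when any runqueue is running more than one task.
 */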
6543static inline void update_sg_lb_stats(struct lb_env *env,
6544 struct sched_group *group, int load_idx,
6545 int local_group, struct sg_lb_stats *sgs,
6546 bool *overload)
6547{
6548 unsigned long load;
6549 int i, nr_running;
6550
6551 memset(sgs, 0, sizeof(*sgs));
6552
6553 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6554 struct rq *rq = cpu_rq(i);
6555
6556
6557 if (local_group)
6558 load = target_load(i, load_idx);
6559 else
6560 load = source_load(i, load_idx);
6561
6562 sgs->group_load += load;
6563 sgs->group_util += cpu_util(i);
6564 sgs->sum_nr_running += rq->cfs.h_nr_running;
6565
6566 nr_running = rq->nr_running;
6567 if (nr_running > 1)
6568 *overload = true;
6569
6570#ifdef CONFIG_NUMA_BALANCING
6571 sgs->nr_numa_running += rq->nr_numa_running;
6572 sgs->nr_preferred_running += rq->nr_preferred_running;
6573#endif
6574 sgs->sum_weighted_load += weighted_cpuload(i);
6575
6576
6577
6578 if (!nr_running && idle_cpu(i))
6579 sgs->idle_cpus++;
6580 }
6581
6582
6583 sgs->group_capacity = group->sgc->capacity;
6584 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
6585
6586 if (sgs->sum_nr_running)
6587 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
6588
6589 sgs->group_weight = group->group_weight;
6590
6591 sgs->group_no_capacity = group_is_overloaded(env, sgs);
6592 sgs->group_type = group_classify(group, sgs);
6593}
6594
6595
6596
6597
6598
6599
6600
6601
6602
6603
6604
6605
6606
6607
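/*
 * update_sd_pick_busiest - decide whether @sg should replace the
 * currently recorded busiest group: a higher group_type wins, then a
 * higher avg_load; SD_ASYM_PACKING additionally prefers pulling load
 * towards lower-numbered CPUs.
 */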
6608static bool update_sd_pick_busiest(struct lb_env *env,
6609 struct sd_lb_stats *sds,
6610 struct sched_group *sg,
6611 struct sg_lb_stats *sgs)
6612{
6613 struct sg_lb_stats *busiest = &sds->busiest_stat;
6614
6615 if (sgs->group_type > busiest->group_type)
6616 return true;
6617
6618 if (sgs->group_type < busiest->group_type)
6619 return false;
6620
6621 if (sgs->avg_load <= busiest->avg_load)
6622 return false;
6623
6624
6625 if (!(env->sd->flags & SD_ASYM_PACKING))
6626 return true;
6627
6628
6629
6630
6631
6632
6633 if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
6634 if (!sds->busiest)
6635 return true;
6636
6637 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
6638 return true;
6639 }
6640
6641 return false;
6642}
6643
6644#ifdef CONFIG_NUMA_BALANCING
6645static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6646{
6647 if (sgs->sum_nr_running > sgs->nr_numa_running)
6648 return regular;
6649 if (sgs->sum_nr_running > sgs->nr_preferred_running)
6650 return remote;
6651 return all;
6652}
6653
6654static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6655{
6656 if (rq->nr_running > rq->nr_numa_running)
6657 return regular;
6658 if (rq->nr_running > rq->nr_preferred_running)
6659 return remote;
6660 return all;
6661}
6662#else
6663static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6664{
6665 return all;
6666}
6667
6668static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6669{
6670 return regular;
6671}
6672#endif
6673
6674
6675
6676
6677
6678
6679static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
6680{
6681 struct sched_domain *child = env->sd->child;
6682 struct sched_group *sg = env->sd->groups;
6683 struct sg_lb_stats tmp_sgs;
6684 int load_idx, prefer_sibling = 0;
6685 bool overload = false;
6686
6687 if (child && child->flags & SD_PREFER_SIBLING)
6688 prefer_sibling = 1;
6689
6690 load_idx = get_sd_load_idx(env->sd, env->idle);
6691
6692 do {
6693 struct sg_lb_stats *sgs = &tmp_sgs;
6694 int local_group;
6695
6696 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
6697 if (local_group) {
6698 sds->local = sg;
6699 sgs = &sds->local_stat;
6700
6701 if (env->idle != CPU_NEWLY_IDLE ||
6702 time_after_eq(jiffies, sg->sgc->next_update))
6703 update_group_capacity(env->sd, env->dst_cpu);
6704 }
6705
6706 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
6707 &overload);
6708
6709 if (local_group)
6710 goto next_group;
6711
6712
6713
6714
6715
6716
6717
6718
6719
6720
6721
6722 if (prefer_sibling && sds->local &&
6723 group_has_capacity(env, &sds->local_stat) &&
6724 (sgs->sum_nr_running > 1)) {
6725 sgs->group_no_capacity = 1;
6726 sgs->group_type = group_classify(sg, sgs);
6727 }
6728
6729 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
6730 sds->busiest = sg;
6731 sds->busiest_stat = *sgs;
6732 }
6733
6734next_group:
6735
6736 sds->total_load += sgs->group_load;
6737 sds->total_capacity += sgs->group_capacity;
6738
6739 sg = sg->next;
6740 } while (sg != env->sd->groups);
6741
6742 if (env->sd->flags & SD_NUMA)
6743 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
6744
6745 if (!env->sd->parent) {
6746
6747 if (env->dst_rq->rd->overload != overload)
6748 env->dst_rq->rd->overload = overload;
6749 }
6750
6751}
6752
6753
6754
6755
6756
6757
6758
6759
6760
6761
6762
6763
6764
6765
6766
6767
6768
6769
6770
6771
6772
6773
6774
6775
6776static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
6777{
6778 int busiest_cpu;
6779
6780 if (!(env->sd->flags & SD_ASYM_PACKING))
6781 return 0;
6782
6783 if (!sds->busiest)
6784 return 0;
6785
6786 busiest_cpu = group_first_cpu(sds->busiest);
6787 if (env->dst_cpu > busiest_cpu)
6788 return 0;
6789
6790 env->imbalance = DIV_ROUND_CLOSEST(
6791 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
6792 SCHED_CAPACITY_SCALE);
6793
6794 return 1;
6795}
6796
6797
6798
6799
6800
6801
6802
6803
6804static inline
6805void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6806{
6807 unsigned long tmp, capa_now = 0, capa_move = 0;
6808 unsigned int imbn = 2;
6809 unsigned long scaled_busy_load_per_task;
6810 struct sg_lb_stats *local, *busiest;
6811
6812 local = &sds->local_stat;
6813 busiest = &sds->busiest_stat;
6814
6815 if (!local->sum_nr_running)
6816 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
6817 else if (busiest->load_per_task > local->load_per_task)
6818 imbn = 1;
6819
6820 scaled_busy_load_per_task =
6821 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
6822 busiest->group_capacity;
6823
6824 if (busiest->avg_load + scaled_busy_load_per_task >=
6825 local->avg_load + (scaled_busy_load_per_task * imbn)) {
6826 env->imbalance = busiest->load_per_task;
6827 return;
6828 }
6829
6830
6831
6832
6833
6834
6835
6836 capa_now += busiest->group_capacity *
6837 min(busiest->load_per_task, busiest->avg_load);
6838 capa_now += local->group_capacity *
6839 min(local->load_per_task, local->avg_load);
6840 capa_now /= SCHED_CAPACITY_SCALE;
6841
6842
6843 if (busiest->avg_load > scaled_busy_load_per_task) {
6844 capa_move += busiest->group_capacity *
6845 min(busiest->load_per_task,
6846 busiest->avg_load - scaled_busy_load_per_task);
6847 }
6848
6849
6850 if (busiest->avg_load * busiest->group_capacity <
6851 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
6852 tmp = (busiest->avg_load * busiest->group_capacity) /
6853 local->group_capacity;
6854 } else {
6855 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
6856 local->group_capacity;
6857 }
6858 capa_move += local->group_capacity *
6859 min(local->load_per_task, local->avg_load + tmp);
6860 capa_move /= SCHED_CAPACITY_SCALE;
6861
6862
6863 if (capa_move > capa_now)
6864 env->imbalance = busiest->load_per_task;
6865}
6866
6867
6868
6869
6870
6871
6872
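/*
 * calculate_imbalance - compute how much weighted load should be moved
 * from the busiest group to the local group to even them out; falls
 * back to fix_small_imbalance() when the result is below the busiest
 * group's average per-task load.
 */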
6873static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6874{
6875 unsigned long max_pull, load_above_capacity = ~0UL;
6876 struct sg_lb_stats *local, *busiest;
6877
6878 local = &sds->local_stat;
6879 busiest = &sds->busiest_stat;
6880
6881 if (busiest->group_type == group_imbalanced) {
6882
6883
6884
6885
6886 busiest->load_per_task =
6887 min(busiest->load_per_task, sds->avg_load);
6888 }
6889
6890
6891
6892
6893
6894
6895 if (busiest->avg_load <= sds->avg_load ||
6896 local->avg_load >= sds->avg_load) {
6897 env->imbalance = 0;
6898 return fix_small_imbalance(env, sds);
6899 }
6900
6901
6902
6903
6904 if (busiest->group_type == group_overloaded &&
6905 local->group_type == group_overloaded) {
6906 load_above_capacity = busiest->sum_nr_running *
6907 SCHED_LOAD_SCALE;
6908 if (load_above_capacity > busiest->group_capacity)
6909 load_above_capacity -= busiest->group_capacity;
6910 else
6911 load_above_capacity = ~0UL;
6912 }
6913
6914
6915
6916
6917
6918
6919
6920
6921
6922 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
6923
6924
6925 env->imbalance = min(
6926 max_pull * busiest->group_capacity,
6927 (sds->avg_load - local->avg_load) * local->group_capacity
6928 ) / SCHED_CAPACITY_SCALE;
6929
6930
6931
6932
6933
6934
6935
6936 if (env->imbalance < busiest->load_per_task)
6937 return fix_small_imbalance(env, sds);
6938}
6939
6940
6941
6942
6943
6944
6945
6946
6947
6948
6949
6950
6951
6952
6953
6954
6955
6956
6957
6958
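/*
 * find_busiest_group - return the busiest group to pull tasks from if
 * the domain is imbalanced beyond its tolerance, or NULL (with
 * env->imbalance set to zero) when things look balanced.
 */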
6959static struct sched_group *find_busiest_group(struct lb_env *env)
6960{
6961 struct sg_lb_stats *local, *busiest;
6962 struct sd_lb_stats sds;
6963
6964 init_sd_lb_stats(&sds);
6965
6966
6967
6968
6969
6970 update_sd_lb_stats(env, &sds);
6971 local = &sds.local_stat;
6972 busiest = &sds.busiest_stat;
6973
6974
6975 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
6976 check_asym_packing(env, &sds))
6977 return sds.busiest;
6978
6979
6980 if (!sds.busiest || busiest->sum_nr_running == 0)
6981 goto out_balanced;
6982
6983 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
6984 / sds.total_capacity;
6985
6986
6987
6988
6989
6990
6991 if (busiest->group_type == group_imbalanced)
6992 goto force_balance;
6993
6994
6995 if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
6996 busiest->group_no_capacity)
6997 goto force_balance;
6998
6999
7000
7001
7002
7003 if (local->avg_load >= busiest->avg_load)
7004 goto out_balanced;
7005
7006
7007
7008
7009
7010 if (local->avg_load >= sds.avg_load)
7011 goto out_balanced;
7012
7013 if (env->idle == CPU_IDLE) {
7014
7015
7016
7017
7018
7019
7020
7021 if ((busiest->group_type != group_overloaded) &&
7022 (local->idle_cpus <= (busiest->idle_cpus + 1)))
7023 goto out_balanced;
7024 } else {
7025
7026
7027
7028
7029 if (100 * busiest->avg_load <=
7030 env->sd->imbalance_pct * local->avg_load)
7031 goto out_balanced;
7032 }
7033
7034force_balance:
7035
7036 calculate_imbalance(env, &sds);
7037 return sds.busiest;
7038
7039out_balanced:
7040 env->imbalance = 0;
7041 return NULL;
7042}
7043
7044
7045
7046
7047static struct rq *find_busiest_queue(struct lb_env *env,
7048 struct sched_group *group)
7049{
7050 struct rq *busiest = NULL, *rq;
7051 unsigned long busiest_load = 0, busiest_capacity = 1;
7052 int i;
7053
7054 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7055 unsigned long capacity, wl;
7056 enum fbq_type rt;
7057
7058 rq = cpu_rq(i);
7059 rt = fbq_classify_rq(rq);
7060
7061
7062
7063
7064
7065
7066
7067
7068
7069
7070
7071
7072
7073
7074
7075
7076
7077
7078
7079
7080 if (rt > env->fbq_type)
7081 continue;
7082
7083 capacity = capacity_of(i);
7084
7085 wl = weighted_cpuload(i);
7086
7087
7088
7089
7090
7091
7092 if (rq->nr_running == 1 && wl > env->imbalance &&
7093 !check_cpu_capacity(rq, env->sd))
7094 continue;
7095
7096
7097
7098
7099
7100
7101
7102
7103
7104
7105
7106
7107 if (wl * busiest_capacity > busiest_load * capacity) {
7108 busiest_load = wl;
7109 busiest_capacity = capacity;
7110 busiest = rq;
7111 }
7112 }
7113
7114 return busiest;
7115}
7116
7117
7118
7119
7120
7121#define MAX_PINNED_INTERVAL 512
7122
7123
7124DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
7125
7126static int need_active_balance(struct lb_env *env)
7127{
7128 struct sched_domain *sd = env->sd;
7129
7130 if (env->idle == CPU_NEWLY_IDLE) {
7131
7132
7133
7134
7135
7136
7137 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
7138 return 1;
7139 }
7140
7141
7142
7143
7144
7145
7146
7147 if ((env->idle != CPU_NOT_IDLE) &&
7148 (env->src_rq->cfs.h_nr_running == 1)) {
7149 if ((check_cpu_capacity(env->src_rq, sd)) &&
7150 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
7151 return 1;
7152 }
7153
7154 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
7155}
7156
7157static int active_load_balance_cpu_stop(void *data);
7158
7159static int should_we_balance(struct lb_env *env)
7160{
7161 struct sched_group *sg = env->sd->groups;
7162 struct cpumask *sg_cpus, *sg_mask;
7163 int cpu, balance_cpu = -1;
7164
7165
7166
7167
7168
7169 if (env->idle == CPU_NEWLY_IDLE)
7170 return 1;
7171
7172 sg_cpus = sched_group_cpus(sg);
7173 sg_mask = sched_group_mask(sg);
7174
7175 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
7176 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
7177 continue;
7178
7179 balance_cpu = cpu;
7180 break;
7181 }
7182
7183 if (balance_cpu == -1)
7184 balance_cpu = group_balance_cpu(sg);
7185
7186
7187
7188
7189
7190 return balance_cpu == env->dst_cpu;
7191}
7192
7193
7194
7195
7196
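/*
 * load_balance - check whether this_cpu's domain is balanced and, if
 * not, attempt to move tasks here from the busiest runqueue of the
 * busiest group.  Returns the number of tasks moved.
 */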
7197static int load_balance(int this_cpu, struct rq *this_rq,
7198 struct sched_domain *sd, enum cpu_idle_type idle,
7199 int *continue_balancing)
7200{
7201 int ld_moved, cur_ld_moved, active_balance = 0;
7202 struct sched_domain *sd_parent = sd->parent;
7203 struct sched_group *group;
7204 struct rq *busiest;
7205 unsigned long flags;
7206 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
7207
7208 struct lb_env env = {
7209 .sd = sd,
7210 .dst_cpu = this_cpu,
7211 .dst_rq = this_rq,
7212 .dst_grpmask = sched_group_cpus(sd->groups),
7213 .idle = idle,
7214 .loop_break = sched_nr_migrate_break,
7215 .cpus = cpus,
7216 .fbq_type = all,
7217 .tasks = LIST_HEAD_INIT(env.tasks),
7218 };
7219
7220
7221
7222
7223
7224 if (idle == CPU_NEWLY_IDLE)
7225 env.dst_grpmask = NULL;
7226
7227 cpumask_copy(cpus, cpu_active_mask);
7228
7229 schedstat_inc(sd, lb_count[idle]);
7230
7231redo:
7232 if (!should_we_balance(&env)) {
7233 *continue_balancing = 0;
7234 goto out_balanced;
7235 }
7236
7237 group = find_busiest_group(&env);
7238 if (!group) {
7239 schedstat_inc(sd, lb_nobusyg[idle]);
7240 goto out_balanced;
7241 }
7242
7243 busiest = find_busiest_queue(&env, group);
7244 if (!busiest) {
7245 schedstat_inc(sd, lb_nobusyq[idle]);
7246 goto out_balanced;
7247 }
7248
7249 BUG_ON(busiest == env.dst_rq);
7250
7251 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
7252
7253 env.src_cpu = busiest->cpu;
7254 env.src_rq = busiest;
7255
7256 ld_moved = 0;
7257 if (busiest->nr_running > 1) {
7258
7259
7260
7261
7262
7263
7264 env.flags |= LBF_ALL_PINNED;
7265 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
7266
7267more_balance:
7268 raw_spin_lock_irqsave(&busiest->lock, flags);
7269
7270
7271
7272
7273
7274 cur_ld_moved = detach_tasks(&env);
7275
7276
7277
7278
7279
7280
7281
7282
7283
7284 raw_spin_unlock(&busiest->lock);
7285
7286 if (cur_ld_moved) {
7287 attach_tasks(&env);
7288 ld_moved += cur_ld_moved;
7289 }
7290
7291 local_irq_restore(flags);
7292
7293 if (env.flags & LBF_NEED_BREAK) {
7294 env.flags &= ~LBF_NEED_BREAK;
7295 goto more_balance;
7296 }
7297
7298
7299
7300
7301
7302
7303
7304
7305
7306
7307
7308
7309
7310
7311
7312
7313
7314
7315
7316
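		/*
		 * Some tasks could not move to dst_cpu because of affinity,
		 * but could go to another CPU of the destination group
		 * (LBF_DST_PINNED): retry the balance against new_dst_cpu.
		 */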
7317 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
7318
7319
7320 cpumask_clear_cpu(env.dst_cpu, env.cpus);
7321
7322 env.dst_rq = cpu_rq(env.new_dst_cpu);
7323 env.dst_cpu = env.new_dst_cpu;
7324 env.flags &= ~LBF_DST_PINNED;
7325 env.loop = 0;
7326 env.loop_break = sched_nr_migrate_break;
7327
7328
7329
7330
7331
7332 goto more_balance;
7333 }
7334
7335
7336
7337
7338 if (sd_parent) {
7339 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7340
7341 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
7342 *group_imbalance = 1;
7343 }
7344
7345
7346 if (unlikely(env.flags & LBF_ALL_PINNED)) {
7347 cpumask_clear_cpu(cpu_of(busiest), cpus);
7348 if (!cpumask_empty(cpus)) {
7349 env.loop = 0;
7350 env.loop_break = sched_nr_migrate_break;
7351 goto redo;
7352 }
7353 goto out_all_pinned;
7354 }
7355 }
7356
7357 if (!ld_moved) {
7358 schedstat_inc(sd, lb_failed[idle]);
7359
7360
7361
7362
7363
7364
7365 if (idle != CPU_NEWLY_IDLE)
7366 sd->nr_balance_failed++;
7367
7368 if (need_active_balance(&env)) {
7369 raw_spin_lock_irqsave(&busiest->lock, flags);
7370
7371
7372
7373
7374
7375 if (!cpumask_test_cpu(this_cpu,
7376 tsk_cpus_allowed(busiest->curr))) {
7377 raw_spin_unlock_irqrestore(&busiest->lock,
7378 flags);
7379 env.flags |= LBF_ALL_PINNED;
7380 goto out_one_pinned;
7381 }
7382
7383
7384
7385
7386
7387
7388 if (!busiest->active_balance) {
7389 busiest->active_balance = 1;
7390 busiest->push_cpu = this_cpu;
7391 active_balance = 1;
7392 }
7393 raw_spin_unlock_irqrestore(&busiest->lock, flags);
7394
7395 if (active_balance) {
7396 stop_one_cpu_nowait(cpu_of(busiest),
7397 active_load_balance_cpu_stop, busiest,
7398 &busiest->active_balance_work);
7399 }
7400
7401
7402
7403
7404
7405 sd->nr_balance_failed = sd->cache_nice_tries+1;
7406 }
7407 } else
7408 sd->nr_balance_failed = 0;
7409
7410 if (likely(!active_balance)) {
7411
7412 sd->balance_interval = sd->min_interval;
7413 } else {
7414
7415
7416
7417
7418
7419
7420 if (sd->balance_interval < sd->max_interval)
7421 sd->balance_interval *= 2;
7422 }
7423
7424 goto out;
7425
7426out_balanced:
7427
7428
7429
7430
7431 if (sd_parent) {
7432 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7433
7434 if (*group_imbalance)
7435 *group_imbalance = 0;
7436 }
7437
7438out_all_pinned:
7439
7440
7441
7442
7443
7444 schedstat_inc(sd, lb_balanced[idle]);
7445
7446 sd->nr_balance_failed = 0;
7447
7448out_one_pinned:
7449
7450 if (((env.flags & LBF_ALL_PINNED) &&
7451 sd->balance_interval < MAX_PINNED_INTERVAL) ||
7452 (sd->balance_interval < sd->max_interval))
7453 sd->balance_interval *= 2;
7454
7455 ld_moved = 0;
7456out:
7457 return ld_moved;
7458}
7459
7460static inline unsigned long
7461get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
7462{
7463 unsigned long interval = sd->balance_interval;
7464
7465 if (cpu_busy)
7466 interval *= sd->busy_factor;
7467
7468
7469 interval = msecs_to_jiffies(interval);
7470 interval = clamp(interval, 1UL, max_load_balance_interval);
7471
7472 return interval;
7473}
7474
7475static inline void
7476update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
7477{
7478 unsigned long interval, next;
7479
7480 interval = get_sd_balance_interval(sd, cpu_busy);
7481 next = sd->last_balance + interval;
7482
7483 if (time_after(*next_balance, next))
7484 *next_balance = next;
7485}
7486
7487
7488
7489
7490
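/*
 * idle_balance - called from the pick path above when this runqueue is
 * about to go idle: try to pull tasks from other CPUs, bounded by
 * avg_idle versus the measured cost of newly-idle balancing in each
 * domain.
 */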
7491static int idle_balance(struct rq *this_rq)
7492{
7493 unsigned long next_balance = jiffies + HZ;
7494 int this_cpu = this_rq->cpu;
7495 struct sched_domain *sd;
7496 int pulled_task = 0;
7497 u64 curr_cost = 0;
7498
7499
7500
7501
7502
7503 this_rq->idle_stamp = rq_clock(this_rq);
7504
7505 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
7506 !this_rq->rd->overload) {
7507 rcu_read_lock();
7508 sd = rcu_dereference_check_sched_domain(this_rq->sd);
7509 if (sd)
7510 update_next_balance(sd, 0, &next_balance);
7511 rcu_read_unlock();
7512
7513 goto out;
7514 }
7515
7516 raw_spin_unlock(&this_rq->lock);
7517
7518 update_blocked_averages(this_cpu);
7519 rcu_read_lock();
7520 for_each_domain(this_cpu, sd) {
7521 int continue_balancing = 1;
7522 u64 t0, domain_cost;
7523
7524 if (!(sd->flags & SD_LOAD_BALANCE))
7525 continue;
7526
7527 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
7528 update_next_balance(sd, 0, &next_balance);
7529 break;
7530 }
7531
7532 if (sd->flags & SD_BALANCE_NEWIDLE) {
7533 t0 = sched_clock_cpu(this_cpu);
7534
7535 pulled_task = load_balance(this_cpu, this_rq,
7536 sd, CPU_NEWLY_IDLE,
7537 &continue_balancing);
7538
7539 domain_cost = sched_clock_cpu(this_cpu) - t0;
7540 if (domain_cost > sd->max_newidle_lb_cost)
7541 sd->max_newidle_lb_cost = domain_cost;
7542
7543 curr_cost += domain_cost;
7544 }
7545
7546 update_next_balance(sd, 0, &next_balance);
7547
7548
7549
7550
7551
7552 if (pulled_task || this_rq->nr_running > 0)
7553 break;
7554 }
7555 rcu_read_unlock();
7556
7557 raw_spin_lock(&this_rq->lock);
7558
7559 if (curr_cost > this_rq->max_idle_balance_cost)
7560 this_rq->max_idle_balance_cost = curr_cost;
7561
7562
7563
7564
7565
7566
7567 if (this_rq->cfs.h_nr_running && !pulled_task)
7568 pulled_task = 1;
7569
7570out:
7571
7572 if (time_after(this_rq->next_balance, next_balance))
7573 this_rq->next_balance = next_balance;
7574
7575
7576 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
7577 pulled_task = -1;
7578
7579 if (pulled_task)
7580 this_rq->idle_stamp = 0;
7581
7582 return pulled_task;
7583}
7584
7585
7586
7587
7588
7589
7590
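/*
 * active_load_balance_cpu_stop runs in stopper context on the busiest
 * CPU: it pushes one running task over to the target CPU (push_cpu)
 * chosen by load_balance() when regular migration keeps failing.
 */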
7591static int active_load_balance_cpu_stop(void *data)
7592{
7593 struct rq *busiest_rq = data;
7594 int busiest_cpu = cpu_of(busiest_rq);
7595 int target_cpu = busiest_rq->push_cpu;
7596 struct rq *target_rq = cpu_rq(target_cpu);
7597 struct sched_domain *sd;
7598 struct task_struct *p = NULL;
7599
7600 raw_spin_lock_irq(&busiest_rq->lock);
7601
7602
7603 if (unlikely(busiest_cpu != smp_processor_id() ||
7604 !busiest_rq->active_balance))
7605 goto out_unlock;
7606
7607
7608 if (busiest_rq->nr_running <= 1)
7609 goto out_unlock;
7610
7611
7612
7613
7614
7615
7616 BUG_ON(busiest_rq == target_rq);
7617
7618
7619 rcu_read_lock();
7620 for_each_domain(target_cpu, sd) {
7621 if ((sd->flags & SD_LOAD_BALANCE) &&
7622 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
7623 break;
7624 }
7625
7626 if (likely(sd)) {
7627 struct lb_env env = {
7628 .sd = sd,
7629 .dst_cpu = target_cpu,
7630 .dst_rq = target_rq,
7631 .src_cpu = busiest_rq->cpu,
7632 .src_rq = busiest_rq,
7633 .idle = CPU_IDLE,
7634 };
7635
7636 schedstat_inc(sd, alb_count);
7637
7638 p = detach_one_task(&env);
7639 if (p)
7640 schedstat_inc(sd, alb_pushed);
7641 else
7642 schedstat_inc(sd, alb_failed);
7643 }
7644 rcu_read_unlock();
7645out_unlock:
7646 busiest_rq->active_balance = 0;
7647 raw_spin_unlock(&busiest_rq->lock);
7648
7649 if (p)
7650 attach_one_task(target_rq, p);
7651
7652 local_irq_enable();
7653
7654 return 0;
7655}
7656
7657static inline int on_null_domain(struct rq *rq)
7658{
7659 return unlikely(!rcu_dereference_sched(rq->sd));
7660}
7661
7662#ifdef CONFIG_NO_HZ_COMMON
7663
7664
7665
7666
7667
7668
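/*
 * State for no-HZ idle load balancing: the set of idle CPUs whose tick
 * is stopped, their count, and when the next balance is due.  One of
 * them gets kicked to balance on behalf of the others.
 */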
7669static struct {
7670 cpumask_var_t idle_cpus_mask;
7671 atomic_t nr_cpus;
7672 unsigned long next_balance;
7673} nohz ____cacheline_aligned;
7674
7675static inline int find_new_ilb(void)
7676{
7677 int ilb = cpumask_first(nohz.idle_cpus_mask);
7678
7679 if (ilb < nr_cpu_ids && idle_cpu(ilb))
7680 return ilb;
7681
7682 return nr_cpu_ids;
7683}
7684
7685
7686
7687
7688
7689
7690static void nohz_balancer_kick(void)
7691{
7692 int ilb_cpu;
7693
7694 nohz.next_balance++;
7695
7696 ilb_cpu = find_new_ilb();
7697
7698 if (ilb_cpu >= nr_cpu_ids)
7699 return;
7700
7701 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
7702 return;
7703
7704
7705
7706
7707
7708
7709 smp_send_reschedule(ilb_cpu);
7710 return;
7711}
7712
7713static inline void nohz_balance_exit_idle(int cpu)
7714{
7715 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
7716
7717
7718
7719 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
7720 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
7721 atomic_dec(&nohz.nr_cpus);
7722 }
7723 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7724 }
7725}
7726
7727static inline void set_cpu_sd_state_busy(void)
7728{
7729 struct sched_domain *sd;
7730 int cpu = smp_processor_id();
7731
7732 rcu_read_lock();
7733 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7734
7735 if (!sd || !sd->nohz_idle)
7736 goto unlock;
7737 sd->nohz_idle = 0;
7738
7739 atomic_inc(&sd->groups->sgc->nr_busy_cpus);
7740unlock:
7741 rcu_read_unlock();
7742}
7743
7744void set_cpu_sd_state_idle(void)
7745{
7746 struct sched_domain *sd;
7747 int cpu = smp_processor_id();
7748
7749 rcu_read_lock();
7750 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7751
7752 if (!sd || sd->nohz_idle)
7753 goto unlock;
7754 sd->nohz_idle = 1;
7755
7756 atomic_dec(&sd->groups->sgc->nr_busy_cpus);
7757unlock:
7758 rcu_read_unlock();
7759}
7760
7761
7762
7763
7764
7765void nohz_balance_enter_idle(int cpu)
7766{
7767
7768
7769
7770 if (!cpu_active(cpu))
7771 return;
7772
7773 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
7774 return;
7775
7776
7777
7778
7779 if (on_null_domain(cpu_rq(cpu)))
7780 return;
7781
7782 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
7783 atomic_inc(&nohz.nr_cpus);
7784 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7785}
7786
7787static int sched_ilb_notifier(struct notifier_block *nfb,
7788 unsigned long action, void *hcpu)
7789{
7790 switch (action & ~CPU_TASKS_FROZEN) {
7791 case CPU_DYING:
7792 nohz_balance_exit_idle(smp_processor_id());
7793 return NOTIFY_OK;
7794 default:
7795 return NOTIFY_DONE;
7796 }
7797}
7798#endif
7799
7800static DEFINE_SPINLOCK(balancing);
7801
7802
7803
7804
7805
7806void update_max_interval(void)
7807{
7808 max_load_balance_interval = HZ*num_online_cpus()/10;
7809}
7810
7811
7812
7813
7814
7815
7816
7817static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
7818{
7819 int continue_balancing = 1;
7820 int cpu = rq->cpu;
7821 unsigned long interval;
7822 struct sched_domain *sd;
7823
7824 unsigned long next_balance = jiffies + 60*HZ;
7825 int update_next_balance = 0;
7826 int need_serialize, need_decay = 0;
7827 u64 max_cost = 0;
7828
7829 update_blocked_averages(cpu);
7830
7831 rcu_read_lock();
7832 for_each_domain(cpu, sd) {
7833
7834
7835
7836
7837 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
7838 sd->max_newidle_lb_cost =
7839 (sd->max_newidle_lb_cost * 253) / 256;
7840 sd->next_decay_max_lb_cost = jiffies + HZ;
7841 need_decay = 1;
7842 }
7843 max_cost += sd->max_newidle_lb_cost;
7844
7845 if (!(sd->flags & SD_LOAD_BALANCE))
7846 continue;
7847
7848
7849
7850
7851
7852
7853 if (!continue_balancing) {
7854 if (need_decay)
7855 continue;
7856 break;
7857 }
7858
7859 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
7860
7861 need_serialize = sd->flags & SD_SERIALIZE;
7862 if (need_serialize) {
7863 if (!spin_trylock(&balancing))
7864 goto out;
7865 }
7866
7867 if (time_after_eq(jiffies, sd->last_balance + interval)) {
7868 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
7869
7870
7871
7872
7873
7874 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
7875 }
7876 sd->last_balance = jiffies;
7877 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
7878 }
7879 if (need_serialize)
7880 spin_unlock(&balancing);
7881out:
7882 if (time_after(next_balance, sd->last_balance + interval)) {
7883 next_balance = sd->last_balance + interval;
7884 update_next_balance = 1;
7885 }
7886 }
7887 if (need_decay) {
7888
7889
7890
7891
7892 rq->max_idle_balance_cost =
7893 max((u64)sysctl_sched_migration_cost, max_cost);
7894 }
7895 rcu_read_unlock();
7896
7897
7898
7899
7900
7901
7902 if (likely(update_next_balance)) {
7903 rq->next_balance = next_balance;
7904
7905#ifdef CONFIG_NO_HZ_COMMON
7906
7907
7908
7909
7910
7911
7912
7913
7914 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
7915 nohz.next_balance = rq->next_balance;
7916#endif
7917 }
7918}
7919
7920#ifdef CONFIG_NO_HZ_COMMON
7921
7922
7923
7924
7925static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
7926{
7927 int this_cpu = this_rq->cpu;
7928 struct rq *rq;
7929 int balance_cpu;
7930
7931 unsigned long next_balance = jiffies + 60*HZ;
7932 int update_next_balance = 0;
7933
7934 if (idle != CPU_IDLE ||
7935 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
7936 goto end;
7937
7938 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
7939 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
7940 continue;
7941
7942
7943
7944
7945
7946
7947 if (need_resched())
7948 break;
7949
7950 rq = cpu_rq(balance_cpu);
7951
7952
7953
7954
7955
7956 if (time_after_eq(jiffies, rq->next_balance)) {
7957 raw_spin_lock_irq(&rq->lock);
7958 update_rq_clock(rq);
7959 update_cpu_load_idle(rq);
7960 raw_spin_unlock_irq(&rq->lock);
7961 rebalance_domains(rq, CPU_IDLE);
7962 }
7963
7964 if (time_after(next_balance, rq->next_balance)) {
7965 next_balance = rq->next_balance;
7966 update_next_balance = 1;
7967 }
7968 }
7969
7970
7971
7972
7973
7974
7975 if (likely(update_next_balance))
7976 nohz.next_balance = next_balance;
7977end:
7978 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
7979}
7980
7981
7982
7983
7984
7985
7986
7987
7988
7989
7990
7991
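/*
 * Heuristic for kicking the idle load balancer from a busy CPU: kick
 * when this rq has more than one runnable task, when this CPU's group
 * has several busy CPUs, when CFS work is squeezed by reduced capacity
 * (RT/IRQ pressure), or when SD_ASYM_PACKING wants load packed onto a
 * lower-numbered idle CPU.
 */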
7992static inline bool nohz_kick_needed(struct rq *rq)
7993{
7994 unsigned long now = jiffies;
7995 struct sched_domain *sd;
7996 struct sched_group_capacity *sgc;
7997 int nr_busy, cpu = rq->cpu;
7998 bool kick = false;
7999
8000 if (unlikely(rq->idle_balance))
8001 return false;
8002
8003
8004
8005
8006
8007 set_cpu_sd_state_busy();
8008 nohz_balance_exit_idle(cpu);
8009
8010
8011
8012
8013
8014 if (likely(!atomic_read(&nohz.nr_cpus)))
8015 return false;
8016
8017 if (time_before(now, nohz.next_balance))
8018 return false;
8019
8020 if (rq->nr_running >= 2)
8021 return true;
8022
8023 rcu_read_lock();
8024 sd = rcu_dereference(per_cpu(sd_busy, cpu));
8025 if (sd) {
8026 sgc = sd->groups->sgc;
8027 nr_busy = atomic_read(&sgc->nr_busy_cpus);
8028
8029 if (nr_busy > 1) {
8030 kick = true;
8031 goto unlock;
8032 }
8033
8034 }
8035
8036 sd = rcu_dereference(rq->sd);
8037 if (sd) {
8038 if ((rq->cfs.h_nr_running >= 1) &&
8039 check_cpu_capacity(rq, sd)) {
8040 kick = true;
8041 goto unlock;
8042 }
8043 }
8044
8045 sd = rcu_dereference(per_cpu(sd_asym, cpu));
8046 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
8047 sched_domain_span(sd)) < cpu)) {
8048 kick = true;
8049 goto unlock;
8050 }
8051
8052unlock:
8053 rcu_read_unlock();
8054 return kick;
8055}
8056#else
8057static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
8058#endif
8059
8060
8061
8062
8063
8064static void run_rebalance_domains(struct softirq_action *h)
8065{
8066 struct rq *this_rq = this_rq();
8067 enum cpu_idle_type idle = this_rq->idle_balance ?
8068 CPU_IDLE : CPU_NOT_IDLE;
8069
8070
8071
8072
8073
8074
8075
8076
8077
8078 nohz_idle_balance(this_rq, idle);
8079 rebalance_domains(this_rq, idle);
8080}
8081
8082
8083
8084
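/*
 * Called from the scheduler tick: raise SCHED_SOFTIRQ when periodic
 * balancing is due and, under NO_HZ, kick the idle load balancer when
 * needed.
 */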
8085void trigger_load_balance(struct rq *rq)
8086{
8087
8088 if (unlikely(on_null_domain(rq)))
8089 return;
8090
8091 if (time_after_eq(jiffies, rq->next_balance))
8092 raise_softirq(SCHED_SOFTIRQ);
8093#ifdef CONFIG_NO_HZ_COMMON
8094 if (nohz_kick_needed(rq))
8095 nohz_balancer_kick();
8096#endif
8097}
8098
8099static void rq_online_fair(struct rq *rq)
8100{
8101 update_sysctl();
8102
8103 update_runtime_enabled(rq);
8104}
8105
8106static void rq_offline_fair(struct rq *rq)
8107{
8108 update_sysctl();
8109
8110
8111 unthrottle_offline_cfs_rqs(rq);
8112}
8113
8114#endif
8115
8116
8117
8118
8119static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
8120{
8121 struct cfs_rq *cfs_rq;
8122 struct sched_entity *se = &curr->se;
8123
8124 for_each_sched_entity(se) {
8125 cfs_rq = cfs_rq_of(se);
8126 entity_tick(cfs_rq, se, queued);
8127 }
8128
8129 if (static_branch_unlikely(&sched_numa_balancing))
8130 task_tick_numa(rq, curr);
8131}
8132
8133
8134
8135
8136
8137
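/*
 * Called at fork time from the parent's context, before the child is on
 * the tasklist or runqueue: place the child's vruntime relative to the
 * parent's cfs_rq.
 */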
8138static void task_fork_fair(struct task_struct *p)
8139{
8140 struct cfs_rq *cfs_rq;
8141 struct sched_entity *se = &p->se, *curr;
8142 int this_cpu = smp_processor_id();
8143 struct rq *rq = this_rq();
8144 unsigned long flags;
8145
8146 raw_spin_lock_irqsave(&rq->lock, flags);
8147
8148 update_rq_clock(rq);
8149
8150 cfs_rq = task_cfs_rq(current);
8151 curr = cfs_rq->curr;
8152
8153
8154
8155
8156
8157
8158
8159 rcu_read_lock();
8160 __set_task_cpu(p, this_cpu);
8161 rcu_read_unlock();
8162
8163 update_curr(cfs_rq);
8164
8165 if (curr)
8166 se->vruntime = curr->vruntime;
8167 place_entity(cfs_rq, se, 1);
8168
8169 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
8170
8171
8172
8173
8174 swap(curr->vruntime, se->vruntime);
8175 resched_curr(rq);
8176 }
8177
8178 se->vruntime -= cfs_rq->min_vruntime;
8179
8180 raw_spin_unlock_irqrestore(&rq->lock, flags);
8181}
8182
8183
8184
8185
8186
8187static void
8188prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
8189{
8190 if (!task_on_rq_queued(p))
8191 return;
8192
8193
8194
8195
8196
8197
8198 if (rq->curr == p) {
8199 if (p->prio > oldprio)
8200 resched_curr(rq);
8201 } else
8202 check_preempt_curr(rq, p, 0);
8203}
8204
8205static inline bool vruntime_normalized(struct task_struct *p)
8206{
8207 struct sched_entity *se = &p->se;
8208
8209
8210
8211
8212
8213
8214 if (p->on_rq)
8215 return true;
8216
8217
8218
8219
8220
8221
8222
8223
8224
8225
8226 if (!se->sum_exec_runtime || p->state == TASK_WAKING)
8227 return true;
8228
8229 return false;
8230}
8231
8232static void detach_task_cfs_rq(struct task_struct *p)
8233{
8234 struct sched_entity *se = &p->se;
8235 struct cfs_rq *cfs_rq = cfs_rq_of(se);
8236
8237 if (!vruntime_normalized(p)) {
8238
8239
8240
8241
8242 place_entity(cfs_rq, se, 0);
8243 se->vruntime -= cfs_rq->min_vruntime;
8244 }
8245
8246
8247 detach_entity_load_avg(cfs_rq, se);
8248}
8249
8250static void attach_task_cfs_rq(struct task_struct *p)
8251{
8252 struct sched_entity *se = &p->se;
8253 struct cfs_rq *cfs_rq = cfs_rq_of(se);
8254
8255#ifdef CONFIG_FAIR_GROUP_SCHED
8256
8257
8258
8259
8260 se->depth = se->parent ? se->parent->depth + 1 : 0;
8261#endif
8262
8263
8264 attach_entity_load_avg(cfs_rq, se);
8265
8266 if (!vruntime_normalized(p))
8267 se->vruntime += cfs_rq->min_vruntime;
8268}
8269
8270static void switched_from_fair(struct rq *rq, struct task_struct *p)
8271{
8272 detach_task_cfs_rq(p);
8273}
8274
8275static void switched_to_fair(struct rq *rq, struct task_struct *p)
8276{
8277 attach_task_cfs_rq(p);
8278
8279 if (task_on_rq_queued(p)) {
8280
8281
8282
8283
8284
8285 if (rq->curr == p)
8286 resched_curr(rq);
8287 else
8288 check_preempt_curr(rq, p, 0);
8289 }
8290}
8291
8292
8293
8294
8295
8296
8297static void set_curr_task_fair(struct rq *rq)
8298{
8299 struct sched_entity *se = &rq->curr->se;
8300
8301 for_each_sched_entity(se) {
8302 struct cfs_rq *cfs_rq = cfs_rq_of(se);
8303
8304 set_next_entity(cfs_rq, se);
8305
8306 account_cfs_rq_runtime(cfs_rq, 0);
8307 }
8308}
8309
8310void init_cfs_rq(struct cfs_rq *cfs_rq)
8311{
8312 cfs_rq->tasks_timeline = RB_ROOT;
8313 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
8314#ifndef CONFIG_64BIT
8315 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
8316#endif
8317#ifdef CONFIG_SMP
8318 atomic_long_set(&cfs_rq->removed_load_avg, 0);
8319 atomic_long_set(&cfs_rq->removed_util_avg, 0);
8320#endif
8321}
8322
8323#ifdef CONFIG_FAIR_GROUP_SCHED
8324static void task_move_group_fair(struct task_struct *p)
8325{
8326 detach_task_cfs_rq(p);
8327 set_task_rq(p, task_cpu(p));
8328
8329#ifdef CONFIG_SMP
8330
8331 p->se.avg.last_update_time = 0;
8332#endif
8333 attach_task_cfs_rq(p);
8334}
8335
8336void free_fair_sched_group(struct task_group *tg)
8337{
8338 int i;
8339
8340 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
8341
8342 for_each_possible_cpu(i) {
8343 if (tg->cfs_rq)
8344 kfree(tg->cfs_rq[i]);
8345 if (tg->se)
8346 kfree(tg->se[i]);
8347 }
8348
8349 kfree(tg->cfs_rq);
8350 kfree(tg->se);
8351}
8352
8353int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8354{
8355 struct cfs_rq *cfs_rq;
8356 struct sched_entity *se;
8357 int i;
8358
8359 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8360 if (!tg->cfs_rq)
8361 goto err;
8362 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
8363 if (!tg->se)
8364 goto err;
8365
8366 tg->shares = NICE_0_LOAD;
8367
8368 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8369
8370 for_each_possible_cpu(i) {
8371 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8372 GFP_KERNEL, cpu_to_node(i));
8373 if (!cfs_rq)
8374 goto err;
8375
8376 se = kzalloc_node(sizeof(struct sched_entity),
8377 GFP_KERNEL, cpu_to_node(i));
8378 if (!se)
8379 goto err_free_rq;
8380
8381 init_cfs_rq(cfs_rq);
8382 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
8383 init_entity_runnable_average(se);
8384 }
8385
8386 return 1;
8387
8388err_free_rq:
8389 kfree(cfs_rq);
8390err:
8391 return 0;
8392}
8393
8394void unregister_fair_sched_group(struct task_group *tg)
8395{
8396 unsigned long flags;
8397 struct rq *rq;
8398 int cpu;
8399
8400 for_each_possible_cpu(cpu) {
8401 if (tg->se[cpu])
8402 remove_entity_load_avg(tg->se[cpu]);
8403
8404
8405
8406
8407
8408 if (!tg->cfs_rq[cpu]->on_list)
8409 continue;
8410
8411 rq = cpu_rq(cpu);
8412
8413 raw_spin_lock_irqsave(&rq->lock, flags);
8414 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
8415 raw_spin_unlock_irqrestore(&rq->lock, flags);
8416 }
8417}
8418
8419void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8420 struct sched_entity *se, int cpu,
8421 struct sched_entity *parent)
8422{
8423 struct rq *rq = cpu_rq(cpu);
8424
8425 cfs_rq->tg = tg;
8426 cfs_rq->rq = rq;
8427 init_cfs_rq_runtime(cfs_rq);
8428
8429 tg->cfs_rq[cpu] = cfs_rq;
8430 tg->se[cpu] = se;
8431
8432
8433 if (!se)
8434 return;
8435
8436 if (!parent) {
8437 se->cfs_rq = &rq->cfs;
8438 se->depth = 0;
8439 } else {
8440 se->cfs_rq = parent->my_q;
8441 se->depth = parent->depth + 1;
8442 }
8443
8444 se->my_q = cfs_rq;
8445
8446 update_load_set(&se->load, NICE_0_LOAD);
8447 se->parent = parent;
8448}
8449
8450static DEFINE_MUTEX(shares_mutex);
8451
8452int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8453{
8454 int i;
8455 unsigned long flags;
8456
8457
8458
8459
8460 if (!tg->se[0])
8461 return -EINVAL;
8462
8463 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
8464
8465 mutex_lock(&shares_mutex);
8466 if (tg->shares == shares)
8467 goto done;
8468
8469 tg->shares = shares;
8470 for_each_possible_cpu(i) {
8471 struct rq *rq = cpu_rq(i);
8472 struct sched_entity *se;
8473
8474 se = tg->se[i];
8475
8476 raw_spin_lock_irqsave(&rq->lock, flags);
8477
8478
8479 update_rq_clock(rq);
8480 for_each_sched_entity(se)
8481 update_cfs_shares(group_cfs_rq(se));
8482 raw_spin_unlock_irqrestore(&rq->lock, flags);
8483 }
8484
8485done:
8486 mutex_unlock(&shares_mutex);
8487 return 0;
8488}
8489#else
8490
8491void free_fair_sched_group(struct task_group *tg) { }
8492
8493int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8494{
8495 return 1;
8496}
8497
8498void unregister_fair_sched_group(struct task_group *tg) { }
8499
8500#endif
8501
8502
8503static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
8504{
8505 struct sched_entity *se = &task->se;
8506 unsigned int rr_interval = 0;
8507
8508
8509
8510
8511
8512 if (rq->cfs.load.weight)
8513 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
8514
8515 return rr_interval;
8516}
8517
8518
8519
8520
8521const struct sched_class fair_sched_class = {
8522 .next = &idle_sched_class,
8523 .enqueue_task = enqueue_task_fair,
8524 .dequeue_task = dequeue_task_fair,
8525 .yield_task = yield_task_fair,
8526 .yield_to_task = yield_to_task_fair,
8527
8528 .check_preempt_curr = check_preempt_wakeup,
8529
8530 .pick_next_task = pick_next_task_fair,
8531 .put_prev_task = put_prev_task_fair,
8532
8533#ifdef CONFIG_SMP
8534 .select_task_rq = select_task_rq_fair,
8535 .migrate_task_rq = migrate_task_rq_fair,
8536
8537 .rq_online = rq_online_fair,
8538 .rq_offline = rq_offline_fair,
8539
8540 .task_waking = task_waking_fair,
8541 .task_dead = task_dead_fair,
8542 .set_cpus_allowed = set_cpus_allowed_common,
8543#endif
8544
8545 .set_curr_task = set_curr_task_fair,
8546 .task_tick = task_tick_fair,
8547 .task_fork = task_fork_fair,
8548
8549 .prio_changed = prio_changed_fair,
8550 .switched_from = switched_from_fair,
8551 .switched_to = switched_to_fair,
8552
8553 .get_rr_interval = get_rr_interval_fair,
8554
8555 .update_curr = update_curr_fair,
8556
8557#ifdef CONFIG_FAIR_GROUP_SCHED
8558 .task_move_group = task_move_group_fair,
8559#endif
8560};
8561
8562#ifdef CONFIG_SCHED_DEBUG
8563void print_cfs_stats(struct seq_file *m, int cpu)
8564{
8565 struct cfs_rq *cfs_rq;
8566
8567 rcu_read_lock();
8568 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
8569 print_cfs_rq(m, cpu, cfs_rq);
8570 rcu_read_unlock();
8571}
8572
8573#ifdef CONFIG_NUMA_BALANCING
8574void show_numa_stats(struct task_struct *p, struct seq_file *m)
8575{
8576 int node;
8577 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
8578
8579 for_each_online_node(node) {
8580 if (p->numa_faults) {
8581 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
8582 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
8583 }
8584 if (p->numa_group) {
8585 			gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)];
8586 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
8587 }
8588 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
8589 }
8590}
8591#endif
8592#endif
8593
8594__init void init_sched_fair_class(void)
8595{
8596#ifdef CONFIG_SMP
8597 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
8598
8599#ifdef CONFIG_NO_HZ_COMMON
8600 nohz.next_balance = jiffies;
8601 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8602 cpu_notifier(sched_ilb_notifier, 0);
8603#endif
8604#endif
8605
8606}
8607