#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/u64_stats_sync.h>
#include <linux/sched/deadline.h>
#include <linux/binfmts.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
#else
#define SCHED_WARN_ON(x) ((void)(x))
#endif

struct rq;
struct cpuidle_state;

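/* task_struct::on_rq states: */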
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

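/* Helper to convert nanosecond timing to jiffy resolution: */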
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

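/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 tasks), which need more accuracy.
 *
 * scale_load() and scale_load_down(w) convert between the user-visible
 * weight (e.g. as seen in /proc) and the internal high-resolution scale.
 */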
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w) (w)
# define scale_load_down(w) (w)
#endif

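/*
 * NICE_0_LOAD is the load of a nice-0 task expressed at the internal,
 * scaled-up resolution; it is the reference weight all other nice levels
 * are calibrated against.
 */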
#define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT)

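/*
 * Single value that decides SCHED_DEADLINE internal math precision:
 * 10 -> just above 1us granularity, 9 -> just above 0.5us.
 */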
#define DL_SCALE (10)

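/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */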
#define RUNTIME_INF ((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

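/*
 * Tells if entity @a should preempt entity @b.
 */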
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;
	ktime_t rt_period;
	u64 rt_runtime;
	struct hrtimer rt_period_timer;
	unsigned int rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

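/*
 * To keep the bandwidth of -deadline tasks under control we need some
 * place to: store the maximum -deadline bandwidth of the system, and
 * cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below, which is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that for -deadline tasks only the static bandwidth (the runtime /
 * period ratio) matters, not the whole runtime.
 */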
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetics problems.
 * A weight of a cfs_rq is the sum of weights of which entities
 * are queued on this cfs_rq, so a weight of an entity should not be
 * NULL or very small, so that the entire delta does not overflow
 * the 64-bit signed value.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES (1UL << 1)
#define MAX_SHARES (1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

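/*
 * Iterate the full tree, calling @down when first entering a node and @up
 * when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */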
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			      struct sched_entity *se, int cpu,
			      struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
			     struct sched_rt_entity *rt_se, int cpu,
			     struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif
#endif

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

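/* CFS-related fields in a runqueue: */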
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity
	 * in a hierarchy). Non-leaf lrqs hold other higher schedulable
	 * entities (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu.
	 * This list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

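/* Real-Time classes' related field in a runqueue: */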
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
	unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

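/* Deadline class' related fields in a runqueue */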
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

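/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */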
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;

	unsigned long max_cpu_capacity;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

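/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending runqueue address.
 */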
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long last_load_update_tick;
#endif
	unsigned long nohz_flags;
#endif
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;

	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;

	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() this_cpu_ptr(&runqueues)
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP 0x01
#define RQCF_ACT_SKIP 0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

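/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */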
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
	     __sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

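/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */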
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max
	 * capacity for a single CPU.
	 */
	unsigned int capacity;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs are in the system.)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the
 * domain tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else /* !CONFIG_SMP */

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

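/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */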
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates
	 * of per-task data have been completed by this moment.
	 */
	smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
	p->cpu = cpu;
#else
	task_thread_info(p)->cpu = cpu;
#endif
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different
	 * CPU. We must ensure this doesn't happen until the switch is
	 * completely finished.
	 *
	 * In particular, the load of prev->state in finish_task_switch()
	 * must happen before this.
	 *
	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
	 */
	smp_store_release(&prev->on_cpu, 0);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */
#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int sched_prio_to_weight[40];
extern const u32 sched_prio_to_wmult[40];

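/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * DEQUEUE_SAVE   - matches ENQUEUE_RESTORE
 * DEQUEUE_MOVE   - matches ENQUEUE_MOVE
 *
 * ENQUEUE_WAKEUP  - task just became runnable
 * ENQUEUE_RESTORE - restore a 'preserved' dequeued task
 * ENQUEUE_MOVE    - paired with DEQUEUE_MOVE
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 */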
#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02
#define DEQUEUE_MOVE		0x04

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04

#define ENQUEUE_HEAD		0x08
#define ENQUEUE_REPLENISH	0x10
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x20
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK ((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has
	 * runnable tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev,
						struct pin_cookie cookie);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p);

	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

	void (*update_curr) (struct rq *rq);

#define TASK_SET_GROUP	0
#define TASK_MOVE_GROUP	1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group) (struct task_struct *p, int type);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
{
	curr->sched_class->set_curr_task(rq);
}

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);

unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct sched_entity *se);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);

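/*
 * Tick may be needed by tasks in the runqueue depending on their policy and
 * requirements. If tick is needed, lets send the target an IPI to kick it
 * out of nohz mode if necessary.
 */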
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu;

	if (!tick_nohz_full_enabled())
		return;

	cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif
	}

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}

static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
	rq->last_sched_tick = jiffies;
#endif
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK

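/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */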
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}
#endif

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
};

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	lockdep_unpin_lock(&rq->lock, rf->cookie);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	lockdep_unpin_lock(&rq->lock, rf->cookie);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					     SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					     SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

#else /* !CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif /* CONFIG_SMP */

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);

#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		 unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif
#endif

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(unsigned int cpu);
#else
static inline void nohz_balance_exit_idle(unsigned int cpu) { }
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64 hardirq_time;
	u64 softirq_time;
	u64 irq_start_time;
	struct u64_stats_sync sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->softirq_time + irqtime->hardirq_time;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
#endif

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

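/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated, to let cpufreq re-evaluate the CPU performance state
 * (frequency/voltage).
 *
 * It can only be called from RCU-sched read-side critical sections.
 */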
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
	if (data)
		data->func(data, rq_clock(rq), flags);
}

static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
{
	if (cpu_of(rq) == smp_processor_id())
		cpufreq_update_util(rq, flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
#endif

#ifdef arch_scale_freq_capacity
#ifndef arch_scale_freq_invariant
#define arch_scale_freq_invariant()	(true)
#endif
#else
#define arch_scale_freq_invariant()	(false)
#endif