/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
5#include <linux/sched.h>
6
7#include <linux/sched/autogroup.h>
8#include <linux/sched/clock.h>
9#include <linux/sched/coredump.h>
10#include <linux/sched/cpufreq.h>
11#include <linux/sched/cputime.h>
12#include <linux/sched/deadline.h>
13#include <linux/sched/debug.h>
14#include <linux/sched/hotplug.h>
15#include <linux/sched/idle.h>
16#include <linux/sched/init.h>
17#include <linux/sched/isolation.h>
18#include <linux/sched/jobctl.h>
19#include <linux/sched/loadavg.h>
20#include <linux/sched/mm.h>
21#include <linux/sched/nohz.h>
22#include <linux/sched/numa_balancing.h>
23#include <linux/sched/prio.h>
24#include <linux/sched/rt.h>
25#include <linux/sched/signal.h>
26#include <linux/sched/smt.h>
27#include <linux/sched/stat.h>
28#include <linux/sched/sysctl.h>
29#include <linux/sched/task.h>
30#include <linux/sched/task_stack.h>
31#include <linux/sched/topology.h>
32#include <linux/sched/user.h>
33#include <linux/sched/wake_q.h>
34#include <linux/sched/xacct.h>
35
36#include <uapi/linux/sched/types.h>
37
38#include <linux/binfmts.h>
39#include <linux/blkdev.h>
40#include <linux/compat.h>
41#include <linux/context_tracking.h>
42#include <linux/cpufreq.h>
43#include <linux/cpuidle.h>
44#include <linux/cpuset.h>
45#include <linux/ctype.h>
46#include <linux/debugfs.h>
47#include <linux/delayacct.h>
48#include <linux/energy_model.h>
49#include <linux/init_task.h>
50#include <linux/kprobes.h>
51#include <linux/kthread.h>
52#include <linux/membarrier.h>
53#include <linux/migrate.h>
54#include <linux/mmu_context.h>
55#include <linux/nmi.h>
56#include <linux/proc_fs.h>
57#include <linux/prefetch.h>
58#include <linux/profile.h>
59#include <linux/psi.h>
60#include <linux/rcupdate_wait.h>
61#include <linux/security.h>
62#include <linux/stackprotector.h>
63#include <linux/stop_machine.h>
64#include <linux/suspend.h>
65#include <linux/swait.h>
66#include <linux/syscalls.h>
67#include <linux/task_work.h>
68#include <linux/tsacct_kern.h>
69#include <linux/rh_kabi.h>
70
71#include <asm/tlb.h>
72
73#ifdef CONFIG_PARAVIRT
74# include <asm/paravirt.h>
75#endif
76
77#include "cpupri.h"
78#include "cpudeadline.h"
79
80#include <trace/events/sched.h>
81
82#ifdef CONFIG_SCHED_DEBUG
83# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
84#else
85# define SCHED_WARN_ON(x) ({ (void)(x), 0; })
86#endif
87
88struct rq;
89struct cpuidle_state;
90
/* task_struct::on_rq states: */
92#define TASK_ON_RQ_QUEUED 1
93#define TASK_ON_RQ_MIGRATING 2
94
95extern __read_mostly int scheduler_running;
96
97extern unsigned long calc_load_update;
98extern atomic_long_t calc_load_tasks;
99
100extern void calc_global_load_tick(struct rq *this_rq);
101extern long calc_load_fold_active(struct rq *this_rq, long adjust);
102
103extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
104
/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
107#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
108
/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper task group
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
123#ifdef CONFIG_64BIT
124# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
125# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
126# define scale_load_down(w) \
127({ \
128 unsigned long __w = (w); \
129 if (__w) \
130 __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
131 __w; \
132})
133#else
134# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
135# define scale_load(w) (w)
136# define scale_load_down(w) (w)
137#endif
138
/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *  scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
 *
 */
148#define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT)
149
/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
155#define DL_SCALE 10
156
/*
 * Single value that denotes runtime == period, ie unlimited time.
 */
160#define RUNTIME_INF ((u64)~0ULL)
161
162static inline int idle_policy(int policy)
163{
164 return policy == SCHED_IDLE;
165}
166static inline int fair_policy(int policy)
167{
168 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
169}
170
171static inline int rt_policy(int policy)
172{
173 return policy == SCHED_FIFO || policy == SCHED_RR;
174}
175
176static inline int dl_policy(int policy)
177{
178 return policy == SCHED_DEADLINE;
179}
180static inline bool valid_policy(int policy)
181{
182 return idle_policy(policy) || fair_policy(policy) ||
183 rt_policy(policy) || dl_policy(policy);
184}
185
186static inline int task_has_idle_policy(struct task_struct *p)
187{
188 return idle_policy(p->policy);
189}
190
191static inline int task_has_rt_policy(struct task_struct *p)
192{
193 return rt_policy(p->policy);
194}
195
196static inline int task_has_dl_policy(struct task_struct *p)
197{
198 return dl_policy(p->policy);
199}
200
201#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
202
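/*
 * Exponentially weighted moving average: fold the new sample in with a
 * 1/8 weight, i.e. avg += (sample - avg) / 8.
 */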
203static inline void update_avg(u64 *avg, u64 sample)
204{
205 s64 diff = sample - *avg;
206 *avg += diff / 8;
207}
208
/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching will be
 * available on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
221#define SCHED_FLAG_SUGOV 0x10000000
222
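/*
 * A "special" dl entity (SCHED_FLAG_SUGOV set) belongs to a schedutil
 * kworker and is exempt from the usual -deadline bandwidth rules; without
 * schedutil the flag can never be set.
 */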
223static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
224{
225#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
226 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
227#else
228 return false;
229#endif
230}
231
/*
 * Tells if entity @a should preempt entity @b.
 */
235static inline bool
236dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
237{
238 return dl_entity_is_special(a) ||
239 dl_time_before(a->deadline, b->deadline);
240}
241
/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
245struct rt_prio_array {
246 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1);
247 struct list_head queue[MAX_RT_PRIO];
248};
249
250struct rt_bandwidth {
251
252 raw_spinlock_t rt_runtime_lock;
253 ktime_t rt_period;
254 u64 rt_runtime;
255 struct hrtimer rt_period_timer;
256 unsigned int rt_period_active;
257};
258
259void __dl_clear_params(struct task_struct *p);
260
/*
 * To keep the bandwidth of -deadline tasks under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system;
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structures below. They are similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the task "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per root domain basis:
 *  - bw (< 100%) is the bandwidth of the system on each CPU;
 *  - total_bw is the currently allocated bandwidth in that root domain.
 */
285struct dl_bandwidth {
286 raw_spinlock_t dl_runtime_lock;
287 u64 dl_runtime;
288 u64 dl_period;
289};
290
291static inline int dl_bandwidth_enabled(void)
292{
293 return sysctl_sched_rt_runtime >= 0;
294}
295
296struct dl_bw {
297 raw_spinlock_t lock;
298 u64 bw;
299 u64 total_bw;
300};
301
302static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
303
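/*
 * __dl_add()/__dl_sub() track the total -deadline bandwidth allocated in a
 * dl_bw and spread the change evenly across @cpus via __dl_update() (defined
 * near the end of this file), which adjusts each runqueue's extra_bw.
 */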
304static inline
305void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
306{
307 dl_b->total_bw -= tsk_bw;
308 __dl_update(dl_b, (s32)tsk_bw / cpus);
309}
310
311static inline
312void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
313{
314 dl_b->total_bw += tsk_bw;
315 __dl_update(dl_b, -((s32)tsk_bw / cpus));
316}
317
318static inline
319bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
320{
321 return dl_b->bw != -1 &&
322 dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
323}
324
325extern void init_dl_bw(struct dl_bw *dl_b);
326extern int sched_dl_global_validate(void);
327extern void sched_dl_do_global(void);
328extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
329extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
330extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
331extern bool __checkparam_dl(const struct sched_attr *attr);
332extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
333extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
334extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
335extern bool dl_cpu_busy(unsigned int cpu);
336
337#ifdef CONFIG_CGROUP_SCHED
338
339#include <linux/cgroup.h>
340#include <linux/psi.h>
341
342struct cfs_rq;
343struct rt_rq;
344
345extern struct list_head task_groups;
346
347struct cfs_bandwidth {
348#ifdef CONFIG_CFS_BANDWIDTH
349 raw_spinlock_t lock;
350 ktime_t period;
351 u64 quota;
352 u64 runtime;
353 s64 hierarchical_quota;
354 RH_KABI_DEPRECATE(u64, runtime_expires)
355 RH_KABI_DEPRECATE(int, expires_seq)
356
357 RH_KABI_REPLACE_SPLIT(short idle,
358 u8 idle,
359 u8 period_active)
360 RH_KABI_REPLACE_SPLIT(short period_active,
361 u8 rh_reserved_xx_distribute_running,
362 u8 slack_started)
363
364 struct hrtimer period_timer;
365 struct hrtimer slack_timer;
366 struct list_head throttled_cfs_rq;
367
368
369 int nr_periods;
370 int nr_throttled;
371 u64 throttled_time;
372
373 RH_KABI_DEPRECATE(bool, distribute_running)
374#endif
375};
376
/* Task group related information */
378struct task_group {
379 struct cgroup_subsys_state css;
380
381#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;
387
388#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
394 atomic_long_t load_avg ____cacheline_aligned;
395#endif
396#endif
397
398#ifdef CONFIG_RT_GROUP_SCHED
399 struct sched_rt_entity **rt_se;
400 struct rt_rq **rt_rq;
401
402 struct rt_bandwidth rt_bandwidth;
403#endif
404
405 struct rcu_head rcu;
406 struct list_head list;
407
408 struct task_group *parent;
409 struct list_head siblings;
410 struct list_head children;
411
412#ifdef CONFIG_SCHED_AUTOGROUP
413 struct autogroup *autogroup;
414#endif
415
416 struct cfs_bandwidth cfs_bandwidth;
417
418 RH_KABI_RESERVE(1)
419 RH_KABI_RESERVE(2)
420};
421
422#ifdef CONFIG_FAIR_GROUP_SCHED
423#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
424
425
426
427
428
429
430
431
432
433#define MIN_SHARES (1UL << 1)
434#define MAX_SHARES (1UL << 18)
435#endif
436
437typedef int (*tg_visitor)(struct task_group *, void *);
438
439extern int walk_tg_tree_from(struct task_group *from,
440 tg_visitor down, tg_visitor up, void *data);
441
/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
448static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
449{
450 return walk_tg_tree_from(&root_task_group, down, up, data);
451}
452
453extern int tg_nop(struct task_group *tg, void *data);
454
455extern void free_fair_sched_group(struct task_group *tg);
456extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
457extern void online_fair_sched_group(struct task_group *tg);
458extern void unregister_fair_sched_group(struct task_group *tg);
459extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
460 struct sched_entity *se, int cpu,
461 struct sched_entity *parent);
462extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
463
464extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
465extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
466extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
467
468extern void free_rt_sched_group(struct task_group *tg);
469extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
470extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
471 struct sched_rt_entity *rt_se, int cpu,
472 struct sched_rt_entity *parent);
473extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
474extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
475extern long sched_group_rt_runtime(struct task_group *tg);
476extern long sched_group_rt_period(struct task_group *tg);
477extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
478
479extern struct task_group *sched_create_group(struct task_group *parent);
480extern void sched_online_group(struct task_group *tg,
481 struct task_group *parent);
482extern void sched_destroy_group(struct task_group *tg);
483extern void sched_offline_group(struct task_group *tg);
484
485extern void sched_move_task(struct task_struct *tsk);
486
487#ifdef CONFIG_FAIR_GROUP_SCHED
488extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
489
490#ifdef CONFIG_SMP
491extern void set_task_rq_fair(struct sched_entity *se,
492 struct cfs_rq *prev, struct cfs_rq *next);
493#else
494static inline void set_task_rq_fair(struct sched_entity *se,
495 struct cfs_rq *prev, struct cfs_rq *next) { }
496#endif
497#endif
498
499#else
500
501struct cfs_bandwidth { };
502
503#endif
504
/* CFS-related fields in a runqueue */
506struct cfs_rq {
507 struct load_weight load;
508 RH_KABI_DEPRECATE(unsigned long, runnable_weight)
509 unsigned int nr_running;
510 unsigned int h_nr_running;
511
512 u64 exec_clock;
513 u64 min_vruntime;
514#ifndef CONFIG_64BIT
515 u64 min_vruntime_copy;
516#endif
517
518 struct rb_root_cached tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e when none are currently running).
	 */
524 struct sched_entity *curr;
525 struct sched_entity *next;
526 struct sched_entity *last;
527 struct sched_entity *skip;
528
529#ifdef CONFIG_SCHED_DEBUG
530 unsigned int nr_spread_over;
531#endif
532 RH_KABI_FILL_HOLE(unsigned int idle_h_nr_running)
533#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
537 struct sched_avg avg;
538#ifndef CONFIG_64BIT
539 u64 load_last_update_time_copy;
540#endif
541 struct {
542 raw_spinlock_t lock ____cacheline_aligned;
543 int nr;
544 unsigned long load_avg;
545 unsigned long util_avg;
546 unsigned long RH_KABI_RENAME(runnable_sum, runnable_avg);
547 } removed;
548
549#ifdef CONFIG_FAIR_GROUP_SCHED
550 unsigned long tg_load_avg_contrib;
551 long propagate;
552 long prop_runnable_sum;
553
554
555
556
557
558
559
560 unsigned long h_load;
561 u64 last_h_load_update;
562 struct sched_entity *h_load_next;
563#endif
564#endif
565
566#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
578 struct list_head leaf_cfs_rq_list;
579 struct task_group *tg;
580
581#ifdef CONFIG_CFS_BANDWIDTH
582 int runtime_enabled;
583 RH_KABI_DEPRECATE(int, expires_seq)
584 RH_KABI_DEPRECATE(u64, runtime_expires)
585 s64 runtime_remaining;
586
587 u64 throttled_clock;
588 u64 throttled_clock_task;
589 u64 throttled_clock_task_time;
590 int throttled;
591 int throttle_count;
592 struct list_head throttled_list;
593#endif
594#endif
595
596 RH_KABI_RESERVE(1)
597 RH_KABI_RESERVE(2)
598};
599
600static inline int rt_bandwidth_enabled(void)
601{
602 return sysctl_sched_rt_runtime >= 0;
603}
604
/* RT IPI pull logic requires IRQ_WORK */
606#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
607# define HAVE_RT_PUSH_IPI
608#endif
609
/* Real-Time classes' related field in a runqueue: */
611struct rt_rq {
612 struct rt_prio_array active;
613 unsigned int rt_nr_running;
614 unsigned int rr_nr_running;
615#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
616 struct {
617 int curr;
618#ifdef CONFIG_SMP
619 int next;
620#endif
621 } highest_prio;
622#endif
623#ifdef CONFIG_SMP
624 unsigned long rt_nr_migratory;
625 unsigned long rt_nr_total;
626 int overloaded;
627 struct plist_head pushable_tasks;
628
629#endif
630 int rt_queued;
631
632 int rt_throttled;
633 u64 rt_time;
634 u64 rt_runtime;
635
636 raw_spinlock_t rt_runtime_lock;
637
638#ifdef CONFIG_RT_GROUP_SCHED
639 unsigned long rt_nr_boosted;
640
641 struct rq *rq;
642 struct task_group *tg;
643#endif
644};
645
646static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
647{
648 return rt_rq->rt_queued && rt_rq->rt_nr_running;
649}
650
/* Deadline class' related fields in a runqueue */
652struct dl_rq {
653
654 struct rb_root_cached root;
655
656 unsigned long dl_nr_running;
657
658#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
665 struct {
666 u64 curr;
667 u64 next;
668 } earliest_dl;
669
670 unsigned long dl_nr_migratory;
671 int overloaded;
672
673
674
675
676
677
678 struct rb_root_cached pushable_dl_tasks_root;
679#else
680 struct dl_bw dl_bw;
681#endif

	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks
	 */
	u64			running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64			this_bw;
	u64			extra_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64			bw_ratio;
706};
707
708#ifdef CONFIG_FAIR_GROUP_SCHED
/* An entity is a task if it doesn't "own" a runqueue */
710#define entity_is_task(se) (!se->my_q)
711
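/*
 * For a task entity, "runnable" is simply whether it is on a runqueue.
 * For a group entity, runnable_weight caches the number of runnable
 * tasks in the group's cfs_rq (h_nr_running).
 */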
712static inline void se_update_runnable(struct sched_entity *se)
713{
714 if (!entity_is_task(se))
715 se->runnable_weight = se->my_q->h_nr_running;
716}
717
718static inline long se_runnable(struct sched_entity *se)
719{
720 if (entity_is_task(se))
721 return !!se->on_rq;
722 else
723 return se->runnable_weight;
724}
725
726#else
727#define entity_is_task(se) 1
728
729static inline void se_update_runnable(struct sched_entity *se) {}
730
731static inline long se_runnable(struct sched_entity *se)
732{
733 return !!se->on_rq;
734}
735#endif
736
737#ifdef CONFIG_SMP
738
739
740
741static inline long se_weight(struct sched_entity *se)
742{
743 return scale_load_down(se->load.weight);
744}
745
746
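/*
 * Returns true if CPU @a has a higher arch_asym_cpu_priority() than CPU @b,
 * i.e. @a should be preferred by the ASYM_PACKING logic.
 */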
747static inline bool sched_asym_prefer(int a, int b)
748{
749 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
750}
751
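/*
 * Wrapper around an Energy Model performance domain; the per-root-domain
 * list of these (rd->pd) is what Energy Aware Scheduling walks.
 */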
752struct perf_domain {
753 struct em_perf_domain *em_pd;
754 struct perf_domain *next;
755 struct rcu_head rcu;
756};
757
758
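/* Scheduling group status flags */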
759#define SG_OVERLOAD 0x1
760#define SG_OVERUTILIZED 0x2
761
/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
770struct root_domain {
771 atomic_t refcount;
772 atomic_t rto_count;
773 struct rcu_head rcu;
774 cpumask_var_t span;
775 cpumask_var_t online;
776
777
778
779
780
781
782 RH_KABI_BROKEN_REPLACE(bool overload, int overload)
783
784
785
786
787
788 cpumask_var_t dlo_mask;
789 atomic_t dlo_count;
790 struct dl_bw dl_bw;
791 struct cpudl cpudl;
792
793#ifdef HAVE_RT_PUSH_IPI
794
795
796
797 struct irq_work rto_push_work;
798 raw_spinlock_t rto_lock;
799
800 int rto_loop;
801 int rto_cpu;
802
803 atomic_t rto_loop_next;
804 atomic_t rto_loop_start;
805#endif
806
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
810 cpumask_var_t rto_mask;
811 struct cpupri cpupri;
812
813 unsigned long max_cpu_capacity;
814
815
816
817
818
819 RH_KABI_USE(1, struct perf_domain *pd)
820
821
822 RH_KABI_USE(2, int overutilized)
823
824 RH_KABI_RESERVE(3)
825 RH_KABI_RESERVE(4)
826};
827
828extern void init_defrootdomain(void);
829extern int sched_init_domains(const struct cpumask *cpu_map);
830extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
831extern void sched_get_rd(struct root_domain *rd);
832extern void sched_put_rd(struct root_domain *rd);
833
834#ifdef HAVE_RT_PUSH_IPI
835extern void rto_push_irq_work_func(struct irq_work *work);
836#endif
837#endif
838
/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
846struct rq {
	/* runqueue lock: */
848 raw_spinlock_t lock;
849
850
851
852
853
854 unsigned int nr_running;
855#ifdef CONFIG_NUMA_BALANCING
856 unsigned int nr_numa_running;
857 unsigned int nr_preferred_running;
858#endif
859 RH_KABI_DEPRECATE(unsigned long, cpu_load[5])
860#ifdef CONFIG_NO_HZ_COMMON
861#ifdef CONFIG_SMP
862 RH_KABI_DEPRECATE(unsigned long, last_load_update_tick)
863 unsigned long last_blocked_load_update_tick;
864 unsigned int has_blocked_load;
865#endif
866 unsigned int nohz_tick_stopped;
867 atomic_t nohz_flags;
868#endif
869
870 RH_KABI_DEPRECATE(struct load_weight, load)
871 unsigned long nr_load_updates;
872 u64 nr_switches;
873
874 struct cfs_rq cfs;
875 struct rt_rq rt;
876 struct dl_rq dl;
877
878#ifdef CONFIG_FAIR_GROUP_SCHED
879
880 struct list_head leaf_cfs_rq_list;
881 struct list_head *tmp_alone_branch;
882#endif
883
	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
890 unsigned long nr_uninterruptible;
891
892 struct task_struct __rcu *curr;
893 struct task_struct *idle;
894 struct task_struct *stop;
895 unsigned long next_balance;
896 struct mm_struct *prev_mm;
897
898 unsigned int clock_update_flags;
899 u64 clock;
900 RH_KABI_DEPRECATE(u64, clock_task)
901
902 atomic_t nr_iowait;
903
904#ifdef CONFIG_MEMBARRIER
905 RH_KABI_FILL_HOLE(int membarrier_state)
906#endif
907
908#ifdef CONFIG_SMP
909 struct root_domain *rd;
910 struct sched_domain *sd;
911
912 unsigned long cpu_capacity;
913 unsigned long cpu_capacity_orig;
914
915 struct callback_head *balance_callback;
916
917 unsigned char idle_balance;

	/* For active balancing */
920 int active_balance;
921 int push_cpu;
922 struct cpu_stop_work active_balance_work;

	/* CPU of this runqueue: */
925 int cpu;
926 int online;
927
928 struct list_head cfs_tasks;
929
930 RH_KABI_DEPRECATE(u64, rt_avg)
931 RH_KABI_DEPRECATE(u64, age_stamp)
932 u64 idle_stamp;
933 u64 avg_idle;
934
935
936 u64 max_idle_balance_cost;
937#endif
938
939#ifdef CONFIG_IRQ_TIME_ACCOUNTING
940 u64 prev_irq_time;
941#endif
942#ifdef CONFIG_PARAVIRT
943 u64 prev_steal_time;
944#endif
945#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
946 u64 prev_steal_time_rq;
947#endif

	/* calc_load related fields */
950 unsigned long calc_load_update;
951 long calc_load_active;
952
953#ifdef CONFIG_SCHED_HRTICK
954#ifdef CONFIG_SMP
955 int hrtick_csd_pending;
956 call_single_data_t hrtick_csd;
957#endif
958 struct hrtimer hrtick_timer;
959#endif
960
#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info	rq_sched_info;
	unsigned long long	rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif
978
979#ifdef CONFIG_SMP
980 struct llist_head wake_list;
981#endif
982
983#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
989 struct cpuidle_state *idle_state;
990#endif
991
992#if defined(CONFIG_SCHED_HRTICK) && defined(CONFIG_SMP)
993 RH_KABI_USE(1, ktime_t hrtick_time)
994#else
995 RH_KABI_RESERVE(1)
996#endif
997 RH_KABI_RESERVE(2)
998#ifdef CONFIG_NUMA_BALANCING
999 RH_KABI_EXTEND(unsigned int numa_migrate_on)
1000#endif
1001 RH_KABI_EXTEND(struct sched_avg avg_rt)
1002 RH_KABI_EXTEND(struct sched_avg avg_dl)
1003#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
1004 RH_KABI_EXTEND(struct sched_avg avg_irq)
1005#endif
1006 RH_KABI_EXTEND(unsigned long misfit_task_load)
1007
1008
1009 RH_KABI_EXTEND(u64 clock_task ____cacheline_aligned)
1010 RH_KABI_EXTEND(u64 clock_pelt)
1011 RH_KABI_EXTEND(unsigned long lost_idle_time)
1012};
1013
1014#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
1017static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1018{
1019 return cfs_rq->rq;
1020}
1021
1022#else
1023
1024static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1025{
1026 return container_of(cfs_rq, struct rq, cfs);
1027}
1028#endif
1029
1030static inline int cpu_of(struct rq *rq)
1031{
1032#ifdef CONFIG_SMP
1033 return rq->cpu;
1034#else
1035 return 0;
1036#endif
1037}
1038
1039
1040#ifdef CONFIG_SCHED_SMT
1041extern void __update_idle_core(struct rq *rq);
1042
1043static inline void update_idle_core(struct rq *rq)
1044{
1045 if (static_branch_unlikely(&sched_smt_present))
1046 __update_idle_core(rq);
1047}
1048
1049#else
1050static inline void update_idle_core(struct rq *rq) { }
1051#endif
1052
1053DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1054
1055#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
1056#define this_rq() this_cpu_ptr(&runqueues)
1057#define task_rq(p) cpu_rq(task_cpu(p))
1058#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1059#define raw_rq() raw_cpu_ptr(&runqueues)
1060
1061extern void update_rq_clock(struct rq *rq);
1062
1063static inline u64 __rq_clock_broken(struct rq *rq)
1064{
1065 return READ_ONCE(rq->clock);
1066}
1067
/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *  call to __schedule(). This is an optimisation to avoid
 *  neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *  in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *  made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set.
 */
1091#define RQCF_REQ_SKIP 0x01
1092#define RQCF_ACT_SKIP 0x02
1093#define RQCF_UPDATED 0x04
1094
1095static inline void assert_clock_updated(struct rq *rq)
1096{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
1101 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
1102}
1103
1104static inline u64 rq_clock(struct rq *rq)
1105{
1106 lockdep_assert_held(&rq->lock);
1107 assert_clock_updated(rq);
1108
1109 return rq->clock;
1110}
1111
1112static inline u64 rq_clock_task(struct rq *rq)
1113{
1114 lockdep_assert_held(&rq->lock);
1115 assert_clock_updated(rq);
1116
1117 return rq->clock_task;
1118}
1119
1120static inline void rq_clock_skip_update(struct rq *rq)
1121{
1122 lockdep_assert_held(&rq->lock);
1123 rq->clock_update_flags |= RQCF_REQ_SKIP;
1124}
1125

/*
 * See rt task throttling, which is the only time a skip
 * request is canceled.
 */
1130static inline void rq_clock_cancel_skipupdate(struct rq *rq)
1131{
1132 lockdep_assert_held(&rq->lock);
1133 rq->clock_update_flags &= ~RQCF_REQ_SKIP;
1134}
1135
1136struct rq_flags {
1137 unsigned long flags;
1138 struct pin_cookie cookie;
1139#ifdef CONFIG_SCHED_DEBUG
1140
1141
1142
1143
1144
1145 unsigned int clock_update_flags;
1146#endif
1147};
1148
/*
 * Lockdep annotation that avoids accidental unlocks; it's like a
 * sticky/continuous lockdep_assert_held().
 *
 * This avoids code that has access to 'struct rq *rq' (basically everything in
 * the scheduler) from accidentally unlocking the rq if they do not also have a
 * copy of the (on-stack) 'struct rq_flags rf'.
 */
1159static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1160{
1161 rf->cookie = lockdep_pin_lock(&rq->lock);
1162
1163#ifdef CONFIG_SCHED_DEBUG
1164 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1165 rf->clock_update_flags = 0;
1166#endif
1167}
1168
1169static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1170{
1171#ifdef CONFIG_SCHED_DEBUG
1172 if (rq->clock_update_flags > RQCF_ACT_SKIP)
1173 rf->clock_update_flags = RQCF_UPDATED;
1174#endif
1175
1176 lockdep_unpin_lock(&rq->lock, rf->cookie);
1177}
1178
1179static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1180{
1181 lockdep_repin_lock(&rq->lock, rf->cookie);
1182
1183#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
1187 rq->clock_update_flags |= rf->clock_update_flags;
1188#endif
1189}
1190
1191struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1192 __acquires(rq->lock);
1193
1194struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1195 __acquires(p->pi_lock)
1196 __acquires(rq->lock);
1197
1198static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1199 __releases(rq->lock)
1200{
1201 rq_unpin_lock(rq, rf);
1202 raw_spin_unlock(&rq->lock);
1203}
1204
1205static inline void
1206task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1207 __releases(rq->lock)
1208 __releases(p->pi_lock)
1209{
1210 rq_unpin_lock(rq, rf);
1211 raw_spin_unlock(&rq->lock);
1212 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1213}
1214
1215static inline void
1216rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1217 __acquires(rq->lock)
1218{
1219 raw_spin_lock_irqsave(&rq->lock, rf->flags);
1220 rq_pin_lock(rq, rf);
1221}
1222
1223static inline void
1224rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1225 __acquires(rq->lock)
1226{
1227 raw_spin_lock_irq(&rq->lock);
1228 rq_pin_lock(rq, rf);
1229}
1230
1231static inline void
1232rq_lock(struct rq *rq, struct rq_flags *rf)
1233 __acquires(rq->lock)
1234{
1235 raw_spin_lock(&rq->lock);
1236 rq_pin_lock(rq, rf);
1237}
1238
1239static inline void
1240rq_relock(struct rq *rq, struct rq_flags *rf)
1241 __acquires(rq->lock)
1242{
1243 raw_spin_lock(&rq->lock);
1244 rq_repin_lock(rq, rf);
1245}
1246
1247static inline void
1248rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1249 __releases(rq->lock)
1250{
1251 rq_unpin_lock(rq, rf);
1252 raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1253}
1254
1255static inline void
1256rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1257 __releases(rq->lock)
1258{
1259 rq_unpin_lock(rq, rf);
1260 raw_spin_unlock_irq(&rq->lock);
1261}
1262
1263static inline void
1264rq_unlock(struct rq *rq, struct rq_flags *rf)
1265 __releases(rq->lock)
1266{
1267 rq_unpin_lock(rq, rf);
1268 raw_spin_unlock(&rq->lock);
1269}
1270
1271static inline struct rq *
1272this_rq_lock_irq(struct rq_flags *rf)
1273 __acquires(rq->lock)
1274{
1275 struct rq *rq;
1276
1277 local_irq_disable();
1278 rq = this_rq();
1279 rq_lock(rq, rf);
1280 return rq;
1281}
1282
1283#ifdef CONFIG_NUMA
1284enum numa_topology_type {
1285 NUMA_DIRECT,
1286 NUMA_GLUELESS_MESH,
1287 NUMA_BACKPLANE,
1288};
1289extern enum numa_topology_type sched_numa_topology_type;
1290extern int sched_max_numa_distance;
1291extern bool find_numa_distance(int distance);
1292#endif
1293
1294#ifdef CONFIG_NUMA
1295extern void sched_init_numa(void);
1296extern void sched_domains_numa_masks_set(unsigned int cpu);
1297extern void sched_domains_numa_masks_clear(unsigned int cpu);
1298#else
1299static inline void sched_init_numa(void) { }
1300static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
1301static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1302#endif
1303
1304#ifdef CONFIG_NUMA_BALANCING
1305
1306enum numa_faults_stats {
1307 NUMA_MEM = 0,
1308 NUMA_CPU,
1309 NUMA_MEMBUF,
1310 NUMA_CPUBUF
1311};
1312extern void sched_setnuma(struct task_struct *p, int node);
1313extern int migrate_task_to(struct task_struct *p, int cpu);
1314extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1315 int cpu, int scpu);
1316extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
1317#else
1318static inline void
1319init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
1320{
1321}
1322#endif
1323
1324#ifdef CONFIG_SMP
1325
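/*
 * Queue a balance callback on @rq, to be run once rq->lock is released;
 * a head that is already queued is silently ignored.
 */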
1326static inline void
1327queue_balance_callback(struct rq *rq,
1328 struct callback_head *head,
1329 void (*func)(struct rq *rq))
1330{
1331 lockdep_assert_held(&rq->lock);
1332
1333 if (unlikely(head->next))
1334 return;
1335
1336 head->func = (void (*)(struct callback_head *))func;
1337 head->next = rq->balance_callback;
1338 rq->balance_callback = head;
1339}
1340
1341extern void sched_ttwu_pending(void);
1342
1343#define rcu_dereference_check_sched_domain(p) \
1344 rcu_dereference_check((p), \
1345 lockdep_is_held(&sched_domains_mutex))
1346
/*
 * The domain tree (rq->sd) is protected by RCU.
 *
 * See destroy_sched_domains: call_rcu, sched_domain_free.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
1354#define for_each_domain(cpu, __sd) \
1355 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1356 __sd; __sd = __sd->parent)
1357
1358#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
1370{
1371 struct sched_domain *sd, *hsd = NULL;
1372
1373 for_each_domain(cpu, sd) {
1374 if (!(sd->flags & flag))
1375 break;
1376 hsd = sd;
1377 }
1378
1379 return hsd;
1380}
1381
1382static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
1383{
1384 struct sched_domain *sd;
1385
1386 for_each_domain(cpu, sd) {
1387 if (sd->flags & flag)
1388 break;
1389 }
1390
1391 return sd;
1392}
1393
1394DECLARE_PER_CPU(struct sched_domain *, sd_llc);
1395DECLARE_PER_CPU(int, sd_llc_size);
1396DECLARE_PER_CPU(int, sd_llc_id);
1397DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
1398DECLARE_PER_CPU(struct sched_domain *, sd_numa);
1399DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing);
1400DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
1401extern struct static_key_false sched_asym_cpucapacity;
1402
1403struct sched_group_capacity {
1404 atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
1409 unsigned long capacity;
1410 unsigned long min_capacity;
1411 RH_KABI_BROKEN_INSERT(unsigned long max_capacity)
1412 unsigned long next_update;
1413 int imbalance;
1414
1415#ifdef CONFIG_SCHED_DEBUG
1416 int id;
1417#endif
1418
1419 unsigned long cpumask[0];
1420};
1421
1422struct sched_group {
1423 struct sched_group *next;
1424 atomic_t ref;
1425
1426 unsigned int group_weight;
1427 struct sched_group_capacity *sgc;
1428 int asym_prefer_cpu;
1429
1430 RH_KABI_RESERVE(1)
1431 RH_KABI_RESERVE(2)
1432
1433
1434
1435
1436
1437
1438
1439
1440 unsigned long cpumask[0];
1441};
1442
1443static inline struct cpumask *sched_group_span(struct sched_group *sg)
1444{
1445 return to_cpumask(sg->cpumask);
1446}
1447
1448
1449
1450
1451static inline struct cpumask *group_balance_mask(struct sched_group *sg)
1452{
1453 return to_cpumask(sg->sgc->cpumask);
1454}
1455
1456
1457
1458
1459
1460static inline unsigned int group_first_cpu(struct sched_group *group)
1461{
1462 return cpumask_first(sched_group_span(group));
1463}
1464
1465extern int group_balance_cpu(struct sched_group *sg);
1466
1467#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
1468void register_sched_domain_sysctl(void);
1469void dirty_sched_domain_sysctl(int cpu);
1470void unregister_sched_domain_sysctl(void);
1471#else
1472static inline void register_sched_domain_sysctl(void)
1473{
1474}
1475static inline void dirty_sched_domain_sysctl(int cpu)
1476{
1477}
1478static inline void unregister_sched_domain_sysctl(void)
1479{
1480}
1481#endif
1482
1483#else
1484
1485static inline void sched_ttwu_pending(void) { }
1486
1487#endif
1488
1489#include "stats.h"
1490#include "autogroup.h"
1491
1492#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this tasks belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
1507static inline struct task_group *task_group(struct task_struct *p)
1508{
1509 return p->sched_task_group;
1510}
1511
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
1513static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1514{
1515#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1516 struct task_group *tg = task_group(p);
1517#endif
1518
1519#ifdef CONFIG_FAIR_GROUP_SCHED
1520 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1521 p->se.cfs_rq = tg->cfs_rq[cpu];
1522 p->se.parent = tg->se[cpu];
1523#endif
1524
1525#ifdef CONFIG_RT_GROUP_SCHED
1526 p->rt.rt_rq = tg->rt_rq[cpu];
1527 p->rt.parent = tg->rt_se[cpu];
1528#endif
1529}
1530
1531#else
1532
1533static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1534static inline struct task_group *task_group(struct task_struct *p)
1535{
1536 return NULL;
1537}
1538
1539#endif
1540
1541static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1542{
1543 set_task_rq(p, cpu);
1544#ifdef CONFIG_SMP
1545
1546
1547
1548
1549
1550 smp_wmb();
1551#ifdef CONFIG_THREAD_INFO_IN_TASK
1552 WRITE_ONCE(p->cpu, cpu);
1553#else
1554 WRITE_ONCE(task_thread_info(p)->cpu, cpu);
1555#endif
1556 p->wake_cpu = cpu;
1557#endif
1558}
1559
/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
1563#ifdef CONFIG_SCHED_DEBUG
1564# include <linux/static_key.h>
1565# define const_debug __read_mostly
1566#else
1567# define const_debug const
1568#endif
1569
1570#define SCHED_FEAT(name, enabled) \
1571 __SCHED_FEAT_##name ,
1572
1573enum {
1574#include "features.h"
1575 __SCHED_FEAT_NR,
1576};
1577
1578#undef SCHED_FEAT
1579
1580#ifdef CONFIG_SCHED_DEBUG
1581
1582
1583
1584
1585
1586extern const_debug unsigned int sysctl_sched_features;
1587
1588#ifdef HAVE_JUMP_LABEL
1589#define SCHED_FEAT(name, enabled) \
1590static __always_inline bool static_branch_##name(struct static_key *key) \
1591{ \
1592 return static_key_##enabled(key); \
1593}
1594
1595#include "features.h"
1596#undef SCHED_FEAT
1597
1598extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1599#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1600
1601#else
1602
1603#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1604
1605#endif
1606
1607#else
1608
1609
1610
1611
1612
1613
1614#define SCHED_FEAT(name, enabled) \
1615 (1UL << __SCHED_FEAT_##name) * enabled |
1616static const_debug __maybe_unused unsigned int sysctl_sched_features =
1617#include "features.h"
1618 0;
1619#undef SCHED_FEAT
1620
1621#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1622
1623#endif
1624
1625extern struct static_key_false sched_numa_balancing;
1626extern struct static_key_false sched_schedstats;
1627
1628static inline u64 global_rt_period(void)
1629{
1630 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1631}
1632
1633static inline u64 global_rt_runtime(void)
1634{
1635 if (sysctl_sched_rt_runtime < 0)
1636 return RUNTIME_INF;
1637
1638 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1639}
1640
1641static inline int task_current(struct rq *rq, struct task_struct *p)
1642{
1643 return rq->curr == p;
1644}
1645
1646static inline int task_running(struct rq *rq, struct task_struct *p)
1647{
1648#ifdef CONFIG_SMP
1649 return p->on_cpu;
1650#else
1651 return task_current(rq, p);
1652#endif
1653}
1654
1655static inline int task_on_rq_queued(struct task_struct *p)
1656{
1657 return p->on_rq == TASK_ON_RQ_QUEUED;
1658}
1659
1660static inline int task_on_rq_migrating(struct task_struct *p)
1661{
1662 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
1663}
1664
/*
 * wake flags
 */
#define WF_SYNC			0x01		/* Waker goes to sleep after wakeup */
#define WF_FORK			0x02		/* Child wakeup after fork */
#define WF_MIGRATED		0x04		/* Internal use, task got migrated */
#define WF_ON_CPU		0x08		/* Wakee is on_cpu */
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682#define WEIGHT_IDLEPRIO 3
1683#define WMULT_IDLEPRIO 1431655765
1684
1685extern const int sched_prio_to_weight[40];
1686extern const u32 sched_prio_to_wmult[40];
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707#define DEQUEUE_SLEEP 0x01
1708#define DEQUEUE_SAVE 0x02
1709#define DEQUEUE_MOVE 0x04
1710#define DEQUEUE_NOCLOCK 0x08
1711
1712#define ENQUEUE_WAKEUP 0x01
1713#define ENQUEUE_RESTORE 0x02
1714#define ENQUEUE_MOVE 0x04
1715#define ENQUEUE_NOCLOCK 0x08
1716
1717#define ENQUEUE_HEAD 0x10
1718#define ENQUEUE_REPLENISH 0x20
1719#ifdef CONFIG_SMP
1720#define ENQUEUE_MIGRATED 0x40
1721#else
1722#define ENQUEUE_MIGRATED 0x00
1723#endif
1724
1725#define RETRY_TASK ((void *)-1UL)
1726
1727struct sched_class {
1728 const struct sched_class *next;
1729
1730 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1731 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1732 void (*yield_task) (struct rq *rq);
1733 RH_KABI_REPLACE(bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt),\
1734 bool (*yield_to_task)(struct rq *rq, struct task_struct *p))
1735
1736 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
1737
1738 RH_KABI_REPLACE(struct task_struct * (*pick_next_task)(struct rq *rq,
1739 struct task_struct *prev,
1740 struct rq_flags *rf),
1741 struct task_struct *(*pick_next_task)(struct rq *rq))
1742 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1743
1744#ifdef CONFIG_SMP
1745 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1746 RH_KABI_REPLACE(void (*migrate_task_rq)(struct task_struct *p),\
1747 void (*migrate_task_rq)(struct task_struct *p, int new_cpu))
1748
1749 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1750
1751 void (*set_cpus_allowed)(struct task_struct *p,
1752 const struct cpumask *newmask);
1753
1754 void (*rq_online)(struct rq *rq);
1755 void (*rq_offline)(struct rq *rq);
1756#endif
1757
1758 RH_KABI_REPLACE(void (*set_curr_task)(struct rq *rq),
1759 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first))
1760 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
1761 void (*task_fork)(struct task_struct *p);
1762 void (*task_dead)(struct task_struct *p);
1763
1764
1765
1766
1767
1768
1769 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1770 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1771 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1772 int oldprio);
1773
1774 unsigned int (*get_rr_interval)(struct rq *rq,
1775 struct task_struct *task);
1776
1777 void (*update_curr)(struct rq *rq);
1778
1779#define TASK_SET_GROUP 0
1780#define TASK_MOVE_GROUP 1
1781
1782#ifdef CONFIG_FAIR_GROUP_SCHED
1783 void (*task_change_group)(struct task_struct *p, int type);
1784#endif
1785
1786 RH_KABI_USE(1, int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf))
1787 RH_KABI_RESERVE(2)
1788
1789};
1790
1791static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1792{
1793 WARN_ON_ONCE(rq->curr != prev);
1794 prev->sched_class->put_prev_task(rq, prev);
1795}
1796
1797static inline void set_next_task(struct rq *rq, struct task_struct *next)
1798{
1799 WARN_ON_ONCE(rq->curr != next);
1800 next->sched_class->set_next_task(rq, next, false);
1801}
1802
1803#ifdef CONFIG_SMP
1804#define sched_class_highest (&stop_sched_class)
1805#else
1806#define sched_class_highest (&dl_sched_class)
1807#endif
1808
1809#define for_class_range(class, _from, _to) \
1810 for (class = (_from); class != (_to); class = class->next)
1811
1812#define for_each_class(class) \
1813 for_class_range(class, sched_class_highest, NULL)
1814
1815extern const struct sched_class stop_sched_class;
1816extern const struct sched_class dl_sched_class;
1817extern const struct sched_class rt_sched_class;
1818extern const struct sched_class fair_sched_class;
1819extern const struct sched_class idle_sched_class;
1820
1821static inline bool sched_stop_runnable(struct rq *rq)
1822{
1823 return rq->stop && task_on_rq_queued(rq->stop);
1824}
1825
1826static inline bool sched_dl_runnable(struct rq *rq)
1827{
1828 return rq->dl.dl_nr_running > 0;
1829}
1830
1831static inline bool sched_rt_runnable(struct rq *rq)
1832{
1833 return rq->rt.rt_queued > 0;
1834}
1835
1836static inline bool sched_fair_runnable(struct rq *rq)
1837{
1838 return rq->cfs.nr_running > 0;
1839}
1840
1841extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1842extern struct task_struct *pick_next_task_idle(struct rq *rq);
1843
1844#ifdef CONFIG_SMP
1845
1846extern void update_group_capacity(struct sched_domain *sd, int cpu);
1847
1848extern void trigger_load_balance(struct rq *rq);
1849
1850extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1851
1852#endif
1853
1854#ifdef CONFIG_CPU_IDLE
1855static inline void idle_set_state(struct rq *rq,
1856 struct cpuidle_state *idle_state)
1857{
1858 rq->idle_state = idle_state;
1859}
1860
1861static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1862{
1863 SCHED_WARN_ON(!rcu_read_lock_held());
1864
1865 return rq->idle_state;
1866}
1867#else
1868static inline void idle_set_state(struct rq *rq,
1869 struct cpuidle_state *idle_state)
1870{
1871}
1872
1873static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1874{
1875 return NULL;
1876}
1877#endif
1878
1879extern void schedule_idle(void);
1880
1881extern void sysrq_sched_debug_show(void);
1882extern void sched_init_granularity(void);
1883extern void update_max_interval(void);
1884
1885extern void init_sched_dl_class(void);
1886extern void init_sched_rt_class(void);
1887extern void init_sched_fair_class(void);
1888
1889extern void reweight_task(struct task_struct *p, int prio);
1890
1891extern void resched_curr(struct rq *rq);
1892extern void resched_cpu(int cpu);
1893
1894extern struct rt_bandwidth def_rt_bandwidth;
1895extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1896
1897extern struct dl_bandwidth def_dl_bandwidth;
1898extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1899extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1900extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
1901
1902#define BW_SHIFT 20
1903#define BW_UNIT (1 << BW_SHIFT)
1904#define RATIO_SHIFT 8
1905#define MAX_BW_BITS (64 - BW_SHIFT)
1906#define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
1907unsigned long to_ratio(u64 period, u64 runtime);
1908
1909extern void init_entity_runnable_average(struct sched_entity *se);
1910extern void post_init_entity_util_avg(struct task_struct *p);
1911
1912#ifdef CONFIG_NO_HZ_FULL
1913extern bool sched_can_stop_tick(struct rq *rq);
1914extern int __init sched_tick_offload_init(void);
1915
1916
1917
1918
1919
1920
1921static inline void sched_update_tick_dependency(struct rq *rq)
1922{
1923 int cpu = cpu_of(rq);
1924
1925 if (!tick_nohz_full_cpu(cpu))
1926 return;
1927
1928 if (sched_can_stop_tick(rq))
1929 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
1930 else
1931 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
1932}
1933#else
1934static inline int sched_tick_offload_init(void) { return 0; }
1935static inline void sched_update_tick_dependency(struct rq *rq) { }
1936#endif
1937
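/*
 * nr_running bookkeeping: on crossing from one to two runnable tasks the
 * root domain is marked overloaded (SMP), and the nohz-full tick
 * dependency is re-evaluated.
 */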
1938static inline void add_nr_running(struct rq *rq, unsigned count)
1939{
1940 unsigned prev_nr = rq->nr_running;
1941
1942 rq->nr_running = prev_nr + count;
1943 if (trace_sched_update_nr_running_tp_enabled()) {
1944 call_trace_sched_update_nr_running(rq, count);
1945 }
1946
1947 if (prev_nr < 2 && rq->nr_running >= 2) {
1948#ifdef CONFIG_SMP
1949 if (!READ_ONCE(rq->rd->overload))
1950 WRITE_ONCE(rq->rd->overload, 1);
1951#endif
1952 }
1953
1954 sched_update_tick_dependency(rq);
1955}
1956
1957static inline void sub_nr_running(struct rq *rq, unsigned count)
1958{
1959 rq->nr_running -= count;
1960 if (trace_sched_update_nr_running_tp_enabled()) {
1961 call_trace_sched_update_nr_running(rq, -count);
1962 }
1963
1964
1965 sched_update_tick_dependency(rq);
1966}
1967
1968extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1969extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1970
1971extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1972
1973extern const_debug unsigned int sysctl_sched_nr_migrate;
1974extern const_debug unsigned int sysctl_sched_migration_cost;
1975
1976#ifdef CONFIG_SCHED_HRTICK
1977
1978
1979
1980
1981
1982
1983static inline int hrtick_enabled(struct rq *rq)
1984{
1985 if (!cpu_active(cpu_of(rq)))
1986 return 0;
1987 return hrtimer_is_hres_active(&rq->hrtick_timer);
1988}
1989
1990static inline int hrtick_enabled_fair(struct rq *rq)
1991{
1992 if (!sched_feat(HRTICK))
1993 return 0;
1994 return hrtick_enabled(rq);
1995}
1996
1997static inline int hrtick_enabled_dl(struct rq *rq)
1998{
1999 if (!sched_feat(HRTICK_DL))
2000 return 0;
2001 return hrtick_enabled(rq);
2002}
2003
2004void hrtick_start(struct rq *rq, u64 delay);
2005
2006#else
2007
2008static inline int hrtick_enabled_fair(struct rq *rq)
2009{
2010 return 0;
2011}
2012
2013static inline int hrtick_enabled_dl(struct rq *rq)
2014{
2015 return 0;
2016}
2017
2018static inline int hrtick_enabled(struct rq *rq)
2019{
2020 return 0;
2021}
2022
2023#endif
2024
2025#ifndef arch_scale_freq_tick
2026static __always_inline
2027void arch_scale_freq_tick(void)
2028{
2029}
2030#endif
2031
2032#ifndef arch_scale_freq_capacity
2033static __always_inline
2034unsigned long arch_scale_freq_capacity(int cpu)
2035{
2036 return SCHED_CAPACITY_SCALE;
2037}
2038#endif
2039
2040#ifdef CONFIG_SMP
2041#ifdef CONFIG_PREEMPTION
2042
2043static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2054 __releases(this_rq->lock)
2055 __acquires(busiest->lock)
2056 __acquires(this_rq->lock)
2057{
2058 raw_spin_unlock(&this_rq->lock);
2059 double_rq_lock(this_rq, busiest);
2060
2061 return 1;
2062}
2063
2064#else
2065
2066
2067
2068
2069
2070
2071
2072static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2073 __releases(this_rq->lock)
2074 __acquires(busiest->lock)
2075 __acquires(this_rq->lock)
2076{
2077 int ret = 0;
2078
2079 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
2080 if (busiest < this_rq) {
2081 raw_spin_unlock(&this_rq->lock);
2082 raw_spin_lock(&busiest->lock);
2083 raw_spin_lock_nested(&this_rq->lock,
2084 SINGLE_DEPTH_NESTING);
2085 ret = 1;
2086 } else
2087 raw_spin_lock_nested(&busiest->lock,
2088 SINGLE_DEPTH_NESTING);
2089 }
2090 return ret;
2091}
2092
2093#endif
2094
/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
2098static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2099{
2100 if (unlikely(!irqs_disabled())) {
2101
2102 raw_spin_unlock(&this_rq->lock);
2103 BUG_ON(1);
2104 }
2105
2106 return _double_lock_balance(this_rq, busiest);
2107}
2108
2109static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
2110 __releases(busiest->lock)
2111{
2112 raw_spin_unlock(&busiest->lock);
2113 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
2114}
2115
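/*
 * The double_lock*() helpers below always take the lock with the lower
 * address first, giving a global ordering that prevents ABBA deadlocks.
 */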
2116static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
2117{
2118 if (l1 > l2)
2119 swap(l1, l2);
2120
2121 spin_lock(l1);
2122 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2123}
2124
2125static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
2126{
2127 if (l1 > l2)
2128 swap(l1, l2);
2129
2130 spin_lock_irq(l1);
2131 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2132}
2133
2134static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
2135{
2136 if (l1 > l2)
2137 swap(l1, l2);
2138
2139 raw_spin_lock(l1);
2140 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2141}
2142
/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
2149static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2150 __acquires(rq1->lock)
2151 __acquires(rq2->lock)
2152{
2153 BUG_ON(!irqs_disabled());
2154 if (rq1 == rq2) {
2155 raw_spin_lock(&rq1->lock);
2156 __acquire(rq2->lock);
2157 } else {
2158 if (rq1 < rq2) {
2159 raw_spin_lock(&rq1->lock);
2160 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
2161 } else {
2162 raw_spin_lock(&rq2->lock);
2163 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
2164 }
2165 }
2166}
2167
/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
2174static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2175 __releases(rq1->lock)
2176 __releases(rq2->lock)
2177{
2178 raw_spin_unlock(&rq1->lock);
2179 if (rq1 != rq2)
2180 raw_spin_unlock(&rq2->lock);
2181 else
2182 __release(rq2->lock);
2183}
2184
2185extern void set_rq_online (struct rq *rq);
2186extern void set_rq_offline(struct rq *rq);
2187extern bool sched_smp_initialized;
2188
2189#else
2190
2191
2192
2193
2194
2195
2196
2197static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2198 __acquires(rq1->lock)
2199 __acquires(rq2->lock)
2200{
2201 BUG_ON(!irqs_disabled());
2202 BUG_ON(rq1 != rq2);
2203 raw_spin_lock(&rq1->lock);
2204 __acquire(rq2->lock);
2205}
2206
2207
2208
2209
2210
2211
2212
2213static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2214 __releases(rq1->lock)
2215 __releases(rq2->lock)
2216{
2217 BUG_ON(rq1 != rq2);
2218 raw_spin_unlock(&rq1->lock);
2219 __release(rq2->lock);
2220}
2221
2222#endif
2223
2224extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
2225extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
2226
2227#ifdef CONFIG_SCHED_DEBUG
2228extern bool sched_debug_enabled;
2229
2230extern void print_cfs_stats(struct seq_file *m, int cpu);
2231extern void print_rt_stats(struct seq_file *m, int cpu);
2232extern void print_dl_stats(struct seq_file *m, int cpu);
2233extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2234extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2235extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2236#ifdef CONFIG_NUMA_BALANCING
2237extern void
2238show_numa_stats(struct task_struct *p, struct seq_file *m);
2239extern void
2240print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
2241 unsigned long tpf, unsigned long gsf, unsigned long gpf);
2242#endif
2243#endif
2244
2245extern void init_cfs_rq(struct cfs_rq *cfs_rq);
2246extern void init_rt_rq(struct rt_rq *rt_rq);
2247extern void init_dl_rq(struct dl_rq *dl_rq);
2248
2249extern void cfs_bandwidth_usage_inc(void);
2250extern void cfs_bandwidth_usage_dec(void);
2251
2252#ifdef CONFIG_NO_HZ_COMMON
2253#define NOHZ_BALANCE_KICK_BIT 0
2254#define NOHZ_STATS_KICK_BIT 1
2255
2256#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT)
2257#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT)
2258
2259#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
2260
2261#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2262
2263extern void nohz_balance_exit_idle(struct rq *rq);
2264#else
2265static inline void nohz_balance_exit_idle(struct rq *rq) { }
2266#endif
2267
2268
2269#ifdef CONFIG_SMP
2270static inline
2271void __dl_update(struct dl_bw *dl_b, s64 bw)
2272{
2273 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
2274 int i;
2275
2276 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2277 "sched RCU must be held");
2278 for_each_cpu_and(i, rd->span, cpu_active_mask) {
2279 struct rq *rq = cpu_rq(i);
2280
2281 rq->dl.extra_bw += bw;
2282 }
2283}
2284#else
2285static inline
2286void __dl_update(struct dl_bw *dl_b, s64 bw)
2287{
2288 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
2289
2290 dl->extra_bw += bw;
2291}
2292#endif
2293
2294
2295#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2296struct irqtime {
2297 u64 total;
2298 u64 tick_delta;
2299 u64 irq_start_time;
2300 struct u64_stats_sync sync;
2301};
2302
2303DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
2304
2305
2306
2307
2308
2309
2310static inline u64 irq_time_read(int cpu)
2311{
2312 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
2313 unsigned int seq;
2314 u64 total;
2315
2316 do {
2317 seq = __u64_stats_fetch_begin(&irqtime->sync);
2318 total = irqtime->total;
2319 } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
2320
2321 return total;
2322}
2323#endif
2324
2325#ifdef CONFIG_CPU_FREQ
2326DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2351{
2352 struct update_util_data *data;
2353
2354 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2355 cpu_of(rq)));
2356 if (data)
2357 data->func(data, rq_clock(rq), flags);
2358}
2359#else
2360static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
2361#endif
2362
2363#ifdef arch_scale_freq_capacity
2364# ifndef arch_scale_freq_invariant
2365# define arch_scale_freq_invariant() true
2366# endif
2367#else
2368# define arch_scale_freq_invariant() false
2369#endif
2370
2371#ifdef CONFIG_SMP
2372static inline unsigned long capacity_orig_of(int cpu)
2373{
2374 return cpu_rq(cpu)->cpu_capacity_orig;
2375}
2376#endif
2377
2378#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389enum schedutil_type {
2390 FREQUENCY_UTIL,
2391 ENERGY_UTIL,
2392};
2393
2394unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
2395 unsigned long max, enum schedutil_type type);
2396
2397static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
2398{
2399 unsigned long max = arch_scale_cpu_capacity(cpu);
2400
2401 return schedutil_freq_util(cpu, cfs, max, ENERGY_UTIL);
2402}
2403
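/* Reserved -deadline bandwidth of this rq, scaled to SCHED_CAPACITY_SCALE. */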
2404static inline unsigned long cpu_bw_dl(struct rq *rq)
2405{
2406 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
2407}
2408
2409static inline unsigned long cpu_util_dl(struct rq *rq)
2410{
2411 return READ_ONCE(rq->avg_dl.util_avg);
2412}
2413
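/*
 * CFS utilization of this rq; when UTIL_EST is enabled the estimated
 * utilization of enqueued tasks is used as a floor.
 */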
2414static inline unsigned long cpu_util_cfs(struct rq *rq)
2415{
2416 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2417
2418 if (sched_feat(UTIL_EST)) {
2419 util = max_t(unsigned long, util,
2420 READ_ONCE(rq->cfs.avg.util_est.enqueued));
2421 }
2422
2423 return util;
2424}
2425
2426static inline unsigned long cpu_util_rt(struct rq *rq)
2427{
2428 return READ_ONCE(rq->avg_rt.util_avg);
2429}
2430#else
2431static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
2432{
2433 return cfs;
2434}
2435#endif
2436
2437#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
2438static inline unsigned long cpu_util_irq(struct rq *rq)
2439{
2440 return rq->avg_irq.util_avg;
2441}
2442
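/*
 * Scale @util by the capacity left after IRQ pressure:
 *
 *   util * (max - irq) / max
 */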
2443static inline
2444unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2445{
2446 util *= (max - irq);
2447 util /= max;
2448
2449 return util;
2450
2451}
2452#else
2453static inline unsigned long cpu_util_irq(struct rq *rq)
2454{
2455 return 0;
2456}
2457
2458static inline
2459unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2460{
2461 return util;
2462}
2463#endif
2464
2465#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2466#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
2467#else
2468#define perf_domain_span(pd) NULL
2469#endif
2470
2471#ifdef CONFIG_SMP
2472extern struct static_key_false sched_energy_present;
2473#endif
2474
2475#ifdef CONFIG_MEMBARRIER
2476
2477
2478
2479
2480
2481
2482static inline void membarrier_switch_mm(struct rq *rq,
2483 struct mm_struct *prev_mm,
2484 struct mm_struct *next_mm)
2485{
2486 int membarrier_state;
2487
2488 if (prev_mm == next_mm)
2489 return;
2490
2491 membarrier_state = atomic_read(&next_mm->membarrier_state);
2492 if (READ_ONCE(rq->membarrier_state) == membarrier_state)
2493 return;
2494
2495 WRITE_ONCE(rq->membarrier_state, membarrier_state);
2496}
2497#else
2498static inline void membarrier_switch_mm(struct rq *rq,
2499 struct mm_struct *prev_mm,
2500 struct mm_struct *next_mm)
2501{
2502}
2503#endif
2504
2505#ifdef CONFIG_SMP
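/* A per-CPU kthread: PF_KTHREAD and affined to exactly one CPU. */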
2506static inline bool is_per_cpu_kthread(struct task_struct *p)
2507{
2508 if (!(p->flags & PF_KTHREAD))
2509 return false;
2510
2511 if (p->nr_cpus_allowed != 1)
2512 return false;
2513
2514 return true;
2515}
2516#endif
2517
2518void swake_up_all_locked(struct swait_queue_head *q);
2519void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
2520