/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#include <linux/sched.h>

#include <linux/sched/autogroup.h>
#include <linux/sched/clock.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/idle.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/sched/user.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/xacct.h>

#include <uapi/linux/sched/types.h>

#include <linux/binfmts.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/cpuset.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delayacct.h>
#include <linux/energy_model.h>
#include <linux/init_task.h>
#include <linux/kprobes.h>
#include <linux/kthread.h>
#include <linux/membarrier.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/nmi.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcupdate_wait.h>
#include <linux/security.h>
#include <linux/stop_machine.h>
#include <linux/suspend.h>
#include <linux/swait.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/tsacct_kern.h>

#include <asm/tlb.h>

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
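
/*
 * Illustrative example only (not part of the original header): with HZ == 1000
 * one jiffy is NSEC_PER_SEC / HZ == 1,000,000 ns, so
 *
 *	NS_TO_JIFFIES(2 * NSEC_PER_SEC) == 2000
 */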

/*
 * On 64-bit architectures, task-load arithmetic is carried at an increased
 * fixed-point resolution (an extra SCHED_FIXEDPOINT_SHIFT bits) to improve
 * shares distribution and load balancing for low-weight task groups and
 * deep task-group hierarchies; 32-bit keeps the plain weight values.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif
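
/*
 * Worked example (illustrative; assumes SCHED_FIXEDPOINT_SHIFT == 10):
 * on 64-bit the nice-0 weight of 1024 is carried internally as
 * scale_load(1024) == 1024 << 10 == 1048576 == NICE_0_LOAD, and
 * scale_load_down() recovers the user-visible 1024 again.
 */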

/*
 * NICE_0_LOAD is the nice-0 weight expressed at the (possibly increased)
 * internal load resolution defined above.
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_idle_policy(struct task_struct *p)
{
	return idle_policy(p->policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
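
/*
 * Illustrative example only: with SCHED_CAPACITY_SHIFT == 10 (i.e.
 * SCHED_CAPACITY_SCALE == 1024), scaling a utilization of 1024 by a CPU
 * capacity of 512 gives cap_scale(1024, 512) == (1024 * 512) >> 10 == 512.
 */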

/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This flag marks the schedutil governor kworkers' deadline entities as
 * "special": they run at effectively the highest deadline priority while
 * being excluded from deadline bandwidth accounting and admission control.
 */
#define SCHED_FLAG_SUGOV	0x10000000

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks under control we need some place
 * to store these parameters; dl_bandwidth holds the system-wide defaults
 * (mirroring the rt ones), while dl_bw tracks, per root domain, the maximum
 * allowed utilization (@bw) and the currently allocated sum (@total_bw)
 * used for admission control of SCHED_DEADLINE tasks.
 */
struct dl_bandwidth {
	raw_spinlock_t		dl_runtime_lock;
	u64			dl_runtime;
	u64			dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
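
/*
 * Illustrative sketch (not from the original header): deadline bandwidth is
 * tracked in BW_SHIFT (20-bit) fixed point, so a task with 10ms of runtime
 * every 100ms contributes roughly (1 << 20) / 10 ~= 104857 to total_bw.
 * __dl_overflow() then admits a bandwidth change only while
 *
 *	total_bw - old_bw + new_bw <= bw * cpus
 *
 * with @bw the per-CPU cap derived from the rt_runtime/rt_period sysctls.
 */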

extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern bool dl_cpu_busy(unsigned int cpu);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>
#include <linux/psi.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	s64			hierarchical_quota;
	u64			runtime_expires;
	int			expires_seq;

	u8			idle;
	u8			period_active;
	u8			distribute_running;
	u8			slack_started;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	u64			throttled_time;
#endif
};

/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetics problems.
 * The weight of a cfs_rq is the sum of the weights of the entities queued
 * on it, so the weight of a single entity should not be too large, nor
 * should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical limitation from
 * this.)
 */
#define MIN_SHARES		(1UL << 1)
#define MAX_SHARES		(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up
 * when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
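
/*
 * Usage sketch (illustrative only; print_tg() is a hypothetical visitor):
 *
 *	static int print_tg(struct task_group *tg, void *data)
 *	{
 *		pr_info("task_group %p\n", tg);
 *		return 0;	(a non-zero return aborts the walk)
 *	}
 *
 *	rcu_read_lock();
 *	walk_tg_tree(print_tg, tg_nop, NULL);
 *	rcu_read_unlock();
 */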

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else
static inline void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next) { }
#endif
#endif

#else

struct cfs_bandwidth { };

#endif

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight	load;
	unsigned long		runnable_weight;
	unsigned int		nr_running;
	unsigned int		h_nr_running;

	u64			exec_clock;
	u64			min_vruntime;
#ifndef CONFIG_64BIT
	u64			min_vruntime_copy;
#endif

	struct rb_root_cached	tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity	*curr;
	struct sched_entity	*next;
	struct sched_entity	*last;
	struct sched_entity	*skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int		nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg	avg;
#ifndef CONFIG_64BIT
	u64			load_last_update_time_copy;
#endif
	struct {
		raw_spinlock_t	lock ____cacheline_aligned;
		int		nr;
		unsigned long	load_avg;
		unsigned long	util_avg;
		unsigned long	runnable_sum;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long		tg_load_avg_contrib;
	long			propagate;
	long			prop_runnable_sum;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long		h_load;
	u64			last_h_load_update;
	struct sched_entity	*h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity
	 * in a hierarchy). Non-leaf lrqs hold other higher schedulable
	 * entities (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
	struct list_head	leaf_cfs_rq_list;
	struct task_group	*tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int			runtime_enabled;
	int			expires_seq;
	u64			runtime_expires;
	s64			runtime_remaining;

	u64			throttled_clock;
	u64			throttled_clock_task;
	u64			throttled_clock_task_time;
	int			throttled;
	int			throttle_count;
	struct list_head	throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif
582
583
584struct rt_rq {
585 struct rt_prio_array active;
586 unsigned int rt_nr_running;
587 unsigned int rr_nr_running;
588#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
589 struct {
590 int curr;
591#ifdef CONFIG_SMP
592 int next;
593#endif
594 } highest_prio;
595#endif
596#ifdef CONFIG_SMP
597 unsigned long rt_nr_migratory;
598 unsigned long rt_nr_total;
599 int overloaded;
600 struct plist_head pushable_tasks;
601
602#endif
603 int rt_queued;
604
605 int rt_throttled;
606 u64 rt_time;
607 u64 rt_runtime;
608
609 raw_spinlock_t rt_runtime_lock;
610
611#ifdef CONFIG_RT_GROUP_SCHED
612 unsigned long rt_nr_boosted;
613
614 struct rq *rq;
615 struct task_group *tg;
616#endif
617};
618
619static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
620{
621 return rt_rq->rt_queued && rt_rq->rt_nr_running;
622}
623
624
625struct dl_rq {
626
627 struct rb_root_cached root;
628
629 unsigned long dl_nr_running;
630
631#ifdef CONFIG_SMP
632
633
634
635
636
637
638 struct {
639 u64 curr;
640 u64 next;
641 } earliest_dl;
642
643 unsigned long dl_nr_migratory;
644 int overloaded;
645
646
647
648
649
650
651 struct rb_root_cached pushable_dl_tasks_root;
652#else
653 struct dl_bw dl_bw;
654#endif
655
656
657
658
659
660 u64 running_bw;
661
662
663
664
665
666
667
668
669
670
671 u64 this_bw;
672 u64 extra_bw;
673
674
675
676
677
678 u64 bw_ratio;
679};
680
681#ifdef CONFIG_FAIR_GROUP_SCHED
682
683#define entity_is_task(se) (!se->my_q)
684#else
685#define entity_is_task(se) 1
686#endif
687
688#ifdef CONFIG_SMP
689
690
691
692static inline long se_weight(struct sched_entity *se)
693{
694 return scale_load_down(se->load.weight);
695}
696
697static inline long se_runnable(struct sched_entity *se)
698{
699 return scale_load_down(se->runnable_weight);
700}
701
702static inline bool sched_asym_prefer(int a, int b)
703{
704 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
705}
706
707struct perf_domain {
708 struct em_perf_domain *em_pd;
709 struct perf_domain *next;
710 struct rcu_head rcu;
711};
712
713
714#define SG_OVERLOAD 0x1
715#define SG_OVERUTILIZED 0x2
716
717
718
719
720
721
722
723
724
725struct root_domain {
726 atomic_t refcount;
727 atomic_t rto_count;
728 struct rcu_head rcu;
729 cpumask_var_t span;
730 cpumask_var_t online;
731
732
733
734
735
736
737 int overload;
738
739
740 int overutilized;
741
742
743
744
745
746 cpumask_var_t dlo_mask;
747 atomic_t dlo_count;
748 struct dl_bw dl_bw;
749 struct cpudl cpudl;
750
751#ifdef HAVE_RT_PUSH_IPI
752
753
754
755 struct irq_work rto_push_work;
756 raw_spinlock_t rto_lock;
757
758 int rto_loop;
759 int rto_cpu;
760
761 atomic_t rto_loop_next;
762 atomic_t rto_loop_start;
763#endif
764
765
766
767
768 cpumask_var_t rto_mask;
769 struct cpupri cpupri;
770
771 unsigned long max_cpu_capacity;
772
773
774
775
776
777 struct perf_domain __rcu *pd;
778};
779
780extern struct root_domain def_root_domain;
781extern struct mutex sched_domains_mutex;
782
783extern void init_defrootdomain(void);
784extern int sched_init_domains(const struct cpumask *cpu_map);
785extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
786extern void sched_get_rd(struct root_domain *rd);
787extern void sched_put_rd(struct root_domain *rd);
788
789#ifdef HAVE_RT_PUSH_IPI
790extern void rto_push_irq_work_func(struct irq_work *work);
791#endif
792#endif
793
794#ifdef CONFIG_UCLAMP_TASK
/*
 * struct uclamp_bucket - Utilization clamp bucket
 * @value: utilization clamp value for tasks on this clamp bucket
 * @tasks: number of RUNNABLE tasks on this clamp bucket
 *
 * Keep track of how many tasks are RUNNABLE for a given utilization
 * clamp value.
 */
803struct uclamp_bucket {
804 unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
805 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
806};

/*
 * struct uclamp_rq - rq's utilization clamp
 * @value: currently active clamp value for the rq
 * @bucket: utilization clamp buckets affecting the rq
 *
 * Keeps track of RUNNABLE tasks on a rq to aggregate their clamp values:
 * a clamp value affects the rq while at least one RUNNABLE task has it.
 * There is one uclamp_rq per clamp index (UCLAMP_MIN, UCLAMP_MAX).
 */
830struct uclamp_rq {
831 unsigned int value;
832 struct uclamp_bucket bucket[UCLAMP_BUCKETS];
833};
834#endif

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending runqueue.
 */
843struct rq {
844
845 raw_spinlock_t lock;
846
847
848
849
850
851 unsigned int nr_running;
852#ifdef CONFIG_NUMA_BALANCING
853 unsigned int nr_numa_running;
854 unsigned int nr_preferred_running;
855 unsigned int numa_migrate_on;
856#endif
857#ifdef CONFIG_NO_HZ_COMMON
858#ifdef CONFIG_SMP
859 unsigned long last_load_update_tick;
860 unsigned long last_blocked_load_update_tick;
861 unsigned int has_blocked_load;
862#endif
863 unsigned int nohz_tick_stopped;
864 atomic_t nohz_flags;
865#endif
866
867 unsigned long nr_load_updates;
868 u64 nr_switches;
869
870#ifdef CONFIG_UCLAMP_TASK
871
872 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned;
873 unsigned int uclamp_flags;
874#define UCLAMP_FLAG_IDLE 0x01
875#endif
876
877 struct cfs_rq cfs;
878 struct rt_rq rt;
879 struct dl_rq dl;
880
881#ifdef CONFIG_FAIR_GROUP_SCHED
882
883 struct list_head leaf_cfs_rq_list;
884 struct list_head *tmp_alone_branch;
885#endif
886
887
888
889
890
891
892
893 unsigned long nr_uninterruptible;
894
895 struct task_struct *curr;
896 struct task_struct *idle;
897 struct task_struct *stop;
898 unsigned long next_balance;
899 struct mm_struct *prev_mm;
900
901 unsigned int clock_update_flags;
902 u64 clock;
903
904 u64 clock_task ____cacheline_aligned;
905 u64 clock_pelt;
906 unsigned long lost_idle_time;
907
908 atomic_t nr_iowait;
909
910#ifdef CONFIG_SMP
911 struct root_domain *rd;
912 struct sched_domain __rcu *sd;
913
914 unsigned long cpu_capacity;
915 unsigned long cpu_capacity_orig;
916
917 struct callback_head *balance_callback;
918
919 unsigned char idle_balance;
920
921 unsigned long misfit_task_load;
922
923
924 int active_balance;
925 int push_cpu;
926 struct cpu_stop_work active_balance_work;
927
928
929 int cpu;
930 int online;
931
932 struct list_head cfs_tasks;
933
934 struct sched_avg avg_rt;
935 struct sched_avg avg_dl;
936#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
937 struct sched_avg avg_irq;
938#endif
939 u64 idle_stamp;
940 u64 avg_idle;
941
942
943 u64 max_idle_balance_cost;
944#endif
945
946#ifdef CONFIG_IRQ_TIME_ACCOUNTING
947 u64 prev_irq_time;
948#endif
949#ifdef CONFIG_PARAVIRT
950 u64 prev_steal_time;
951#endif
952#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
953 u64 prev_steal_time_rq;
954#endif
955
956
957 unsigned long calc_load_update;
958 long calc_load_active;
959
960#ifdef CONFIG_SCHED_HRTICK
961#ifdef CONFIG_SMP
962 int hrtick_csd_pending;
963 call_single_data_t hrtick_csd;
964#endif
965 struct hrtimer hrtick_timer;
966#endif
967
968#ifdef CONFIG_SCHEDSTATS
969
970 struct sched_info rq_sched_info;
971 unsigned long long rq_cpu_time;
972
973
974
975 unsigned int yld_count;
976
977
978 unsigned int sched_count;
979 unsigned int sched_goidle;
980
981
982 unsigned int ttwu_count;
983 unsigned int ttwu_local;
984#endif
985
986#ifdef CONFIG_SMP
987 struct llist_head wake_list;
988#endif
989
990#ifdef CONFIG_CPU_IDLE
991
992 struct cpuidle_state *idle_state;
993#endif
994};
995
996#ifdef CONFIG_FAIR_GROUP_SCHED
997
998
999static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1000{
1001 return cfs_rq->rq;
1002}
1003
1004#else
1005
1006static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1007{
1008 return container_of(cfs_rq, struct rq, cfs);
1009}
1010#endif
1011
1012static inline int cpu_of(struct rq *rq)
1013{
1014#ifdef CONFIG_SMP
1015 return rq->cpu;
1016#else
1017 return 0;
1018#endif
1019}
1020
1021
1022#ifdef CONFIG_SCHED_SMT
1023extern void __update_idle_core(struct rq *rq);
1024
1025static inline void update_idle_core(struct rq *rq)
1026{
1027 if (static_branch_unlikely(&sched_smt_present))
1028 __update_idle_core(rq);
1029}
1030
1031#else
1032static inline void update_idle_core(struct rq *rq) { }
1033#endif
1034
1035DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1036
1037#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
1038#define this_rq() this_cpu_ptr(&runqueues)
1039#define task_rq(p) cpu_rq(task_cpu(p))
1040#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1041#define raw_rq() raw_cpu_ptr(&runqueues)

extern void update_rq_clock(struct rq *rq);

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits:
 *
 * %RQCF_REQ_SKIP - request skipping the clock update on the next call to
 *		    __schedule(), to avoid back-to-back rq clock updates.
 * %RQCF_ACT_SKIP - set inside __schedule() while the skip is in effect and
 *		    calls to update_rq_clock() are being ignored.
 * %RQCF_UPDATED  - debug flag recording that update_rq_clock() has been
 *		    called since rq::lock was last pinned.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04
1076
1077static inline void assert_clock_updated(struct rq *rq)
1078{
1079
1080
1081
1082
1083 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
1084}
1085
1086static inline u64 rq_clock(struct rq *rq)
1087{
1088 lockdep_assert_held(&rq->lock);
1089 assert_clock_updated(rq);
1090
1091 return rq->clock;
1092}
1093
1094static inline u64 rq_clock_task(struct rq *rq)
1095{
1096 lockdep_assert_held(&rq->lock);
1097 assert_clock_updated(rq);
1098
1099 return rq->clock_task;
1100}
1101
1102static inline void rq_clock_skip_update(struct rq *rq)
1103{
1104 lockdep_assert_held(&rq->lock);
1105 rq->clock_update_flags |= RQCF_REQ_SKIP;
1106}
1107
1108
1109
1110
1111
1112static inline void rq_clock_cancel_skipupdate(struct rq *rq)
1113{
1114 lockdep_assert_held(&rq->lock);
1115 rq->clock_update_flags &= ~RQCF_REQ_SKIP;
1116}
1117
1118struct rq_flags {
1119 unsigned long flags;
1120 struct pin_cookie cookie;
1121#ifdef CONFIG_SCHED_DEBUG
1122
1123
1124
1125
1126
1127 unsigned int clock_update_flags;
1128#endif
1129};
1130
1131static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1132{
1133 rf->cookie = lockdep_pin_lock(&rq->lock);
1134
1135#ifdef CONFIG_SCHED_DEBUG
1136 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1137 rf->clock_update_flags = 0;
1138#endif
1139}
1140
1141static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1142{
1143#ifdef CONFIG_SCHED_DEBUG
1144 if (rq->clock_update_flags > RQCF_ACT_SKIP)
1145 rf->clock_update_flags = RQCF_UPDATED;
1146#endif
1147
1148 lockdep_unpin_lock(&rq->lock, rf->cookie);
1149}
1150
1151static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1152{
1153 lockdep_repin_lock(&rq->lock, rf->cookie);
1154
1155#ifdef CONFIG_SCHED_DEBUG
1156
1157
1158
1159 rq->clock_update_flags |= rf->clock_update_flags;
1160#endif
1161}
1162
1163struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1164 __acquires(rq->lock);
1165
1166struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1167 __acquires(p->pi_lock)
1168 __acquires(rq->lock);
1169
1170static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1171 __releases(rq->lock)
1172{
1173 rq_unpin_lock(rq, rf);
1174 raw_spin_unlock(&rq->lock);
1175}
1176
1177static inline void
1178task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1179 __releases(rq->lock)
1180 __releases(p->pi_lock)
1181{
1182 rq_unpin_lock(rq, rf);
1183 raw_spin_unlock(&rq->lock);
1184 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1185}
1186
1187static inline void
1188rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1189 __acquires(rq->lock)
1190{
1191 raw_spin_lock_irqsave(&rq->lock, rf->flags);
1192 rq_pin_lock(rq, rf);
1193}
1194
1195static inline void
1196rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1197 __acquires(rq->lock)
1198{
1199 raw_spin_lock_irq(&rq->lock);
1200 rq_pin_lock(rq, rf);
1201}
1202
1203static inline void
1204rq_lock(struct rq *rq, struct rq_flags *rf)
1205 __acquires(rq->lock)
1206{
1207 raw_spin_lock(&rq->lock);
1208 rq_pin_lock(rq, rf);
1209}
1210
1211static inline void
1212rq_relock(struct rq *rq, struct rq_flags *rf)
1213 __acquires(rq->lock)
1214{
1215 raw_spin_lock(&rq->lock);
1216 rq_repin_lock(rq, rf);
1217}
1218
1219static inline void
1220rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1221 __releases(rq->lock)
1222{
1223 rq_unpin_lock(rq, rf);
1224 raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1225}
1226
1227static inline void
1228rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1229 __releases(rq->lock)
1230{
1231 rq_unpin_lock(rq, rf);
1232 raw_spin_unlock_irq(&rq->lock);
1233}
1234
1235static inline void
1236rq_unlock(struct rq *rq, struct rq_flags *rf)
1237 __releases(rq->lock)
1238{
1239 rq_unpin_lock(rq, rf);
1240 raw_spin_unlock(&rq->lock);
1241}
1242
1243static inline struct rq *
1244this_rq_lock_irq(struct rq_flags *rf)
1245 __acquires(rq->lock)
1246{
1247 struct rq *rq;
1248
1249 local_irq_disable();
1250 rq = this_rq();
1251 rq_lock(rq, rf);
1252 return rq;
1253}
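
/*
 * Typical locking pattern (sketch, not from the original header):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	(takes p->pi_lock and rq->lock, pins rq)
 *	update_rq_clock(rq);
 *	... modify scheduler state for @p ...
 *	task_rq_unlock(rq, p, &rf);
 */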
1254
1255#ifdef CONFIG_NUMA
1256enum numa_topology_type {
1257 NUMA_DIRECT,
1258 NUMA_GLUELESS_MESH,
1259 NUMA_BACKPLANE,
1260};
1261extern enum numa_topology_type sched_numa_topology_type;
1262extern int sched_max_numa_distance;
1263extern bool find_numa_distance(int distance);
1264#endif
1265
1266#ifdef CONFIG_NUMA
1267extern void sched_init_numa(void);
1268extern void sched_domains_numa_masks_set(unsigned int cpu);
1269extern void sched_domains_numa_masks_clear(unsigned int cpu);
1270#else
1271static inline void sched_init_numa(void) { }
1272static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
1273static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1274#endif
1275
1276#ifdef CONFIG_NUMA_BALANCING
1277
1278enum numa_faults_stats {
1279 NUMA_MEM = 0,
1280 NUMA_CPU,
1281 NUMA_MEMBUF,
1282 NUMA_CPUBUF
1283};
1284extern void sched_setnuma(struct task_struct *p, int node);
1285extern int migrate_task_to(struct task_struct *p, int cpu);
1286extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1287 int cpu, int scpu);
1288extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
1289#else
1290static inline void
1291init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
1292{
1293}
1294#endif
1295
1296#ifdef CONFIG_SMP
1297
1298static inline void
1299queue_balance_callback(struct rq *rq,
1300 struct callback_head *head,
1301 void (*func)(struct rq *rq))
1302{
1303 lockdep_assert_held(&rq->lock);
1304
1305 if (unlikely(head->next))
1306 return;
1307
1308 head->func = (void (*)(struct callback_head *))func;
1309 head->next = rq->balance_callback;
1310 rq->balance_callback = head;
1311}
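
/*
 * Sketch of intended use (illustrative; my_cb_head and my_push_callback are
 * hypothetical): a scheduling class queues work that must run after rq->lock
 * is released, e.g.
 *
 *	queue_balance_callback(rq, &per_cpu(my_cb_head, rq->cpu),
 *			       my_push_callback);
 *
 * and the callback is later invoked by the scheduler core once the runqueue
 * lock can safely be dropped.
 */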
1312
1313extern void sched_ttwu_pending(void);
1314
1315#define rcu_dereference_check_sched_domain(p) \
1316 rcu_dereference_check((p), \
1317 lockdep_is_held(&sched_domains_mutex))
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU.
 * See destroy_sched_domains().
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)
1329
1330#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
1342{
1343 struct sched_domain *sd, *hsd = NULL;
1344
1345 for_each_domain(cpu, sd) {
1346 if (!(sd->flags & flag))
1347 break;
1348 hsd = sd;
1349 }
1350
1351 return hsd;
1352}
1353
1354static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
1355{
1356 struct sched_domain *sd;
1357
1358 for_each_domain(cpu, sd) {
1359 if (sd->flags & flag)
1360 break;
1361 }
1362
1363 return sd;
1364}
1365
1366DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
1367DECLARE_PER_CPU(int, sd_llc_size);
1368DECLARE_PER_CPU(int, sd_llc_id);
1369DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
1370DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
1371DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
1372DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
1373extern struct static_key_false sched_asym_cpucapacity;
1374
1375struct sched_group_capacity {
1376 atomic_t ref;
1377
1378
1379
1380
1381 unsigned long capacity;
1382 unsigned long min_capacity;
1383 unsigned long max_capacity;
1384 unsigned long next_update;
1385 int imbalance;
1386
1387#ifdef CONFIG_SCHED_DEBUG
1388 int id;
1389#endif
1390
1391 unsigned long cpumask[0];
1392};
1393
1394struct sched_group {
1395 struct sched_group *next;
1396 atomic_t ref;
1397
1398 unsigned int group_weight;
1399 struct sched_group_capacity *sgc;
1400 int asym_prefer_cpu;
1401
1402
1403
1404
1405
1406
1407
1408
1409 unsigned long cpumask[0];
1410};
1411
1412static inline struct cpumask *sched_group_span(struct sched_group *sg)
1413{
1414 return to_cpumask(sg->cpumask);
1415}
1416
1417
1418
1419
1420static inline struct cpumask *group_balance_mask(struct sched_group *sg)
1421{
1422 return to_cpumask(sg->sgc->cpumask);
1423}
1424
1425
1426
1427
1428
1429static inline unsigned int group_first_cpu(struct sched_group *group)
1430{
1431 return cpumask_first(sched_group_span(group));
1432}
1433
1434extern int group_balance_cpu(struct sched_group *sg);
1435
1436#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
1437void register_sched_domain_sysctl(void);
1438void dirty_sched_domain_sysctl(int cpu);
1439void unregister_sched_domain_sysctl(void);
1440#else
1441static inline void register_sched_domain_sysctl(void)
1442{
1443}
1444static inline void dirty_sched_domain_sysctl(int cpu)
1445{
1446}
1447static inline void unregister_sched_domain_sysctl(void)
1448{
1449}
1450#endif
1451
1452#else
1453
1454static inline void sched_ttwu_pending(void) { }
1455
1456#endif
1457
1458#include "stats.h"
1459#include "autogroup.h"
1460
1461#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem changes
 * that value before the cgroup_subsys::attach() method is called; instead we
 * rely on p->sched_task_group, which is updated under the task's rq and
 * pi_lock by sched_move_task().
 */
1476static inline struct task_group *task_group(struct task_struct *p)
1477{
1478 return p->sched_task_group;
1479}
1480
1481
1482static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1483{
1484#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1485 struct task_group *tg = task_group(p);
1486#endif
1487
1488#ifdef CONFIG_FAIR_GROUP_SCHED
1489 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1490 p->se.cfs_rq = tg->cfs_rq[cpu];
1491 p->se.parent = tg->se[cpu];
1492#endif
1493
1494#ifdef CONFIG_RT_GROUP_SCHED
1495 p->rt.rt_rq = tg->rt_rq[cpu];
1496 p->rt.parent = tg->rt_se[cpu];
1497#endif
1498}
1499
1500#else
1501
1502static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1503static inline struct task_group *task_group(struct task_struct *p)
1504{
1505 return NULL;
1506}
1507
1508#endif
1509
1510static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1511{
1512 set_task_rq(p, cpu);
1513#ifdef CONFIG_SMP
1514
1515
1516
1517
1518
1519 smp_wmb();
1520#ifdef CONFIG_THREAD_INFO_IN_TASK
1521 WRITE_ONCE(p->cpu, cpu);
1522#else
1523 WRITE_ONCE(task_thread_info(p)->cpu, cpu);
1524#endif
1525 p->wake_cpu = cpu;
1526#endif
1527}
1528
1529
1530
1531
1532#ifdef CONFIG_SCHED_DEBUG
1533# include <linux/static_key.h>
1534# define const_debug __read_mostly
1535#else
1536# define const_debug const
1537#endif
1538
1539#define SCHED_FEAT(name, enabled) \
1540 __SCHED_FEAT_##name ,
1541
1542enum {
1543#include "features.h"
1544 __SCHED_FEAT_NR,
1545};
1546
1547#undef SCHED_FEAT
1548
1549#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
1550
1551
1552
1553
1554
1555extern const_debug unsigned int sysctl_sched_features;
1556
1557#define SCHED_FEAT(name, enabled) \
1558static __always_inline bool static_branch_##name(struct static_key *key) \
1559{ \
1560 return static_key_##enabled(key); \
1561}
1562
1563#include "features.h"
1564#undef SCHED_FEAT
1565
1566extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1567#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1568
1569#else
1570
1571
1572
1573
1574
1575
1576#define SCHED_FEAT(name, enabled) \
1577 (1UL << __SCHED_FEAT_##name) * enabled |
1578static const_debug __maybe_unused unsigned int sysctl_sched_features =
1579#include "features.h"
1580 0;
1581#undef SCHED_FEAT
1582
1583#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1584
1585#endif
1586
1587extern struct static_key_false sched_numa_balancing;
1588extern struct static_key_false sched_schedstats;
1589
1590static inline u64 global_rt_period(void)
1591{
1592 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1593}
1594
1595static inline u64 global_rt_runtime(void)
1596{
1597 if (sysctl_sched_rt_runtime < 0)
1598 return RUNTIME_INF;
1599
1600 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1601}
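
/*
 * Illustrative default values: sysctl_sched_rt_period defaults to 1000000us
 * and sysctl_sched_rt_runtime to 950000us, i.e. RT tasks may consume at most
 * 0.95s of every 1s period; setting the runtime sysctl to -1 yields
 * RUNTIME_INF (no throttling).
 */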
1602
1603static inline int task_current(struct rq *rq, struct task_struct *p)
1604{
1605 return rq->curr == p;
1606}
1607
1608static inline int task_running(struct rq *rq, struct task_struct *p)
1609{
1610#ifdef CONFIG_SMP
1611 return p->on_cpu;
1612#else
1613 return task_current(rq, p);
1614#endif
1615}
1616
1617static inline int task_on_rq_queued(struct task_struct *p)
1618{
1619 return p->on_rq == TASK_ON_RQ_QUEUED;
1620}
1621
1622static inline int task_on_rq_migrating(struct task_struct *p)
1623{
1624 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
1625}

/*
 * Wake flags:
 */
#define WF_SYNC			0x01	/* Waker goes to sleep after wakeup */
#define WF_FORK			0x02	/* Child wakeup after fork */
#define WF_MIGRATED		0x4	/* Internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its CPU's load is weighted according to its "nice"
 * value; SCHED_IDLE tasks get a tiny, fixed weight:
 */
#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int		sched_prio_to_weight[40];
extern const u32		sched_prio_to_wmult[40];

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP     - task is no longer runnable
 * x_SAVE/x_RESTORE  - an otherwise spurious dequeue/enqueue pair, used when
 *                     changing a task's properties or its group placement
 * x_MOVE            - paired with SAVE/RESTORE, denotes an actual change of group
 * x_NOCLOCK         - skip the rq clock update
 *
 * ENQUEUE_HEAD      - place the task at the head of its priority list
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 */
1668#define DEQUEUE_SLEEP 0x01
1669#define DEQUEUE_SAVE 0x02
1670#define DEQUEUE_MOVE 0x04
1671#define DEQUEUE_NOCLOCK 0x08
1672
1673#define ENQUEUE_WAKEUP 0x01
1674#define ENQUEUE_RESTORE 0x02
1675#define ENQUEUE_MOVE 0x04
1676#define ENQUEUE_NOCLOCK 0x08
1677
1678#define ENQUEUE_HEAD 0x10
1679#define ENQUEUE_REPLENISH 0x20
1680#ifdef CONFIG_SMP
1681#define ENQUEUE_MIGRATED 0x40
1682#else
1683#define ENQUEUE_MIGRATED 0x00
1684#endif
1685
1686#define RETRY_TASK ((void *)-1UL)
1687
1688struct sched_class {
1689 const struct sched_class *next;
1690
1691#ifdef CONFIG_UCLAMP_TASK
1692 int uclamp_enabled;
1693#endif
1694
1695 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1696 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1697 void (*yield_task) (struct rq *rq);
1698 bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
1699
1700 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task)(struct rq *rq,
					       struct task_struct *prev,
					       struct rq_flags *rf);
1713 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1714
1715#ifdef CONFIG_SMP
1716 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1717 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
1718
1719 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1720
1721 void (*set_cpus_allowed)(struct task_struct *p,
1722 const struct cpumask *newmask);
1723
1724 void (*rq_online)(struct rq *rq);
1725 void (*rq_offline)(struct rq *rq);
1726#endif
1727
1728 void (*set_curr_task)(struct rq *rq);
1729 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
1730 void (*task_fork)(struct task_struct *p);
1731 void (*task_dead)(struct task_struct *p);
1732
1733
1734
1735
1736
1737
1738 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1739 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1740 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1741 int oldprio);
1742
1743 unsigned int (*get_rr_interval)(struct rq *rq,
1744 struct task_struct *task);
1745
1746 void (*update_curr)(struct rq *rq);
1747
1748#define TASK_SET_GROUP 0
1749#define TASK_MOVE_GROUP 1
1750
1751#ifdef CONFIG_FAIR_GROUP_SCHED
1752 void (*task_change_group)(struct task_struct *p, int type);
1753#endif
1754};
1755
1756static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1757{
1758 prev->sched_class->put_prev_task(rq, prev);
1759}
1760
1761static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
1762{
1763 curr->sched_class->set_curr_task(rq);
1764}
1765
1766#ifdef CONFIG_SMP
1767#define sched_class_highest (&stop_sched_class)
1768#else
1769#define sched_class_highest (&dl_sched_class)
1770#endif
1771#define for_each_class(class) \
1772 for (class = sched_class_highest; class; class = class->next)
1773
1774extern const struct sched_class stop_sched_class;
1775extern const struct sched_class dl_sched_class;
1776extern const struct sched_class rt_sched_class;
1777extern const struct sched_class fair_sched_class;
1778extern const struct sched_class idle_sched_class;
1779
1780
1781#ifdef CONFIG_SMP
1782
1783extern void update_group_capacity(struct sched_domain *sd, int cpu);
1784
1785extern void trigger_load_balance(struct rq *rq);
1786
1787extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1788
1789#endif
1790
1791#ifdef CONFIG_CPU_IDLE
1792static inline void idle_set_state(struct rq *rq,
1793 struct cpuidle_state *idle_state)
1794{
1795 rq->idle_state = idle_state;
1796}
1797
1798static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1799{
1800 SCHED_WARN_ON(!rcu_read_lock_held());
1801
1802 return rq->idle_state;
1803}
1804#else
1805static inline void idle_set_state(struct rq *rq,
1806 struct cpuidle_state *idle_state)
1807{
1808}
1809
1810static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1811{
1812 return NULL;
1813}
1814#endif
1815
1816extern void schedule_idle(void);
1817
1818extern void sysrq_sched_debug_show(void);
1819extern void sched_init_granularity(void);
1820extern void update_max_interval(void);
1821
1822extern void init_sched_dl_class(void);
1823extern void init_sched_rt_class(void);
1824extern void init_sched_fair_class(void);
1825
1826extern void reweight_task(struct task_struct *p, int prio);
1827
1828extern void resched_curr(struct rq *rq);
1829extern void resched_cpu(int cpu);
1830
1831extern struct rt_bandwidth def_rt_bandwidth;
1832extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1833
1834extern struct dl_bandwidth def_dl_bandwidth;
1835extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1836extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1837extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
1838extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
1839
1840#define BW_SHIFT 20
1841#define BW_UNIT (1 << BW_SHIFT)
1842#define RATIO_SHIFT 8
1843unsigned long to_ratio(u64 period, u64 runtime);
1844
1845extern void init_entity_runnable_average(struct sched_entity *se);
1846extern void post_init_entity_util_avg(struct task_struct *p);
1847
1848#ifdef CONFIG_NO_HZ_FULL
1849extern bool sched_can_stop_tick(struct rq *rq);
1850extern int __init sched_tick_offload_init(void);
1851
1852
1853
1854
1855
1856
1857static inline void sched_update_tick_dependency(struct rq *rq)
1858{
1859 int cpu;
1860
1861 if (!tick_nohz_full_enabled())
1862 return;
1863
1864 cpu = cpu_of(rq);
1865
1866 if (!tick_nohz_full_cpu(cpu))
1867 return;
1868
1869 if (sched_can_stop_tick(rq))
1870 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
1871 else
1872 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
1873}
1874#else
1875static inline int sched_tick_offload_init(void) { return 0; }
1876static inline void sched_update_tick_dependency(struct rq *rq) { }
1877#endif
1878
1879static inline void add_nr_running(struct rq *rq, unsigned count)
1880{
1881 unsigned prev_nr = rq->nr_running;
1882
1883 rq->nr_running = prev_nr + count;
1884
1885#ifdef CONFIG_SMP
1886 if (prev_nr < 2 && rq->nr_running >= 2) {
1887 if (!READ_ONCE(rq->rd->overload))
1888 WRITE_ONCE(rq->rd->overload, 1);
1889 }
1890#endif
1891
1892 sched_update_tick_dependency(rq);
1893}
1894
1895static inline void sub_nr_running(struct rq *rq, unsigned count)
1896{
1897 rq->nr_running -= count;
1898
1899 sched_update_tick_dependency(rq);
1900}
1901
1902extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1903extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1904
1905extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1906
1907extern const_debug unsigned int sysctl_sched_nr_migrate;
1908extern const_debug unsigned int sysctl_sched_migration_cost;
1909
1910#ifdef CONFIG_SCHED_HRTICK
1911
1912
1913
1914
1915
1916
1917static inline int hrtick_enabled(struct rq *rq)
1918{
1919 if (!sched_feat(HRTICK))
1920 return 0;
1921 if (!cpu_active(cpu_of(rq)))
1922 return 0;
1923 return hrtimer_is_hres_active(&rq->hrtick_timer);
1924}
1925
1926void hrtick_start(struct rq *rq, u64 delay);
1927
1928#else
1929
1930static inline int hrtick_enabled(struct rq *rq)
1931{
1932 return 0;
1933}
1934
1935#endif
1936
1937#ifndef arch_scale_freq_capacity
1938static __always_inline
1939unsigned long arch_scale_freq_capacity(int cpu)
1940{
1941 return SCHED_CAPACITY_SCALE;
1942}
1943#endif
1944
1945#ifdef CONFIG_SMP
1946#ifdef CONFIG_PREEMPT
1947
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

1958static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1959 __releases(this_rq->lock)
1960 __acquires(busiest->lock)
1961 __acquires(this_rq->lock)
1962{
1963 raw_spin_unlock(&this_rq->lock);
1964 double_rq_lock(this_rq, busiest);
1965
1966 return 1;
1967}
1968
#else

1977static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1978 __releases(this_rq->lock)
1979 __acquires(busiest->lock)
1980 __acquires(this_rq->lock)
1981{
1982 int ret = 0;
1983
1984 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1985 if (busiest < this_rq) {
1986 raw_spin_unlock(&this_rq->lock);
1987 raw_spin_lock(&busiest->lock);
1988 raw_spin_lock_nested(&this_rq->lock,
1989 SINGLE_DEPTH_NESTING);
1990 ret = 1;
1991 } else
1992 raw_spin_lock_nested(&busiest->lock,
1993 SINGLE_DEPTH_NESTING);
1994 }
1995 return ret;
1996}
1997
1998#endif
1999
2000
2001
2002
2003static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2004{
2005 if (unlikely(!irqs_disabled())) {
2006
2007 raw_spin_unlock(&this_rq->lock);
2008 BUG_ON(1);
2009 }
2010
2011 return _double_lock_balance(this_rq, busiest);
2012}
2013
2014static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
2015 __releases(busiest->lock)
2016{
2017 raw_spin_unlock(&busiest->lock);
2018 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
2019}
2020
2021static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
2022{
2023 if (l1 > l2)
2024 swap(l1, l2);
2025
2026 spin_lock(l1);
2027 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2028}
2029
2030static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
2031{
2032 if (l1 > l2)
2033 swap(l1, l2);
2034
2035 spin_lock_irq(l1);
2036 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2037}
2038
2039static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
2040{
2041 if (l1 > l2)
2042 swap(l1, l2);
2043
2044 raw_spin_lock(l1);
2045 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2046}
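
/*
 * Illustrative note (not from the original header): the double-lock helpers
 * above impose a global ordering by lock address (lower address taken first),
 * so two CPUs acquiring the same pair of locks in opposite order cannot
 * deadlock. For example, with hypothetical locks a and b:
 *
 *	double_lock(&a, &b);
 *	...
 *	spin_unlock(&b);
 *	spin_unlock(&a);
 */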

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
2054static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2055 __acquires(rq1->lock)
2056 __acquires(rq2->lock)
2057{
2058 BUG_ON(!irqs_disabled());
2059 if (rq1 == rq2) {
2060 raw_spin_lock(&rq1->lock);
2061 __acquire(rq2->lock);
2062 } else {
2063 if (rq1 < rq2) {
2064 raw_spin_lock(&rq1->lock);
2065 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
2066 } else {
2067 raw_spin_lock(&rq2->lock);
2068 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
2069 }
2070 }
2071}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
2079static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2080 __releases(rq1->lock)
2081 __releases(rq2->lock)
2082{
2083 raw_spin_unlock(&rq1->lock);
2084 if (rq1 != rq2)
2085 raw_spin_unlock(&rq2->lock);
2086 else
2087 __release(rq2->lock);
2088}
2089
2090extern void set_rq_online (struct rq *rq);
2091extern void set_rq_offline(struct rq *rq);
2092extern bool sched_smp_initialized;
2093
2094#else

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
2102static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2103 __acquires(rq1->lock)
2104 __acquires(rq2->lock)
2105{
2106 BUG_ON(!irqs_disabled());
2107 BUG_ON(rq1 != rq2);
2108 raw_spin_lock(&rq1->lock);
2109 __acquire(rq2->lock);
2110}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
2118static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2119 __releases(rq1->lock)
2120 __releases(rq2->lock)
2121{
2122 BUG_ON(rq1 != rq2);
2123 raw_spin_unlock(&rq1->lock);
2124 __release(rq2->lock);
2125}
2126
2127#endif
2128
2129extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
2130extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
2131
2132#ifdef CONFIG_SCHED_DEBUG
2133extern bool sched_debug_enabled;
2134
2135extern void print_cfs_stats(struct seq_file *m, int cpu);
2136extern void print_rt_stats(struct seq_file *m, int cpu);
2137extern void print_dl_stats(struct seq_file *m, int cpu);
2138extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2139extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2140extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2141#ifdef CONFIG_NUMA_BALANCING
2142extern void
2143show_numa_stats(struct task_struct *p, struct seq_file *m);
2144extern void
2145print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
2146 unsigned long tpf, unsigned long gsf, unsigned long gpf);
2147#endif
2148#endif
2149
2150extern void init_cfs_rq(struct cfs_rq *cfs_rq);
2151extern void init_rt_rq(struct rt_rq *rt_rq);
2152extern void init_dl_rq(struct dl_rq *dl_rq);
2153
2154extern void cfs_bandwidth_usage_inc(void);
2155extern void cfs_bandwidth_usage_dec(void);
2156
2157#ifdef CONFIG_NO_HZ_COMMON
2158#define NOHZ_BALANCE_KICK_BIT 0
2159#define NOHZ_STATS_KICK_BIT 1
2160
2161#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT)
2162#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT)
2163
2164#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
2165
2166#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2167
2168extern void nohz_balance_exit_idle(struct rq *rq);
2169#else
2170static inline void nohz_balance_exit_idle(struct rq *rq) { }
2171#endif
2172
2173
2174#ifdef CONFIG_SMP
2175static inline
2176void __dl_update(struct dl_bw *dl_b, s64 bw)
2177{
2178 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
2179 int i;
2180
2181 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2182 "sched RCU must be held");
2183 for_each_cpu_and(i, rd->span, cpu_active_mask) {
2184 struct rq *rq = cpu_rq(i);
2185
2186 rq->dl.extra_bw += bw;
2187 }
2188}
2189#else
2190static inline
2191void __dl_update(struct dl_bw *dl_b, s64 bw)
2192{
2193 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
2194
2195 dl->extra_bw += bw;
2196}
2197#endif
2198
2199
2200#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2201struct irqtime {
2202 u64 total;
2203 u64 tick_delta;
2204 u64 irq_start_time;
2205 struct u64_stats_sync sync;
2206};
2207
2208DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
2209
2210
2211
2212
2213
2214
2215static inline u64 irq_time_read(int cpu)
2216{
2217 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
2218 unsigned int seq;
2219 u64 total;
2220
2221 do {
2222 seq = __u64_stats_fetch_begin(&irqtime->sync);
2223 total = irqtime->total;
2224 } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
2225
2226 return total;
2227}
2228#endif
2229
2230#ifdef CONFIG_CPU_FREQ
2231DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated, so that cpufreq governors (e.g. schedutil) can re-evaluate
 * the CPU's performance state.  It may only be called from RCU-sched
 * read-side critical sections.
 */
2255static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2256{
2257 struct update_util_data *data;
2258
2259 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2260 cpu_of(rq)));
2261 if (data)
2262 data->func(data, rq_clock(rq), flags);
2263}
2264#else
2265static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
2266#endif
2267
2268#ifdef CONFIG_UCLAMP_TASK
2269unsigned int uclamp_eff_value(struct task_struct *p, unsigned int clamp_id);
2270
2271static __always_inline
2272unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
2273 struct task_struct *p)
2274{
2275 unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
2276 unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
2277
2278 if (p) {
2279 min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
2280 max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
2281 }
2282
2283
2284
2285
2286
2287
2288 if (unlikely(min_util >= max_util))
2289 return min_util;
2290
2291 return clamp(util, min_util, max_util);
2292}
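
/*
 * Worked example (illustrative only): with rq clamps UCLAMP_MIN == 200 and
 * UCLAMP_MAX == 512, a raw utilization of 700 is clamped down to 512, while
 * a raw utilization of 100 is boosted up to 200. If min >= max (e.g. after
 * the rq went idle), the min value wins.
 */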
2293
2294static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
2295{
2296 return uclamp_util_with(rq, util, NULL);
2297}
2298#else
2299static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
2300 struct task_struct *p)
2301{
2302 return util;
2303}
2304static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
2305{
2306 return util;
2307}
2308#endif
2309
2310#ifdef arch_scale_freq_capacity
2311# ifndef arch_scale_freq_invariant
2312# define arch_scale_freq_invariant() true
2313# endif
2314#else
2315# define arch_scale_freq_invariant() false
2316#endif
2317
2318#ifdef CONFIG_SMP
2319static inline unsigned long capacity_orig_of(int cpu)
2320{
2321 return cpu_rq(cpu)->cpu_capacity_orig;
2322}
2323#endif

/**
 * enum schedutil_type - CPU utilization type
 * @FREQUENCY_UTIL:	Utilization used to select frequency
 * @ENERGY_UTIL:	Utilization used during energy calculation
 *
 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
 * need to be aggregated differently depending on the usage made of them, so
 * callers of schedutil_cpu_util() pass the type of estimate they expect.
 */
enum schedutil_type {
	FREQUENCY_UTIL,
	ENERGY_UTIL,
};
2339
2340#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
2341
2342unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
2343 unsigned long max, enum schedutil_type type,
2344 struct task_struct *p);
2345
2346static inline unsigned long cpu_bw_dl(struct rq *rq)
2347{
2348 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
2349}
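
/*
 * Illustrative example only: rq->dl.running_bw is kept in BW_SHIFT (20-bit)
 * fixed point, so a CPU whose deadline tasks reserve half of its time has
 * running_bw == 1 << 19 and cpu_bw_dl() returns (1 << 19) * 1024 >> 20 == 512,
 * i.e. half of SCHED_CAPACITY_SCALE.
 */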
2350
2351static inline unsigned long cpu_util_dl(struct rq *rq)
2352{
2353 return READ_ONCE(rq->avg_dl.util_avg);
2354}
2355
2356static inline unsigned long cpu_util_cfs(struct rq *rq)
2357{
2358 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2359
2360 if (sched_feat(UTIL_EST)) {
2361 util = max_t(unsigned long, util,
2362 READ_ONCE(rq->cfs.avg.util_est.enqueued));
2363 }
2364
2365 return util;
2366}
2367
2368static inline unsigned long cpu_util_rt(struct rq *rq)
2369{
2370 return READ_ONCE(rq->avg_rt.util_avg);
2371}
2372#else
2373static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
2374 unsigned long max, enum schedutil_type type,
2375 struct task_struct *p)
2376{
2377 return 0;
2378}
2379#endif
2380
2381#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
2382static inline unsigned long cpu_util_irq(struct rq *rq)
2383{
2384 return rq->avg_irq.util_avg;
2385}
2386
2387static inline
2388unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2389{
2390 util *= (max - irq);
2391 util /= max;
2392
2393 return util;
2394
2395}
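
/*
 * Worked example (illustrative only): with max == 1024, util == 800 and
 * irq == 128 (12.5% of the CPU stolen by interrupt handling),
 * scale_irq_capacity() returns 800 * (1024 - 128) / 1024 == 700.
 */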
2396#else
2397static inline unsigned long cpu_util_irq(struct rq *rq)
2398{
2399 return 0;
2400}
2401
2402static inline
2403unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2404{
2405 return util;
2406}
2407#endif
2408
2409#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2410
2411#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
2412
2413DECLARE_STATIC_KEY_FALSE(sched_energy_present);
2414
2415static inline bool sched_energy_enabled(void)
2416{
2417 return static_branch_unlikely(&sched_energy_present);
2418}
2419
2420#else
2421
2422#define perf_domain_span(pd) NULL
2423static inline bool sched_energy_enabled(void) { return false; }
2424
2425#endif
2426