#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/tick.h>

#include "cpupri.h"
#include "cpuacct.h"

struct rq;

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern long calc_load_fold_active(struct rq *this_rq);
extern void update_cpu_load_active(struct rq *this_rq);

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
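
/*
 * Worked example (assuming the usual MAX_RT_PRIO == 100): nice 0 maps to
 * static priority 120, nice -20 to 100 and nice 19 to 139, so the nice
 * range [-20..19] occupies priorities [100..139] just above the RT range.
 * PRIO_TO_NICE() is the exact inverse.
 */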

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible
 * change and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs of increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify
 * them.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
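
/*
 * Worked example: with SCHED_LOAD_RESOLUTION == 0, SCHED_LOAD_SHIFT is 10,
 * so SCHED_LOAD_SCALE and NICE_0_LOAD are both 1 << 10 == 1024 and the
 * scale_load()/scale_load_down() pair are no-ops. With the (currently
 * disabled) extra 10 bits of resolution NICE_0_LOAD would become 1 << 20.
 */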

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};
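
/*
 * Usage sketch (illustrative): the RT class keeps one FIFO list per
 * priority plus a bitmap of non-empty lists, so picking the next runnable
 * entity is a find-first-bit followed by a list-head lookup, roughly:
 *
 *	int idx = sched_find_first_bit(array->bitmap);
 *	struct list_head *queue = array->queue + idx;
 */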

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchal_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	atomic_long_t load_avg;
	atomic_t runnable_avg;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity (and hence the shares value
 * of a task group) must not be too large.
 * (The default weight is 1024 - so there's no practical limitation
 *  from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up
 * when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
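
/*
 * Usage sketch (illustrative): pass tg_nop for whichever direction needs
 * no work, e.g. to run a hypothetical visit_tg() callback on the way
 * down only:
 *
 *	rcu_read_lock();
 *	walk_tg_tree(visit_tg, tg_nop, NULL);
 *	rcu_read_unlock();
 */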

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
#endif

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS Load tracking
	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
	 * This allows for the description of both thread and group usage (in
	 * the FAIR_GROUP_SCHED case).
	 */
	unsigned long runnable_load_avg, blocked_load_avg;
	atomic64_t decay_counter;
	u64 last_decay;
	atomic_long_t removed_load;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* Required to track per-cpu representation of a task_group */
	u32 tg_runnable_contrib;
	unsigned long tg_load_contrib;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity
	 * in a hierarchy). Non-leaf cfs_rqs hold other higher schedulable
	 * entities (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rq's in a cpu.
	 * This list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ_COMMON
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

	struct sched_avg avg;
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))
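
/*
 * Usage sketch (illustrative): most scheduler code resolves a runqueue
 * through these accessors and manipulates it under rq->lock, e.g.:
 *
 *	struct rq *rq = cpu_rq(cpu);
 *	raw_spin_lock(&rq->lock);
 *	...
 *	raw_spin_unlock(&rq->lock);
 */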

static inline u64 rq_clock(struct rq *rq)
{
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	return rq->clock_task;
}
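
/*
 * Editorial note: rq->clock advances with the sched clock, while
 * rq->clock_task additionally excludes time consumed by IRQ and paravirt
 * steal accounting (see the CONFIG_IRQ_TIME_ACCOUNTING fields above), so
 * it is the clock to charge to tasks. Both are meant to be read with
 * rq->lock held.
 */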

#ifdef CONFIG_SMP

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}
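
/*
 * Illustrative use: the sd_llc pointers below are derived by looking up
 * the widest domain that still shares a last-level cache, roughly:
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 */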

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);

struct sched_group_power {
	atomic_t ref;
	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int power, power_orig;
	unsigned long next_update;
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_power *sgp;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the
 * domain tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgp->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_subsys_state() and friends because the cgroup
 * subsystem changes that value before the cgroup_subsys::attach() method
 * is called, therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates
	 * of per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
static __always_inline bool static_branch__true(struct static_key *key)
{
	return static_key_true(key);
}

static __always_inline bool static_branch__false(struct static_key *key)
{
	return static_key_false(key);
}

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_branch__##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
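
/*
 * Usage sketch (illustrative): feature bits generated from features.h are
 * tested with sched_feat(), which compiles down to a static branch when
 * jump labels are available, e.g.:
 *
 *	if (sched_feat(HRTICK))
 *		hrtick_start(rq, delay);
 */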

#ifdef CONFIG_NUMA_BALANCING
#define sched_feat_numa(x) sched_feat(x)
#ifdef CONFIG_SCHED_DEBUG
#define numabalancing_enabled sched_feat_numa(NUMA)
#else
extern bool numabalancing_enabled;
#endif /* CONFIG_SCHED_DEBUG */
#else
#define sched_feat_numa(x) (0)
#define numabalancing_enabled (0)
#endif /* CONFIG_NUMA_BALANCING */

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
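
/*
 * Worked example: with the usual defaults of sysctl_sched_rt_period ==
 * 1000000 us and sysctl_sched_rt_runtime == 950000 us, RT tasks may
 * consume at most 0.95 s of every 1 s period; writing -1 to the runtime
 * sysctl yields RUNTIME_INF, i.e. no RT throttling.
 */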

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different
	 * CPU. We must ensure this doesn't happen until the switch is
	 * completely finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
	raw_spin_unlock(&rq->lock);
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different
	 * CPU. We must ensure this doesn't happen until the switch is
	 * completely finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
	local_irq_enable();
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */
#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25;
 * if a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
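
/*
 * Worked example: each step down the table divides the weight by ~1.25,
 * e.g. nice -1 (1277) is ~1.25x nice 0 (1024), which is ~1.25x nice 1
 * (820). Two CPU-bound tasks one nice level apart therefore see roughly
 * a 55%/45% CPU split, the "10% effect" described above.
 */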

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
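
/*
 * Sanity check: for nice 0, 2^32 / 1024 == 4194304, matching the table;
 * a division by a weight can thus be replaced on hot paths by a multiply
 * with the inverse followed (roughly) by a 32-bit right shift.
 */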

#define ENQUEUE_WAKEUP		1
#define ENQUEUE_HEAD		2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0
#endif

#define DEQUEUE_SLEEP		1

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);

	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;
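
/*
 * Illustrative note: the classes form a fixed priority-ordered list,
 * stop -> rt -> fair -> idle, so a walk like the one below considers
 * higher classes first:
 *
 *	const struct sched_class *class;
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			break;
 *	}
 */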

#ifdef CONFIG_SMP

extern void update_group_power(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq, int cpu);
extern void idle_balance(int this_cpu, struct rq *this_rq);

extern void idle_enter_fair(struct rq *this_rq);
extern void idle_exit_fair(struct rq *this_rq);

#else	/* CONFIG_SMP */

static inline void idle_balance(int cpu, struct rq *rq)
{
}

#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern void update_idle_cpu_load(struct rq *this_rq);

extern void init_task_runnable_average(struct task_struct *p);

#ifdef CONFIG_PARAVIRT
static inline u64 steal_ticks(u64 steal)
{
	if (unlikely(steal > NSEC_PER_SEC))
		return div_u64(steal, TICK_NSEC);

	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
}
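
/*
 * Editorial note: both branches compute steal / TICK_NSEC; the iterative
 * __iter_div_u64_rem() avoids a full 64-bit division in the common case
 * of less than a second of steal time, where only a few subtractions are
 * needed.
 */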
#endif /* CONFIG_PARAVIRT */

static inline void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;

#ifdef CONFIG_NO_HZ_FULL
	if (rq->nr_running == 2) {
		if (tick_nohz_full_cpu(rq->cpu)) {
			/*
			 * A tick is needed if more than one task runs on a
			 * CPU; order the nr_running write against the IPI
			 * that kicks the target out of nohz mode.
			 */
			smp_wmb();
			smp_send_reschedule(rq->cpu);
		}
	}
#endif
}

static inline void dec_nr_running(struct rq *rq)
{
	rq->nr_running--;
}

static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
	rq->last_sched_tick = jiffies;
#endif
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}
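
/*
 * Worked example: with the default sysctl_sched_time_avg of 1000 (ms),
 * sched_avg_period() is 500 ms expressed in nanoseconds, i.e. the rt_avg
 * accounting below ages on a half-second period.
 */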

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations.  This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below.  However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry.  This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}
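
/*
 * Editorial note: ordering both acquisitions by ascending runqueue
 * address is what keeps the retry path deadlock-free; two CPUs that each
 * hold one lock and want the other back off to the same global order.
 * The return value tells the caller whether this_rq was dropped and
 * re-taken, i.e. whether its state must be revalidated.
 */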

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);

extern void account_cfs_bandwidth_used(int enabled, int was_enabled);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

DECLARE_PER_CPU(u64, cpu_hardirq_time);
DECLARE_PER_CPU(u64, cpu_softirq_time);

#ifndef CONFIG_64BIT
DECLARE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
	__this_cpu_inc(irq_time_seq.sequence);
	smp_wmb();
}

static inline void irq_time_write_end(void)
{
	smp_wmb();
	__this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
	u64 irq_time;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		irq_time = per_cpu(cpu_softirq_time, cpu) +
			   per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

	return irq_time;
}
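
/*
 * Editorial note on the 32-bit path: the two u64 counters cannot be read
 * atomically, so writers bump the seqcount around updates (with write
 * barriers) and irq_time_read() retries until it observes an even,
 * unchanged sequence, i.e. a tear-free snapshot of both counters.
 */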
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */