1
2#include <linux/sched.h>
3#include <linux/mutex.h>
4#include <linux/spinlock.h>
5#include <linux/stop_machine.h>
6
7#include "cpupri.h"
8
/* Set to 1 once the scheduler is fully up and running. */
extern __read_mostly int scheduler_running;

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_RT_PRIO+39 ],
 * and back.
 */
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
/* Nice value of a task, derived from its static priority. */
#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
19
20
21
22
23
24
/*
 * 'User priority' is the priority minus the RT range, i.e. the nice
 * value shifted to [ 0 ... 39 ] (MAX_RT_PRIO maps to 0).
 */
#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))

/* Helper for converting nanosecond timing to jiffy resolution. */
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/* Load weight assigned to a nice-0 task. */
#define NICE_0_LOAD SCHED_LOAD_SCALE
#define NICE_0_SHIFT SCHED_LOAD_SHIFT

/*
 * Single value that denotes runtime == period, i.e. unlimited
 * bandwidth time.
 */
#define RUNTIME_INF ((u64)~0ULL)
45
/* Return 1 if @policy is a realtime scheduling policy, 0 otherwise. */
static inline int rt_policy(int policy)
{
 return policy == SCHED_FIFO || policy == SCHED_RR;
}
52
53static inline int task_has_rt_policy(struct task_struct *p)
54{
55 return rt_policy(p->policy);
56}
57
58
59
60
/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
 struct list_head queue[MAX_RT_PRIO];
};
65
/* Bandwidth control for the realtime scheduling class. */
struct rt_bandwidth {
 /* nests inside the rq lock: */
 raw_spinlock_t rt_runtime_lock;
 ktime_t rt_period;   /* replenishment period */
 u64 rt_runtime;      /* runtime allowed per period */
 struct hrtimer rt_period_timer; /* fires once per rt_period */
};
73
/* Serializes sched-domain (re)construction. */
extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

/* List of all task groups in the system. */
extern struct list_head task_groups;
84
/* CFS (quota-based) bandwidth control for a task group. */
struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
 raw_spinlock_t lock;
 ktime_t period;       /* quota replenishment period */
 u64 quota, runtime;   /* quota per period / remaining runtime */
 s64 hierarchal_quota; /* sic: historical misspelling of "hierarchical" */
 u64 runtime_expires;

 int idle, timer_active;
 struct hrtimer period_timer, slack_timer;
 /* cfs_rqs currently throttled against this bandwidth pool */
 struct list_head throttled_cfs_rq;

 /* statistics */
 int nr_periods, nr_throttled;
 u64 throttled_time;
#endif
};
102
103
/* task group related information */
struct task_group {
 struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
 /* schedulable entities of this group on each cpu */
 struct sched_entity **se;
 /* runqueue "owned" by this group on each cpu */
 struct cfs_rq **cfs_rq;
 unsigned long shares;

 atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
 struct sched_rt_entity **rt_se;
 struct rt_rq **rt_rq;

 struct rt_bandwidth rt_bandwidth;
#endif

 struct rcu_head rcu;
 struct list_head list;

 struct task_group *parent;
 struct list_head siblings;
 struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
 struct autogroup *autogroup;
#endif

 struct cfs_bandwidth cfs_bandwidth;
};
137
#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems, so constrain
 * group shares to [MIN_SHARES, MAX_SHARES].
 */
#define MIN_SHARES (1UL << 1)
#define MAX_SHARES (1UL << 18)
#endif

/* The group every task belongs to unless placed elsewhere. */
extern struct task_group root_task_group;

/* Visitor callback type for task-group tree walks. */
typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
 tg_visitor down, tg_visitor up, void *data);
168
169static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
170{
171 return walk_tg_tree_from(&root_task_group, down, up, data);
172}
173
/* No-op tree visitor, usable as @down or @up in walk_tg_tree(). */
extern int tg_nop(struct task_group *tg, void *data);

/* Fair-group setup/teardown helpers (kernel/sched/fair.c). */
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 struct sched_entity *se, int cpu,
 struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

/* CFS bandwidth helpers. */
extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

/* RT-group setup/teardown helpers (kernel/sched/rt.c). */
extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 struct sched_rt_entity *rt_se, int cpu,
 struct sched_rt_entity *parent);

#else /* !CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif
200
201
/* CFS-related fields in a runqueue */
struct cfs_rq {
 struct load_weight load;
 unsigned int nr_running, h_nr_running;

 u64 exec_clock;
 u64 min_vruntime;
#ifndef CONFIG_64BIT
 /* copy used for lockless reads on 32-bit (see min_vruntime updates) */
 u64 min_vruntime_copy;
#endif

 struct rb_root tasks_timeline;
 struct rb_node *rb_leftmost;

 /*
  * 'curr' points to currently running entity on this cfs_rq.
  * It is set to NULL otherwise (i.e when none are currently running).
  */
 struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
 unsigned int nr_spread_over;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */

 /*
  * leaf cfs_rqs are those that hold tasks (lowest schedulable entity
  * in a hierarchy). on_list tracks membership of leaf_cfs_rq_list.
  */
 int on_list;
 struct list_head leaf_cfs_rq_list;
 struct task_group *tg; /* group that "owns" this runqueue */

#ifdef CONFIG_SMP
 /* h_load = weight * f(tg): hierarchical load of this group */
 unsigned long h_load;

 /*
  * Maintaining per-cpu shares distribution for group scheduling:
  * load_stamp/load_last/load_unacc_exec_time track when load_avg
  * was last folded; load_contribution is this cpu's part of the
  * group's load_weight.
  */
 u64 load_avg;
 u64 load_period;
 u64 load_stamp, load_last, load_unacc_exec_time;

 unsigned long load_contribution;
#endif
#ifdef CONFIG_CFS_BANDWIDTH
 int runtime_enabled;
 u64 runtime_expires;
 s64 runtime_remaining;

 u64 throttled_timestamp;
 int throttled, throttle_count;
 struct list_head throttled_list;
#endif
#endif
};
273
274static inline int rt_bandwidth_enabled(void)
275{
276 return sysctl_sched_rt_runtime >= 0;
277}
278
279
/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
 struct rt_prio_array active;
 unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 struct {
  int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
  int next; /* next highest */
#endif
 } highest_prio;
#endif
#ifdef CONFIG_SMP
 unsigned long rt_nr_migratory;
 unsigned long rt_nr_total;
 int overloaded;
 struct plist_head pushable_tasks;
#endif
 int rt_throttled;
 u64 rt_time;
 u64 rt_runtime;

 /* Nests inside the rq lock: */
 raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
 unsigned long rt_nr_boosted;

 struct rq *rq;
 struct list_head leaf_rt_rq_list;
 struct task_group *tg;
#endif
};
311
#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define
 * per-domain variables. Each exclusive cpuset essentially defines an
 * island domain by fully partitioning the member cpus from any other
 * cpuset.
 */
struct root_domain {
 atomic_t refcount;
 atomic_t rto_count;
 struct rcu_head rcu;
 cpumask_var_t span;
 cpumask_var_t online;

 /*
  * The "RT overload" flag: it gets set if a CPU has more than
  * one runnable RT task.
  */
 cpumask_var_t rto_mask;
 struct cpupri cpupri;
};

/* Default root domain, used until/unless cpusets carve up the system. */
extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */
340
341
342
343
344
345
346
347
/*
 * This is the main, per-CPU runqueue data structure.
 */
struct rq {
 /* runqueue lock: */
 raw_spinlock_t lock;

 /*
  * nr_running and cpu_load should be in the same cacheline because
  * remote CPUs use both these fields when doing load calculation.
  */
 unsigned int nr_running;
 #define CPU_LOAD_IDX_MAX 5
 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
 u64 nohz_stamp;
 unsigned long nohz_flags;
#endif
 int skip_clock_update;

 /* capture load from *all* tasks on this cpu: */
 struct load_weight load;
 unsigned long nr_load_updates;
 u64 nr_switches;

 struct cfs_rq cfs;
 struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
 /* list of leaf cfs_rq on this cpu: */
 struct list_head leaf_cfs_rq_list;
#ifdef CONFIG_SMP
 unsigned long h_load_throttle;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
 struct list_head leaf_rt_rq_list;
#endif

 /*
  * This is part of a global counter where only the total sum
  * over all CPUs matters. A task can increase this counter on
  * one CPU and if it got migrated afterwards it may decrease
  * it on another CPU.
  */
 unsigned long nr_uninterruptible;

 struct task_struct *curr, *idle, *stop;
 unsigned long next_balance;
 struct mm_struct *prev_mm;

 u64 clock;
 u64 clock_task;

 atomic_t nr_iowait;

#ifdef CONFIG_SMP
 struct root_domain *rd;
 struct sched_domain *sd;

 unsigned long cpu_power;

 unsigned char idle_balance;

 /* For active balancing */
 int post_schedule;
 int active_balance;
 int push_cpu;
 struct cpu_stop_work active_balance_work;
 /* cpu of this runqueue: */
 int cpu;
 int online;

 struct list_head cfs_tasks;

 u64 rt_avg;
 u64 age_stamp;
 u64 idle_stamp;
 u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
 u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
 u64 prev_steal_time_rq;
#endif

 /* calc_load related fields */
 unsigned long calc_load_update;
 long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
 int hrtick_csd_pending;
 struct call_single_data hrtick_csd;
#endif
 struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
 /* latency stats */
 struct sched_info rq_sched_info;
 unsigned long long rq_cpu_time;

 /* sys_sched_yield() stats */
 unsigned int yld_count;

 /* schedule() stats */
 unsigned int sched_count;
 unsigned int sched_goidle;

 /* try_to_wake_up() stats */
 unsigned int ttwu_count;
 unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
 struct llist_head wake_list;
#endif
};
471
/* CPU number this runqueue belongs to (always 0 on UP). */
static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
 return rq->cpu;
#else
 return 0;
#endif
}

DECLARE_PER_CPU(struct rq, runqueues);

/* Runqueue accessors. */
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() (&__get_cpu_var(runqueues))
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() (&__raw_get_cpu_var(runqueues))
488
#ifdef CONFIG_SMP

/* Dereference rq->sd etc., asserting RCU or sched_domains_mutex held. */
#define rcu_dereference_check_sched_domain(p) \
 rcu_dereference_check((p), \
  lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state
 * transition. Iterate from the base domain up to the top.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
  __sd; __sd = __sd->parent)

/* Iterate from a given domain down towards the base domain. */
#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
516
517static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
518{
519 struct sched_domain *sd, *hsd = NULL;
520
521 for_each_domain(cpu, sd) {
522 if (!(sd->flags & flag))
523 break;
524 hsd = sd;
525 }
526
527 return hsd;
528}
529
/* Per-cpu cache of the highest domain sharing the last-level cache. */
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_id);

extern int group_balance_cpu(struct sched_group *sg);

#endif /* CONFIG_SMP */
536
537#include "stats.h"
538#include "auto_group.h"
539
#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * Reads the cached p->sched_task_group pointer directly; callers are
 * responsible for holding whatever lock keeps the group stable.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
 return p->sched_task_group;
}
559
560
/* Change a task's per-cpu cfs_rq/rt_rq and parent entity to @cpu's. */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
 struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
 p->se.cfs_rq = tg->cfs_rq[cpu];
 p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
 p->rt.rt_rq = tg->rt_rq[cpu];
 p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
 return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */
587
/* Move task @p's scheduler state to @cpu; caller holds the rq lock. */
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
 set_task_rq(p, cpu);
#ifdef CONFIG_SMP
 /*
  * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
  * successfully executed on another CPU. We must ensure that updates
  * of per-task data have been completed by this moment.
  */
 smp_wmb();
 task_thread_info(p)->cpu = cpu;
#endif
}
601
/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

/* Expand features.h into an enum of __SCHED_FEAT_* indices. */
#define SCHED_FEAT(name, enabled) \
 __SCHED_FEAT_##name ,

enum {
#include "features.h"
 __SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
/*
 * With jump labels, each feature test compiles to a patchable branch
 * whose default direction matches the feature's compile-time default.
 */
static __always_inline bool static_branch__true(struct static_key *key)
{
 return static_key_true(key);
}

static __always_inline bool static_branch__false(struct static_key *key)
{
 return static_key_false(key);
}

#define SCHED_FEAT(name, enabled) \
static __always_inline bool static_branch_##name(struct static_key *key) \
{ \
 return static_branch__##enabled(key); \
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
/* Fall back to testing a bit in the features bitmask. */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
650
651static inline u64 global_rt_period(void)
652{
653 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
654}
655
656static inline u64 global_rt_runtime(void)
657{
658 if (sysctl_sched_rt_runtime < 0)
659 return RUNTIME_INF;
660
661 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
662}
663
664
665
666static inline int task_current(struct rq *rq, struct task_struct *p)
667{
668 return rq->curr == p;
669}
670
/*
 * Is @p running (or about to run) on a CPU?  On SMP this uses ->on_cpu,
 * which stays set across the context-switch window; on UP it reduces
 * to task_current().
 */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
 return p->on_cpu;
#else
 return task_current(rq, p);
#endif
}

/* Arch hooks around context switch; default to no-ops. */
#ifndef prepare_arch_switch
# define prepare_arch_switch(next) do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev) do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch() do { } while (0)
#endif
690
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
 /*
  * We can optimise this out completely for !SMP, because the
  * SMP rebalancing from interrupt is the only thing that cares
  * here.
  */
 next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
 /*
  * After ->on_cpu is cleared, the task can be moved to a different
  * CPU. We must ensure this doesn't happen until the switch is
  * completely finished.
  */
 smp_wmb();
 prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
 /* this is a valid case when another task releases the spinlock */
 rq->lock.owner = current;
#endif
 /*
  * If we are tracking spinlock dependencies then we have to
  * fix up the runqueue lock - which gets 'carried over' from
  * prev into current:
  */
 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

 raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
 /*
  * We can optimise this out completely for !SMP, because the
  * SMP rebalancing from interrupt is the only thing that cares
  * here.
  */
 next->on_cpu = 1;
#endif
 raw_spin_unlock(&rq->lock);
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
 /*
  * After ->on_cpu is cleared, the task can be moved to a different
  * CPU. We must ensure this doesn't happen until the switch is
  * completely finished.
  */
 smp_wmb();
 prev->on_cpu = 0;
#endif
 local_irq_enable();
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
757
758
759static inline void update_load_add(struct load_weight *lw, unsigned long inc)
760{
761 lw->weight += inc;
762 lw->inv_weight = 0;
763}
764
765static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
766{
767 lw->weight -= dec;
768 lw->inv_weight = 0;
769}
770
771static inline void update_load_set(struct load_weight *lw, unsigned long w)
772{
773 lw->weight = w;
774 lw->inv_weight = 0;
775}
776
777
778
779
780
781
782
783
784
785
/* SCHED_IDLE tasks get minimal weight (and its precomputed inverse). */
#define WEIGHT_IDLEPRIO 3
#define WMULT_IDLEPRIO 1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 *
 * Index: nice level + 20, i.e. [0] is nice -20, [20] is nice 0.
 */
static const int prio_to_weight[40] = {
 88761, 71755, 56483, 46273, 36291,
 29154, 23254, 18705, 14949, 11916,
 9548, 7620, 6100, 4904, 3906,
 3121, 2501, 1991, 1586, 1277,
 1024, 820, 655, 526, 423,
 335, 272, 215, 172, 137,
 110, 87, 70, 56, 45,
 36, 29, 23, 18, 15,
};
811
812
813
814
815
816
817
818
/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications.
 */
static const u32 prio_to_wmult[40] = {
 48388, 59856, 76040, 92818, 118348,
 147320, 184698, 229616, 287308, 360437,
 449829, 563644, 704093, 875809, 1099582,
 1376151, 1717300, 2157191, 2708050, 3363326,
 4194304, 5237765, 6557202, 8165337, 10153587,
 12820798, 15790321, 19976592, 24970740, 31350126,
 39045157, 49367440, 61356676, 76695844, 95443717,
 119304647, 148102320, 186737708, 238609294, 286331153,
};
829
830
/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
 CPUACCT_STAT_USER,   /* ... user mode */
 CPUACCT_STAT_SYSTEM, /* ... kernel mode */

 CPUACCT_STAT_NSTATS,
};

/*
 * Scheduling classes form a singly-linked list, highest priority
 * (stop class) first.
 */
#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
 for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;
847
848
#ifdef CONFIG_SMP

/* Load-balancing entry points (kernel/sched/fair.c). */
extern void trigger_load_balance(struct rq *rq, int cpu);
extern void idle_balance(int this_cpu, struct rq *this_rq);

#else /* !CONFIG_SMP */

/* Nothing to balance on UP. */
static inline void idle_balance(int cpu, struct rq *rq)
{
}

#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
extern void update_group_power(struct sched_domain *sd, int cpu);
extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern void update_idle_cpu_load(struct rq *this_rq);
877
#ifdef CONFIG_CGROUP_CPUACCT
#include <linux/cgroup.h>

/* Track cpu usage of a group of tasks and its child groups. */
struct cpuacct {
 struct cgroup_subsys_state css;
 /* cpuusage holds pointer to a u64-type object on every cpu */
 u64 __percpu *cpuusage;
 struct kernel_cpustat __percpu *cpustat;
};

extern struct cgroup_subsys cpuacct_subsys;
extern struct cpuacct root_cpuacct;

/* Return cpu accounting group corresponding to this container. */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
  struct cpuacct, css);
}

/* Return cpu accounting group to which this task belongs. */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
  struct cpuacct, css);
}

/* Parent accounting group, or NULL for the root group. */
static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
 if (!ca || !ca->css.cgroup->parent)
  return NULL;
 return cgroup_ca(ca->css.cgroup->parent);
}

extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
#else /* !CONFIG_CGROUP_CPUACCT */
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#endif
916
#ifdef CONFIG_PARAVIRT
/*
 * Convert @steal nanoseconds to whole ticks. The common sub-second
 * case is handled with the cheap iterative divide; larger values fall
 * back to a full 64-bit division.
 */
static inline u64 steal_ticks(u64 steal)
{
 if (likely(steal <= NSEC_PER_SEC))
  return __iter_div_u64_rem(steal, TICK_NSEC, &steal);

 return div_u64(steal, TICK_NSEC);
}
#endif
926
927static inline void inc_nr_running(struct rq *rq)
928{
929 rq->nr_running++;
930}
931
932static inline void dec_nr_running(struct rq *rq)
933{
934 rq->nr_running--;
935}
936
extern void update_rq_clock(struct rq *rq);

/* Enqueue/dequeue a task and update accounting (kernel/sched/core.c). */
extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

/* Scheduler tunables (see kernel/sysctl.c). */
extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
947
948static inline u64 sched_avg_period(void)
949{
950 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
951}
952
953#ifdef CONFIG_SCHED_HRTICK
954
955
956
957
958
959
960static inline int hrtick_enabled(struct rq *rq)
961{
962 if (!sched_feat(HRTICK))
963 return 0;
964 if (!cpu_active(cpu_of(rq)))
965 return 0;
966 return hrtimer_is_hres_active(&rq->hrtick_timer);
967}
968
void hrtick_start(struct rq *rq, u64 delay);

#else /* !CONFIG_SCHED_HRTICK */

/* hrtick never enabled without CONFIG_SCHED_HRTICK. */
static inline int hrtick_enabled(struct rq *rq)
{
 return 0;
}

#endif /* CONFIG_SCHED_HRTICK */
979
#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
/* Fold @rt_delta of RT execution time into rq->rt_avg and age it. */
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
 rq->rt_avg += rt_delta;
 sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
993
#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 __releases(this_rq->lock)
 __acquires(busiest->lock)
 __acquires(this_rq->lock)
{
 raw_spin_unlock(&this_rq->lock);
 double_rq_lock(this_rq, busiest);

 return 1;
}
1017
#else /* !CONFIG_PREEMPT */

/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by acquiring the rq->locks in an out-of-order manner when
 * the lower-address lock is already held; deadlock is avoided by
 * always taking the lower-address lock first on contention.
 *
 * Returns 1 if this_rq->lock was dropped and re-taken (callers must
 * then revalidate any state read under it), 0 otherwise.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 __releases(this_rq->lock)
 __acquires(busiest->lock)
 __acquires(this_rq->lock)
{
 int ret = 0;

 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
  if (busiest < this_rq) {
   raw_spin_unlock(&this_rq->lock);
   raw_spin_lock(&busiest->lock);
   raw_spin_lock_nested(&this_rq->lock,
        SINGLE_DEPTH_NESTING);
   ret = 1;
  } else
   raw_spin_lock_nested(&busiest->lock,
        SINGLE_DEPTH_NESTING);
 }
 return ret;
}

#endif /* CONFIG_PREEMPT */
1048
1049
1050
1051
/*
 * double_lock_balance - lock the busiest runqueue; this_rq is locked
 * already. Interrupts must be disabled by the caller.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
 if (unlikely(!irqs_disabled())) {
  /* printk() doesn't work well under rq->lock */
  raw_spin_unlock(&this_rq->lock);
  BUG_ON(1);
 }

 return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 __releases(busiest->lock)
{
 raw_spin_unlock(&busiest->lock);
 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
1069
1070
1071
1072
1073
1074
1075
/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 *
 * Deadlock is avoided by always taking the lower-address lock first.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
 __acquires(rq1->lock)
 __acquires(rq2->lock)
{
 BUG_ON(!irqs_disabled());
 if (rq1 == rq2) {
  raw_spin_lock(&rq1->lock);
  __acquire(rq2->lock); /* Fake it out ;) */
 } else {
  if (rq1 < rq2) {
   raw_spin_lock(&rq1->lock);
   raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
  } else {
   raw_spin_lock(&rq2->lock);
   raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
  }
 }
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 __releases(rq1->lock)
 __releases(rq2->lock)
{
 raw_spin_unlock(&rq1->lock);
 if (rq1 != rq2)
  raw_spin_unlock(&rq2->lock);
 else
  __release(rq2->lock);
}
1111
#else /* !CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * On UP both "runqueues" must be the same one, so only a single lock
 * is ever taken.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
 __acquires(rq1->lock)
 __acquires(rq2->lock)
{
 BUG_ON(!irqs_disabled());
 BUG_ON(rq1 != rq2);
 raw_spin_lock(&rq1->lock);
 __acquire(rq2->lock); /* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * UP counterpart of the SMP version above.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 __releases(rq1->lock)
 __releases(rq2->lock)
{
 BUG_ON(rq1 != rq2);
 raw_spin_unlock(&rq1->lock);
 __release(rq2->lock);
}

#endif /* CONFIG_SMP */
1146
/* CFS rbtree accessors and debug output (kernel/sched/fair.c, rt.c). */
extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);

extern void account_cfs_bandwidth_used(int enabled, int was_enabled);

#ifdef CONFIG_NO_HZ
/* Bits in rq->nohz_flags. */
enum rq_nohz_flag_bits {
 NOHZ_TICK_STOPPED,
 NOHZ_BALANCE_KICK,
 NOHZ_IDLE,
};

#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
#endif
1166
#ifdef CONFIG_IRQ_TIME_ACCOUNTING

DECLARE_PER_CPU(u64, cpu_hardirq_time);
DECLARE_PER_CPU(u64, cpu_softirq_time);

#ifndef CONFIG_64BIT
/*
 * On 32-bit the two u64 counters cannot be read atomically, so writers
 * bump a seqcount around updates and readers retry on a torn read.
 */
DECLARE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
 __this_cpu_inc(irq_time_seq.sequence);
 smp_wmb(); /* order the sequence bump before the counter updates */
}

static inline void irq_time_write_end(void)
{
 smp_wmb(); /* order the counter updates before the sequence bump */
 __this_cpu_inc(irq_time_seq.sequence);
}

/* Consistent snapshot of hard+soft irq time accumulated on @cpu. */
static inline u64 irq_time_read(int cpu)
{
 u64 irq_time;
 unsigned seq;

 do {
  seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
  irq_time = per_cpu(cpu_softirq_time, cpu) +
      per_cpu(cpu_hardirq_time, cpu);
 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

 return irq_time;
}
#else /* CONFIG_64BIT */
/* 64-bit loads of the counters are atomic; no seqcount needed. */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1215
1216