#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>

#include "cpupri.h"

extern __read_mostly int scheduler_running;
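/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */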
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
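/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it is a [ 0 ... 39 ] range.
 */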
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
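/* Helper for converting nanosecond timing to jiffy resolution. */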
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
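/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */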
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}
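/*
 * This is the priority-queue data structure of the RT scheduling class:
 */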
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	raw_spinlock_t rt_runtime_lock;
	ktime_t rt_period;
	u64 rt_runtime;
	struct hrtimer rt_period_timer;
};

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchal_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};
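/* Task group related information */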
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
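/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so group shares are clamped to this range to keep
 * per-entity weights from collapsing to zero.
 */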
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
extern struct task_group root_task_group;

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);
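/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */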
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */
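/* CFS-related fields in a runqueue */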
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rq's in a cpu.
	 * This list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * Maintaining per-cpu shares distribution for group scheduling:
	 *
	 * load_stamp is the last time we updated the load average
	 * load_last is the last time we saw a load update
	 * load_unacc_exec_time is currently unaccounted execution time
	 */
	u64 load_avg;
	u64 load_period;
	u64 load_stamp, load_last, load_unacc_exec_time;

	unsigned long load_contribution;
#endif /* CONFIG_SMP */
#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_timestamp;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}
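/* Real-Time classes' related field in a runqueue: */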
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;

	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP
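/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */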
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */
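/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending runqueue address.
 */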
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#ifdef CONFIG_SMP
	unsigned long h_load_throttle;
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_balance;

	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_SMP

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))
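/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */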
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
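/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */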
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_id);

extern int group_balance_cpu(struct sched_group *sg);

#endif

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED
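/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_subsys_state() and friends because the cgroup
 * subsystem changes that value before the cgroup_subsys::attach() method
 * is called, therefore we cannot pin it and might observe the wrong value.
 *
 * The value is therefore cached in p->sched_task_group, which is updated
 * under the task's runqueue lock.
 */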
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
static __always_inline bool static_branch__true(struct static_key *key)
{
	return static_key_true(key);
}

static __always_inline bool static_branch__false(struct static_key *key)
{
	return static_key_false(key);
}

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_branch__##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	raw_spin_unlock_irq(&rq->lock);
#else
	raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}
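/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */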
#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765
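/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */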
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
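/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */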
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	CPUACCT_STAT_NSTATS,
};

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

#ifdef CONFIG_SMP

extern void trigger_load_balance(struct rq *rq, int cpu);
extern void idle_balance(int this_cpu, struct rq *this_rq);

#else

static inline void idle_balance(int cpu, struct rq *rq)
{
}

#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
extern void update_group_power(struct sched_domain *sd, int cpu);
extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern void update_idle_cpu_load(struct rq *this_rq);

#ifdef CONFIG_CGROUP_CPUACCT
#include <linux/cgroup.h>

/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	struct kernel_cpustat __percpu *cpustat;
};

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
	if (!ca || !ca->css.cgroup->parent)
		return NULL;
	return cgroup_ca(ca->css.cgroup->parent);
}

extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#endif

static inline void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;
}

static inline void dec_nr_running(struct rq *rq)
{
	rq->nr_running--;
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK
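/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */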
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
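/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations.  This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below.  However, it
 * also adds more overhead and therefore may reduce throughput.
 */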
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
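/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by taking the busiest rq->lock opportunistically: this_rq->lock
 * is only dropped and re-taken when the trylock fails and the runqueue
 * address ordering would otherwise allow a deadlock.
 */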
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
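/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */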
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}
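/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */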
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif /* CONFIG_SMP */

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);

extern void account_cfs_bandwidth_used(int enabled, int was_enabled);

#ifdef CONFIG_NO_HZ
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
	NOHZ_IDLE,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif