linux/kernel/sched/sched.h
   1
   2#include <linux/sched.h>
   3#include <linux/sched/sysctl.h>
   4#include <linux/sched/rt.h>
   5#include <linux/u64_stats_sync.h>
   6#include <linux/sched/deadline.h>
   7#include <linux/binfmts.h>
   8#include <linux/mutex.h>
   9#include <linux/spinlock.h>
  10#include <linux/stop_machine.h>
  11#include <linux/irq_work.h>
  12#include <linux/tick.h>
  13#include <linux/slab.h>
  14
  15#include "cpupri.h"
  16#include "cpudeadline.h"
  17#include "cpuacct.h"
  18
  19#ifdef CONFIG_SCHED_DEBUG
  20#define SCHED_WARN_ON(x)        WARN_ONCE(x, #x)
  21#else
  22#define SCHED_WARN_ON(x)        ((void)(x))
  23#endif
  24
  25struct rq;
  26struct cpuidle_state;
  27
  28/* task_struct::on_rq states: */
  29#define TASK_ON_RQ_QUEUED       1
  30#define TASK_ON_RQ_MIGRATING    2
  31
  32extern __read_mostly int scheduler_running;
  33
  34extern unsigned long calc_load_update;
  35extern atomic_long_t calc_load_tasks;
  36
  37extern void calc_global_load_tick(struct rq *this_rq);
  38extern long calc_load_fold_active(struct rq *this_rq, long adjust);
  39
  40#ifdef CONFIG_SMP
  41extern void cpu_load_update_active(struct rq *this_rq);
  42#else
  43static inline void cpu_load_update_active(struct rq *this_rq) { }
  44#endif
  45
  46/*
  47 * Helpers for converting nanosecond timing to jiffy resolution
  48 */
  49#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
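/*
 * Worked example (illustrative): with HZ == 1000 a jiffy is
 * NSEC_PER_SEC / HZ == 1,000,000 ns, so NS_TO_JIFFIES(4000000) == 4.
 */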
  50
  51/*
  52 * Increase resolution of nice-level calculations for 64-bit architectures.
  53 * The extra resolution improves shares distribution and load balancing of
   54 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
  55 * hierarchies, especially on larger systems. This is not a user-visible change
  56 * and does not change the user-interface for setting shares/weights.
  57 *
  58 * We increase resolution only if we have enough bits to allow this increased
  59 * resolution (i.e. 64bit). The costs for increasing resolution when 32bit are
  60 * pretty high and the returns do not justify the increased costs.
  61 *
  62 * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
  63 * increase coverage and consistency always enable it on 64bit platforms.
  64 */
  65#ifdef CONFIG_64BIT
  66# define NICE_0_LOAD_SHIFT      (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
  67# define scale_load(w)          ((w) << SCHED_FIXEDPOINT_SHIFT)
  68# define scale_load_down(w)     ((w) >> SCHED_FIXEDPOINT_SHIFT)
  69#else
  70# define NICE_0_LOAD_SHIFT      (SCHED_FIXEDPOINT_SHIFT)
  71# define scale_load(w)          (w)
  72# define scale_load_down(w)     (w)
  73#endif
  74
  75/*
  76 * Task weight (visible to users) and its load (invisible to users) have
  77 * independent resolution, but they should be well calibrated. We use
  78 * scale_load() and scale_load_down(w) to convert between them. The
  79 * following must be true:
  80 *
  81 *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
  82 *
  83 */
  84#define NICE_0_LOAD             (1L << NICE_0_LOAD_SHIFT)
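/*
 * Worked example (illustrative, assuming SCHED_FIXEDPOINT_SHIFT == 10 and
 * the default nice-0 weight of 1024): on 64-bit, NICE_0_LOAD_SHIFT == 20,
 * so NICE_0_LOAD == 1 << 20 == 1048576 and
 * scale_load(sched_prio_to_weight[20]) == 1024 << 10 == 1048576, which
 * satisfies the calibration requirement above. On 32-bit, scale_load() is
 * the identity and NICE_0_LOAD == 1024.
 */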
  85
  86/*
  87 * Single value that decides SCHED_DEADLINE internal math precision.
  88 * 10 -> just above 1us
  89 * 9  -> just above 0.5us
  90 */
  91#define DL_SCALE (10)
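/*
 * i.e. the internal precision is 1 << DL_SCALE ns: 1 << 10 == 1024 ns,
 * hence the "just above 1us" noted above.
 */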
  92
  93/*
  94 * These are the 'tuning knobs' of the scheduler:
  95 */
  96
  97/*
   98 * Single value that denotes runtime == period, i.e. unlimited time.
  99 */
 100#define RUNTIME_INF     ((u64)~0ULL)
 101
 102static inline int idle_policy(int policy)
 103{
 104        return policy == SCHED_IDLE;
 105}
 106static inline int fair_policy(int policy)
 107{
 108        return policy == SCHED_NORMAL || policy == SCHED_BATCH;
 109}
 110
 111static inline int rt_policy(int policy)
 112{
 113        return policy == SCHED_FIFO || policy == SCHED_RR;
 114}
 115
 116static inline int dl_policy(int policy)
 117{
 118        return policy == SCHED_DEADLINE;
 119}
 120static inline bool valid_policy(int policy)
 121{
 122        return idle_policy(policy) || fair_policy(policy) ||
 123                rt_policy(policy) || dl_policy(policy);
 124}
 125
 126static inline int task_has_rt_policy(struct task_struct *p)
 127{
 128        return rt_policy(p->policy);
 129}
 130
 131static inline int task_has_dl_policy(struct task_struct *p)
 132{
 133        return dl_policy(p->policy);
 134}
 135
 136/*
 137 * Tells if entity @a should preempt entity @b.
 138 */
 139static inline bool
 140dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
 141{
 142        return dl_time_before(a->deadline, b->deadline);
 143}
 144
 145/*
 146 * This is the priority-queue data structure of the RT scheduling class:
 147 */
 148struct rt_prio_array {
 149        DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
 150        struct list_head queue[MAX_RT_PRIO];
 151};
 152
 153struct rt_bandwidth {
 154        /* nests inside the rq lock: */
 155        raw_spinlock_t          rt_runtime_lock;
 156        ktime_t                 rt_period;
 157        u64                     rt_runtime;
 158        struct hrtimer          rt_period_timer;
 159        unsigned int            rt_period_active;
 160};
 161
 162void __dl_clear_params(struct task_struct *p);
 163
 164/*
 165 * To keep the bandwidth of -deadline tasks and groups under control
  166 * we need some place where we can:
 167 *  - store the maximum -deadline bandwidth of the system (the group);
 168 *  - cache the fraction of that bandwidth that is currently allocated.
 169 *
 170 * This is all done in the data structure below. It is similar to the
 171 * one used for RT-throttling (rt_bandwidth), with the main difference
 172 * that, since here we are only interested in admission control, we
  173 * do not decrease any runtime while the group "executes", nor do we
  174 * need a timer to replenish it.
 175 *
 176 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 177 * meaning that:
 178 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
  179 *  - dl_total_bw array contains, in the i-th element, the currently
  180 *    allocated bandwidth on the i-th CPU.
 181 * Moreover, groups consume bandwidth on each CPU, while tasks only
 182 * consume bandwidth on the CPU they're running on.
 183 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
  184 * that will be shown the next time the proc or cgroup controls are
  185 * read. It can, in turn, be changed by writing to its own
  186 * control.
 187 */
 188struct dl_bandwidth {
 189        raw_spinlock_t dl_runtime_lock;
 190        u64 dl_runtime;
 191        u64 dl_period;
 192};
 193
 194static inline int dl_bandwidth_enabled(void)
 195{
 196        return sysctl_sched_rt_runtime >= 0;
 197}
 198
 199extern struct dl_bw *dl_bw_of(int i);
 200
 201struct dl_bw {
 202        raw_spinlock_t lock;
 203        u64 bw, total_bw;
 204};
 205
 206static inline
 207void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
 208{
 209        dl_b->total_bw -= tsk_bw;
 210}
 211
 212static inline
 213void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
 214{
 215        dl_b->total_bw += tsk_bw;
 216}
 217
 218static inline
 219bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
 220{
 221        return dl_b->bw != -1 &&
 222               dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
 223}
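/*
 * Worked example (illustrative sketch; assumes to_ratio() maps a
 * runtime/period pair to a 2^20 fixed-point fraction): with the default RT
 * sysctls (950000us runtime per 1000000us period) each CPU's cap is
 * dl_b->bw ~= 0.95 << 20 ~= 996147. Admitting a task with 10ms runtime
 * every 100ms adds new_bw = to_ratio(100ms, 10ms) ~= 104857, and on a
 * 4-CPU root domain the request is rejected iff
 * 996147 * 4 < total_bw - old_bw + 104857 (old_bw == 0 for a new task).
 */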
 224
 225extern struct mutex sched_domains_mutex;
 226
 227#ifdef CONFIG_CGROUP_SCHED
 228
 229#include <linux/cgroup.h>
 230
 231struct cfs_rq;
 232struct rt_rq;
 233
 234extern struct list_head task_groups;
 235
 236struct cfs_bandwidth {
 237#ifdef CONFIG_CFS_BANDWIDTH
 238        raw_spinlock_t lock;
 239        ktime_t period;
 240        u64 quota, runtime;
 241        s64 hierarchical_quota;
 242        u64 runtime_expires;
 243
 244        int idle, period_active;
 245        struct hrtimer period_timer, slack_timer;
 246        struct list_head throttled_cfs_rq;
 247
 248        /* statistics */
 249        int nr_periods, nr_throttled;
 250        u64 throttled_time;
 251#endif
 252};
 253
 254/* task group related information */
 255struct task_group {
 256        struct cgroup_subsys_state css;
 257
 258#ifdef CONFIG_FAIR_GROUP_SCHED
 259        /* schedulable entities of this group on each cpu */
 260        struct sched_entity **se;
 261        /* runqueue "owned" by this group on each cpu */
 262        struct cfs_rq **cfs_rq;
 263        unsigned long shares;
 264
 265#ifdef  CONFIG_SMP
 266        /*
 267         * load_avg can be heavily contended at clock tick time, so put
 268         * it in its own cacheline separated from the fields above which
 269         * will also be accessed at each tick.
 270         */
 271        atomic_long_t load_avg ____cacheline_aligned;
 272#endif
 273#endif
 274
 275#ifdef CONFIG_RT_GROUP_SCHED
 276        struct sched_rt_entity **rt_se;
 277        struct rt_rq **rt_rq;
 278
 279        struct rt_bandwidth rt_bandwidth;
 280#endif
 281
 282        struct rcu_head rcu;
 283        struct list_head list;
 284
 285        struct task_group *parent;
 286        struct list_head siblings;
 287        struct list_head children;
 288
 289#ifdef CONFIG_SCHED_AUTOGROUP
 290        struct autogroup *autogroup;
 291#endif
 292
 293        struct cfs_bandwidth cfs_bandwidth;
 294};
 295
 296#ifdef CONFIG_FAIR_GROUP_SCHED
 297#define ROOT_TASK_GROUP_LOAD    NICE_0_LOAD
 298
 299/*
  300 * A weight of 0 or 1 can cause arithmetic problems.
  301 * The weight of a cfs_rq is the sum of the weights of the entities
  302 * queued on it, so the weight of an entity should not be too large,
  303 * and neither should the shares value of a task group.
 304 * (The default weight is 1024 - so there's no practical
 305 *  limitation from this.)
 306 */
 307#define MIN_SHARES      (1UL <<  1)
 308#define MAX_SHARES      (1UL << 18)
 309#endif
 310
 311typedef int (*tg_visitor)(struct task_group *, void *);
 312
 313extern int walk_tg_tree_from(struct task_group *from,
 314                             tg_visitor down, tg_visitor up, void *data);
 315
 316/*
 317 * Iterate the full tree, calling @down when first entering a node and @up when
 318 * leaving it for the final time.
 319 *
  320 * Caller must hold the RCU read lock or a sufficient equivalent.
 321 */
 322static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 323{
 324        return walk_tg_tree_from(&root_task_group, down, up, data);
 325}
 326
 327extern int tg_nop(struct task_group *tg, void *data);
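/*
 * Illustrative use (sketch; count_tg() is hypothetical, not a kernel
 * symbol). A visitor returning non-zero aborts the walk:
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &nr);
 *	rcu_read_unlock();
 */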
 328
 329extern void free_fair_sched_group(struct task_group *tg);
 330extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
 331extern void online_fair_sched_group(struct task_group *tg);
 332extern void unregister_fair_sched_group(struct task_group *tg);
 333extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 334                        struct sched_entity *se, int cpu,
 335                        struct sched_entity *parent);
 336extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 337
 338extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
 339extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 340extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 341
 342extern void free_rt_sched_group(struct task_group *tg);
 343extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
 344extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 345                struct sched_rt_entity *rt_se, int cpu,
 346                struct sched_rt_entity *parent);
 347
 348extern struct task_group *sched_create_group(struct task_group *parent);
 349extern void sched_online_group(struct task_group *tg,
 350                               struct task_group *parent);
 351extern void sched_destroy_group(struct task_group *tg);
 352extern void sched_offline_group(struct task_group *tg);
 353
 354extern void sched_move_task(struct task_struct *tsk);
 355
 356#ifdef CONFIG_FAIR_GROUP_SCHED
 357extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 358
 359#ifdef CONFIG_SMP
 360extern void set_task_rq_fair(struct sched_entity *se,
 361                             struct cfs_rq *prev, struct cfs_rq *next);
 362#else /* !CONFIG_SMP */
 363static inline void set_task_rq_fair(struct sched_entity *se,
 364                             struct cfs_rq *prev, struct cfs_rq *next) { }
 365#endif /* CONFIG_SMP */
 366#endif /* CONFIG_FAIR_GROUP_SCHED */
 367
 368#else /* CONFIG_CGROUP_SCHED */
 369
 370struct cfs_bandwidth { };
 371
 372#endif  /* CONFIG_CGROUP_SCHED */
 373
 374/* CFS-related fields in a runqueue */
 375struct cfs_rq {
 376        struct load_weight load;
 377        unsigned int nr_running, h_nr_running;
 378
 379        u64 exec_clock;
 380        u64 min_vruntime;
 381#ifndef CONFIG_64BIT
 382        u64 min_vruntime_copy;
 383#endif
 384
 385        struct rb_root tasks_timeline;
 386        struct rb_node *rb_leftmost;
 387
 388        /*
 389         * 'curr' points to currently running entity on this cfs_rq.
  390 * It is set to NULL otherwise (i.e. when none are currently running).
 391         */
 392        struct sched_entity *curr, *next, *last, *skip;
 393
 394#ifdef  CONFIG_SCHED_DEBUG
 395        unsigned int nr_spread_over;
 396#endif
 397
 398#ifdef CONFIG_SMP
 399        /*
 400         * CFS load tracking
 401         */
 402        struct sched_avg avg;
 403        u64 runnable_load_sum;
 404        unsigned long runnable_load_avg;
 405#ifdef CONFIG_FAIR_GROUP_SCHED
 406        unsigned long tg_load_avg_contrib;
 407#endif
 408        atomic_long_t removed_load_avg, removed_util_avg;
 409#ifndef CONFIG_64BIT
 410        u64 load_last_update_time_copy;
 411#endif
 412
 413#ifdef CONFIG_FAIR_GROUP_SCHED
 414        /*
 415         *   h_load = weight * f(tg)
 416         *
 417         * Where f(tg) is the recursive weight fraction assigned to
 418         * this group.
 419         */
 420        unsigned long h_load;
 421        u64 last_h_load_update;
 422        struct sched_entity *h_load_next;
 423#endif /* CONFIG_FAIR_GROUP_SCHED */
 424#endif /* CONFIG_SMP */
 425
 426#ifdef CONFIG_FAIR_GROUP_SCHED
 427        struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */
 428
 429        /*
 430         * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
 431         * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
 432         * (like users, containers etc.)
 433         *
 434         * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
 435         * list is used during load balance.
 436         */
 437        int on_list;
 438        struct list_head leaf_cfs_rq_list;
 439        struct task_group *tg;  /* group that "owns" this runqueue */
 440
 441#ifdef CONFIG_CFS_BANDWIDTH
 442        int runtime_enabled;
 443        u64 runtime_expires;
 444        s64 runtime_remaining;
 445
 446        u64 throttled_clock, throttled_clock_task;
 447        u64 throttled_clock_task_time;
 448        int throttled, throttle_count;
 449        struct list_head throttled_list;
 450#endif /* CONFIG_CFS_BANDWIDTH */
 451#endif /* CONFIG_FAIR_GROUP_SCHED */
 452};
 453
 454static inline int rt_bandwidth_enabled(void)
 455{
 456        return sysctl_sched_rt_runtime >= 0;
 457}
 458
 459/* RT IPI pull logic requires IRQ_WORK */
 460#ifdef CONFIG_IRQ_WORK
 461# define HAVE_RT_PUSH_IPI
 462#endif
 463
 464/* Real-Time classes' related field in a runqueue: */
 465struct rt_rq {
 466        struct rt_prio_array active;
 467        unsigned int rt_nr_running;
 468        unsigned int rr_nr_running;
 469#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 470        struct {
 471                int curr; /* highest queued rt task prio */
 472#ifdef CONFIG_SMP
 473                int next; /* next highest */
 474#endif
 475        } highest_prio;
 476#endif
 477#ifdef CONFIG_SMP
 478        unsigned long rt_nr_migratory;
 479        unsigned long rt_nr_total;
 480        int overloaded;
 481        struct plist_head pushable_tasks;
 482#ifdef HAVE_RT_PUSH_IPI
 483        int push_flags;
 484        int push_cpu;
 485        struct irq_work push_work;
 486        raw_spinlock_t push_lock;
 487#endif
 488#endif /* CONFIG_SMP */
 489        int rt_queued;
 490
 491        int rt_throttled;
 492        u64 rt_time;
 493        u64 rt_runtime;
 494        /* Nests inside the rq lock: */
 495        raw_spinlock_t rt_runtime_lock;
 496
 497#ifdef CONFIG_RT_GROUP_SCHED
 498        unsigned long rt_nr_boosted;
 499
 500        struct rq *rq;
 501        struct task_group *tg;
 502#endif
 503};
 504
 505/* Deadline class' related fields in a runqueue */
 506struct dl_rq {
 507        /* runqueue is an rbtree, ordered by deadline */
 508        struct rb_root rb_root;
 509        struct rb_node *rb_leftmost;
 510
 511        unsigned long dl_nr_running;
 512
 513#ifdef CONFIG_SMP
 514        /*
 515         * Deadline values of the currently executing and the
 516         * earliest ready task on this rq. Caching these facilitates
  517 * the decision whether or not a ready but not running task
 518         * should migrate somewhere else.
 519         */
 520        struct {
 521                u64 curr;
 522                u64 next;
 523        } earliest_dl;
 524
 525        unsigned long dl_nr_migratory;
 526        int overloaded;
 527
 528        /*
 529         * Tasks on this rq that can be pushed away. They are kept in
 530         * an rb-tree, ordered by tasks' deadlines, with caching
 531         * of the leftmost (earliest deadline) element.
 532         */
 533        struct rb_root pushable_dl_tasks_root;
 534        struct rb_node *pushable_dl_tasks_leftmost;
 535#else
 536        struct dl_bw dl_bw;
 537#endif
 538};
 539
 540#ifdef CONFIG_SMP
 541
 542/*
 543 * We add the notion of a root-domain which will be used to define per-domain
 544 * variables. Each exclusive cpuset essentially defines an island domain by
 545 * fully partitioning the member cpus from any other cpuset. Whenever a new
 546 * exclusive cpuset is created, we also create and attach a new root-domain
 547 * object.
 548 *
 549 */
 550struct root_domain {
 551        atomic_t refcount;
 552        atomic_t rto_count;
 553        struct rcu_head rcu;
 554        cpumask_var_t span;
 555        cpumask_var_t online;
 556
 557        /* Indicate more than one runnable task for any CPU */
 558        bool overload;
 559
 560        /*
 561         * The bit corresponding to a CPU gets set here if such CPU has more
 562         * than one runnable -deadline task (as it is below for RT tasks).
 563         */
 564        cpumask_var_t dlo_mask;
 565        atomic_t dlo_count;
 566        struct dl_bw dl_bw;
 567        struct cpudl cpudl;
 568
 569        /*
 570         * The "RT overload" flag: it gets set if a CPU has more than
 571         * one runnable RT task.
 572         */
 573        cpumask_var_t rto_mask;
 574        struct cpupri cpupri;
 575
 576        unsigned long max_cpu_capacity;
 577};
 578
 579extern struct root_domain def_root_domain;
 580
 581#endif /* CONFIG_SMP */
 582
 583/*
 584 * This is the main, per-CPU runqueue data structure.
 585 *
  586 * Locking rule: code paths that need to lock multiple runqueues
  587 * (such as the load balancing or the thread migration code) must
  588 * acquire the locks in ascending &runqueue order.
 589 */
 590struct rq {
 591        /* runqueue lock: */
 592        raw_spinlock_t lock;
 593
 594        /*
 595         * nr_running and cpu_load should be in the same cacheline because
 596         * remote CPUs use both these fields when doing load calculation.
 597         */
 598        unsigned int nr_running;
 599#ifdef CONFIG_NUMA_BALANCING
 600        unsigned int nr_numa_running;
 601        unsigned int nr_preferred_running;
 602#endif
 603        #define CPU_LOAD_IDX_MAX 5
 604        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 605#ifdef CONFIG_NO_HZ_COMMON
 606#ifdef CONFIG_SMP
 607        unsigned long last_load_update_tick;
 608#endif /* CONFIG_SMP */
 609        unsigned long nohz_flags;
 610#endif /* CONFIG_NO_HZ_COMMON */
 611#ifdef CONFIG_NO_HZ_FULL
 612        unsigned long last_sched_tick;
 613#endif
 614        /* capture load from *all* tasks on this cpu: */
 615        struct load_weight load;
 616        unsigned long nr_load_updates;
 617        u64 nr_switches;
 618
 619        struct cfs_rq cfs;
 620        struct rt_rq rt;
 621        struct dl_rq dl;
 622
 623#ifdef CONFIG_FAIR_GROUP_SCHED
 624        /* list of leaf cfs_rq on this cpu: */
 625        struct list_head leaf_cfs_rq_list;
 626#endif /* CONFIG_FAIR_GROUP_SCHED */
 627
 628        /*
 629         * This is part of a global counter where only the total sum
 630         * over all CPUs matters. A task can increase this counter on
 631         * one CPU and if it got migrated afterwards it may decrease
 632         * it on another CPU. Always updated under the runqueue lock:
 633         */
 634        unsigned long nr_uninterruptible;
 635
 636        struct task_struct *curr, *idle, *stop;
 637        unsigned long next_balance;
 638        struct mm_struct *prev_mm;
 639
 640        unsigned int clock_skip_update;
 641        u64 clock;
 642        u64 clock_task;
 643
 644        atomic_t nr_iowait;
 645
 646#ifdef CONFIG_SMP
 647        struct root_domain *rd;
 648        struct sched_domain *sd;
 649
 650        unsigned long cpu_capacity;
 651        unsigned long cpu_capacity_orig;
 652
 653        struct callback_head *balance_callback;
 654
 655        unsigned char idle_balance;
 656        /* For active balancing */
 657        int active_balance;
 658        int push_cpu;
 659        struct cpu_stop_work active_balance_work;
 660        /* cpu of this runqueue: */
 661        int cpu;
 662        int online;
 663
 664        struct list_head cfs_tasks;
 665
 666        u64 rt_avg;
 667        u64 age_stamp;
 668        u64 idle_stamp;
 669        u64 avg_idle;
 670
 671        /* This is used to determine avg_idle's max value */
 672        u64 max_idle_balance_cost;
 673#endif
 674
 675#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 676        u64 prev_irq_time;
 677#endif
 678#ifdef CONFIG_PARAVIRT
 679        u64 prev_steal_time;
 680#endif
 681#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
 682        u64 prev_steal_time_rq;
 683#endif
 684
 685        /* calc_load related fields */
 686        unsigned long calc_load_update;
 687        long calc_load_active;
 688
 689#ifdef CONFIG_SCHED_HRTICK
 690#ifdef CONFIG_SMP
 691        int hrtick_csd_pending;
 692        struct call_single_data hrtick_csd;
 693#endif
 694        struct hrtimer hrtick_timer;
 695#endif
 696
 697#ifdef CONFIG_SCHEDSTATS
 698        /* latency stats */
 699        struct sched_info rq_sched_info;
 700        unsigned long long rq_cpu_time;
 701        /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 702
 703        /* sys_sched_yield() stats */
 704        unsigned int yld_count;
 705
 706        /* schedule() stats */
 707        unsigned int sched_count;
 708        unsigned int sched_goidle;
 709
 710        /* try_to_wake_up() stats */
 711        unsigned int ttwu_count;
 712        unsigned int ttwu_local;
 713#endif
 714
 715#ifdef CONFIG_SMP
 716        struct llist_head wake_list;
 717#endif
 718
 719#ifdef CONFIG_CPU_IDLE
  720        /* Must be inspected within an RCU lock section */
 721        struct cpuidle_state *idle_state;
 722#endif
 723};
 724
 725static inline int cpu_of(struct rq *rq)
 726{
 727#ifdef CONFIG_SMP
 728        return rq->cpu;
 729#else
 730        return 0;
 731#endif
 732}
 733
 734
 735#ifdef CONFIG_SCHED_SMT
 736
 737extern struct static_key_false sched_smt_present;
 738
 739extern void __update_idle_core(struct rq *rq);
 740
 741static inline void update_idle_core(struct rq *rq)
 742{
 743        if (static_branch_unlikely(&sched_smt_present))
 744                __update_idle_core(rq);
 745}
 746
 747#else
 748static inline void update_idle_core(struct rq *rq) { }
 749#endif
 750
 751DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 752
 753#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
 754#define this_rq()               this_cpu_ptr(&runqueues)
 755#define task_rq(p)              cpu_rq(task_cpu(p))
 756#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
 757#define raw_rq()                raw_cpu_ptr(&runqueues)
 758
 759static inline u64 __rq_clock_broken(struct rq *rq)
 760{
 761        return READ_ONCE(rq->clock);
 762}
 763
 764static inline u64 rq_clock(struct rq *rq)
 765{
 766        lockdep_assert_held(&rq->lock);
 767        return rq->clock;
 768}
 769
 770static inline u64 rq_clock_task(struct rq *rq)
 771{
 772        lockdep_assert_held(&rq->lock);
 773        return rq->clock_task;
 774}
 775
 776#define RQCF_REQ_SKIP   0x01
 777#define RQCF_ACT_SKIP   0x02
 778
 779static inline void rq_clock_skip_update(struct rq *rq, bool skip)
 780{
 781        lockdep_assert_held(&rq->lock);
 782        if (skip)
 783                rq->clock_skip_update |= RQCF_REQ_SKIP;
 784        else
 785                rq->clock_skip_update &= ~RQCF_REQ_SKIP;
 786}
 787
 788#ifdef CONFIG_NUMA
 789enum numa_topology_type {
 790        NUMA_DIRECT,
 791        NUMA_GLUELESS_MESH,
 792        NUMA_BACKPLANE,
 793};
 794extern enum numa_topology_type sched_numa_topology_type;
 795extern int sched_max_numa_distance;
 796extern bool find_numa_distance(int distance);
 797#endif
 798
 799#ifdef CONFIG_NUMA_BALANCING
 800/* The regions in numa_faults array from task_struct */
 801enum numa_faults_stats {
 802        NUMA_MEM = 0,
 803        NUMA_CPU,
 804        NUMA_MEMBUF,
 805        NUMA_CPUBUF
 806};
 807extern void sched_setnuma(struct task_struct *p, int node);
 808extern int migrate_task_to(struct task_struct *p, int cpu);
 809extern int migrate_swap(struct task_struct *, struct task_struct *);
 810#endif /* CONFIG_NUMA_BALANCING */
 811
 812#ifdef CONFIG_SMP
 813
 814static inline void
 815queue_balance_callback(struct rq *rq,
 816                       struct callback_head *head,
 817                       void (*func)(struct rq *rq))
 818{
 819        lockdep_assert_held(&rq->lock);
 820
 821        if (unlikely(head->next))
 822                return;
 823
 824        head->func = (void (*)(struct callback_head *))func;
 825        head->next = rq->balance_callback;
 826        rq->balance_callback = head;
 827}
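/*
 * Illustrative usage from a sched class (sketch; my_callback_head and
 * my_push_tasks are hypothetical, the RT and DL classes use per-CPU
 * callback heads in a similar way). The callback is invoked later by the
 * scheduler core, outside the critical section that queued it:
 *
 *	static DEFINE_PER_CPU(struct callback_head, my_callback_head);
 *
 *	static void my_push_tasks(struct rq *rq)
 *	{
 *		...
 *	}
 *
 *	queue_balance_callback(rq, &per_cpu(my_callback_head, rq->cpu),
 *			       my_push_tasks);
 */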
 828
 829extern void sched_ttwu_pending(void);
 830
 831#define rcu_dereference_check_sched_domain(p) \
 832        rcu_dereference_check((p), \
 833                              lockdep_is_held(&sched_domains_mutex))
 834
 835/*
 836 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 837 * See detach_destroy_domains: synchronize_sched for details.
 838 *
 839 * The domain tree of any CPU may only be accessed from within
 840 * preempt-disabled sections.
 841 */
 842#define for_each_domain(cpu, __sd) \
 843        for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
 844                        __sd; __sd = __sd->parent)
 845
 846#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
 847
 848/**
 849 * highest_flag_domain - Return highest sched_domain containing flag.
 850 * @cpu:        The cpu whose highest level of sched domain is to
 851 *              be returned.
 852 * @flag:       The flag to check for the highest sched_domain
 853 *              for the given cpu.
 854 *
 855 * Returns the highest sched_domain of a cpu which contains the given flag.
 856 */
 857static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 858{
 859        struct sched_domain *sd, *hsd = NULL;
 860
 861        for_each_domain(cpu, sd) {
 862                if (!(sd->flags & flag))
 863                        break;
 864                hsd = sd;
 865        }
 866
 867        return hsd;
 868}
 869
 870static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 871{
 872        struct sched_domain *sd;
 873
 874        for_each_domain(cpu, sd) {
 875                if (sd->flags & flag)
 876                        break;
 877        }
 878
 879        return sd;
 880}
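/*
 * Example (illustrative): the LLC domain cached in sd_llc below is found
 * with highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES), i.e. the widest
 * domain whose CPUs still share package resources (the last-level cache).
 */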
 881
 882DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 883DECLARE_PER_CPU(int, sd_llc_size);
 884DECLARE_PER_CPU(int, sd_llc_id);
 885DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 886DECLARE_PER_CPU(struct sched_domain *, sd_numa);
 887DECLARE_PER_CPU(struct sched_domain *, sd_asym);
 888
 889struct sched_group_capacity {
 890        atomic_t ref;
 891        /*
 892         * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
 893         * for a single CPU.
 894         */
 895        unsigned int capacity;
 896        unsigned long next_update;
 897        int imbalance; /* XXX unrelated to capacity but shared group state */
 898
 899        unsigned long cpumask[0]; /* iteration mask */
 900};
 901
 902struct sched_group {
 903        struct sched_group *next;       /* Must be a circular list */
 904        atomic_t ref;
 905
 906        unsigned int group_weight;
 907        struct sched_group_capacity *sgc;
 908
 909        /*
 910         * The CPUs this group covers.
 911         *
 912         * NOTE: this field is variable length. (Allocated dynamically
 913         * by attaching extra space to the end of the structure,
 914         * depending on how many CPUs the kernel has booted up with)
 915         */
 916        unsigned long cpumask[0];
 917};
 918
 919static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 920{
 921        return to_cpumask(sg->cpumask);
 922}
 923
 924/*
 925 * cpumask masking which cpus in the group are allowed to iterate up the domain
 926 * tree.
 927 */
 928static inline struct cpumask *sched_group_mask(struct sched_group *sg)
 929{
 930        return to_cpumask(sg->sgc->cpumask);
 931}
 932
 933/**
 934 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 935 * @group: The group whose first cpu is to be returned.
 936 */
 937static inline unsigned int group_first_cpu(struct sched_group *group)
 938{
 939        return cpumask_first(sched_group_cpus(group));
 940}
 941
 942extern int group_balance_cpu(struct sched_group *sg);
 943
 944#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
 945void register_sched_domain_sysctl(void);
 946void unregister_sched_domain_sysctl(void);
 947#else
 948static inline void register_sched_domain_sysctl(void)
 949{
 950}
 951static inline void unregister_sched_domain_sysctl(void)
 952{
 953}
 954#endif
 955
 956#else
 957
 958static inline void sched_ttwu_pending(void) { }
 959
 960#endif /* CONFIG_SMP */
 961
 962#include "stats.h"
 963#include "auto_group.h"
 964
 965#ifdef CONFIG_CGROUP_SCHED
 966
 967/*
  968 * Return the group to which this task belongs.
 969 *
 970 * We cannot use task_css() and friends because the cgroup subsystem
 971 * changes that value before the cgroup_subsys::attach() method is called,
 972 * therefore we cannot pin it and might observe the wrong value.
 973 *
 974 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 975 * core changes this before calling sched_move_task().
 976 *
 977 * Instead we use a 'copy' which is updated from sched_move_task() while
 978 * holding both task_struct::pi_lock and rq::lock.
 979 */
 980static inline struct task_group *task_group(struct task_struct *p)
 981{
 982        return p->sched_task_group;
 983}
 984
 985/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 986static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 987{
 988#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
 989        struct task_group *tg = task_group(p);
 990#endif
 991
 992#ifdef CONFIG_FAIR_GROUP_SCHED
 993        set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
 994        p->se.cfs_rq = tg->cfs_rq[cpu];
 995        p->se.parent = tg->se[cpu];
 996#endif
 997
 998#ifdef CONFIG_RT_GROUP_SCHED
 999        p->rt.rt_rq  = tg->rt_rq[cpu];
1000        p->rt.parent = tg->rt_se[cpu];
1001#endif
1002}
1003
1004#else /* CONFIG_CGROUP_SCHED */
1005
1006static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1007static inline struct task_group *task_group(struct task_struct *p)
1008{
1009        return NULL;
1010}
1011
1012#endif /* CONFIG_CGROUP_SCHED */
1013
1014static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1015{
1016        set_task_rq(p, cpu);
1017#ifdef CONFIG_SMP
1018        /*
 1019         * After ->cpu is set to a new value, task_rq_lock(p, ...) can be
 1020         * successfully executed on another CPU. We must ensure that updates of
1021         * per-task data have been completed by this moment.
1022         */
1023        smp_wmb();
1024#ifdef CONFIG_THREAD_INFO_IN_TASK
1025        p->cpu = cpu;
1026#else
1027        task_thread_info(p)->cpu = cpu;
1028#endif
1029        p->wake_cpu = cpu;
1030#endif
1031}
1032
1033/*
1034 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
1035 */
1036#ifdef CONFIG_SCHED_DEBUG
1037# include <linux/static_key.h>
1038# define const_debug __read_mostly
1039#else
1040# define const_debug const
1041#endif
1042
1043extern const_debug unsigned int sysctl_sched_features;
1044
1045#define SCHED_FEAT(name, enabled)       \
1046        __SCHED_FEAT_##name ,
1047
1048enum {
1049#include "features.h"
1050        __SCHED_FEAT_NR,
1051};
1052
1053#undef SCHED_FEAT
1054
1055#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
1056#define SCHED_FEAT(name, enabled)                                       \
1057static __always_inline bool static_branch_##name(struct static_key *key) \
1058{                                                                       \
1059        return static_key_##enabled(key);                               \
1060}
1061
1062#include "features.h"
1063
1064#undef SCHED_FEAT
1065
1066extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1067#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1068#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
1069#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1070#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
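/*
 * Example (illustrative): features.h contains lines such as
 *
 *	SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 *	SCHED_FEAT(HRTICK, false)
 *
 * which expand to __SCHED_FEAT_GENTLE_FAIR_SLEEPERS etc. in the enum above,
 * so code can test a feature with e.g. sched_feat(HRTICK), either through a
 * jump label or through a bit in sysctl_sched_features.
 */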
1071
1072extern struct static_key_false sched_numa_balancing;
1073extern struct static_key_false sched_schedstats;
1074
1075static inline u64 global_rt_period(void)
1076{
1077        return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1078}
1079
1080static inline u64 global_rt_runtime(void)
1081{
1082        if (sysctl_sched_rt_runtime < 0)
1083                return RUNTIME_INF;
1084
1085        return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1086}
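/*
 * Worked example (illustrative): with the usual defaults of
 * sysctl_sched_rt_period == 1000000 us and sysctl_sched_rt_runtime ==
 * 950000 us, global_rt_period() == 1000000000 ns (1 s) and
 * global_rt_runtime() == 950000000 ns, i.e. RT tasks may consume at most
 * 95% of each 1 s period before being throttled.
 */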
1087
1088static inline int task_current(struct rq *rq, struct task_struct *p)
1089{
1090        return rq->curr == p;
1091}
1092
1093static inline int task_running(struct rq *rq, struct task_struct *p)
1094{
1095#ifdef CONFIG_SMP
1096        return p->on_cpu;
1097#else
1098        return task_current(rq, p);
1099#endif
1100}
1101
1102static inline int task_on_rq_queued(struct task_struct *p)
1103{
1104        return p->on_rq == TASK_ON_RQ_QUEUED;
1105}
1106
1107static inline int task_on_rq_migrating(struct task_struct *p)
1108{
1109        return p->on_rq == TASK_ON_RQ_MIGRATING;
1110}
1111
1112#ifndef prepare_arch_switch
1113# define prepare_arch_switch(next)      do { } while (0)
1114#endif
1115#ifndef finish_arch_post_lock_switch
1116# define finish_arch_post_lock_switch() do { } while (0)
1117#endif
1118
1119static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
1120{
1121#ifdef CONFIG_SMP
1122        /*
1123         * We can optimise this out completely for !SMP, because the
1124         * SMP rebalancing from interrupt is the only thing that cares
1125         * here.
1126         */
1127        next->on_cpu = 1;
1128#endif
1129}
1130
1131static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1132{
1133#ifdef CONFIG_SMP
1134        /*
1135         * After ->on_cpu is cleared, the task can be moved to a different CPU.
1136         * We must ensure this doesn't happen until the switch is completely
1137         * finished.
1138         *
1139         * In particular, the load of prev->state in finish_task_switch() must
1140         * happen before this.
1141         *
1142         * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
1143         */
1144        smp_store_release(&prev->on_cpu, 0);
1145#endif
1146#ifdef CONFIG_DEBUG_SPINLOCK
1147        /* this is a valid case when another task releases the spinlock */
1148        rq->lock.owner = current;
1149#endif
1150        /*
1151         * If we are tracking spinlock dependencies then we have to
1152         * fix up the runqueue lock - which gets 'carried over' from
1153         * prev into current:
1154         */
1155        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
1156
1157        raw_spin_unlock_irq(&rq->lock);
1158}
1159
1160/*
1161 * wake flags
1162 */
1163#define WF_SYNC         0x01            /* waker goes to sleep after wakeup */
1164#define WF_FORK         0x02            /* child wakeup after fork */
1165#define WF_MIGRATED     0x4             /* internal use, task got migrated */
1166
1167/*
1168 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 1169 * of tasks with abnormal "nice" values across CPUs, the contribution that
1170 * each task makes to its run queue's load is weighted according to its
1171 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1172 * scaled version of the new time slice allocation that they receive on time
1173 * slice expiry etc.
1174 */
1175
1176#define WEIGHT_IDLEPRIO                3
1177#define WMULT_IDLEPRIO         1431655765
1178
1179extern const int sched_prio_to_weight[40];
1180extern const u32 sched_prio_to_wmult[40];
1181
1182/*
1183 * {de,en}queue flags:
1184 *
1185 * DEQUEUE_SLEEP  - task is no longer runnable
1186 * ENQUEUE_WAKEUP - task just became runnable
1187 *
1188 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
1189 *                are in a known state which allows modification. Such pairs
1190 *                should preserve as much state as possible.
1191 *
1192 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
1193 *        in the runqueue.
1194 *
1195 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
1196 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
1197 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
1198 *
1199 */
1200
1201#define DEQUEUE_SLEEP           0x01
1202#define DEQUEUE_SAVE            0x02 /* matches ENQUEUE_RESTORE */
1203#define DEQUEUE_MOVE            0x04 /* matches ENQUEUE_MOVE */
1204
1205#define ENQUEUE_WAKEUP          0x01
1206#define ENQUEUE_RESTORE         0x02
1207#define ENQUEUE_MOVE            0x04
1208
1209#define ENQUEUE_HEAD            0x08
1210#define ENQUEUE_REPLENISH       0x10
1211#ifdef CONFIG_SMP
1212#define ENQUEUE_MIGRATED        0x20
1213#else
1214#define ENQUEUE_MIGRATED        0x00
1215#endif
1216
1217#define RETRY_TASK              ((void *)-1UL)
1218
1219struct sched_class {
1220        const struct sched_class *next;
1221
1222        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1223        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1224        void (*yield_task) (struct rq *rq);
1225        bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1226
1227        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1228
1229        /*
 1230         * It is the responsibility of the pick_next_task() method, which
 1231         * returns the next task, to call put_prev_task() on the @prev task
 1232         * (or something equivalent).
1233         *
1234         * May return RETRY_TASK when it finds a higher prio class has runnable
1235         * tasks.
1236         */
1237        struct task_struct * (*pick_next_task) (struct rq *rq,
1238                                                struct task_struct *prev,
1239                                                struct pin_cookie cookie);
1240        void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1241
1242#ifdef CONFIG_SMP
1243        int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1244        void (*migrate_task_rq)(struct task_struct *p);
1245
1246        void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1247
1248        void (*set_cpus_allowed)(struct task_struct *p,
1249                                 const struct cpumask *newmask);
1250
1251        void (*rq_online)(struct rq *rq);
1252        void (*rq_offline)(struct rq *rq);
1253#endif
1254
1255        void (*set_curr_task) (struct rq *rq);
1256        void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1257        void (*task_fork) (struct task_struct *p);
1258        void (*task_dead) (struct task_struct *p);
1259
1260        /*
1261         * The switched_from() call is allowed to drop rq->lock, therefore we
 1262         * cannot assume the switched_from/switched_to pair is serialized by
1263         * rq->lock. They are however serialized by p->pi_lock.
1264         */
1265        void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1266        void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1267        void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1268                             int oldprio);
1269
1270        unsigned int (*get_rr_interval) (struct rq *rq,
1271                                         struct task_struct *task);
1272
1273        void (*update_curr) (struct rq *rq);
1274
1275#define TASK_SET_GROUP  0
1276#define TASK_MOVE_GROUP 1
1277
1278#ifdef CONFIG_FAIR_GROUP_SCHED
1279        void (*task_change_group) (struct task_struct *p, int type);
1280#endif
1281};
1282
1283static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1284{
1285        prev->sched_class->put_prev_task(rq, prev);
1286}
1287
1288static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
1289{
1290        curr->sched_class->set_curr_task(rq);
1291}
1292
1293#define sched_class_highest (&stop_sched_class)
1294#define for_each_class(class) \
1295   for (class = sched_class_highest; class; class = class->next)
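/*
 * Illustrative iteration (sketch, mirroring what the core pick-next path
 * does): walk the classes from highest (stop) to lowest (idle) priority.
 *
 *	const struct sched_class *class;
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq, prev, cookie);
 *		if (p == RETRY_TASK)
 *			goto again;
 *		if (p)
 *			return p;
 *	}
 */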
1296
1297extern const struct sched_class stop_sched_class;
1298extern const struct sched_class dl_sched_class;
1299extern const struct sched_class rt_sched_class;
1300extern const struct sched_class fair_sched_class;
1301extern const struct sched_class idle_sched_class;
1302
1303
1304#ifdef CONFIG_SMP
1305
1306extern void update_group_capacity(struct sched_domain *sd, int cpu);
1307
1308extern void trigger_load_balance(struct rq *rq);
1309
1310extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1311
1312#endif
1313
1314#ifdef CONFIG_CPU_IDLE
1315static inline void idle_set_state(struct rq *rq,
1316                                  struct cpuidle_state *idle_state)
1317{
1318        rq->idle_state = idle_state;
1319}
1320
1321static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1322{
1323        SCHED_WARN_ON(!rcu_read_lock_held());
1324        return rq->idle_state;
1325}
1326#else
1327static inline void idle_set_state(struct rq *rq,
1328                                  struct cpuidle_state *idle_state)
1329{
1330}
1331
1332static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1333{
1334        return NULL;
1335}
1336#endif
1337
1338extern void sysrq_sched_debug_show(void);
1339extern void sched_init_granularity(void);
1340extern void update_max_interval(void);
1341
1342extern void init_sched_dl_class(void);
1343extern void init_sched_rt_class(void);
1344extern void init_sched_fair_class(void);
1345
1346extern void resched_curr(struct rq *rq);
1347extern void resched_cpu(int cpu);
1348
1349extern struct rt_bandwidth def_rt_bandwidth;
1350extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1351
1352extern struct dl_bandwidth def_dl_bandwidth;
1353extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1354extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1355
1356unsigned long to_ratio(u64 period, u64 runtime);
1357
1358extern void init_entity_runnable_average(struct sched_entity *se);
1359extern void post_init_entity_util_avg(struct sched_entity *se);
1360
1361#ifdef CONFIG_NO_HZ_FULL
1362extern bool sched_can_stop_tick(struct rq *rq);
1363
1364/*
1365 * Tick may be needed by tasks in the runqueue depending on their policy and
 1366 * requirements. If the tick is needed, send the target CPU an IPI to kick
 1367 * it out of nohz mode if necessary.
1368 */
1369static inline void sched_update_tick_dependency(struct rq *rq)
1370{
1371        int cpu;
1372
1373        if (!tick_nohz_full_enabled())
1374                return;
1375
1376        cpu = cpu_of(rq);
1377
1378        if (!tick_nohz_full_cpu(cpu))
1379                return;
1380
1381        if (sched_can_stop_tick(rq))
1382                tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
1383        else
1384                tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
1385}
1386#else
1387static inline void sched_update_tick_dependency(struct rq *rq) { }
1388#endif
1389
1390static inline void add_nr_running(struct rq *rq, unsigned count)
1391{
1392        unsigned prev_nr = rq->nr_running;
1393
1394        rq->nr_running = prev_nr + count;
1395
1396        if (prev_nr < 2 && rq->nr_running >= 2) {
1397#ifdef CONFIG_SMP
1398                if (!rq->rd->overload)
1399                        rq->rd->overload = true;
1400#endif
1401        }
1402
1403        sched_update_tick_dependency(rq);
1404}
1405
1406static inline void sub_nr_running(struct rq *rq, unsigned count)
1407{
1408        rq->nr_running -= count;
1409        /* Check if we still need preemption */
1410        sched_update_tick_dependency(rq);
1411}
1412
1413static inline void rq_last_tick_reset(struct rq *rq)
1414{
1415#ifdef CONFIG_NO_HZ_FULL
1416        rq->last_sched_tick = jiffies;
1417#endif
1418}
1419
1420extern void update_rq_clock(struct rq *rq);
1421
1422extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1423extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1424
1425extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1426
1427extern const_debug unsigned int sysctl_sched_time_avg;
1428extern const_debug unsigned int sysctl_sched_nr_migrate;
1429extern const_debug unsigned int sysctl_sched_migration_cost;
1430
1431static inline u64 sched_avg_period(void)
1432{
1433        return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1434}
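/*
 * Worked example (illustrative, assuming the default sysctl_sched_time_avg
 * of 1000 ms): sched_avg_period() == 1000 * NSEC_PER_MSEC / 2 ==
 * 500000000 ns, i.e. rq->rt_avg is halved roughly every 0.5 s by
 * sched_avg_update().
 */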
1435
1436#ifdef CONFIG_SCHED_HRTICK
1437
1438/*
1439 * Use hrtick when:
1440 *  - enabled by features
1441 *  - hrtimer is actually high res
1442 */
1443static inline int hrtick_enabled(struct rq *rq)
1444{
1445        if (!sched_feat(HRTICK))
1446                return 0;
1447        if (!cpu_active(cpu_of(rq)))
1448                return 0;
1449        return hrtimer_is_hres_active(&rq->hrtick_timer);
1450}
1451
1452void hrtick_start(struct rq *rq, u64 delay);
1453
1454#else
1455
1456static inline int hrtick_enabled(struct rq *rq)
1457{
1458        return 0;
1459}
1460
1461#endif /* CONFIG_SCHED_HRTICK */
1462
1463#ifdef CONFIG_SMP
1464extern void sched_avg_update(struct rq *rq);
1465
1466#ifndef arch_scale_freq_capacity
1467static __always_inline
1468unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
1469{
1470        return SCHED_CAPACITY_SCALE;
1471}
1472#endif
1473
1474#ifndef arch_scale_cpu_capacity
1475static __always_inline
1476unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
1477{
1478        if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
1479                return sd->smt_gain / sd->span_weight;
1480
1481        return SCHED_CAPACITY_SCALE;
1482}
1483#endif
1484
1485static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1486{
1487        rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
1488        sched_avg_update(rq);
1489}
1490#else
1491static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1492static inline void sched_avg_update(struct rq *rq) { }
1493#endif
1494
1495struct rq_flags {
1496        unsigned long flags;
1497        struct pin_cookie cookie;
1498};
1499
1500struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1501        __acquires(rq->lock);
1502struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1503        __acquires(p->pi_lock)
1504        __acquires(rq->lock);
1505
1506static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1507        __releases(rq->lock)
1508{
1509        lockdep_unpin_lock(&rq->lock, rf->cookie);
1510        raw_spin_unlock(&rq->lock);
1511}
1512
1513static inline void
1514task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1515        __releases(rq->lock)
1516        __releases(p->pi_lock)
1517{
1518        lockdep_unpin_lock(&rq->lock, rf->cookie);
1519        raw_spin_unlock(&rq->lock);
1520        raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1521}
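/*
 * Typical usage (illustrative sketch):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	... p cannot change runqueue while both locks are held ...
 *	task_rq_unlock(rq, p, &rf);
 */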
1522
1523#ifdef CONFIG_SMP
1524#ifdef CONFIG_PREEMPT
1525
1526static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1527
1528/*
1529 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1530 * way at the expense of forcing extra atomic operations in all
1531 * invocations.  This assures that the double_lock is acquired using the
1532 * same underlying policy as the spinlock_t on this architecture, which
1533 * reduces latency compared to the unfair variant below.  However, it
1534 * also adds more overhead and therefore may reduce throughput.
1535 */
1536static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1537        __releases(this_rq->lock)
1538        __acquires(busiest->lock)
1539        __acquires(this_rq->lock)
1540{
1541        raw_spin_unlock(&this_rq->lock);
1542        double_rq_lock(this_rq, busiest);
1543
1544        return 1;
1545}
1546
1547#else
1548/*
1549 * Unfair double_lock_balance: Optimizes throughput at the expense of
1550 * latency by eliminating extra atomic operations when the locks are
1551 * already in proper order on entry.  This favors lower cpu-ids and will
1552 * grant the double lock to lower cpus over higher ids under contention,
1553 * regardless of entry order into the function.
1554 */
1555static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1556        __releases(this_rq->lock)
1557        __acquires(busiest->lock)
1558        __acquires(this_rq->lock)
1559{
1560        int ret = 0;
1561
1562        if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1563                if (busiest < this_rq) {
1564                        raw_spin_unlock(&this_rq->lock);
1565                        raw_spin_lock(&busiest->lock);
1566                        raw_spin_lock_nested(&this_rq->lock,
1567                                              SINGLE_DEPTH_NESTING);
1568                        ret = 1;
1569                } else
1570                        raw_spin_lock_nested(&busiest->lock,
1571                                              SINGLE_DEPTH_NESTING);
1572        }
1573        return ret;
1574}
1575
1576#endif /* CONFIG_PREEMPT */
1577
1578/*
1579 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1580 */
1581static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1582{
1583        if (unlikely(!irqs_disabled())) {
 1584                /* printk() doesn't work well under rq->lock */
1585                raw_spin_unlock(&this_rq->lock);
1586                BUG_ON(1);
1587        }
1588
1589        return _double_lock_balance(this_rq, busiest);
1590}
1591
1592static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1593        __releases(busiest->lock)
1594{
1595        raw_spin_unlock(&busiest->lock);
1596        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1597}
1598
1599static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
1600{
1601        if (l1 > l2)
1602                swap(l1, l2);
1603
1604        spin_lock(l1);
1605        spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1606}
1607
1608static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
1609{
1610        if (l1 > l2)
1611                swap(l1, l2);
1612
1613        spin_lock_irq(l1);
1614        spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1615}
1616
1617static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
1618{
1619        if (l1 > l2)
1620                swap(l1, l2);
1621
1622        raw_spin_lock(l1);
1623        raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1624}
1625
1626/*
1627 * double_rq_lock - safely lock two runqueues
1628 *
1629 * Note this does not disable interrupts like task_rq_lock,
1630 * you need to do so manually before calling.
1631 */
1632static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1633        __acquires(rq1->lock)
1634        __acquires(rq2->lock)
1635{
1636        BUG_ON(!irqs_disabled());
1637        if (rq1 == rq2) {
1638                raw_spin_lock(&rq1->lock);
1639                __acquire(rq2->lock);   /* Fake it out ;) */
1640        } else {
1641                if (rq1 < rq2) {
1642                        raw_spin_lock(&rq1->lock);
1643                        raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1644                } else {
1645                        raw_spin_lock(&rq2->lock);
1646                        raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1647                }
1648        }
1649}
1650
1651/*
1652 * double_rq_unlock - safely unlock two runqueues
1653 *
1654 * Note this does not restore interrupts like task_rq_unlock,
1655 * you need to do so manually after calling.
1656 */
1657static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1658        __releases(rq1->lock)
1659        __releases(rq2->lock)
1660{
1661        raw_spin_unlock(&rq1->lock);
1662        if (rq1 != rq2)
1663                raw_spin_unlock(&rq2->lock);
1664        else
1665                __release(rq2->lock);
1666}
1667
1668#else /* CONFIG_SMP */
1669
1670/*
1671 * double_rq_lock - safely lock two runqueues
1672 *
1673 * Note this does not disable interrupts like task_rq_lock,
1674 * you need to do so manually before calling.
1675 */
1676static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1677        __acquires(rq1->lock)
1678        __acquires(rq2->lock)
1679{
1680        BUG_ON(!irqs_disabled());
1681        BUG_ON(rq1 != rq2);
1682        raw_spin_lock(&rq1->lock);
1683        __acquire(rq2->lock);   /* Fake it out ;) */
1684}
1685
1686/*
1687 * double_rq_unlock - safely unlock two runqueues
1688 *
1689 * Note this does not restore interrupts like task_rq_unlock,
1690 * you need to do so manually after calling.
1691 */
1692static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1693        __releases(rq1->lock)
1694        __releases(rq2->lock)
1695{
1696        BUG_ON(rq1 != rq2);
1697        raw_spin_unlock(&rq1->lock);
1698        __release(rq2->lock);
1699}
1700
1701#endif
1702
1703extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1704extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
1705
1706#ifdef  CONFIG_SCHED_DEBUG
1707extern void print_cfs_stats(struct seq_file *m, int cpu);
1708extern void print_rt_stats(struct seq_file *m, int cpu);
1709extern void print_dl_stats(struct seq_file *m, int cpu);
1710extern void
1711print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
1712
1713#ifdef CONFIG_NUMA_BALANCING
1714extern void
1715show_numa_stats(struct task_struct *p, struct seq_file *m);
1716extern void
1717print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
1718        unsigned long tpf, unsigned long gsf, unsigned long gpf);
1719#endif /* CONFIG_NUMA_BALANCING */
1720#endif /* CONFIG_SCHED_DEBUG */
1721
1722extern void init_cfs_rq(struct cfs_rq *cfs_rq);
1723extern void init_rt_rq(struct rt_rq *rt_rq);
1724extern void init_dl_rq(struct dl_rq *dl_rq);
1725
1726extern void cfs_bandwidth_usage_inc(void);
1727extern void cfs_bandwidth_usage_dec(void);
1728
1729#ifdef CONFIG_NO_HZ_COMMON
1730enum rq_nohz_flag_bits {
1731        NOHZ_TICK_STOPPED,
1732        NOHZ_BALANCE_KICK,
1733};
1734
1735#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
1736
1737extern void nohz_balance_exit_idle(unsigned int cpu);
1738#else
1739static inline void nohz_balance_exit_idle(unsigned int cpu) { }
1740#endif
1741
1742#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1743struct irqtime {
1744        u64                     hardirq_time;
1745        u64                     softirq_time;
1746        u64                     irq_start_time;
1747        struct u64_stats_sync   sync;
1748};
1749
1750DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
1751
1752static inline u64 irq_time_read(int cpu)
1753{
1754        struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
1755        unsigned int seq;
1756        u64 total;
1757
1758        do {
1759                seq = __u64_stats_fetch_begin(&irqtime->sync);
1760                total = irqtime->softirq_time + irqtime->hardirq_time;
1761        } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
1762
1763        return total;
1764}
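/*
 * The writer side is expected to bracket its updates of hardirq_time and
 * softirq_time with u64_stats_update_begin(&irqtime->sync) and
 * u64_stats_update_end(&irqtime->sync), so that 32-bit readers above see a
 * consistent 64-bit sum (on 64-bit the seqcount compiles away).
 */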
1765#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1766
1767#ifdef CONFIG_CPU_FREQ
1768DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
1769
1770/**
1771 * cpufreq_update_util - Take a note about CPU utilization changes.
1772 * @rq: Runqueue to carry out the update for.
1773 * @flags: Update reason flags.
1774 *
1775 * This function is called by the scheduler on the CPU whose utilization is
1776 * being updated.
1777 *
1778 * It can only be called from RCU-sched read-side critical sections.
1779 *
1780 * The way cpufreq is currently arranged requires it to evaluate the CPU
1781 * performance state (frequency/voltage) on a regular basis to prevent it from
1782 * being stuck in a completely inadequate performance level for too long.
1783 * That is not guaranteed to happen if the updates are only triggered from CFS,
1784 * though, because they may not be coming in if RT or deadline tasks are active
1785 * all the time (or there are RT and DL tasks only).
1786 *
1787 * As a workaround for that issue, this function is called by the RT and DL
1788 * sched classes to trigger extra cpufreq updates to prevent it from stalling,
1789 * but that really is a band-aid.  Going forward it should be replaced with
1790 * solutions targeted more specifically at RT and DL tasks.
1791 */
1792static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
1793{
1794        struct update_util_data *data;
1795
1796        data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
1797        if (data)
1798                data->func(data, rq_clock(rq), flags);
1799}
1800
1801static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
1802{
1803        if (cpu_of(rq) == smp_processor_id())
1804                cpufreq_update_util(rq, flags);
1805}
1806#else
1807static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
1808static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
1809#endif /* CONFIG_CPU_FREQ */
1810
1811#ifdef arch_scale_freq_capacity
1812#ifndef arch_scale_freq_invariant
1813#define arch_scale_freq_invariant()     (true)
1814#endif
1815#else /* arch_scale_freq_capacity */
1816#define arch_scale_freq_invariant()     (false)
1817#endif
1818