linux/kernel/sched/sched.h
   1
   2#include <linux/sched.h>
   3#include <linux/sched/sysctl.h>
   4#include <linux/sched/rt.h>
   5#include <linux/mutex.h>
   6#include <linux/spinlock.h>
   7#include <linux/stop_machine.h>
   8#include <linux/tick.h>
   9
  10#include "cpupri.h"
  11#include "cpuacct.h"
  12
  13extern __read_mostly int scheduler_running;
  14
  15/*
  16 * Convert user-nice values [ -20 ... 0 ... 19 ]
  17 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
  18 * and back.
  19 */
  20#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
  21#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
  22#define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)
  23
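/*
 * Worked example (a sketch assuming the usual MAX_RT_PRIO == 100 and
 * MAX_PRIO == 140, which are not defined in this file):
 * NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120, NICE_TO_PRIO(19) == 139,
 * and PRIO_TO_NICE() undoes the mapping, e.g. PRIO_TO_NICE(120) == 0.
 */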
  24/*
  25 * 'User priority' is the nice value converted to something we
  26 * can work with better when scaling various scheduler parameters,
  27 * it's a [ 0 ... 39 ] range.
  28 */
  29#define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
  30#define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
  31#define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
  32
  33/*
  34 * Helpers for converting nanosecond timing to jiffy resolution
  35 */
  36#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
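/* For instance, with HZ == 1000: NS_TO_JIFFIES(2000000) == 2 (2ms -> 2 jiffies). */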
  37
  38/*
  39 * Increase resolution of nice-level calculations for 64-bit architectures.
  40 * The extra resolution improves shares distribution and load balancing of
   41 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
  42 * hierarchies, especially on larger systems. This is not a user-visible change
  43 * and does not change the user-interface for setting shares/weights.
  44 *
  45 * We increase resolution only if we have enough bits to allow this increased
  46 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
  47 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
  48 * increased costs.
  49 */
  50#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
  51# define SCHED_LOAD_RESOLUTION  10
  52# define scale_load(w)          ((w) << SCHED_LOAD_RESOLUTION)
  53# define scale_load_down(w)     ((w) >> SCHED_LOAD_RESOLUTION)
  54#else
  55# define SCHED_LOAD_RESOLUTION  0
  56# define scale_load(w)          (w)
  57# define scale_load_down(w)     (w)
  58#endif
  59
  60#define SCHED_LOAD_SHIFT        (10 + SCHED_LOAD_RESOLUTION)
  61#define SCHED_LOAD_SCALE        (1L << SCHED_LOAD_SHIFT)
  62
  63#define NICE_0_LOAD             SCHED_LOAD_SCALE
  64#define NICE_0_SHIFT            SCHED_LOAD_SHIFT
  65
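/*
 * Worked example: with the #if 0 branch above, SCHED_LOAD_RESOLUTION == 0,
 * so SCHED_LOAD_SHIFT == 10, NICE_0_LOAD == 1024 and scale_load() is a no-op.
 * Were the extra resolution enabled, scale_load(1024) would be
 * 1024 << 10 == 1048576, with scale_load_down() undoing the shift.
 */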
  66/*
  67 * These are the 'tuning knobs' of the scheduler:
  68 */
  69
  70/*
   71 * single value that denotes runtime == period, i.e. unlimited time.
  72 */
  73#define RUNTIME_INF     ((u64)~0ULL)
  74
  75static inline int rt_policy(int policy)
  76{
  77        if (policy == SCHED_FIFO || policy == SCHED_RR)
  78                return 1;
  79        return 0;
  80}
  81
  82static inline int task_has_rt_policy(struct task_struct *p)
  83{
  84        return rt_policy(p->policy);
  85}
  86
  87/*
  88 * This is the priority-queue data structure of the RT scheduling class:
  89 */
  90struct rt_prio_array {
  91        DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
  92        struct list_head queue[MAX_RT_PRIO];
  93};
  94
  95struct rt_bandwidth {
  96        /* nests inside the rq lock: */
  97        raw_spinlock_t          rt_runtime_lock;
  98        ktime_t                 rt_period;
  99        u64                     rt_runtime;
 100        struct hrtimer          rt_period_timer;
 101};
 102
 103extern struct mutex sched_domains_mutex;
 104
 105#ifdef CONFIG_CGROUP_SCHED
 106
 107#include <linux/cgroup.h>
 108
 109struct cfs_rq;
 110struct rt_rq;
 111
 112extern struct list_head task_groups;
 113
 114struct cfs_bandwidth {
 115#ifdef CONFIG_CFS_BANDWIDTH
 116        raw_spinlock_t lock;
 117        ktime_t period;
 118        u64 quota, runtime;
 119        s64 hierarchal_quota;
 120        u64 runtime_expires;
 121
 122        int idle, timer_active;
 123        struct hrtimer period_timer, slack_timer;
 124        struct list_head throttled_cfs_rq;
 125
 126        /* statistics */
 127        int nr_periods, nr_throttled;
 128        u64 throttled_time;
 129#endif
 130};
 131
 132/* task group related information */
 133struct task_group {
 134        struct cgroup_subsys_state css;
 135
 136#ifdef CONFIG_FAIR_GROUP_SCHED
 137        /* schedulable entities of this group on each cpu */
 138        struct sched_entity **se;
 139        /* runqueue "owned" by this group on each cpu */
 140        struct cfs_rq **cfs_rq;
 141        unsigned long shares;
 142
 143        atomic_t load_weight;
 144        atomic64_t load_avg;
 145        atomic_t runnable_avg;
 146#endif
 147
 148#ifdef CONFIG_RT_GROUP_SCHED
 149        struct sched_rt_entity **rt_se;
 150        struct rt_rq **rt_rq;
 151
 152        struct rt_bandwidth rt_bandwidth;
 153#endif
 154
 155        struct rcu_head rcu;
 156        struct list_head list;
 157
 158        struct task_group *parent;
 159        struct list_head siblings;
 160        struct list_head children;
 161
 162#ifdef CONFIG_SCHED_AUTOGROUP
 163        struct autogroup *autogroup;
 164#endif
 165
 166        struct cfs_bandwidth cfs_bandwidth;
 167};
 168
 169#ifdef CONFIG_FAIR_GROUP_SCHED
 170#define ROOT_TASK_GROUP_LOAD    NICE_0_LOAD
 171
 172/*
  173 * A weight of 0 or 1 can cause arithmetic problems.
  174 * The weight of a cfs_rq is the sum of the weights of the entities
  175 * queued on it, so the weight of a single entity should not be too
  176 * large, and neither should the shares value of a task group.
 177 * (The default weight is 1024 - so there's no practical
 178 *  limitation from this.)
 179 */
 180#define MIN_SHARES      (1UL <<  1)
 181#define MAX_SHARES      (1UL << 18)
 182#endif
 183
 184typedef int (*tg_visitor)(struct task_group *, void *);
 185
 186extern int walk_tg_tree_from(struct task_group *from,
 187                             tg_visitor down, tg_visitor up, void *data);
 188
 189/*
 190 * Iterate the full tree, calling @down when first entering a node and @up when
 191 * leaving it for the final time.
 192 *
  193 * Caller must hold rcu_read_lock() or a sufficient equivalent.
 194 */
 195static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 196{
 197        return walk_tg_tree_from(&root_task_group, down, up, data);
 198}
 199
 200extern int tg_nop(struct task_group *tg, void *data);
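/*
 * Usage sketch (illustrative only, not part of this header): a hypothetical
 * visitor that counts the task groups in the hierarchy. Visitors return 0 to
 * continue the walk; a non-zero return value aborts it.
 *
 *      static int count_tg(struct task_group *tg, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;
 *      }
 *
 *      int nr = 0;
 *
 *      rcu_read_lock();
 *      walk_tg_tree(count_tg, tg_nop, &nr);
 *      rcu_read_unlock();
 */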
 201
 202extern void free_fair_sched_group(struct task_group *tg);
 203extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
 204extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
 205extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 206                        struct sched_entity *se, int cpu,
 207                        struct sched_entity *parent);
 208extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 209extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 210
 211extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
 212extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 213extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 214
 215extern void free_rt_sched_group(struct task_group *tg);
 216extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
 217extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 218                struct sched_rt_entity *rt_se, int cpu,
 219                struct sched_rt_entity *parent);
 220
 221extern struct task_group *sched_create_group(struct task_group *parent);
 222extern void sched_online_group(struct task_group *tg,
 223                               struct task_group *parent);
 224extern void sched_destroy_group(struct task_group *tg);
 225extern void sched_offline_group(struct task_group *tg);
 226
 227extern void sched_move_task(struct task_struct *tsk);
 228
 229#ifdef CONFIG_FAIR_GROUP_SCHED
 230extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 231#endif
 232
 233#else /* CONFIG_CGROUP_SCHED */
 234
 235struct cfs_bandwidth { };
 236
 237#endif  /* CONFIG_CGROUP_SCHED */
 238
 239/* CFS-related fields in a runqueue */
 240struct cfs_rq {
 241        struct load_weight load;
 242        unsigned int nr_running, h_nr_running;
 243
 244        u64 exec_clock;
 245        u64 min_vruntime;
 246#ifndef CONFIG_64BIT
 247        u64 min_vruntime_copy;
 248#endif
 249
 250        struct rb_root tasks_timeline;
 251        struct rb_node *rb_leftmost;
 252
 253        /*
  254         * 'curr' points to the currently running entity on this cfs_rq.
  255         * It is NULL otherwise (i.e. when no entity is currently running).
 256         */
 257        struct sched_entity *curr, *next, *last, *skip;
 258
 259#ifdef  CONFIG_SCHED_DEBUG
 260        unsigned int nr_spread_over;
 261#endif
 262
 263#ifdef CONFIG_SMP
 264/*
  265 * Load tracking only depends on SMP; the FAIR_GROUP_SCHED dependency below
  266 * may be removed once it becomes useful beyond shares distribution (e.g.
  267 * load balancing).
 268 */
 269#ifdef CONFIG_FAIR_GROUP_SCHED
 270        /*
 271         * CFS Load tracking
 272         * Under CFS, load is tracked on a per-entity basis and aggregated up.
 273         * This allows for the description of both thread and group usage (in
 274         * the FAIR_GROUP_SCHED case).
 275         */
 276        u64 runnable_load_avg, blocked_load_avg;
 277        atomic64_t decay_counter, removed_load;
 278        u64 last_decay;
 279#endif /* CONFIG_FAIR_GROUP_SCHED */
 280/* These always depend on CONFIG_FAIR_GROUP_SCHED */
 281#ifdef CONFIG_FAIR_GROUP_SCHED
 282        u32 tg_runnable_contrib;
 283        u64 tg_load_contrib;
 284#endif /* CONFIG_FAIR_GROUP_SCHED */
 285
 286        /*
 287         *   h_load = weight * f(tg)
 288         *
 289         * Where f(tg) is the recursive weight fraction assigned to
 290         * this group.
 291         */
 292        unsigned long h_load;
 293#endif /* CONFIG_SMP */
 294
 295#ifdef CONFIG_FAIR_GROUP_SCHED
 296        struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */
 297
 298        /*
  299         * leaf cfs_rqs are those that hold tasks (the lowest schedulable
  300         * entities in a hierarchy). Non-leaf runqueues hold other, higher
  301         * schedulable entities (like users, containers etc.).
  302         *
  303         * leaf_cfs_rq_list ties together the leaf cfs_rq's of a cpu. This
  304         * list is used during load balancing.
 305         */
 306        int on_list;
 307        struct list_head leaf_cfs_rq_list;
 308        struct task_group *tg;  /* group that "owns" this runqueue */
 309
 310#ifdef CONFIG_CFS_BANDWIDTH
 311        int runtime_enabled;
 312        u64 runtime_expires;
 313        s64 runtime_remaining;
 314
 315        u64 throttled_clock, throttled_clock_task;
 316        u64 throttled_clock_task_time;
 317        int throttled, throttle_count;
 318        struct list_head throttled_list;
 319#endif /* CONFIG_CFS_BANDWIDTH */
 320#endif /* CONFIG_FAIR_GROUP_SCHED */
 321};
 322
 323static inline int rt_bandwidth_enabled(void)
 324{
 325        return sysctl_sched_rt_runtime >= 0;
 326}
 327
 328/* Real-Time classes' related field in a runqueue: */
 329struct rt_rq {
 330        struct rt_prio_array active;
 331        unsigned int rt_nr_running;
 332#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 333        struct {
 334                int curr; /* highest queued rt task prio */
 335#ifdef CONFIG_SMP
 336                int next; /* next highest */
 337#endif
 338        } highest_prio;
 339#endif
 340#ifdef CONFIG_SMP
 341        unsigned long rt_nr_migratory;
 342        unsigned long rt_nr_total;
 343        int overloaded;
 344        struct plist_head pushable_tasks;
 345#endif
 346        int rt_throttled;
 347        u64 rt_time;
 348        u64 rt_runtime;
 349        /* Nests inside the rq lock: */
 350        raw_spinlock_t rt_runtime_lock;
 351
 352#ifdef CONFIG_RT_GROUP_SCHED
 353        unsigned long rt_nr_boosted;
 354
 355        struct rq *rq;
 356        struct list_head leaf_rt_rq_list;
 357        struct task_group *tg;
 358#endif
 359};
 360
 361#ifdef CONFIG_SMP
 362
 363/*
 364 * We add the notion of a root-domain which will be used to define per-domain
 365 * variables. Each exclusive cpuset essentially defines an island domain by
 366 * fully partitioning the member cpus from any other cpuset. Whenever a new
 367 * exclusive cpuset is created, we also create and attach a new root-domain
 368 * object.
 369 *
 370 */
 371struct root_domain {
 372        atomic_t refcount;
 373        atomic_t rto_count;
 374        struct rcu_head rcu;
 375        cpumask_var_t span;
 376        cpumask_var_t online;
 377
 378        /*
 379         * The "RT overload" flag: it gets set if a CPU has more than
 380         * one runnable RT task.
 381         */
 382        cpumask_var_t rto_mask;
 383        struct cpupri cpupri;
 384};
 385
 386extern struct root_domain def_root_domain;
 387
 388#endif /* CONFIG_SMP */
 389
 390/*
 391 * This is the main, per-CPU runqueue data structure.
 392 *
  393 * Locking rule: in places that lock multiple runqueues (such as the
  394 * load balancing or the thread migration code), lock acquisitions
  395 * must be ordered by ascending runqueue address.
 396 */
 397struct rq {
 398        /* runqueue lock: */
 399        raw_spinlock_t lock;
 400
 401        /*
 402         * nr_running and cpu_load should be in the same cacheline because
 403         * remote CPUs use both these fields when doing load calculation.
 404         */
 405        unsigned int nr_running;
 406        #define CPU_LOAD_IDX_MAX 5
 407        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 408        unsigned long last_load_update_tick;
 409#ifdef CONFIG_NO_HZ_COMMON
 410        u64 nohz_stamp;
 411        unsigned long nohz_flags;
 412#endif
 413#ifdef CONFIG_NO_HZ_FULL
 414        unsigned long last_sched_tick;
 415#endif
 416        int skip_clock_update;
 417
 418        /* capture load from *all* tasks on this cpu: */
 419        struct load_weight load;
 420        unsigned long nr_load_updates;
 421        u64 nr_switches;
 422
 423        struct cfs_rq cfs;
 424        struct rt_rq rt;
 425
 426#ifdef CONFIG_FAIR_GROUP_SCHED
 427        /* list of leaf cfs_rq on this cpu: */
 428        struct list_head leaf_cfs_rq_list;
 429#ifdef CONFIG_SMP
 430        unsigned long h_load_throttle;
 431#endif /* CONFIG_SMP */
 432#endif /* CONFIG_FAIR_GROUP_SCHED */
 433
 434#ifdef CONFIG_RT_GROUP_SCHED
 435        struct list_head leaf_rt_rq_list;
 436#endif
 437
 438        /*
 439         * This is part of a global counter where only the total sum
 440         * over all CPUs matters. A task can increase this counter on
  441         * one CPU and, if it is migrated afterwards, decrease it on
  442         * another CPU. Always updated under the runqueue lock:
 443         */
 444        unsigned long nr_uninterruptible;
 445
 446        struct task_struct *curr, *idle, *stop;
 447        unsigned long next_balance;
 448        struct mm_struct *prev_mm;
 449
 450        u64 clock;
 451        u64 clock_task;
 452
 453        atomic_t nr_iowait;
 454
 455#ifdef CONFIG_SMP
 456        struct root_domain *rd;
 457        struct sched_domain *sd;
 458
 459        unsigned long cpu_power;
 460
 461        unsigned char idle_balance;
 462        /* For active balancing */
 463        int post_schedule;
 464        int active_balance;
 465        int push_cpu;
 466        struct cpu_stop_work active_balance_work;
 467        /* cpu of this runqueue: */
 468        int cpu;
 469        int online;
 470
 471        struct list_head cfs_tasks;
 472
 473        u64 rt_avg;
 474        u64 age_stamp;
 475        u64 idle_stamp;
 476        u64 avg_idle;
 477#endif
 478
 479#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 480        u64 prev_irq_time;
 481#endif
 482#ifdef CONFIG_PARAVIRT
 483        u64 prev_steal_time;
 484#endif
 485#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
 486        u64 prev_steal_time_rq;
 487#endif
 488
 489        /* calc_load related fields */
 490        unsigned long calc_load_update;
 491        long calc_load_active;
 492
 493#ifdef CONFIG_SCHED_HRTICK
 494#ifdef CONFIG_SMP
 495        int hrtick_csd_pending;
 496        struct call_single_data hrtick_csd;
 497#endif
 498        struct hrtimer hrtick_timer;
 499#endif
 500
 501#ifdef CONFIG_SCHEDSTATS
 502        /* latency stats */
 503        struct sched_info rq_sched_info;
 504        unsigned long long rq_cpu_time;
 505        /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 506
 507        /* sys_sched_yield() stats */
 508        unsigned int yld_count;
 509
 510        /* schedule() stats */
 511        unsigned int sched_count;
 512        unsigned int sched_goidle;
 513
 514        /* try_to_wake_up() stats */
 515        unsigned int ttwu_count;
 516        unsigned int ttwu_local;
 517#endif
 518
 519#ifdef CONFIG_SMP
 520        struct llist_head wake_list;
 521#endif
 522
 523        struct sched_avg avg;
 524};
 525
 526static inline int cpu_of(struct rq *rq)
 527{
 528#ifdef CONFIG_SMP
 529        return rq->cpu;
 530#else
 531        return 0;
 532#endif
 533}
 534
 535DECLARE_PER_CPU(struct rq, runqueues);
 536
 537#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
 538#define this_rq()               (&__get_cpu_var(runqueues))
 539#define task_rq(p)              cpu_rq(task_cpu(p))
 540#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
 541#define raw_rq()                (&__raw_get_cpu_var(runqueues))
 542
 543#ifdef CONFIG_SMP
 544
 545#define rcu_dereference_check_sched_domain(p) \
 546        rcu_dereference_check((p), \
 547                              lockdep_is_held(&sched_domains_mutex))
 548
 549/*
 550 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 551 * See detach_destroy_domains: synchronize_sched for details.
 552 *
 553 * The domain tree of any CPU may only be accessed from within
 554 * preempt-disabled sections.
 555 */
 556#define for_each_domain(cpu, __sd) \
 557        for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
 558                        __sd; __sd = __sd->parent)
 559
 560#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
 561
 562/**
 563 * highest_flag_domain - Return highest sched_domain containing flag.
 564 * @cpu:        The cpu whose highest level of sched domain is to
 565 *              be returned.
 566 * @flag:       The flag to check for the highest sched_domain
 567 *              for the given cpu.
 568 *
 569 * Returns the highest sched_domain of a cpu which contains the given flag.
 570 */
 571static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 572{
 573        struct sched_domain *sd, *hsd = NULL;
 574
 575        for_each_domain(cpu, sd) {
 576                if (!(sd->flags & flag))
 577                        break;
 578                hsd = sd;
 579        }
 580
 581        return hsd;
 582}
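/*
 * Typical use (a sketch, not taken from this file): the per-cpu sd_llc
 * pointer declared below can be derived from the highest domain that still
 * shares a last-level cache, along the lines of:
 *
 *      sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *      rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 *      per_cpu(sd_llc_id, cpu) = sd ? cpumask_first(sched_domain_span(sd)) : cpu;
 */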
 583
 584DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 585DECLARE_PER_CPU(int, sd_llc_id);
 586
 587struct sched_group_power {
 588        atomic_t ref;
 589        /*
 590         * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 591         * single CPU.
 592         */
 593        unsigned int power, power_orig;
 594        unsigned long next_update;
 595        /*
 596         * Number of busy cpus in this group.
 597         */
 598        atomic_t nr_busy_cpus;
 599
 600        unsigned long cpumask[0]; /* iteration mask */
 601};
 602
 603struct sched_group {
 604        struct sched_group *next;       /* Must be a circular list */
 605        atomic_t ref;
 606
 607        unsigned int group_weight;
 608        struct sched_group_power *sgp;
 609
 610        /*
 611         * The CPUs this group covers.
 612         *
 613         * NOTE: this field is variable length. (Allocated dynamically
 614         * by attaching extra space to the end of the structure,
 615         * depending on how many CPUs the kernel has booted up with)
 616         */
 617        unsigned long cpumask[0];
 618};
 619
 620static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 621{
 622        return to_cpumask(sg->cpumask);
 623}
 624
 625/*
 626 * cpumask masking which cpus in the group are allowed to iterate up the domain
 627 * tree.
 628 */
 629static inline struct cpumask *sched_group_mask(struct sched_group *sg)
 630{
 631        return to_cpumask(sg->sgp->cpumask);
 632}
 633
 634/**
 635 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 636 * @group: The group whose first cpu is to be returned.
 637 */
 638static inline unsigned int group_first_cpu(struct sched_group *group)
 639{
 640        return cpumask_first(sched_group_cpus(group));
 641}
 642
 643extern int group_balance_cpu(struct sched_group *sg);
 644
 645#endif /* CONFIG_SMP */
 646
 647#include "stats.h"
 648#include "auto_group.h"
 649
 650#ifdef CONFIG_CGROUP_SCHED
 651
 652/*
  653 * Return the group to which this task belongs.
 654 *
 655 * We cannot use task_subsys_state() and friends because the cgroup
 656 * subsystem changes that value before the cgroup_subsys::attach() method
 657 * is called, therefore we cannot pin it and might observe the wrong value.
 658 *
 659 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 660 * core changes this before calling sched_move_task().
 661 *
 662 * Instead we use a 'copy' which is updated from sched_move_task() while
 663 * holding both task_struct::pi_lock and rq::lock.
 664 */
 665static inline struct task_group *task_group(struct task_struct *p)
 666{
 667        return p->sched_task_group;
 668}
 669
 670/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 671static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 672{
 673#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
 674        struct task_group *tg = task_group(p);
 675#endif
 676
 677#ifdef CONFIG_FAIR_GROUP_SCHED
 678        p->se.cfs_rq = tg->cfs_rq[cpu];
 679        p->se.parent = tg->se[cpu];
 680#endif
 681
 682#ifdef CONFIG_RT_GROUP_SCHED
 683        p->rt.rt_rq  = tg->rt_rq[cpu];
 684        p->rt.parent = tg->rt_se[cpu];
 685#endif
 686}
 687
 688#else /* CONFIG_CGROUP_SCHED */
 689
 690static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
 691static inline struct task_group *task_group(struct task_struct *p)
 692{
 693        return NULL;
 694}
 695
 696#endif /* CONFIG_CGROUP_SCHED */
 697
 698static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 699{
 700        set_task_rq(p, cpu);
 701#ifdef CONFIG_SMP
 702        /*
  703         * After ->cpu is set to a new value, task_rq_lock(p, ...) can be
  704         * successfully executed on another CPU. We must ensure that updates of
 705         * per-task data have been completed by this moment.
 706         */
 707        smp_wmb();
 708        task_thread_info(p)->cpu = cpu;
 709#endif
 710}
 711
 712/*
 713 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 714 */
 715#ifdef CONFIG_SCHED_DEBUG
 716# include <linux/static_key.h>
 717# define const_debug __read_mostly
 718#else
 719# define const_debug const
 720#endif
 721
 722extern const_debug unsigned int sysctl_sched_features;
 723
 724#define SCHED_FEAT(name, enabled)       \
 725        __SCHED_FEAT_##name ,
 726
 727enum {
 728#include "features.h"
 729        __SCHED_FEAT_NR,
 730};
 731
 732#undef SCHED_FEAT
 733
 734#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
 735static __always_inline bool static_branch__true(struct static_key *key)
 736{
 737        return static_key_true(key); /* Not out of line branch. */
 738}
 739
 740static __always_inline bool static_branch__false(struct static_key *key)
 741{
 742        return static_key_false(key); /* Out of line branch. */
 743}
 744
 745#define SCHED_FEAT(name, enabled)                                       \
 746static __always_inline bool static_branch_##name(struct static_key *key) \
 747{                                                                       \
 748        return static_branch__##enabled(key);                           \
 749}
 750
 751#include "features.h"
 752
 753#undef SCHED_FEAT
 754
 755extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 756#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
 757#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
 758#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
 759#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
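/*
 * Whichever variant is compiled in, feature checks read the same at the call
 * site; e.g. hrtick_enabled() further down in this header starts with:
 *
 *      if (!sched_feat(HRTICK))
 *              return 0;
 */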
 760
 761#ifdef CONFIG_NUMA_BALANCING
 762#define sched_feat_numa(x) sched_feat(x)
 763#ifdef CONFIG_SCHED_DEBUG
 764#define numabalancing_enabled sched_feat_numa(NUMA)
 765#else
 766extern bool numabalancing_enabled;
 767#endif /* CONFIG_SCHED_DEBUG */
 768#else
 769#define sched_feat_numa(x) (0)
 770#define numabalancing_enabled (0)
 771#endif /* CONFIG_NUMA_BALANCING */
 772
 773static inline u64 global_rt_period(void)
 774{
 775        return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
 776}
 777
 778static inline u64 global_rt_runtime(void)
 779{
 780        if (sysctl_sched_rt_runtime < 0)
 781                return RUNTIME_INF;
 782
 783        return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 784}
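/*
 * Worked example (assuming the usual defaults, which are set elsewhere):
 * sysctl_sched_rt_period == 1000000us and sysctl_sched_rt_runtime == 950000us
 * give RT tasks at most 0.95s of CPU time per 1s period, while a runtime
 * sysctl of -1 maps to RUNTIME_INF, i.e. no throttling.
 */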
 785
 786
 787
 788static inline int task_current(struct rq *rq, struct task_struct *p)
 789{
 790        return rq->curr == p;
 791}
 792
 793static inline int task_running(struct rq *rq, struct task_struct *p)
 794{
 795#ifdef CONFIG_SMP
 796        return p->on_cpu;
 797#else
 798        return task_current(rq, p);
 799#endif
 800}
 801
 802
 803#ifndef prepare_arch_switch
 804# define prepare_arch_switch(next)      do { } while (0)
 805#endif
 806#ifndef finish_arch_switch
 807# define finish_arch_switch(prev)       do { } while (0)
 808#endif
 809#ifndef finish_arch_post_lock_switch
 810# define finish_arch_post_lock_switch() do { } while (0)
 811#endif
 812
 813#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 814static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 815{
 816#ifdef CONFIG_SMP
 817        /*
 818         * We can optimise this out completely for !SMP, because the
 819         * SMP rebalancing from interrupt is the only thing that cares
 820         * here.
 821         */
 822        next->on_cpu = 1;
 823#endif
 824}
 825
 826static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 827{
 828#ifdef CONFIG_SMP
 829        /*
 830         * After ->on_cpu is cleared, the task can be moved to a different CPU.
 831         * We must ensure this doesn't happen until the switch is completely
 832         * finished.
 833         */
 834        smp_wmb();
 835        prev->on_cpu = 0;
 836#endif
 837#ifdef CONFIG_DEBUG_SPINLOCK
 838        /* this is a valid case when another task releases the spinlock */
 839        rq->lock.owner = current;
 840#endif
 841        /*
 842         * If we are tracking spinlock dependencies then we have to
 843         * fix up the runqueue lock - which gets 'carried over' from
 844         * prev into current:
 845         */
 846        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
 847
 848        raw_spin_unlock_irq(&rq->lock);
 849}
 850
 851#else /* __ARCH_WANT_UNLOCKED_CTXSW */
 852static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 853{
 854#ifdef CONFIG_SMP
 855        /*
 856         * We can optimise this out completely for !SMP, because the
 857         * SMP rebalancing from interrupt is the only thing that cares
 858         * here.
 859         */
 860        next->on_cpu = 1;
 861#endif
 862        raw_spin_unlock(&rq->lock);
 863}
 864
 865static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 866{
 867#ifdef CONFIG_SMP
 868        /*
 869         * After ->on_cpu is cleared, the task can be moved to a different CPU.
 870         * We must ensure this doesn't happen until the switch is completely
 871         * finished.
 872         */
 873        smp_wmb();
 874        prev->on_cpu = 0;
 875#endif
 876        local_irq_enable();
 877}
 878#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 879
 880/*
 881 * wake flags
 882 */
 883#define WF_SYNC         0x01            /* waker goes to sleep after wakeup */
 884#define WF_FORK         0x02            /* child wakeup after fork */
  885#define WF_MIGRATED     0x04            /* internal use, task got migrated */
 886
 887static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 888{
 889        lw->weight += inc;
 890        lw->inv_weight = 0;
 891}
 892
 893static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 894{
 895        lw->weight -= dec;
 896        lw->inv_weight = 0;
 897}
 898
 899static inline void update_load_set(struct load_weight *lw, unsigned long w)
 900{
 901        lw->weight = w;
 902        lw->inv_weight = 0;
 903}
 904
 905/*
 906 * To aid in avoiding the subversion of "niceness" due to uneven distribution
  907 * of tasks with abnormal "nice" values across CPUs, the contribution that
 908 * each task makes to its run queue's load is weighted according to its
 909 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 910 * scaled version of the new time slice allocation that they receive on time
 911 * slice expiry etc.
 912 */
 913
 914#define WEIGHT_IDLEPRIO                3
 915#define WMULT_IDLEPRIO         1431655765
 916
 917/*
 918 * Nice levels are multiplicative, with a gentle 10% change for every
 919 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 920 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 921 * that remained on nice 0.
 922 *
 923 * The "10% effect" is relative and cumulative: from _any_ nice level,
 924 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 925 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 926 * If a task goes up by ~10% and another task goes down by ~10% then
 927 * the relative distance between them is ~25%.)
 928 */
 929static const int prio_to_weight[40] = {
 930 /* -20 */     88761,     71755,     56483,     46273,     36291,
 931 /* -15 */     29154,     23254,     18705,     14949,     11916,
 932 /* -10 */      9548,      7620,      6100,      4904,      3906,
 933 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 934 /*   0 */      1024,       820,       655,       526,       423,
 935 /*   5 */       335,       272,       215,       172,       137,
 936 /*  10 */       110,        87,        70,        56,        45,
 937 /*  15 */        36,        29,        23,        18,        15,
 938};
 939
 940/*
 941 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 942 *
 943 * In cases where the weight does not change often, we can use the
 944 * precalculated inverse to speed up arithmetics by turning divisions
 945 * into multiplications:
 946 */
 947static const u32 prio_to_wmult[40] = {
 948 /* -20 */     48388,     59856,     76040,     92818,    118348,
 949 /* -15 */    147320,    184698,    229616,    287308,    360437,
 950 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 951 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 952 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 953 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 954 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 955 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 956};
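/*
 * Worked example: prio_to_wmult[20] == 4194304 == 2^32 / 1024, the nice-0
 * entry, so a division by a task's weight can be replaced by a multiplication
 * and a shift, roughly (a sketch of what the fair class does elsewhere):
 *
 *      delta_fair ~= (delta_exec * NICE_0_LOAD * prio_to_wmult[idx]) >> 32
 *
 * The relative weights set CPU shares: two always-runnable tasks at nice 0
 * (weight 1024) and nice 5 (weight 335) split the CPU roughly 75%/25%.
 */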
 957
 958#define ENQUEUE_WAKEUP          1
 959#define ENQUEUE_HEAD            2
 960#ifdef CONFIG_SMP
 961#define ENQUEUE_WAKING          4       /* sched_class::task_waking was called */
 962#else
 963#define ENQUEUE_WAKING          0
 964#endif
 965
 966#define DEQUEUE_SLEEP           1
 967
 968struct sched_class {
 969        const struct sched_class *next;
 970
 971        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
 972        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 973        void (*yield_task) (struct rq *rq);
 974        bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
 975
 976        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 977
 978        struct task_struct * (*pick_next_task) (struct rq *rq);
 979        void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 980
 981#ifdef CONFIG_SMP
 982        int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 983        void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
 984
 985        void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 986        void (*post_schedule) (struct rq *this_rq);
 987        void (*task_waking) (struct task_struct *task);
 988        void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 989
 990        void (*set_cpus_allowed)(struct task_struct *p,
 991                                 const struct cpumask *newmask);
 992
 993        void (*rq_online)(struct rq *rq);
 994        void (*rq_offline)(struct rq *rq);
 995#endif
 996
 997        void (*set_curr_task) (struct rq *rq);
 998        void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 999        void (*task_fork) (struct task_struct *p);
1000
1001        void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1002        void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1003        void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1004                             int oldprio);
1005
1006        unsigned int (*get_rr_interval) (struct rq *rq,
1007                                         struct task_struct *task);
1008
1009#ifdef CONFIG_FAIR_GROUP_SCHED
1010        void (*task_move_group) (struct task_struct *p, int on_rq);
1011#endif
1012};
1013
1014#define sched_class_highest (&stop_sched_class)
1015#define for_each_class(class) \
 1016        for (class = sched_class_highest; class; class = class->next)
1017
1018extern const struct sched_class stop_sched_class;
1019extern const struct sched_class rt_sched_class;
1020extern const struct sched_class fair_sched_class;
1021extern const struct sched_class idle_sched_class;
1022
1023
1024#ifdef CONFIG_SMP
1025
1026extern void update_group_power(struct sched_domain *sd, int cpu);
1027
1028extern void trigger_load_balance(struct rq *rq, int cpu);
1029extern void idle_balance(int this_cpu, struct rq *this_rq);
1030
1031/*
 1032 * This only depends on SMP; the FAIR_GROUP_SCHED dependency may be removed
 1033 * once runnable_avg becomes useful for load balancing.
1034 */
1035#if defined(CONFIG_FAIR_GROUP_SCHED)
1036extern void idle_enter_fair(struct rq *this_rq);
1037extern void idle_exit_fair(struct rq *this_rq);
1038#else
1039static inline void idle_enter_fair(struct rq *this_rq) {}
1040static inline void idle_exit_fair(struct rq *this_rq) {}
1041#endif
1042
1043#else   /* CONFIG_SMP */
1044
1045static inline void idle_balance(int cpu, struct rq *rq)
1046{
1047}
1048
1049#endif
1050
1051extern void sysrq_sched_debug_show(void);
1052extern void sched_init_granularity(void);
1053extern void update_max_interval(void);
1054extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
1055extern void init_sched_rt_class(void);
1056extern void init_sched_fair_class(void);
1057
1058extern void resched_task(struct task_struct *p);
1059extern void resched_cpu(int cpu);
1060
1061extern struct rt_bandwidth def_rt_bandwidth;
1062extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1063
1064extern void update_idle_cpu_load(struct rq *this_rq);
1065
1066#ifdef CONFIG_PARAVIRT
1067static inline u64 steal_ticks(u64 steal)
1068{
1069        if (unlikely(steal > NSEC_PER_SEC))
1070                return div_u64(steal, TICK_NSEC);
1071
1072        return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
1073}
1074#endif
1075
1076static inline void inc_nr_running(struct rq *rq)
1077{
1078        rq->nr_running++;
1079
1080#ifdef CONFIG_NO_HZ_FULL
1081        if (rq->nr_running == 2) {
1082                if (tick_nohz_full_cpu(rq->cpu)) {
1083                        /* Order rq->nr_running write against the IPI */
1084                        smp_wmb();
1085                        smp_send_reschedule(rq->cpu);
1086                }
 1087        }
1088#endif
1089}
1090
1091static inline void dec_nr_running(struct rq *rq)
1092{
1093        rq->nr_running--;
1094}
1095
1096static inline void rq_last_tick_reset(struct rq *rq)
1097{
1098#ifdef CONFIG_NO_HZ_FULL
1099        rq->last_sched_tick = jiffies;
1100#endif
1101}
1102
1103extern void update_rq_clock(struct rq *rq);
1104
1105extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1106extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1107
1108extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1109
1110extern const_debug unsigned int sysctl_sched_time_avg;
1111extern const_debug unsigned int sysctl_sched_nr_migrate;
1112extern const_debug unsigned int sysctl_sched_migration_cost;
1113
1114static inline u64 sched_avg_period(void)
1115{
1116        return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1117}
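/*
 * With the usual default of sysctl_sched_time_avg == 1000 (ms), this is
 * 500ms expressed in nanoseconds; rq->rt_avg is aged against periods of
 * this length in sched_avg_update() (illustrative numbers, see the sysctl).
 */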
1118
1119#ifdef CONFIG_SCHED_HRTICK
1120
1121/*
1122 * Use hrtick when:
1123 *  - enabled by features
1124 *  - hrtimer is actually high res
1125 */
1126static inline int hrtick_enabled(struct rq *rq)
1127{
1128        if (!sched_feat(HRTICK))
1129                return 0;
1130        if (!cpu_active(cpu_of(rq)))
1131                return 0;
1132        return hrtimer_is_hres_active(&rq->hrtick_timer);
1133}
1134
1135void hrtick_start(struct rq *rq, u64 delay);
1136
1137#else
1138
1139static inline int hrtick_enabled(struct rq *rq)
1140{
1141        return 0;
1142}
1143
1144#endif /* CONFIG_SCHED_HRTICK */
1145
1146#ifdef CONFIG_SMP
1147extern void sched_avg_update(struct rq *rq);
1148static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1149{
1150        rq->rt_avg += rt_delta;
1151        sched_avg_update(rq);
1152}
1153#else
1154static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1155static inline void sched_avg_update(struct rq *rq) { }
1156#endif
1157
1158extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
1159
1160#ifdef CONFIG_SMP
1161#ifdef CONFIG_PREEMPT
1162
1163static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1164
1165/*
1166 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1167 * way at the expense of forcing extra atomic operations in all
1168 * invocations.  This assures that the double_lock is acquired using the
1169 * same underlying policy as the spinlock_t on this architecture, which
1170 * reduces latency compared to the unfair variant below.  However, it
1171 * also adds more overhead and therefore may reduce throughput.
1172 */
1173static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1174        __releases(this_rq->lock)
1175        __acquires(busiest->lock)
1176        __acquires(this_rq->lock)
1177{
1178        raw_spin_unlock(&this_rq->lock);
1179        double_rq_lock(this_rq, busiest);
1180
1181        return 1;
1182}
1183
1184#else
1185/*
1186 * Unfair double_lock_balance: Optimizes throughput at the expense of
1187 * latency by eliminating extra atomic operations when the locks are
1188 * already in proper order on entry.  This favors lower cpu-ids and will
1189 * grant the double lock to lower cpus over higher ids under contention,
1190 * regardless of entry order into the function.
1191 */
1192static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1193        __releases(this_rq->lock)
1194        __acquires(busiest->lock)
1195        __acquires(this_rq->lock)
1196{
1197        int ret = 0;
1198
1199        if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1200                if (busiest < this_rq) {
1201                        raw_spin_unlock(&this_rq->lock);
1202                        raw_spin_lock(&busiest->lock);
1203                        raw_spin_lock_nested(&this_rq->lock,
1204                                              SINGLE_DEPTH_NESTING);
1205                        ret = 1;
1206                } else
1207                        raw_spin_lock_nested(&busiest->lock,
1208                                              SINGLE_DEPTH_NESTING);
1209        }
1210        return ret;
1211}
1212
1213#endif /* CONFIG_PREEMPT */
1214
1215/*
1216 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1217 */
1218static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1219{
1220        if (unlikely(!irqs_disabled())) {
 1221                /* printk() doesn't work well under rq->lock */
1222                raw_spin_unlock(&this_rq->lock);
1223                BUG_ON(1);
1224        }
1225
1226        return _double_lock_balance(this_rq, busiest);
1227}
1228
1229static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1230        __releases(busiest->lock)
1231{
1232        raw_spin_unlock(&busiest->lock);
1233        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1234}
1235
1236/*
1237 * double_rq_lock - safely lock two runqueues
1238 *
1239 * Note this does not disable interrupts like task_rq_lock,
1240 * you need to do so manually before calling.
1241 */
1242static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1243        __acquires(rq1->lock)
1244        __acquires(rq2->lock)
1245{
1246        BUG_ON(!irqs_disabled());
1247        if (rq1 == rq2) {
1248                raw_spin_lock(&rq1->lock);
1249                __acquire(rq2->lock);   /* Fake it out ;) */
1250        } else {
1251                if (rq1 < rq2) {
1252                        raw_spin_lock(&rq1->lock);
1253                        raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1254                } else {
1255                        raw_spin_lock(&rq2->lock);
1256                        raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1257                }
1258        }
1259}
1260
1261/*
1262 * double_rq_unlock - safely unlock two runqueues
1263 *
1264 * Note this does not restore interrupts like task_rq_unlock,
1265 * you need to do so manually after calling.
1266 */
1267static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1268        __releases(rq1->lock)
1269        __releases(rq2->lock)
1270{
1271        raw_spin_unlock(&rq1->lock);
1272        if (rq1 != rq2)
1273                raw_spin_unlock(&rq2->lock);
1274        else
1275                __release(rq2->lock);
1276}
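/*
 * Usage sketch (illustrative, not from this file): callers disable interrupts
 * themselves and rely on the ascending-address ordering above to avoid ABBA
 * deadlocks between two runqueues:
 *
 *      local_irq_disable();
 *      double_rq_lock(rq1, rq2);
 *      ...  migrate tasks, compare loads, etc.
 *      double_rq_unlock(rq1, rq2);
 *      local_irq_enable();
 */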
1277
1278#else /* CONFIG_SMP */
1279
1280/*
1281 * double_rq_lock - safely lock two runqueues
1282 *
1283 * Note this does not disable interrupts like task_rq_lock,
1284 * you need to do so manually before calling.
1285 */
1286static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1287        __acquires(rq1->lock)
1288        __acquires(rq2->lock)
1289{
1290        BUG_ON(!irqs_disabled());
1291        BUG_ON(rq1 != rq2);
1292        raw_spin_lock(&rq1->lock);
1293        __acquire(rq2->lock);   /* Fake it out ;) */
1294}
1295
1296/*
1297 * double_rq_unlock - safely unlock two runqueues
1298 *
1299 * Note this does not restore interrupts like task_rq_unlock,
1300 * you need to do so manually after calling.
1301 */
1302static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1303        __releases(rq1->lock)
1304        __releases(rq2->lock)
1305{
1306        BUG_ON(rq1 != rq2);
1307        raw_spin_unlock(&rq1->lock);
1308        __release(rq2->lock);
1309}
1310
1311#endif
1312
1313extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1314extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
1315extern void print_cfs_stats(struct seq_file *m, int cpu);
1316extern void print_rt_stats(struct seq_file *m, int cpu);
1317
1318extern void init_cfs_rq(struct cfs_rq *cfs_rq);
1319extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
1320
1321extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
1322
1323#ifdef CONFIG_NO_HZ_COMMON
1324enum rq_nohz_flag_bits {
1325        NOHZ_TICK_STOPPED,
1326        NOHZ_BALANCE_KICK,
1327};
1328
1329#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
1330#endif
1331
1332#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1333
1334DECLARE_PER_CPU(u64, cpu_hardirq_time);
1335DECLARE_PER_CPU(u64, cpu_softirq_time);
1336
1337#ifndef CONFIG_64BIT
1338DECLARE_PER_CPU(seqcount_t, irq_time_seq);
1339
1340static inline void irq_time_write_begin(void)
1341{
1342        __this_cpu_inc(irq_time_seq.sequence);
1343        smp_wmb();
1344}
1345
1346static inline void irq_time_write_end(void)
1347{
1348        smp_wmb();
1349        __this_cpu_inc(irq_time_seq.sequence);
1350}
1351
1352static inline u64 irq_time_read(int cpu)
1353{
1354        u64 irq_time;
1355        unsigned seq;
1356
1357        do {
1358                seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1359                irq_time = per_cpu(cpu_softirq_time, cpu) +
1360                           per_cpu(cpu_hardirq_time, cpu);
1361        } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1362
1363        return irq_time;
1364}
1365#else /* CONFIG_64BIT */
1366static inline void irq_time_write_begin(void)
1367{
1368}
1369
1370static inline void irq_time_write_end(void)
1371{
1372}
1373
1374static inline u64 irq_time_read(int cpu)
1375{
1376        return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1377}
1378#endif /* CONFIG_64BIT */
1379#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1380