/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>
#include <linux/frame.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
        s64 delta;

        lockdep_assert_held(&rq->lock);

        if (rq->clock_skip_update & RQCF_ACT_SKIP)
                return;

        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
        if (delta < 0)
                return;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)       \
        (1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
        0;

#undef SCHED_FEAT
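
/*
 * Illustrative expansion (assuming features.h contains entries such as
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)): the include above becomes
 *
 *      const_debug unsigned int sysctl_sched_features =
 *              (1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
 *              ...
 *              0;
 *
 * i.e. one bit per feature, with the trailing 0 terminating the OR chain.
 */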

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/* cpus with isolated domains */
cpumask_var_t cpu_isolated_map;

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
        __acquires(rq->lock)
{
        struct rq *rq;

        local_irq_disable();
        rq = this_rq();
        raw_spin_lock(&rq->lock);

        return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
        if (hrtimer_active(&rq->hrtick_timer))
                hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
        struct rq *rq = container_of(timer, struct rq, hrtick_timer);

        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
        raw_spin_unlock(&rq->lock);

        return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
        struct hrtimer *timer = &rq->hrtick_timer;

        hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
        struct rq *rq = arg;

        raw_spin_lock(&rq->lock);
        __hrtick_restart(rq);
        rq->hrtick_csd_pending = 0;
        raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
        struct hrtimer *timer = &rq->hrtick_timer;
        ktime_t time;
        s64 delta;

        /*
         * Don't schedule slices shorter than 10000ns, that just
         * doesn't make sense and can cause timer DoS.
         */
        delta = max_t(s64, delay, 10000LL);
        time = ktime_add_ns(timer->base->get_time(), delta);

        hrtimer_set_expires(timer, time);

        if (rq == this_rq()) {
                __hrtick_restart(rq);
        } else if (!rq->hrtick_csd_pending) {
                smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
                rq->hrtick_csd_pending = 1;
        }
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int cpu = (int)(long)hcpu;

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                hrtick_clear(cpu_rq(cpu));
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
        hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
        /*
         * Don't schedule slices shorter than 10000ns, that just
         * doesn't make sense. Rely on vruntime for fairness.
         */
        delay = max_t(u64, delay, 10000LL);
        hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
                      HRTIMER_MODE_REL_PINNED);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
        rq->hrtick_csd_pending = 0;

        rq->hrtick_csd.flags = 0;
        rq->hrtick_csd.func = __hrtick_start;
        rq->hrtick_csd.info = rq;
#endif

        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rq->hrtick_timer.function = hrtick;
}
#else   /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif  /* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)                                             \
        ({                                                              \
                typeof(ptr) _ptr = (ptr);                               \
                typeof(mask) _mask = (mask);                            \
                typeof(*_ptr) _old, _val = *_ptr;                       \
                                                                        \
                for (;;) {                                              \
                        _old = cmpxchg(_ptr, _val, _val | _mask);       \
                        if (_old == _val)                               \
                                break;                                  \
                        _val = _old;                                    \
                }                                                       \
        _old;                                                           \
})
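
/*
 * Usage sketch for fetch_or(): atomically OR a bit in and observe the
 * value that was there before, e.g. (as set_nr_and_not_polling() does
 * below):
 *
 *      old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
 *      if (old & _TIF_POLLING_NRFLAG)
 *              ... the remote CPU was polling; no IPI is needed ...
 */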

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
        struct thread_info *ti = task_thread_info(p);
        return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
        struct thread_info *ti = task_thread_info(p);
        typeof(ti->flags) old, val = READ_ONCE(ti->flags);

        for (;;) {
                if (!(val & _TIF_POLLING_NRFLAG))
                        return false;
                if (val & _TIF_NEED_RESCHED)
                        return true;
                old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
                if (old == val)
                        break;
                val = old;
        }
        return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
        set_tsk_need_resched(p);
        return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
        return false;
}
#endif
#endif

void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
        struct wake_q_node *node = &task->wake_q;

        /*
         * Atomically grab the task, if ->wake_q is !nil already it means
         * it's already queued (either by us or someone else) and will get the
         * wakeup due to that.
         *
         * This cmpxchg() implies a full barrier, which pairs with the write
         * barrier implied by the wakeup in wake_up_q().
         */
        if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
                return;

        get_task_struct(task);

        /*
         * The head is context local, there can be no concurrency.
         */
        *head->lastp = node;
        head->lastp = &node->next;
}

void wake_up_q(struct wake_q_head *head)
{
        struct wake_q_node *node = head->first;

        while (node != WAKE_Q_TAIL) {
                struct task_struct *task;

                task = container_of(node, struct task_struct, wake_q);
                BUG_ON(!task);
                /* task can safely be re-inserted now */
                node = node->next;
                task->wake_q.next = NULL;

                /*
                 * wake_up_process() implies a wmb() to pair with the queueing
                 * in wake_q_add() so as not to miss wakeups.
                 */
                wake_up_process(task);
                put_task_struct(task);
        }
}
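
/*
 * Typical wake_q usage, as an illustrative sketch (some_lock is a
 * placeholder): collect wakeups while holding a lock, then issue them
 * after dropping it so the woken tasks don't immediately contend on it:
 *
 *      WAKE_Q(wake_q);
 *
 *      raw_spin_lock(&some_lock);
 *      ...
 *      wake_q_add(&wake_q, task);      // takes a task reference
 *      ...
 *      raw_spin_unlock(&some_lock);
 *
 *      wake_up_q(&wake_q);             // wakeups + put_task_struct() here
 */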

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        int cpu;

        lockdep_assert_held(&rq->lock);

        if (test_tsk_need_resched(curr))
                return;

        cpu = cpu_of(rq);

        if (cpu == smp_processor_id()) {
                set_tsk_need_resched(curr);
                set_preempt_need_resched();
                return;
        }

        if (set_nr_and_not_polling(curr))
                smp_send_reschedule(cpu);
        else
                trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_curr(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu.  This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
        int i, cpu = smp_processor_id();
        struct sched_domain *sd;

        if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
                return cpu;

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd)) {
                        if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
                                cpu = i;
                                goto unlock;
                        }
                }
        }

        if (!is_housekeeping_cpu(cpu))
                cpu = housekeeping_any_cpu();
unlock:
        rcu_read_unlock();
        return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        if (cpu == smp_processor_id())
                return;

        if (set_nr_and_not_polling(rq->idle))
                smp_send_reschedule(cpu);
        else
                trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
        /*
         * We just need the target to call irq_exit() and re-evaluate
         * the next tick. The nohz full kick at least implies that.
         * If needed we can still optimize that later with an
         * empty IRQ.
         */
        if (tick_nohz_full_cpu(cpu)) {
                if (cpu != smp_processor_id() ||
                    tick_nohz_tick_stopped())
                        tick_nohz_full_kick_cpu(cpu);
                return true;
        }

        return false;
}

void wake_up_nohz_cpu(int cpu)
{
        if (!wake_up_full_nohz_cpu(cpu))
                wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
        int cpu = smp_processor_id();

        if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
                return false;

        if (idle_cpu(cpu) && !need_resched())
                return true;

        /*
         * We can't run Idle Load Balance on this CPU for this time so we
         * cancel it and clear NOHZ_BALANCE_KICK
         */
        clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
        return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
        return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
        int fifo_nr_running;

        /* Deadline tasks, even if single, need the tick */
        if (rq->dl.dl_nr_running)
                return false;

        /*
         * If there is more than one RR task, we need the tick to effect the
         * actual RR behaviour.
         */
        if (rq->rt.rr_nr_running) {
                if (rq->rt.rr_nr_running == 1)
                        return true;
                else
                        return false;
        }

        /*
         * If there are no RR tasks, but there are FIFO tasks, we can skip
         * the tick: there is no forced preemption between FIFO tasks.
         */
        fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
        if (fifo_nr_running)
                return true;

        /*
         * If there are no DL, RR, or FIFO tasks, there must only be CFS
         * tasks left; if there's more than one we need the tick for
         * involuntary preemption.
         */
        if (rq->nr_running > 1)
                return false;

        return true;
}
#endif /* CONFIG_NO_HZ_FULL */

void sched_avg_update(struct rq *rq)
{
        s64 period = sched_avg_period();

        while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
                /*
                 * Inline assembly required to prevent the compiler
                 * optimising this loop into a divmod call.
                 * See __iter_div_u64_rem() for another example of this.
                 */
                asm("" : "+rm" (rq->age_stamp));
                rq->age_stamp += period;
                rq->rt_avg /= 2;
        }
}
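
/*
 * Worked example: if 2.5 periods have elapsed since rq->age_stamp, the
 * loop above runs twice, rt_avg decays to a quarter of its value, and
 * age_stamp advances by exactly two periods; the remaining half period
 * is left banked for the next update.
 */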

#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
                        (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
                             tg_visitor down, tg_visitor up, void *data)
{
        struct task_group *parent, *child;
        int ret;

        parent = from;

down:
        ret = (*down)(parent, data);
        if (ret)
                goto out;
        list_for_each_entry_rcu(child, &parent->children, siblings) {
                parent = child;
                goto down;

up:
                continue;
        }
        ret = (*up)(parent, data);
        if (ret || parent == from)
                goto out;

        child = parent;
        parent = parent->parent;
        if (parent)
                goto up;
out:
        return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
        return 0;
}
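
/*
 * Illustrative call (my_down_fn is a placeholder tg_visitor that returns
 * 0 to keep walking): visit every group below the root, doing work on
 * the way down only, with tg_nop() as the no-op @up callback:
 *
 *      rcu_read_lock();
 *      ret = walk_tg_tree_from(&root_task_group, my_down_fn, tg_nop, data);
 *      rcu_read_unlock();
 */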
#endif

static void set_load_weight(struct task_struct *p)
{
        int prio = p->static_prio - MAX_RT_PRIO;
        struct load_weight *load = &p->se.load;

        /*
         * SCHED_IDLE tasks get minimal weight:
         */
        if (idle_policy(p->policy)) {
                load->weight = scale_load(WEIGHT_IDLEPRIO);
                load->inv_weight = WMULT_IDLEPRIO;
                return;
        }

        load->weight = scale_load(sched_prio_to_weight[prio]);
        load->inv_weight = sched_prio_to_wmult[prio];
}

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
        update_rq_clock(rq);
        if (!(flags & ENQUEUE_RESTORE))
                sched_info_queued(rq, p);
        p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
        update_rq_clock(rq);
        if (!(flags & DEQUEUE_SAVE))
                sched_info_dequeued(rq, p);
        p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;

        enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible++;

        dequeue_task(rq, p, flags);
}

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
        s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
        irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

        /*
         * Since irq_time is only updated on {soft,}irq_exit, we might run into
         * this case when a previous update_rq_clock() happened inside a
         * {soft,}irq region.
         *
         * When this happens, we stop ->clock_task and only update the
         * prev_irq_time stamp to account for the part that fit, so that a next
         * update will consume the rest. This ensures ->clock_task is
         * monotonic.
         *
         * It does however cause some slight mis-attribution of {soft,}irq
         * time; a more accurate solution would be to update the irq_time using
         * the current rq->clock timestamp, except that would require using
         * atomic ops.
         */
        if (irq_delta > delta)
                irq_delta = delta;

        rq->prev_irq_time += irq_delta;
        delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        if (static_key_false((&paravirt_steal_rq_enabled))) {
                steal = paravirt_steal_clock(cpu_of(rq));
                steal -= rq->prev_steal_time_rq;

                if (unlikely(steal > delta))
                        steal = delta;

                rq->prev_steal_time_rq += steal;
                delta -= steal;
        }
#endif

        rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
        if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
                sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        struct task_struct *old_stop = cpu_rq(cpu)->stop;

        if (stop) {
                /*
                 * Make it appear like a SCHED_FIFO task, it's something
                 * userspace knows about and won't get confused about.
                 *
                 * Also, it will make PI more or less work without too
                 * much confusion -- but then, stop work should not
                 * rely on PI working anyway.
                 */
                sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

                stop->sched_class = &stop_sched_class;
        }

        cpu_rq(cpu)->stop = stop;

        if (old_stop) {
                /*
                 * Reset it back to a normal scheduling class so that
                 * it can die in pieces.
                 */
                old_stop->sched_class = &rt_sched_class;
        }
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
        return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
        int prio;

        if (task_has_dl_policy(p))
                prio = MAX_DL_PRIO-1;
        else if (task_has_rt_policy(p))
                prio = MAX_RT_PRIO-1 - p->rt_priority;
        else
                prio = __normal_prio(p);
        return prio;
}
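
/*
 * Worked example: with MAX_RT_PRIO = 100, a SCHED_FIFO task with
 * rt_priority 50 gets normal_prio = 99 - 50 = 49, while a nice-0
 * SCHED_NORMAL task keeps its static_prio of 120; lower numbers mean
 * higher priority.
 */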

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
        p->normal_prio = normal_prio(p);
        /*
         * If we are RT tasks or we were boosted to RT priority,
         * keep the priority unchanged. Otherwise, update priority
         * to the normal priority:
         */
        if (!rt_prio(p->prio))
                return p->normal_prio;
        return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
        return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * This means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
                                       int oldprio)
{
        if (prev_class != p->sched_class) {
                if (prev_class->switched_from)
                        prev_class->switched_from(rq, p);

                p->sched_class->switched_to(rq, p);
        } else if (oldprio != p->prio || dl_task(p))
                p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
        const struct sched_class *class;

        if (p->sched_class == rq->curr->sched_class) {
                rq->curr->sched_class->check_preempt_curr(rq, p, flags);
        } else {
                for_each_class(class) {
                        if (class == rq->curr->sched_class)
                                break;
                        if (class == p->sched_class) {
                                resched_curr(rq);
                                break;
                        }
                }
        }

        /*
         * A queue event has occurred, and we're going to schedule.  In
         * this case, we can save a useless back-to-back clock update.
         */
        if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
                rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
{
        lockdep_assert_held(&rq->lock);

        p->on_rq = TASK_ON_RQ_MIGRATING;
        dequeue_task(rq, p, 0);
        set_task_cpu(p, new_cpu);
        raw_spin_unlock(&rq->lock);

        rq = cpu_rq(new_cpu);

        raw_spin_lock(&rq->lock);
        BUG_ON(task_cpu(p) != new_cpu);
        enqueue_task(rq, p, 0);
        p->on_rq = TASK_ON_RQ_QUEUED;
        check_preempt_curr(rq, p, 0);

        return rq;
}

struct migration_arg {
        struct task_struct *task;
        int dest_cpu;
};

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
{
        if (unlikely(!cpu_active(dest_cpu)))
                return rq;

        /* Affinity changed (again). */
        if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                return rq;

        rq = move_queued_task(rq, p, dest_cpu);

        return rq;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
        struct migration_arg *arg = data;
        struct task_struct *p = arg->task;
        struct rq *rq = this_rq();

        /*
         * The original target cpu might have gone down and we might
         * be on another cpu but it doesn't matter.
         */
        local_irq_disable();
        /*
         * We need to explicitly wake pending tasks before running
         * __migrate_task() such that we will not miss enforcing cpus_allowed
         * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
         */
        sched_ttwu_pending();

        raw_spin_lock(&p->pi_lock);
        raw_spin_lock(&rq->lock);
        /*
         * If task_rq(p) != rq, it cannot be migrated here, because we're
         * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
         * we're holding p->pi_lock.
         */
        if (task_rq(p) == rq && task_on_rq_queued(p))
                rq = __migrate_task(rq, p, arg->dest_cpu);
        raw_spin_unlock(&rq->lock);
        raw_spin_unlock(&p->pi_lock);

        local_irq_enable();
        return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
        cpumask_copy(&p->cpus_allowed, new_mask);
        p->nr_cpus_allowed = cpumask_weight(new_mask);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
        struct rq *rq = task_rq(p);
        bool queued, running;

        lockdep_assert_held(&p->pi_lock);

        queued = task_on_rq_queued(p);
        running = task_current(rq, p);

        if (queued) {
                /*
                 * Because __kthread_bind() calls this on blocked tasks without
                 * holding rq->lock.
                 */
                lockdep_assert_held(&rq->lock);
                dequeue_task(rq, p, DEQUEUE_SAVE);
        }
        if (running)
                put_prev_task(rq, p);

        p->sched_class->set_cpus_allowed(p, new_mask);

        if (running)
                p->sched_class->set_curr_task(rq);
        if (queued)
                enqueue_task(rq, p, ENQUEUE_RESTORE);
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
                                  const struct cpumask *new_mask, bool check)
{
        unsigned long flags;
        struct rq *rq;
        unsigned int dest_cpu;
        int ret = 0;

        rq = task_rq_lock(p, &flags);

        /*
         * Must re-check here, to close a race against __kthread_bind(),
         * sched_setaffinity() is not guaranteed to observe the flag.
         */
        if (check && (p->flags & PF_NO_SETAFFINITY)) {
                ret = -EINVAL;
                goto out;
        }

        if (cpumask_equal(&p->cpus_allowed, new_mask))
                goto out;

        if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
                goto out;
        }

        do_set_cpus_allowed(p, new_mask);

        /* Can the task run on the task's current CPU? If so, we're done */
        if (cpumask_test_cpu(task_cpu(p), new_mask))
                goto out;

        dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
        if (task_running(rq, p) || p->state == TASK_WAKING) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
                task_rq_unlock(rq, p, &flags);
                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                tlb_migrate_finish(p->mm);
                return 0;
        } else if (task_on_rq_queued(p)) {
                /*
                 * OK, since we're going to drop the lock immediately
                 * afterwards anyway.
                 */
                lockdep_unpin_lock(&rq->lock);
                rq = move_queued_task(rq, p, dest_cpu);
                lockdep_pin_lock(&rq->lock);
        }
out:
        task_rq_unlock(rq, p, &flags);

        return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
        return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
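
/*
 * Example caller (sketch; target_cpu is a placeholder): pin a task to a
 * single CPU and handle the case where that CPU is not active:
 *
 *      ret = set_cpus_allowed_ptr(p, cpumask_of(target_cpu));
 *      if (ret)        // -EINVAL: no active CPU in the new mask
 *              ...
 */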

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
        /*
         * We should never call set_task_cpu() on a blocked task,
         * ttwu() will sort out the placement.
         */
        WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
                        !p->on_rq);

        /*
         * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
         * because schedstat_wait_{start,end} rebase migrating task's wait_start
         * time relying on p->on_rq.
         */
        WARN_ON_ONCE(p->state == TASK_RUNNING &&
                     p->sched_class == &fair_sched_class &&
                     (p->on_rq && !task_on_rq_migrating(p)));

#ifdef CONFIG_LOCKDEP
        /*
         * The caller should hold either p->pi_lock or rq->lock, when changing
         * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
         *
         * sched_move_task() holds both and thus holding either pins the cgroup,
         * see task_group().
         *
         * Furthermore, all task_rq users should acquire both locks, see
         * task_rq_lock().
         */
        WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
                                      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

        trace_sched_migrate_task(p, new_cpu);

        if (task_cpu(p) != new_cpu) {
                if (p->sched_class->migrate_task_rq)
                        p->sched_class->migrate_task_rq(p);
                p->se.nr_migrations++;
                perf_event_task_migrate(p);
        }

        __set_task_cpu(p, new_cpu);
}

static void __migrate_swap_task(struct task_struct *p, int cpu)
{
        if (task_on_rq_queued(p)) {
                struct rq *src_rq, *dst_rq;

                src_rq = task_rq(p);
                dst_rq = cpu_rq(cpu);

                p->on_rq = TASK_ON_RQ_MIGRATING;
                deactivate_task(src_rq, p, 0);
                set_task_cpu(p, cpu);
                activate_task(dst_rq, p, 0);
                p->on_rq = TASK_ON_RQ_QUEUED;
                check_preempt_curr(dst_rq, p, 0);
        } else {
                /*
                 * Task isn't running anymore; make it appear like we migrated
                 * it before it went to sleep. This means on wakeup we make the
                 * previous cpu our target instead of where it really is.
                 */
                p->wake_cpu = cpu;
        }
}

struct migration_swap_arg {
        struct task_struct *src_task, *dst_task;
        int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
        struct migration_swap_arg *arg = data;
        struct rq *src_rq, *dst_rq;
        int ret = -EAGAIN;

        if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
                return -EAGAIN;

        src_rq = cpu_rq(arg->src_cpu);
        dst_rq = cpu_rq(arg->dst_cpu);

        double_raw_lock(&arg->src_task->pi_lock,
                        &arg->dst_task->pi_lock);
        double_rq_lock(src_rq, dst_rq);

        if (task_cpu(arg->dst_task) != arg->dst_cpu)
                goto unlock;

        if (task_cpu(arg->src_task) != arg->src_cpu)
                goto unlock;

        if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
                goto unlock;

        if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
                goto unlock;

        __migrate_swap_task(arg->src_task, arg->dst_cpu);
        __migrate_swap_task(arg->dst_task, arg->src_cpu);

        ret = 0;

unlock:
        double_rq_unlock(src_rq, dst_rq);
        raw_spin_unlock(&arg->dst_task->pi_lock);
        raw_spin_unlock(&arg->src_task->pi_lock);

        return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p)
{
        struct migration_swap_arg arg;
        int ret = -EINVAL;

        arg = (struct migration_swap_arg){
                .src_task = cur,
                .src_cpu = task_cpu(cur),
                .dst_task = p,
                .dst_cpu = task_cpu(p),
        };

        if (arg.src_cpu == arg.dst_cpu)
                goto out;

        /*
         * These three tests are all lockless; this is OK since all of them
         * will be re-checked with proper locks held further down the line.
         */
        if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
                goto out;

        if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
                goto out;

        if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
                goto out;

        trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
        ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
        return ret;
}

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change.  If it changes, i.e. @p might have woken up,
 * then return zero.  When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count).  If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
        unsigned long flags;
        int running, queued;
        unsigned long ncsw;
        struct rq *rq;

        for (;;) {
                /*
                 * We do the initial early heuristics without holding
                 * any task-queue locks at all. We'll only try to get
                 * the runqueue lock when things look like they will
                 * work out!
                 */
                rq = task_rq(p);

                /*
                 * If the task is actively running on another CPU
                 * still, just relax and busy-wait without holding
                 * any locks.
                 *
                 * NOTE! Since we don't hold any locks, it's not
                 * even sure that "rq" stays as the right runqueue!
                 * But we don't care, since "task_running()" will
                 * return false if the runqueue has changed and p
                 * is actually now running somewhere else!
                 */
                while (task_running(rq, p)) {
                        if (match_state && unlikely(p->state != match_state))
                                return 0;
                        cpu_relax();
                }

                /*
                 * Ok, time to look more closely! We need the rq
                 * lock now, to be *sure*. If we're wrong, we'll
                 * just go back and repeat.
                 */
                rq = task_rq_lock(p, &flags);
                trace_sched_wait_task(p);
                running = task_running(rq, p);
                queued = task_on_rq_queued(p);
                ncsw = 0;
                if (!match_state || p->state == match_state)
                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
                task_rq_unlock(rq, p, &flags);

                /*
                 * If it changed from the expected state, bail out now.
                 */
                if (unlikely(!ncsw))
                        break;

                /*
                 * Was it really running after all now that we
                 * checked with the proper locks actually held?
                 *
                 * Oops. Go back and try again..
                 */
                if (unlikely(running)) {
                        cpu_relax();
                        continue;
                }

                /*
                 * It's not enough that it's not actively running,
                 * it must be off the runqueue _entirely_, and not
                 * preempted!
                 *
                 * So if it was still runnable (but just not actively
                 * running right now), it's preempted, and we should
                 * yield - it could be a while.
                 */
                if (unlikely(queued)) {
                        ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_hrtimeout(&to, HRTIMER_MODE_REL);
                        continue;
                }

                /*
                 * Ahh, all good. It wasn't running, and it wasn't
                 * runnable, which means that it will never become
                 * running in the future either. We're all done!
                 */
                break;
        }

        return ncsw;
}
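
/*
 * Usage sketch of the switch-count contract described above:
 *
 *      ncsw = wait_task_inactive(p, match_state);
 *      ...
 *      if (wait_task_inactive(p, match_state) != ncsw)
 *              ... @p ran, or changed state, in between ...
 *
 * Callers such as ptrace rely on a nonzero return to know the task is
 * really off its CPU.
 */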

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
        int cpu;

        preempt_disable();
        cpu = task_cpu(p);
        if ((cpu != smp_processor_id()) && task_curr(p))
                smp_send_reschedule(cpu);
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
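
/*
 * Example (sketch of the caller side): signal delivery relies on this;
 * after setting TIF_SIGPENDING, signal_wake_up_state() does roughly
 *
 *      if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 *              kick_process(t);
 *
 * so an already-running target re-enters the kernel and notices the
 * pending signal.
 */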

/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
        int nid = cpu_to_node(cpu);
        const struct cpumask *nodemask = NULL;
        enum { cpuset, possible, fail } state = cpuset;
        int dest_cpu;

        /*
         * If the node that the cpu is on has been offlined, cpu_to_node()
         * will return -1. There is no cpu on the node, and we should
         * select the cpu on the other node.
         */
        if (nid != -1) {
                nodemask = cpumask_of_node(nid);

                /* Look for allowed, online CPU in same node. */
                for_each_cpu(dest_cpu, nodemask) {
                        if (!cpu_online(dest_cpu))
                                continue;
                        if (!cpu_active(dest_cpu))
                                continue;
                        if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                                return dest_cpu;
                }
        }

        for (;;) {
                /* Any allowed, online CPU? */
                for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
                        if (!cpu_online(dest_cpu))
                                continue;
                        if (!cpu_active(dest_cpu))
                                continue;
                        goto out;
                }

                /* No more Mr. Nice Guy. */
                switch (state) {
                case cpuset:
                        if (IS_ENABLED(CONFIG_CPUSETS)) {
                                cpuset_cpus_allowed_fallback(p);
                                state = possible;
                                break;
                        }
                        /* fall-through */
                case possible:
                        do_set_cpus_allowed(p, cpu_possible_mask);
                        state = fail;
                        break;

                case fail:
                        BUG();
                        break;
                }
        }

out:
        if (state != cpuset) {
                /*
                 * Don't tell them about moving exiting tasks or
                 * kernel threads (both mm NULL), since they never
                 * leave kernel.
                 */
                if (p->mm && printk_ratelimit()) {
                        printk_deferred("process %d (%s) no longer affine to cpu%d\n",
                                        task_pid_nr(p), p->comm, cpu);
                }
        }

        return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
        lockdep_assert_held(&p->pi_lock);

        if (p->nr_cpus_allowed > 1)
                cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);

        /*
         * In order not to call set_task_cpu() on a blocking task we need
         * to rely on ttwu() to place the task on a valid ->cpus_allowed
         * cpu.
         *
         * Since this is common to all placement strategies, this lives here.
         *
         * [ this allows ->select_task() to simply return task_cpu(p) and
         *   not worry about this generic constraint ]
         */
        if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
                     !cpu_online(cpu)))
                cpu = select_fallback_rq(task_cpu(p), p);

        return cpu;
}

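/*
 * Exponentially weighted moving average with a 1/8 weight for each new
 * sample: avg += (sample - avg) / 8. Used e.g. for rq->avg_idle in
 * ttwu_do_wakeup() below.
 */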
static void update_avg(u64 *avg, u64 sample)
{
        s64 diff = sample - *avg;
        *avg += diff >> 3;
}

#else

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
                                         const struct cpumask *new_mask, bool check)
{
        return set_cpus_allowed_ptr(p, new_mask);
}

#endif /* CONFIG_SMP */

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
        struct rq *rq = this_rq();

#ifdef CONFIG_SMP
        int this_cpu = smp_processor_id();

        if (cpu == this_cpu) {
                schedstat_inc(rq, ttwu_local);
                schedstat_inc(p, se.statistics.nr_wakeups_local);
        } else {
                struct sched_domain *sd;

                schedstat_inc(p, se.statistics.nr_wakeups_remote);
                rcu_read_lock();
                for_each_domain(this_cpu, sd) {
                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                                schedstat_inc(sd, ttwu_wake_remote);
                                break;
                        }
                }
                rcu_read_unlock();
        }

        if (wake_flags & WF_MIGRATED)
                schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

        schedstat_inc(rq, ttwu_count);
        schedstat_inc(p, se.statistics.nr_wakeups);

        if (wake_flags & WF_SYNC)
                schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
        activate_task(rq, p, en_flags);
        p->on_rq = TASK_ON_RQ_QUEUED;

        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
                wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
        check_preempt_curr(rq, p, wake_flags);
        p->state = TASK_RUNNING;
        trace_sched_wakeup(p);

#ifdef CONFIG_SMP
        if (p->sched_class->task_woken) {
                /*
                 * Our task @p is fully woken up and running; so it's safe to
                 * drop the rq->lock, hereafter rq is only used for statistics.
                 */
                lockdep_unpin_lock(&rq->lock);
                p->sched_class->task_woken(rq, p);
                lockdep_pin_lock(&rq->lock);
        }

        if (rq->idle_stamp) {
                u64 delta = rq_clock(rq) - rq->idle_stamp;
                u64 max = 2*rq->max_idle_balance_cost;

                update_avg(&rq->avg_idle, delta);

                if (rq->avg_idle > max)
                        rq->avg_idle = max;

                rq->idle_stamp = 0;
        }
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
        lockdep_assert_held(&rq->lock);

#ifdef CONFIG_SMP
        if (p->sched_contributes_to_load)
                rq->nr_uninterruptible--;
#endif

        ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
        ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING; the task is
 * still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
        struct rq *rq;
        int ret = 0;

        rq = __task_rq_lock(p);
        if (task_on_rq_queued(p)) {
                /* check_preempt_curr() may use rq clock */
                update_rq_clock(rq);
                ttwu_do_wakeup(rq, p, wake_flags);
                ret = 1;
        }
        __task_rq_unlock(rq);

        return ret;
}
1675
1676#ifdef CONFIG_SMP
1677void sched_ttwu_pending(void)
1678{
1679        struct rq *rq = this_rq();
1680        struct llist_node *llist = llist_del_all(&rq->wake_list);
1681        struct task_struct *p;
1682        unsigned long flags;
1683
1684        if (!llist)
1685                return;
1686
1687        raw_spin_lock_irqsave(&rq->lock, flags);
1688        lockdep_pin_lock(&rq->lock);
1689
1690        while (llist) {
1691                p = llist_entry(llist, struct task_struct, wake_entry);
1692                llist = llist_next(llist);
1693                ttwu_do_activate(rq, p, 0);
1694        }
1695
1696        lockdep_unpin_lock(&rq->lock);
1697        raw_spin_unlock_irqrestore(&rq->lock, flags);
1698}
1699
1700void scheduler_ipi(void)
1701{
1702        /*
1703         * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1704         * TIF_NEED_RESCHED remotely (for the first time) will also send
1705         * this IPI.
1706         */
1707        preempt_fold_need_resched();
1708
1709        if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
1710                return;
1711
1712        /*
1713         * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1714         * traditionally all their work was done from the interrupt return
1715         * path. Now that we actually do some work, we need to make sure
1716         * we do call them.
1717         *
1718         * Some archs already do call them, luckily irq_enter/exit nest
1719         * properly.
1720         *
1721         * Arguably we should visit all archs and update all handlers,
1722         * however a fair share of IPIs are still resched only so this would
1723         * somewhat pessimize the simple resched case.
1724         */
1725        irq_enter();
1726        sched_ttwu_pending();
1727
1728        /*
1729         * Check if someone kicked us for doing the nohz idle load balance.
1730         */
1731        if (unlikely(got_nohz_idle_kick())) {
1732                this_rq()->idle_balance = 1;
1733                raise_softirq_irqoff(SCHED_SOFTIRQ);
1734        }
1735        irq_exit();
1736}
1737
1738static void ttwu_queue_remote(struct task_struct *p, int cpu)
1739{
1740        struct rq *rq = cpu_rq(cpu);
1741
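            /*
             * llist_add() returns true only when the list was previously
             * empty, so only the first waker needs to kick the remote cpu;
             * later wakeups piggyback on the already-pending IPI.
             */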
1742        if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
1743                if (!set_nr_if_polling(rq->idle))
1744                        smp_send_reschedule(cpu);
1745                else
1746                        trace_sched_wake_idle_without_ipi(cpu);
1747        }
1748}
1749
1750void wake_up_if_idle(int cpu)
1751{
1752        struct rq *rq = cpu_rq(cpu);
1753        unsigned long flags;
1754
1755        rcu_read_lock();
1756
1757        if (!is_idle_task(rcu_dereference(rq->curr)))
1758                goto out;
1759
1760        if (set_nr_if_polling(rq->idle)) {
1761                trace_sched_wake_idle_without_ipi(cpu);
1762        } else {
1763                raw_spin_lock_irqsave(&rq->lock, flags);
1764                if (is_idle_task(rq->curr))
1765                        smp_send_reschedule(cpu);
1766                /* Else cpu is not idle; do nothing here */
1767                raw_spin_unlock_irqrestore(&rq->lock, flags);
1768        }
1769
1770out:
1771        rcu_read_unlock();
1772}
1773
1774bool cpus_share_cache(int this_cpu, int that_cpu)
1775{
1776        return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1777}
1778#endif /* CONFIG_SMP */
1779
1780static void ttwu_queue(struct task_struct *p, int cpu)
1781{
1782        struct rq *rq = cpu_rq(cpu);
1783
1784#if defined(CONFIG_SMP)
1785        if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
1786                sched_clock_cpu(cpu); /* sync clocks x-cpu */
1787                ttwu_queue_remote(p, cpu);
1788                return;
1789        }
1790#endif
1791
1792        raw_spin_lock(&rq->lock);
1793        lockdep_pin_lock(&rq->lock);
1794        ttwu_do_activate(rq, p, 0);
1795        lockdep_unpin_lock(&rq->lock);
1796        raw_spin_unlock(&rq->lock);
1797}
1798
1799/*
1800 * Notes on Program-Order guarantees on SMP systems.
1801 *
1802 *  MIGRATION
1803 *
1804 * The basic program-order guarantee on SMP systems is that when a task [t]
1805 * migrates, all its activity on its old cpu [c0] happens-before any subsequent
1806 * execution on its new cpu [c1].
1807 *
1808 * For migration (of runnable tasks) this is provided by the following means:
1809 *
1810 *  A) UNLOCK of the rq(c0)->lock scheduling out task t
1811 *  B) migration for t is required to synchronize *both* rq(c0)->lock and
1812 *     rq(c1)->lock (if not at the same time, then in that order).
1813 *  C) LOCK of the rq(c1)->lock scheduling in task
1814 *
1815 * Transitivity guarantees that B happens after A and C after B.
1816 * Note: we only require RCpc transitivity.
1817 * Note: the cpu doing B need not be c0 or c1
1818 *
1819 * Example:
1820 *
1821 *   CPU0            CPU1            CPU2
1822 *
1823 *   LOCK rq(0)->lock
1824 *   sched-out X
1825 *   sched-in Y
1826 *   UNLOCK rq(0)->lock
1827 *
1828 *                                   LOCK rq(0)->lock // orders against CPU0
1829 *                                   dequeue X
1830 *                                   UNLOCK rq(0)->lock
1831 *
1832 *                                   LOCK rq(1)->lock
1833 *                                   enqueue X
1834 *                                   UNLOCK rq(1)->lock
1835 *
1836 *                   LOCK rq(1)->lock // orders against CPU2
1837 *                   sched-out Z
1838 *                   sched-in X
1839 *                   UNLOCK rq(1)->lock
1840 *
1841 *
1842 *  BLOCKING -- aka. SLEEP + WAKEUP
1843 *
1844 * For blocking we (obviously) need to provide the same guarantee as for
1845 * migration. However the means are completely different as there is no lock
1846 * chain to provide order. Instead we do:
1847 *
1848 *   1) smp_store_release(X->on_cpu, 0)
1849 *   2) smp_cond_acquire(!X->on_cpu)
1850 *
1851 * Example:
1852 *
1853 *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
1854 *
1855 *   LOCK rq(0)->lock LOCK X->pi_lock
1856 *   dequeue X
1857 *   sched-out X
1858 *   smp_store_release(X->on_cpu, 0);
1859 *
1860 *                    smp_cond_acquire(!X->on_cpu);
1861 *                    X->state = WAKING
1862 *                    set_task_cpu(X,2)
1863 *
1864 *                    LOCK rq(2)->lock
1865 *                    enqueue X
1866 *                    X->state = RUNNING
1867 *                    UNLOCK rq(2)->lock
1868 *
1869 *                                          LOCK rq(2)->lock // orders against CPU1
1870 *                                          sched-out Z
1871 *                                          sched-in X
1872 *                                          UNLOCK rq(2)->lock
1873 *
1874 *                    UNLOCK X->pi_lock
1875 *   UNLOCK rq(0)->lock
1876 *
1877 *
1878 * However, for wakeups there is a second guarantee we must provide, namely we
1879 * must observe the state that led to our wakeup. That is, not only must our
1880 * task observe its own prior state, it must also observe the stores prior to
1881 * its wakeup.
1882 *
1883 * This means that any means of doing remote wakeups must order the CPU doing
1884 * the wakeup against the CPU the task is going to end up running on. This,
1885 * however, is already required for the regular Program-Order guarantee above,
1886 * since the waking CPU is the one issuing the ACQUIRE (smp_cond_acquire).
1887 *
1888 */
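
    /*
     * Illustrative sketch (not kernel code) of the RELEASE/ACQUIRE pairing
     * above; 'data' stands for any store made on the scheduling-out CPU
     * that the waker must be able to observe:
     *
     *   CPU0 (schedule)                    CPU1 (try_to_wake_up)
     *
     *   X->data = 1;
     *   smp_store_release(&X->on_cpu, 0);
     *                                      smp_cond_acquire(!X->on_cpu);
     *                                      r = X->data; /* guaranteed 1 */
     *
     * The RELEASE orders all of CPU0's earlier stores before the ->on_cpu
     * store; the ACQUIRE orders CPU1's later loads after it observes
     * ->on_cpu == 0.
     */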
1889
1890/**
1891 * try_to_wake_up - wake up a thread
1892 * @p: the thread to be awakened
1893 * @state: the mask of task states that can be woken
1894 * @wake_flags: wake modifier flags (WF_*)
1895 *
1896 * Put it on the run-queue if it's not already there. The "current"
1897 * thread is always on the run-queue (except when the actual
1898 * re-schedule is in progress), and as such you're allowed to do
1899 * the simpler "current->state = TASK_RUNNING" to mark yourself
1900 * runnable without the overhead of this.
1901 *
1902 * Return: %true if @p was woken up, %false if it was already running
1903 * or @state didn't match @p's state.
1904 */
1905static int
1906try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1907{
1908        unsigned long flags;
1909        int cpu, success = 0;
1910
1911        /*
1912         * If we are going to wake up a thread waiting for CONDITION we
1913         * need to ensure that CONDITION=1 done by the caller cannot be
1914         * reordered with p->state check below. This pairs with mb() in
1915         * set_current_state() the waiting thread does.
1916         */
1917        smp_mb__before_spinlock();
1918        raw_spin_lock_irqsave(&p->pi_lock, flags);
1919        if (!(p->state & state))
1920                goto out;
1921
1922        trace_sched_waking(p);
1923
1924        success = 1; /* we're going to change ->state */
1925        cpu = task_cpu(p);
1926
1927        if (p->on_rq && ttwu_remote(p, wake_flags))
1928                goto stat;
1929
1930#ifdef CONFIG_SMP
1931        /*
1932         * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
1933         * possible to, falsely, observe p->on_cpu == 0.
1934         *
1935         * One must be running (->on_cpu == 1) in order to remove oneself
1936         * from the runqueue.
1937         *
1938         *  [S] ->on_cpu = 1;   [L] ->on_rq
1939         *      UNLOCK rq->lock
1940         *                      RMB
1941         *      LOCK   rq->lock
1942         *  [S] ->on_rq = 0;    [L] ->on_cpu
1943         *
1944         * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
1945         * from the consecutive calls to schedule(); the first switching to our
1946         * task, the second putting it to sleep.
1947         */
1948        smp_rmb();
1949
1950        /*
1951         * If the owning (remote) cpu is still in the middle of schedule() with
1952         * this task as prev, wait until it's done referencing the task.
1953         *
1954         * Pairs with the smp_store_release() in finish_lock_switch().
1955         *
1956         * This ensures that tasks getting woken will be fully ordered against
1957         * their previous state and preserve Program Order.
1958         */
1959        smp_cond_acquire(!p->on_cpu);
1960
1961        p->sched_contributes_to_load = !!task_contributes_to_load(p);
1962        p->state = TASK_WAKING;
1963
1964        if (p->sched_class->task_waking)
1965                p->sched_class->task_waking(p);
1966
1967        cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
1968        if (task_cpu(p) != cpu) {
1969                wake_flags |= WF_MIGRATED;
1970                set_task_cpu(p, cpu);
1971        }
1972#endif /* CONFIG_SMP */
1973
1974        ttwu_queue(p, cpu);
1975stat:
1976        if (schedstat_enabled())
1977                ttwu_stat(p, cpu, wake_flags);
1978out:
1979        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1980
1981        return success;
1982}
1983
1984/**
1985 * try_to_wake_up_local - try to wake up a local task with rq lock held
1986 * @p: the thread to be awakened
1987 *
1988 * Put @p on the run-queue if it's not already there. The caller must
1989 * ensure that this_rq() is locked, @p is bound to this_rq() and not
1990 * the current task.
1991 */
1992static void try_to_wake_up_local(struct task_struct *p)
1993{
1994        struct rq *rq = task_rq(p);
1995
1996        if (WARN_ON_ONCE(rq != this_rq()) ||
1997            WARN_ON_ONCE(p == current))
1998                return;
1999
2000        lockdep_assert_held(&rq->lock);
2001
2002        if (!raw_spin_trylock(&p->pi_lock)) {
2003                /*
2004                 * This is OK, because current is on_cpu, which avoids it
2005                 * being picked for load-balance; preemption/IRQs are still
2006                 * disabled, avoiding further scheduler activity on it; and
2007                 * we've not yet picked a replacement task.
2008                 */
2009                lockdep_unpin_lock(&rq->lock);
2010                raw_spin_unlock(&rq->lock);
2011                raw_spin_lock(&p->pi_lock);
2012                raw_spin_lock(&rq->lock);
2013                lockdep_pin_lock(&rq->lock);
2014        }
2015
2016        if (!(p->state & TASK_NORMAL))
2017                goto out;
2018
2019        trace_sched_waking(p);
2020
2021        if (!task_on_rq_queued(p))
2022                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2023
2024        ttwu_do_wakeup(rq, p, 0);
2025        if (schedstat_enabled())
2026                ttwu_stat(p, smp_processor_id(), 0);
2027out:
2028        raw_spin_unlock(&p->pi_lock);
2029}
2030
2031/**
2032 * wake_up_process - Wake up a specific process
2033 * @p: The process to be woken up.
2034 *
2035 * Attempt to wake up the nominated process and move it to the set of runnable
2036 * processes.
2037 *
2038 * Return: 1 if the process was woken up, 0 if it was already running.
2039 *
2040 * It may be assumed that this function implies a write memory barrier before
2041 * changing the task state if and only if any tasks are woken up.
2042 */
2043int wake_up_process(struct task_struct *p)
2044{
2045        return try_to_wake_up(p, TASK_NORMAL, 0);
2046}
2047EXPORT_SYMBOL(wake_up_process);
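
    /*
     * Typical usage (sketch) of the CONDITION/->state ordering described
     * in try_to_wake_up() above; 'condition' and 'sleeper' are
     * illustrative names:
     *
     *   // sleeper                          // waker
     *   set_current_state(TASK_INTERRUPTIBLE);
     *   if (!condition)                     condition = true;
     *           schedule();                 wake_up_process(sleeper);
     *   __set_current_state(TASK_RUNNING);
     *
     * set_current_state() implies a full memory barrier, pairing with the
     * barrier in try_to_wake_up(), so either the waker observes the
     * sleeper's ->state or the sleeper observes CONDITION set.
     */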
2048
2049int wake_up_state(struct task_struct *p, unsigned int state)
2050{
2051        return try_to_wake_up(p, state, 0);
2052}
2053
2054/*
2055 * This function clears the sched_dl_entity static params.
2056 */
2057void __dl_clear_params(struct task_struct *p)
2058{
2059        struct sched_dl_entity *dl_se = &p->dl;
2060
2061        dl_se->dl_runtime = 0;
2062        dl_se->dl_deadline = 0;
2063        dl_se->dl_period = 0;
2064        dl_se->flags = 0;
2065        dl_se->dl_bw = 0;
2066
2067        dl_se->dl_throttled = 0;
2068        dl_se->dl_yielded = 0;
2069}
2070
2071/*
2072 * Perform scheduler related setup for a newly forked process p.
2073 * p is forked by current.
2074 *
2075 * __sched_fork() is basic setup used by init_idle() too:
2076 */
2077static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
2078{
2079        p->on_rq                        = 0;
2080
2081        p->se.on_rq                     = 0;
2082        p->se.exec_start                = 0;
2083        p->se.sum_exec_runtime          = 0;
2084        p->se.prev_sum_exec_runtime     = 0;
2085        p->se.nr_migrations             = 0;
2086        p->se.vruntime                  = 0;
2087        INIT_LIST_HEAD(&p->se.group_node);
2088
2089#ifdef CONFIG_FAIR_GROUP_SCHED
2090        p->se.cfs_rq                    = NULL;
2091#endif
2092
2093#ifdef CONFIG_SCHEDSTATS
2094        /* Even if schedstat is disabled, there should not be garbage */
2095        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2096#endif
2097
2098        RB_CLEAR_NODE(&p->dl.rb_node);
2099        init_dl_task_timer(&p->dl);
2100        __dl_clear_params(p);
2101
2102        INIT_LIST_HEAD(&p->rt.run_list);
2103        p->rt.timeout           = 0;
2104        p->rt.time_slice        = sched_rr_timeslice;
2105        p->rt.on_rq             = 0;
2106        p->rt.on_list           = 0;
2107
2108#ifdef CONFIG_PREEMPT_NOTIFIERS
2109        INIT_HLIST_HEAD(&p->preempt_notifiers);
2110#endif
2111
2112#ifdef CONFIG_NUMA_BALANCING
2113        if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
2114                p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2115                p->mm->numa_scan_seq = 0;
2116        }
2117
2118        if (clone_flags & CLONE_VM)
2119                p->numa_preferred_nid = current->numa_preferred_nid;
2120        else
2121                p->numa_preferred_nid = -1;
2122
2123        p->node_stamp = 0ULL;
2124        p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
2125        p->numa_scan_period = sysctl_numa_balancing_scan_delay;
2126        p->numa_work.next = &p->numa_work;
2127        p->numa_faults = NULL;
2128        p->last_task_numa_placement = 0;
2129        p->last_sum_exec_runtime = 0;
2130
2131        p->numa_group = NULL;
2132#endif /* CONFIG_NUMA_BALANCING */
2133}
2134
2135DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
2136
2137#ifdef CONFIG_NUMA_BALANCING
2138
2139void set_numabalancing_state(bool enabled)
2140{
2141        if (enabled)
2142                static_branch_enable(&sched_numa_balancing);
2143        else
2144                static_branch_disable(&sched_numa_balancing);
2145}
2146
2147#ifdef CONFIG_PROC_SYSCTL
2148int sysctl_numa_balancing(struct ctl_table *table, int write,
2149                         void __user *buffer, size_t *lenp, loff_t *ppos)
2150{
2151        struct ctl_table t;
2152        int err;
2153        int state = static_branch_likely(&sched_numa_balancing);
2154
2155        if (write && !capable(CAP_SYS_ADMIN))
2156                return -EPERM;
2157
2158        t = *table;
2159        t.data = &state;
2160        err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2161        if (err < 0)
2162                return err;
2163        if (write)
2164                set_numabalancing_state(state);
2165        return err;
2166}
2167#endif
2168#endif
2169
2170DEFINE_STATIC_KEY_FALSE(sched_schedstats);
2171
2172#ifdef CONFIG_SCHEDSTATS
2173static void set_schedstats(bool enabled)
2174{
2175        if (enabled)
2176                static_branch_enable(&sched_schedstats);
2177        else
2178                static_branch_disable(&sched_schedstats);
2179}
2180
2181void force_schedstat_enabled(void)
2182{
2183        if (!schedstat_enabled()) {
2184                pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
2185                static_branch_enable(&sched_schedstats);
2186        }
2187}
2188
2189static int __init setup_schedstats(char *str)
2190{
2191        int ret = 0;
2192        if (!str)
2193                goto out;
2194
2195        if (!strcmp(str, "enable")) {
2196                set_schedstats(true);
2197                ret = 1;
2198        } else if (!strcmp(str, "disable")) {
2199                set_schedstats(false);
2200                ret = 1;
2201        }
2202out:
2203        if (!ret)
2204                pr_warn("Unable to parse schedstats=\n");
2205
2206        return ret;
2207}
2208__setup("schedstats=", setup_schedstats);
2209
2210#ifdef CONFIG_PROC_SYSCTL
2211int sysctl_schedstats(struct ctl_table *table, int write,
2212                         void __user *buffer, size_t *lenp, loff_t *ppos)
2213{
2214        struct ctl_table t;
2215        int err;
2216        int state = static_branch_likely(&sched_schedstats);
2217
2218        if (write && !capable(CAP_SYS_ADMIN))
2219                return -EPERM;
2220
2221        t = *table;
2222        t.data = &state;
2223        err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2224        if (err < 0)
2225                return err;
2226        if (write)
2227                set_schedstats(state);
2228        return err;
2229}
2230#endif
2231#endif
2232
2233/*
2234 * fork()/clone()-time setup:
2235 */
2236int sched_fork(unsigned long clone_flags, struct task_struct *p)
2237{
2238        unsigned long flags;
2239        int cpu = get_cpu();
2240
2241        __sched_fork(clone_flags, p);
2242        /*
2243         * We mark the process as running here. This guarantees that
2244         * nobody will actually run it, and a signal or other external
2245         * event cannot wake it up and insert it on the runqueue either.
2246         */
2247        p->state = TASK_RUNNING;
2248
2249        /*
2250         * Make sure we do not leak PI boosting priority to the child.
2251         */
2252        p->prio = current->normal_prio;
2253
2254        /*
2255         * Revert to default priority/policy on fork if requested.
2256         */
2257        if (unlikely(p->sched_reset_on_fork)) {
2258                if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
2259                        p->policy = SCHED_NORMAL;
2260                        p->static_prio = NICE_TO_PRIO(0);
2261                        p->rt_priority = 0;
2262                } else if (PRIO_TO_NICE(p->static_prio) < 0)
2263                        p->static_prio = NICE_TO_PRIO(0);
2264
2265                p->prio = p->normal_prio = __normal_prio(p);
2266                set_load_weight(p);
2267
2268                /*
2269                 * We don't need the reset flag anymore after the fork. It has
2270                 * fulfilled its duty:
2271                 */
2272                p->sched_reset_on_fork = 0;
2273        }
2274
2275        if (dl_prio(p->prio)) {
2276                put_cpu();
2277                return -EAGAIN;
2278        } else if (rt_prio(p->prio)) {
2279                p->sched_class = &rt_sched_class;
2280        } else {
2281                p->sched_class = &fair_sched_class;
2282        }
2283
2284        if (p->sched_class->task_fork)
2285                p->sched_class->task_fork(p);
2286
2287        /*
2288         * The child is not yet in the pid-hash so no cgroup attach races,
2289         * and the cgroup is pinned to this child because cgroup_fork()
2290         * is run before sched_fork().
2291         *
2292         * Silence PROVE_RCU.
2293         */
2294        raw_spin_lock_irqsave(&p->pi_lock, flags);
2295        set_task_cpu(p, cpu);
2296        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2297
2298#ifdef CONFIG_SCHED_INFO
2299        if (likely(sched_info_on()))
2300                memset(&p->sched_info, 0, sizeof(p->sched_info));
2301#endif
2302#if defined(CONFIG_SMP)
2303        p->on_cpu = 0;
2304#endif
2305        init_task_preempt_count(p);
2306#ifdef CONFIG_SMP
2307        plist_node_init(&p->pushable_tasks, MAX_PRIO);
2308        RB_CLEAR_NODE(&p->pushable_dl_tasks);
2309#endif
2310
2311        put_cpu();
2312        return 0;
2313}
2314
2315unsigned long to_ratio(u64 period, u64 runtime)
2316{
2317        if (runtime == RUNTIME_INF)
2318                return 1ULL << 20;
2319
2320        /*
2321         * Doing this here saves a lot of checks in all
2322         * the calling paths, and returning zero seems
2323         * safe for them anyway.
2324         */
2325        if (period == 0)
2326                return 0;
2327
2328        return div64_u64(runtime << 20, period);
2329}
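
    /*
     * Worked example: sched_runtime = 10ms and sched_period = 100ms (any
     * consistent unit) map to (10 << 20) / 100 = 104857, i.e. roughly 10%
     * of the 1 << 20 scale used by the bandwidth accounting.
     */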
2330
2331#ifdef CONFIG_SMP
2332inline struct dl_bw *dl_bw_of(int i)
2333{
2334        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2335                         "sched RCU must be held");
2336        return &cpu_rq(i)->rd->dl_bw;
2337}
2338
2339static inline int dl_bw_cpus(int i)
2340{
2341        struct root_domain *rd = cpu_rq(i)->rd;
2342        int cpus = 0;
2343
2344        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2345                         "sched RCU must be held");
2346        for_each_cpu_and(i, rd->span, cpu_active_mask)
2347                cpus++;
2348
2349        return cpus;
2350}
2351#else
2352inline struct dl_bw *dl_bw_of(int i)
2353{
2354        return &cpu_rq(i)->dl.dl_bw;
2355}
2356
2357static inline int dl_bw_cpus(int i)
2358{
2359        return 1;
2360}
2361#endif
2362
2363/*
2364 * We must be sure that accepting a new task (or allowing changing the
2365 * parameters of an existing one) is consistent with the bandwidth
2366 * constraints. If so, this function also updates the currently
2367 * allocated bandwidth to reflect the new situation.
2368 *
2369 * This function is called while holding p's rq->lock.
2370 *
2371 * XXX we should delay bw change until the task's 0-lag point, see
2372 * __setparam_dl().
2373 */
2374static int dl_overflow(struct task_struct *p, int policy,
2375                       const struct sched_attr *attr)
2376{
2377
2378        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2379        u64 period = attr->sched_period ?: attr->sched_deadline;
2380        u64 runtime = attr->sched_runtime;
2381        u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2382        int cpus, err = -1;
2383
2384        if (new_bw == p->dl.dl_bw)
2385                return 0;
2386
2387        /*
2388         * Whether a task enters, leaves, or stays -deadline but changes
2389         * its parameters, we may need to update the total allocated
2390         * bandwidth of the container accordingly.
2391         */
2392        raw_spin_lock(&dl_b->lock);
2393        cpus = dl_bw_cpus(task_cpu(p));
2394        if (dl_policy(policy) && !task_has_dl_policy(p) &&
2395            !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2396                __dl_add(dl_b, new_bw);
2397                err = 0;
2398        } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2399                   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2400                __dl_clear(dl_b, p->dl.dl_bw);
2401                __dl_add(dl_b, new_bw);
2402                err = 0;
2403        } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2404                __dl_clear(dl_b, p->dl.dl_bw);
2405                err = 0;
2406        }
2407        raw_spin_unlock(&dl_b->lock);
2408
2409        return err;
2410}
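
    /*
     * Worked example (sketch): with the default limit inherited from
     * sched_rt_runtime_us / sched_rt_period_us = 950000 / 1000000,
     * dl_b->bw = to_ratio(1000000, 950000) ~= 0.95 << 20. On a 4-cpu
     * root domain, __dl_overflow() then keeps admitting -deadline tasks
     * as long as the resulting total_bw stays within 4 * 0.95 << 20.
     */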
2411
2412extern void init_dl_bw(struct dl_bw *dl_b);
2413
2414/*
2415 * wake_up_new_task - wake up a newly created task for the first time.
2416 *
2417 * This function will do some initial scheduler statistics housekeeping
2418 * that must be done for every newly created context, then puts the task
2419 * on the runqueue and wakes it.
2420 */
2421void wake_up_new_task(struct task_struct *p)
2422{
2423        unsigned long flags;
2424        struct rq *rq;
2425
2426        raw_spin_lock_irqsave(&p->pi_lock, flags);
2427        /* Initialize new task's runnable average */
2428        init_entity_runnable_average(&p->se);
2429#ifdef CONFIG_SMP
2430        /*
2431         * Fork balancing, do it here and not earlier because:
2432         *  - cpus_allowed can change in the fork path
2433         *  - any previously selected cpu might disappear through hotplug
2434         */
2435        set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
2436#endif
2437
2438        rq = __task_rq_lock(p);
2439        activate_task(rq, p, 0);
2440        p->on_rq = TASK_ON_RQ_QUEUED;
2441        trace_sched_wakeup_new(p);
2442        check_preempt_curr(rq, p, WF_FORK);
2443#ifdef CONFIG_SMP
2444        if (p->sched_class->task_woken) {
2445                /*
2446                 * Nothing relies on rq->lock after this, so it's fine to
2447                 * drop it.
2448                 */
2449                lockdep_unpin_lock(&rq->lock);
2450                p->sched_class->task_woken(rq, p);
2451                lockdep_pin_lock(&rq->lock);
2452        }
2453#endif
2454        task_rq_unlock(rq, p, &flags);
2455}
2456
2457#ifdef CONFIG_PREEMPT_NOTIFIERS
2458
2459static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
2460
2461void preempt_notifier_inc(void)
2462{
2463        static_key_slow_inc(&preempt_notifier_key);
2464}
2465EXPORT_SYMBOL_GPL(preempt_notifier_inc);
2466
2467void preempt_notifier_dec(void)
2468{
2469        static_key_slow_dec(&preempt_notifier_key);
2470}
2471EXPORT_SYMBOL_GPL(preempt_notifier_dec);
2472
2473/**
2474 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2475 * @notifier: notifier struct to register
2476 */
2477void preempt_notifier_register(struct preempt_notifier *notifier)
2478{
2479        if (!static_key_false(&preempt_notifier_key))
2480                WARN(1, "registering preempt_notifier while notifiers disabled\n");
2481
2482        hlist_add_head(&notifier->link, &current->preempt_notifiers);
2483}
2484EXPORT_SYMBOL_GPL(preempt_notifier_register);
2485
2486/**
2487 * preempt_notifier_unregister - no longer interested in preemption notifications
2488 * @notifier: notifier struct to unregister
2489 *
2490 * This is *not* safe to call from within a preemption notifier.
2491 */
2492void preempt_notifier_unregister(struct preempt_notifier *notifier)
2493{
2494        hlist_del(&notifier->link);
2495}
2496EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
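
    /*
     * Usage sketch (hypothetical caller; a real user would be e.g. a
     * hypervisor tracking when its vcpu thread is scheduled in and out).
     * my_ops, my_notifier and the two callbacks are illustrative names:
     *
     *   static struct preempt_ops my_ops = {
     *           .sched_in  = my_sched_in,   // (notifier, cpu)
     *           .sched_out = my_sched_out,  // (notifier, next)
     *   };
     *
     *   preempt_notifier_inc();
     *   preempt_notifier_init(&my_notifier, &my_ops);
     *   preempt_notifier_register(&my_notifier); // registers for current
     */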
2497
2498static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
2499{
2500        struct preempt_notifier *notifier;
2501
2502        hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2503                notifier->ops->sched_in(notifier, raw_smp_processor_id());
2504}
2505
2506static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2507{
2508        if (static_key_false(&preempt_notifier_key))
2509                __fire_sched_in_preempt_notifiers(curr);
2510}
2511
2512static void
2513__fire_sched_out_preempt_notifiers(struct task_struct *curr,
2514                                   struct task_struct *next)
2515{
2516        struct preempt_notifier *notifier;
2517
2518        hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2519                notifier->ops->sched_out(notifier, next);
2520}
2521
2522static __always_inline void
2523fire_sched_out_preempt_notifiers(struct task_struct *curr,
2524                                 struct task_struct *next)
2525{
2526        if (static_key_false(&preempt_notifier_key))
2527                __fire_sched_out_preempt_notifiers(curr, next);
2528}
2529
2530#else /* !CONFIG_PREEMPT_NOTIFIERS */
2531
2532static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2533{
2534}
2535
2536static inline void
2537fire_sched_out_preempt_notifiers(struct task_struct *curr,
2538                                 struct task_struct *next)
2539{
2540}
2541
2542#endif /* CONFIG_PREEMPT_NOTIFIERS */
2543
2544/**
2545 * prepare_task_switch - prepare to switch tasks
2546 * @rq: the runqueue preparing to switch
2547 * @prev: the current task that is being switched out
2548 * @next: the task we are going to switch to.
2549 *
2550 * This is called with the rq lock held and interrupts off. It must
2551 * be paired with a subsequent finish_task_switch after the context
2552 * switch.
2553 *
2554 * prepare_task_switch sets up locking and calls architecture specific
2555 * hooks.
2556 */
2557static inline void
2558prepare_task_switch(struct rq *rq, struct task_struct *prev,
2559                    struct task_struct *next)
2560{
2561        sched_info_switch(rq, prev, next);
2562        perf_event_task_sched_out(prev, next);
2563        fire_sched_out_preempt_notifiers(prev, next);
2564        prepare_lock_switch(rq, next);
2565        prepare_arch_switch(next);
2566}
2567
2568/**
2569 * finish_task_switch - clean up after a task-switch
2570 * @prev: the thread we just switched away from.
2571 *
2572 * finish_task_switch must be called after the context switch, paired
2573 * with a prepare_task_switch call before the context switch.
2574 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2575 * and do any other architecture-specific cleanup actions.
2576 *
2577 * Note that we may have delayed dropping an mm in context_switch(). If
2578 * so, we finish that here outside of the runqueue lock. (Doing it
2579 * with the lock held can cause deadlocks; see schedule() for
2580 * details.)
2581 *
2582 * The context switch has flipped the stack from under us and restored the
2583 * local variables which were saved when this task called schedule() in the
2584 * past. prev == current is still correct but we need to recalculate this_rq
2585 * because prev may have moved to another CPU.
2586 */
2587static struct rq *finish_task_switch(struct task_struct *prev)
2588        __releases(rq->lock)
2589{
2590        struct rq *rq = this_rq();
2591        struct mm_struct *mm = rq->prev_mm;
2592        long prev_state;
2593
2594        /*
2595         * The previous task will have left us with a preempt_count of 2
2596         * because it left us after:
2597         *
2598         *      schedule()
2599         *        preempt_disable();                    // 1
2600         *        __schedule()
2601         *          raw_spin_lock_irq(&rq->lock)        // 2
2602         *
2603         * Also, see FORK_PREEMPT_COUNT.
2604         */
2605        if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
2606                      "corrupted preempt_count: %s/%d/0x%x\n",
2607                      current->comm, current->pid, preempt_count()))
2608                preempt_count_set(FORK_PREEMPT_COUNT);
2609
2610        rq->prev_mm = NULL;
2611
2612        /*
2613         * A task struct has one reference for the use as "current".
2614         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2615         * schedule one last time. The schedule call will never return, and
2616         * the scheduled task must drop that reference.
2617         *
2618         * We must observe prev->state before clearing prev->on_cpu (in
2619         * finish_lock_switch), otherwise a concurrent wakeup can get prev
2620         * running on another CPU and we could race with its RUNNING -> DEAD
2621         * transition, resulting in a double drop.
2622         */
2623        prev_state = prev->state;
2624        vtime_task_switch(prev);
2625        perf_event_task_sched_in(prev, current);
2626        finish_lock_switch(rq, prev);
2627        finish_arch_post_lock_switch();
2628
2629        fire_sched_in_preempt_notifiers(current);
2630        if (mm)
2631                mmdrop(mm);
2632        if (unlikely(prev_state == TASK_DEAD)) {
2633                if (prev->sched_class->task_dead)
2634                        prev->sched_class->task_dead(prev);
2635
2636                /*
2637                 * Remove function-return probe instances associated with this
2638                 * task and put them back on the free list.
2639                 */
2640                kprobe_flush_task(prev);
2641                put_task_struct(prev);
2642        }
2643
2644        tick_nohz_task_switch();
2645        return rq;
2646}
2647
2648#ifdef CONFIG_SMP
2649
2650/* rq->lock is NOT held, but preemption is disabled */
2651static void __balance_callback(struct rq *rq)
2652{
2653        struct callback_head *head, *next;
2654        void (*func)(struct rq *rq);
2655        unsigned long flags;
2656
2657        raw_spin_lock_irqsave(&rq->lock, flags);
2658        head = rq->balance_callback;
2659        rq->balance_callback = NULL;
2660        while (head) {
2661                func = (void (*)(struct rq *))head->func;
2662                next = head->next;
2663                head->next = NULL;
2664                head = next;
2665
2666                func(rq);
2667        }
2668        raw_spin_unlock_irqrestore(&rq->lock, flags);
2669}
2670
2671static inline void balance_callback(struct rq *rq)
2672{
2673        if (unlikely(rq->balance_callback))
2674                __balance_callback(rq);
2675}
2676
2677#else
2678
2679static inline void balance_callback(struct rq *rq)
2680{
2681}
2682
2683#endif
2684
2685/**
2686 * schedule_tail - first thing a freshly forked thread must call.
2687 * @prev: the thread we just switched away from.
2688 */
2689asmlinkage __visible void schedule_tail(struct task_struct *prev)
2690        __releases(rq->lock)
2691{
2692        struct rq *rq;
2693
2694        /*
2695         * New tasks start with FORK_PREEMPT_COUNT, see there and
2696         * finish_task_switch() for details.
2697         *
2698         * finish_task_switch() will drop rq->lock and lower preempt_count
2699         * and the preempt_enable() will end up enabling preemption (on
2700         * PREEMPT_COUNT kernels).
2701         */
2702
2703        rq = finish_task_switch(prev);
2704        balance_callback(rq);
2705        preempt_enable();
2706
2707        if (current->set_child_tid)
2708                put_user(task_pid_vnr(current), current->set_child_tid);
2709}
2710
2711/*
2712 * context_switch - switch to the new MM and the new thread's register state.
2713 */
2714static __always_inline struct rq *
2715context_switch(struct rq *rq, struct task_struct *prev,
2716               struct task_struct *next)
2717{
2718        struct mm_struct *mm, *oldmm;
2719
2720        prepare_task_switch(rq, prev, next);
2721
2722        mm = next->mm;
2723        oldmm = prev->active_mm;
2724        /*
2725         * For paravirt, this is coupled with an exit in switch_to to
2726         * combine the page table reload and the switch backend into
2727         * one hypercall.
2728         */
2729        arch_start_context_switch(prev);
2730
2731        if (!mm) {
2732                next->active_mm = oldmm;
2733                atomic_inc(&oldmm->mm_count);
2734                enter_lazy_tlb(oldmm, next);
2735        } else
2736                switch_mm(oldmm, mm, next);
2737
2738        if (!prev->mm) {
2739                prev->active_mm = NULL;
2740                rq->prev_mm = oldmm;
2741        }
2742        /*
2743         * The runqueue lock will be released by the next task (which
2744         * is an invalid locking op, but in the case of the scheduler
2745         * it's an obvious special case), so we do an early lockdep
2746         * release here:
2747         */
2748        lockdep_unpin_lock(&rq->lock);
2749        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2750
2751        /* Here we just switch the register state and the stack. */
2752        switch_to(prev, next, prev);
2753        barrier();
2754
2755        return finish_task_switch(prev);
2756}
2757
2758/*
2759 * nr_running and nr_context_switches:
2760 *
2761 * externally visible scheduler statistics: current number of runnable
2762 * threads, total number of context switches performed since bootup.
2763 */
2764unsigned long nr_running(void)
2765{
2766        unsigned long i, sum = 0;
2767
2768        for_each_online_cpu(i)
2769                sum += cpu_rq(i)->nr_running;
2770
2771        return sum;
2772}
2773
2774/*
2775 * Check if only the current task is running on the cpu.
2776 *
2777 * Caution: this function does not check that the caller has disabled
2778 * preemption, thus the result might have a time-of-check-to-time-of-use
2779 * race.  The caller is responsible for using it correctly, for example:
2780 *
2781 * - from a non-preemptable section (of course)
2782 *
2783 * - from a thread that is bound to a single CPU
2784 *
2785 * - in a loop with very short iterations (e.g. a polling loop)
2786 */
2787bool single_task_running(void)
2788{
2789        return raw_rq()->nr_running == 1;
2790}
2791EXPORT_SYMBOL(single_task_running);
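
    /*
     * Usage sketch for the short-iteration polling case above;
     * condition_met() is an illustrative placeholder:
     *
     *   while (!condition_met()) {
     *           if (!single_task_running())
     *                   break;          // another task wants this cpu
     *           cpu_relax();
     *   }
     */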
2792
2793unsigned long long nr_context_switches(void)
2794{
2795        int i;
2796        unsigned long long sum = 0;
2797
2798        for_each_possible_cpu(i)
2799                sum += cpu_rq(i)->nr_switches;
2800
2801        return sum;
2802}
2803
2804unsigned long nr_iowait(void)
2805{
2806        unsigned long i, sum = 0;
2807
2808        for_each_possible_cpu(i)
2809                sum += atomic_read(&cpu_rq(i)->nr_iowait);
2810
2811        return sum;
2812}
2813
2814unsigned long nr_iowait_cpu(int cpu)
2815{
2816        struct rq *this = cpu_rq(cpu);
2817        return atomic_read(&this->nr_iowait);
2818}
2819
2820void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
2821{
2822        struct rq *rq = this_rq();
2823        *nr_waiters = atomic_read(&rq->nr_iowait);
2824        *load = rq->load.weight;
2825}
2826
2827#ifdef CONFIG_SMP
2828
2829/*
2830 * sched_exec - execve() is a valuable balancing opportunity, because at
2831 * this point the task has the smallest effective memory and cache footprint.
2832 */
2833void sched_exec(void)
2834{
2835        struct task_struct *p = current;
2836        unsigned long flags;
2837        int dest_cpu;
2838
2839        raw_spin_lock_irqsave(&p->pi_lock, flags);
2840        dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
2841        if (dest_cpu == smp_processor_id())
2842                goto unlock;
2843
2844        if (likely(cpu_active(dest_cpu))) {
2845                struct migration_arg arg = { p, dest_cpu };
2846
2847                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2848                stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
2849                return;
2850        }
2851unlock:
2852        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2853}
2854
2855#endif
2856
2857DEFINE_PER_CPU(struct kernel_stat, kstat);
2858DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
2859
2860EXPORT_PER_CPU_SYMBOL(kstat);
2861EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
2862
2863/*
2864 * Return accounted runtime for the task.
2865 * In case the task is currently running, return the runtime plus current's
2866 * pending runtime that has not been accounted yet.
2867 */
2868unsigned long long task_sched_runtime(struct task_struct *p)
2869{
2870        unsigned long flags;
2871        struct rq *rq;
2872        u64 ns;
2873
2874#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
2875        /*
2876         * 64-bit doesn't need locks to atomically read a 64-bit value.
2877         * So we have an optimization chance when the task's delta_exec is 0.
2878         * Reading ->on_cpu is racy, but this is ok.
2879         *
2880         * If we race with it leaving cpu, we'll take a lock. So we're correct.
2881         * If we race with it entering cpu, unaccounted time is 0. This is
2882         * indistinguishable from the read occurring a few cycles earlier.
2883         * If we see ->on_cpu without ->on_rq, the task is leaving, and has
2884         * been accounted, so we're correct here as well.
2885         */
2886        if (!p->on_cpu || !task_on_rq_queued(p))
2887                return p->se.sum_exec_runtime;
2888#endif
2889
2890        rq = task_rq_lock(p, &flags);
2891        /*
2892         * Must be ->curr _and_ ->on_rq.  If dequeued, we would
2893         * project cycles that may never be accounted to this
2894         * thread, breaking clock_gettime().
2895         */
2896        if (task_current(rq, p) && task_on_rq_queued(p)) {
2897                update_rq_clock(rq);
2898                p->sched_class->update_curr(rq);
2899        }
2900        ns = p->se.sum_exec_runtime;
2901        task_rq_unlock(rq, p, &flags);
2902
2903        return ns;
2904}
2905
2906/*
2907 * This function gets called by the timer code, with HZ frequency.
2908 * We call it with interrupts disabled.
2909 */
2910void scheduler_tick(void)
2911{
2912        int cpu = smp_processor_id();
2913        struct rq *rq = cpu_rq(cpu);
2914        struct task_struct *curr = rq->curr;
2915
2916        sched_clock_tick();
2917
2918        raw_spin_lock(&rq->lock);
2919        update_rq_clock(rq);
2920        curr->sched_class->task_tick(rq, curr, 0);
2921        update_cpu_load_active(rq);
2922        calc_global_load_tick(rq);
2923        raw_spin_unlock(&rq->lock);
2924
2925        perf_event_task_tick();
2926
2927#ifdef CONFIG_SMP
2928        rq->idle_balance = idle_cpu(cpu);
2929        trigger_load_balance(rq);
2930#endif
2931        rq_last_tick_reset(rq);
2932}
2933
2934#ifdef CONFIG_NO_HZ_FULL
2935/**
2936 * scheduler_tick_max_deferment
2937 *
2938 * Keep at least one tick per second when a single
2939 * active task is running because the scheduler doesn't
2940 * yet completely support a full dynticks environment.
2941 *
2942 * This makes sure that uptime, CFS vruntime, load
2943 * balancing, etc... continue to move forward, even
2944 * with a very low granularity.
2945 *
2946 * Return: Maximum deferment in nanoseconds.
2947 */
2948u64 scheduler_tick_max_deferment(void)
2949{
2950        struct rq *rq = this_rq();
2951        unsigned long next, now = READ_ONCE(jiffies);
2952
2953        next = rq->last_sched_tick + HZ;
2954
2955        if (time_before_eq(next, now))
2956                return 0;
2957
2958        return jiffies_to_nsecs(next - now);
2959}
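
    /*
     * E.g. with HZ=1000, if the last tick on this rq was 400 jiffies ago,
     * the tick may be deferred for at most jiffies_to_nsecs(600), i.e.
     * another 600ms, preserving the one-tick-per-second guarantee.
     */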
2960#endif
2961
2962#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2963                                defined(CONFIG_PREEMPT_TRACER))
2964
2965void preempt_count_add(int val)
2966{
2967#ifdef CONFIG_DEBUG_PREEMPT
2968        /*
2969         * Underflow?
2970         */
2971        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2972                return;
2973#endif
2974        __preempt_count_add(val);
2975#ifdef CONFIG_DEBUG_PREEMPT
2976        /*
2977         * Spinlock count overflowing soon?
2978         */
2979        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2980                                PREEMPT_MASK - 10);
2981#endif
2982        if (preempt_count() == val) {
2983                unsigned long ip = get_lock_parent_ip();
2984#ifdef CONFIG_DEBUG_PREEMPT
2985                current->preempt_disable_ip = ip;
2986#endif
2987                trace_preempt_off(CALLER_ADDR0, ip);
2988        }
2989}
2990EXPORT_SYMBOL(preempt_count_add);
2991NOKPROBE_SYMBOL(preempt_count_add);
2992
2993void preempt_count_sub(int val)
2994{
2995#ifdef CONFIG_DEBUG_PREEMPT
2996        /*
2997         * Underflow?
2998         */
2999        if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3000                return;
3001        /*
3002         * Is the spinlock portion underflowing?
3003         */
3004        if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3005                        !(preempt_count() & PREEMPT_MASK)))
3006                return;
3007#endif
3008
3009        if (preempt_count() == val)
3010                trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
3011        __preempt_count_sub(val);
3012}
3013EXPORT_SYMBOL(preempt_count_sub);
3014NOKPROBE_SYMBOL(preempt_count_sub);
3015
3016#endif
3017
3018/*
3019 * Print scheduling while atomic bug:
3020 */
3021static noinline void __schedule_bug(struct task_struct *prev)
3022{
3023        if (oops_in_progress)
3024                return;
3025
3026        printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3027                prev->comm, prev->pid, preempt_count());
3028
3029        debug_show_held_locks(prev);
3030        print_modules();
3031        if (irqs_disabled())
3032                print_irqtrace_events(prev);
3033#ifdef CONFIG_DEBUG_PREEMPT
3034        if (in_atomic_preempt_off()) {
3035                pr_err("Preemption disabled at:");
3036                print_ip_sym(current->preempt_disable_ip);
3037                pr_cont("\n");
3038        }
3039#endif
3040        dump_stack();
3041        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
3042}
3043
3044/*
3045 * Various schedule()-time debugging checks and statistics:
3046 */
3047static inline void schedule_debug(struct task_struct *prev)
3048{
3049#ifdef CONFIG_SCHED_STACK_END_CHECK
3050        BUG_ON(task_stack_end_corrupted(prev));
3051#endif
3052
3053        if (unlikely(in_atomic_preempt_off())) {
3054                __schedule_bug(prev);
3055                preempt_count_set(PREEMPT_DISABLED);
3056        }
3057        rcu_sleep_check();
3058
3059        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3060
3061        schedstat_inc(this_rq(), sched_count);
3062}
3063
3064/*
3065 * Pick up the highest-prio task:
3066 */
3067static inline struct task_struct *
3068pick_next_task(struct rq *rq, struct task_struct *prev)
3069{
3070        const struct sched_class *class = &fair_sched_class;
3071        struct task_struct *p;
3072
3073        /*
3074         * Optimization: we know that if all tasks are in
3075         * the fair class we can call that function directly:
3076         */
3077        if (likely(prev->sched_class == class &&
3078                   rq->nr_running == rq->cfs.h_nr_running)) {
3079                p = fair_sched_class.pick_next_task(rq, prev);
3080                if (unlikely(p == RETRY_TASK))
3081                        goto again;
3082
3083                /* assumes fair_sched_class->next == idle_sched_class */
3084                if (unlikely(!p))
3085                        p = idle_sched_class.pick_next_task(rq, prev);
3086
3087                return p;
3088        }
3089
3090again:
3091        for_each_class(class) {
3092                p = class->pick_next_task(rq, prev);
3093                if (p) {
3094                        if (unlikely(p == RETRY_TASK))
3095                                goto again;
3096                        return p;
3097                }
3098        }
3099
3100        BUG(); /* the idle class will always have a runnable task */
3101}
3102
3103/*
3104 * __schedule() is the main scheduler function.
3105 *
3106 * The main means of driving the scheduler and thus entering this function are:
3107 *
3108 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
3109 *
3110 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
3111 *      paths. For example, see arch/x86/entry_64.S.
3112 *
3113 *      To drive preemption between tasks, the scheduler sets the flag in timer
3114 *      interrupt handler scheduler_tick().
3115 *
3116 *   3. Wakeups don't really cause entry into schedule(). They add a
3117 *      task to the run-queue and that's it.
3118 *
3119 *      Now, if the new task added to the run-queue preempts the current
3120 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
3121 *      called on the nearest possible occasion:
3122 *
3123 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
3124 *
3125 *         - in syscall or exception context, at the next outermost
3126 *           preempt_enable(). (this might be as soon as the wake_up()'s
3127 *           spin_unlock()!)
3128 *
3129 *         - in IRQ context, return from interrupt-handler to
3130 *           preemptible context
3131 *
3132 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
3133 *         then at the next:
3134 *
3135 *          - cond_resched() call
3136 *          - explicit schedule() call
3137 *          - return from syscall or exception to user-space
3138 *          - return from interrupt-handler to user-space
3139 *
3140 * WARNING: must be called with preemption disabled!
3141 */
3142static void __sched notrace __schedule(bool preempt)
3143{
3144        struct task_struct *prev, *next;
3145        unsigned long *switch_count;
3146        struct rq *rq;
3147        int cpu;
3148
3149        cpu = smp_processor_id();
3150        rq = cpu_rq(cpu);
3151        prev = rq->curr;
3152
3153        /*
3154         * do_exit() calls schedule() with preemption disabled as an exception;
3155         * however we must fix that up, otherwise the next task will see an
3156         * inconsistent (higher) preempt count.
3157         *
3158         * It also keeps the schedule_debug() check below from complaining
3159         * about this.
3160         */
3161        if (unlikely(prev->state == TASK_DEAD))
3162                preempt_enable_no_resched_notrace();
3163
3164        schedule_debug(prev);
3165
3166        if (sched_feat(HRTICK))
3167                hrtick_clear(rq);
3168
3169        local_irq_disable();
3170        rcu_note_context_switch();
3171
3172        /*
3173         * Make sure that signal_pending_state()->signal_pending() below
3174         * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
3175         * done by the caller to avoid the race with signal_wake_up().
3176         */
3177        smp_mb__before_spinlock();
3178        raw_spin_lock(&rq->lock);
3179        lockdep_pin_lock(&rq->lock);
3180
3181        rq->clock_skip_update <<= 1; /* promote REQ to ACT */
3182
3183        switch_count = &prev->nivcsw;
3184        if (!preempt && prev->state) {
3185                if (unlikely(signal_pending_state(prev->state, prev))) {
3186                        prev->state = TASK_RUNNING;
3187                } else {
3188                        deactivate_task(rq, prev, DEQUEUE_SLEEP);
3189                        prev->on_rq = 0;
3190
3191                        /*
3192                         * If a worker went to sleep, notify and ask workqueue
3193                         * whether it wants to wake up a task to maintain
3194                         * concurrency.
3195                         */
3196                        if (prev->flags & PF_WQ_WORKER) {
3197                                struct task_struct *to_wakeup;
3198
3199                                to_wakeup = wq_worker_sleeping(prev);
3200                                if (to_wakeup)
3201                                        try_to_wake_up_local(to_wakeup);
3202                        }
3203                }
3204                switch_count = &prev->nvcsw;
3205        }
3206
3207        if (task_on_rq_queued(prev))
3208                update_rq_clock(rq);
3209
3210        next = pick_next_task(rq, prev);
3211        clear_tsk_need_resched(prev);
3212        clear_preempt_need_resched();
3213        rq->clock_skip_update = 0;
3214
3215        if (likely(prev != next)) {
3216                rq->nr_switches++;
3217                rq->curr = next;
3218                ++*switch_count;
3219
3220                trace_sched_switch(preempt, prev, next);
3221                rq = context_switch(rq, prev, next); /* unlocks the rq */
3222        } else {
3223                lockdep_unpin_lock(&rq->lock);
3224                raw_spin_unlock_irq(&rq->lock);
3225        }
3226
3227        balance_callback(rq);
3228}
3229STACK_FRAME_NON_STANDARD(__schedule); /* switch_to() */
3230
3231static inline void sched_submit_work(struct task_struct *tsk)
3232{
3233        if (!tsk->state || tsk_is_pi_blocked(tsk))
3234                return;
3235        /*
3236         * If we are going to sleep and we have plugged IO queued,
3237         * make sure to submit it to avoid deadlocks.
3238         */
3239        if (blk_needs_flush_plug(tsk))
3240                blk_schedule_flush_plug(tsk);
3241}
3242
3243asmlinkage __visible void __sched schedule(void)
3244{
3245        struct task_struct *tsk = current;
3246
3247        sched_submit_work(tsk);
3248        do {
3249                preempt_disable();
3250                __schedule(false);
3251                sched_preempt_enable_no_resched();
3252        } while (need_resched());
3253}
3254EXPORT_SYMBOL(schedule);
3255
3256#ifdef CONFIG_CONTEXT_TRACKING
3257asmlinkage __visible void __sched schedule_user(void)
3258{
3259        /*
3260         * If we come here after a random call to set_need_resched(),
3261         * or we have been woken up remotely but the IPI has not yet arrived,
3262         * we haven't yet exited the RCU idle mode. Do it here manually until
3263         * we find a better solution.
3264         *
3265         * NB: There are buggy callers of this function.  Ideally we
3266         * should warn if prev_state != CONTEXT_USER, but that will trigger
3267         * too frequently to make sense yet.
3268         */
3269        enum ctx_state prev_state = exception_enter();
3270        schedule();
3271        exception_exit(prev_state);
3272}
3273#endif
3274
3275/**
3276 * schedule_preempt_disabled - called with preemption disabled
3277 *
3278 * Returns with preemption disabled. Note: preempt_count must be 1
3279 */
3280void __sched schedule_preempt_disabled(void)
3281{
3282        sched_preempt_enable_no_resched();
3283        schedule();
3284        preempt_disable();
3285}
3286
3287static void __sched notrace preempt_schedule_common(void)
3288{
3289        do {
3290                preempt_disable_notrace();
3291                __schedule(true);
3292                preempt_enable_no_resched_notrace();
3293
3294                /*
3295                 * Check again in case we missed a preemption opportunity
3296                 * between schedule and now.
3297                 */
3298        } while (need_resched());
3299}
3300
3301#ifdef CONFIG_PREEMPT
3302/*
3303 * This is the entry point to schedule() from in-kernel preemption
3304 * off of preempt_enable(). Preemptions off the return-from-interrupt
3305 * path occur in preempt_schedule_irq() and call the scheduler directly.
3306 */
3307asmlinkage __visible void __sched notrace preempt_schedule(void)
3308{
3309        /*
3310         * If there is a non-zero preempt_count or interrupts are disabled,
3311         * we do not want to preempt the current task. Just return.
3312         */
3313        if (likely(!preemptible()))
3314                return;
3315
3316        preempt_schedule_common();
3317}
3318NOKPROBE_SYMBOL(preempt_schedule);
3319EXPORT_SYMBOL(preempt_schedule);
3320
3321/**
3322 * preempt_schedule_notrace - preempt_schedule called by tracing
3323 *
3324 * The tracing infrastructure uses preempt_enable_notrace to prevent
3325 * recursion and tracing preempt enabling caused by the tracing
3326 * infrastructure itself. But as tracing can happen in areas coming
3327 * from userspace or just about to enter userspace, a preempt enable
3328 * can occur before user_exit() is called. This will cause the scheduler
3329 * to be called when the system is still in usermode.
3330 *
3331 * To prevent this, the preempt_enable_notrace will use this function
3332 * instead of preempt_schedule() to exit user context if needed before
3333 * calling the scheduler.
3334 */
3335asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
3336{
3337        enum ctx_state prev_ctx;
3338
3339        if (likely(!preemptible()))
3340                return;
3341
3342        do {
3343                preempt_disable_notrace();
3344                /*
3345                 * Needs preempt disabled in case user_exit() is traced
3346                 * and the tracer calls preempt_enable_notrace() causing
3347                 * an infinite recursion.
3348                 */
3349                prev_ctx = exception_enter();
3350                __schedule(true);
3351                exception_exit(prev_ctx);
3352
3353                preempt_enable_no_resched_notrace();
3354        } while (need_resched());
3355}
3356EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
3357
3358#endif /* CONFIG_PREEMPT */
3359
3360/*
3361 * This is the entry point to schedule() for kernel preemption
3362 * off of irq context.
3363 * Note that this is called and returns with irqs disabled. This
3364 * protects us against recursive calls from irq context.
3365 */
3366asmlinkage __visible void __sched preempt_schedule_irq(void)
3367{
3368        enum ctx_state prev_state;
3369
3370        /* Catch callers which need to be fixed */
3371        BUG_ON(preempt_count() || !irqs_disabled());
3372
3373        prev_state = exception_enter();
3374
3375        do {
3376                preempt_disable();
3377                local_irq_enable();
3378                __schedule(true);
3379                local_irq_disable();
3380                sched_preempt_enable_no_resched();
3381        } while (need_resched());
3382
3383        exception_exit(prev_state);
3384}
3385
3386int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
3387                          void *key)
3388{
3389        return try_to_wake_up(curr->private, mode, wake_flags);
3390}
3391EXPORT_SYMBOL(default_wake_function);
3392
3393#ifdef CONFIG_RT_MUTEXES
3394
3395/*
3396 * rt_mutex_setprio - set the current priority of a task
3397 * @p: task
3398 * @prio: prio value (kernel-internal form)
3399 *
3400 * This function changes the 'effective' priority of a task. It does
3401 * not touch ->normal_prio like __setscheduler().
3402 *
3403 * Used by the rt_mutex code to implement priority inheritance
3404 * logic. The call site invokes this only if the task's priority changed.
3405 */
3406void rt_mutex_setprio(struct task_struct *p, int prio)
3407{
3408        int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
3409        struct rq *rq;
3410        const struct sched_class *prev_class;
3411
3412        BUG_ON(prio > MAX_PRIO);
3413
3414        rq = __task_rq_lock(p);
3415
3416        /*
3417         * Idle task boosting is a no-no in general. There is one
3418         * exception, when PREEMPT_RT and NOHZ is active:
3419         *
3420         * The idle task calls get_next_timer_interrupt() and holds
3421         * the timer wheel base->lock on the CPU and another CPU wants
3422         * to access the timer (probably to cancel it). We can safely
3423         * ignore the boosting request, as the idle CPU runs this code
3424         * with interrupts disabled and will complete the lock
3425         * protected section without being interrupted. So there is no
3426         * real need to boost.
3427         */
3428        if (unlikely(p == rq->idle)) {
3429                WARN_ON(p != rq->curr);
3430                WARN_ON(p->pi_blocked_on);
3431                goto out_unlock;
3432        }
3433
3434        trace_sched_pi_setprio(p, prio);
3435        oldprio = p->prio;
3436
3437        if (oldprio == prio)
3438                queue_flag &= ~DEQUEUE_MOVE;
3439
3440        prev_class = p->sched_class;
3441        queued = task_on_rq_queued(p);
3442        running = task_current(rq, p);
3443        if (queued)
3444                dequeue_task(rq, p, queue_flag);
3445        if (running)
3446                put_prev_task(rq, p);
3447
3448        /*
3449         * Boosting conditions are:
3450         * 1. -rt task is running and holds mutex A
3451         *      --> -dl task blocks on mutex A
3452         *
3453         * 2. -dl task is running and holds mutex A
3454         *      --> -dl task blocks on mutex A and could preempt the
3455         *          running task
3456         */
3457        if (dl_prio(prio)) {
3458                struct task_struct *pi_task = rt_mutex_get_top_task(p);
3459                if (!dl_prio(p->normal_prio) ||
3460                    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
3461                        p->dl.dl_boosted = 1;
3462                        queue_flag |= ENQUEUE_REPLENISH;
3463                } else
3464                        p->dl.dl_boosted = 0;
3465                p->sched_class = &dl_sched_class;
3466        } else if (rt_prio(prio)) {
3467                if (dl_prio(oldprio))
3468                        p->dl.dl_boosted = 0;
3469                if (oldprio < prio)
3470                        queue_flag |= ENQUEUE_HEAD;
3471                p->sched_class = &rt_sched_class;
3472        } else {
3473                if (dl_prio(oldprio))
3474                        p->dl.dl_boosted = 0;
3475                if (rt_prio(oldprio))
3476                        p->rt.timeout = 0;
3477                p->sched_class = &fair_sched_class;
3478        }
3479
3480        p->prio = prio;
3481
3482        if (running)
3483                p->sched_class->set_curr_task(rq);
3484        if (queued)
3485                enqueue_task(rq, p, queue_flag);
3486
3487        check_class_changed(rq, p, prev_class, oldprio);
3488out_unlock:
3489        preempt_disable(); /* avoid rq from going away on us */
3490        __task_rq_unlock(rq);
3491
3492        balance_callback(rq);
3493        preempt_enable();
3494}
3495#endif
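/*
 * Illustrative sketch, not part of the original source: the priority
 * inheritance handled above is driven by the in-kernel rt_mutex API.
 * A low priority task holding an rt_mutex is boosted via
 * rt_mutex_setprio() when a higher priority task blocks on the same
 * lock, and deboosted again on unlock. The lock and helpers below are
 * hypothetical; only the rt_mutex calls are real.
 */
#if 0	/* example only, never compiled */
static DEFINE_RT_MUTEX(example_lock);

static void example_critical_section(void)
{
	rt_mutex_lock(&example_lock);
	/*
	 * If a higher priority task blocks on example_lock here, this
	 * task's ->prio is boosted through rt_mutex_setprio().
	 */
	example_do_work();
	rt_mutex_unlock(&example_lock);	/* deboost happens on unlock */
}
#endif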
3496
3497void set_user_nice(struct task_struct *p, long nice)
3498{
3499        int old_prio, delta, queued;
3500        unsigned long flags;
3501        struct rq *rq;
3502
3503        if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
3504                return;
3505        /*
3506         * We have to be careful, if called from sys_setpriority(),
3507         * the task might be in the middle of scheduling on another CPU.
3508         */
3509        rq = task_rq_lock(p, &flags);
3510        /*
3511         * The RT priorities are set via sched_setscheduler(), but we still
3512         * allow the 'normal' nice value to be set - but as expected
3513         * it won't have any effect on scheduling while the task is
3514         * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
3515         */
3516        if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
3517                p->static_prio = NICE_TO_PRIO(nice);
3518                goto out_unlock;
3519        }
3520        queued = task_on_rq_queued(p);
3521        if (queued)
3522                dequeue_task(rq, p, DEQUEUE_SAVE);
3523
3524        p->static_prio = NICE_TO_PRIO(nice);
3525        set_load_weight(p);
3526        old_prio = p->prio;
3527        p->prio = effective_prio(p);
3528        delta = p->prio - old_prio;
3529
3530        if (queued) {
3531                enqueue_task(rq, p, ENQUEUE_RESTORE);
3532                /*
3533                 * If the task increased its priority or is running and
3534                 * lowered its priority, then reschedule its CPU:
3535                 */
3536                if (delta < 0 || (delta > 0 && task_running(rq, p)))
3537                        resched_curr(rq);
3538        }
3539out_unlock:
3540        task_rq_unlock(rq, p, &flags);
3541}
3542EXPORT_SYMBOL(set_user_nice);
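/*
 * Usage sketch, illustrative and not part of the original source:
 * kernel threads typically call set_user_nice() on themselves, e.g.
 * to drop to a background weight right after being created. The loop
 * body below is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_background_kthread(void *unused)
{
	set_user_nice(current, 10);	/* nice 10: lighter CFS weight */
	while (!kthread_should_stop())
		example_do_background_work();
	return 0;
}
#endif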
3543
3544/*
3545 * can_nice - check if a task can reduce its nice value
3546 * @p: task
3547 * @nice: nice value
3548 */
3549int can_nice(const struct task_struct *p, const int nice)
3550{
3551        /* convert nice value [19,-20] to rlimit style value [1,40] */
3552        int nice_rlim = nice_to_rlimit(nice);
3553
3554        return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
3555                capable(CAP_SYS_NICE));
3556}
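/*
 * Worked example for the conversion above (nice_to_rlimit() is defined
 * elsewhere; the numbers follow from mapping [19,-20] onto [1,40]):
 * nice 19 -> rlimit 1, nice 0 -> rlimit 20, nice -20 -> rlimit 40.
 * So with RLIMIT_NICE set to 25, an unprivileged task may lower its
 * nice value down to -5, but no further.
 */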
3557
3558#ifdef __ARCH_WANT_SYS_NICE
3559
3560/*
3561 * sys_nice - change the priority of the current process.
3562 * @increment: priority increment
3563 *
3564 * sys_setpriority is a more generic, but much slower function that
3565 * does similar things.
3566 */
3567SYSCALL_DEFINE1(nice, int, increment)
3568{
3569        long nice, retval;
3570
3571        /*
3572         * Setpriority might change our priority at the same moment.
3573         * We don't have to worry. Conceptually one call occurs first
3574         * and we have a single winner.
3575         */
3576        increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
3577        nice = task_nice(current) + increment;
3578
3579        nice = clamp_val(nice, MIN_NICE, MAX_NICE);
3580        if (increment < 0 && !can_nice(current, nice))
3581                return -EPERM;
3582
3583        retval = security_task_setnice(current, nice);
3584        if (retval)
3585                return retval;
3586
3587        set_user_nice(current, nice);
3588        return 0;
3589}
3590
3591#endif
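/*
 * Userspace sketch, illustrative and not part of the original source:
 * glibc's nice(3) is implemented in terms of this syscall or of
 * setpriority(2). Note the clamping above: the increment is first
 * clamped to +/-NICE_WIDTH, and the resulting nice value to
 * [MIN_NICE, MAX_NICE]. Since nice(3) may legitimately return -1,
 * errno must be cleared first to detect errors:
 */
#if 0	/* example only, never compiled here */
#include <unistd.h>
#include <errno.h>

int example_renice_self(int inc)
{
	errno = 0;
	if (nice(inc) == -1 && errno)
		return -1;	/* e.g. EPERM for a negative increment */
	return 0;
}
#endif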
3592
3593/**
3594 * task_prio - return the priority value of a given task.
3595 * @p: the task in question.
3596 *
3597 * Return: The priority value as seen by users in /proc:
3598 * RT tasks map to [-100 ... -1], SCHED_DEADLINE tasks to -101 and
3599 * normal tasks to [0 ... 39] (i.e. 20 + nice).
3600 */
3601int task_prio(const struct task_struct *p)
3602{
3603        return p->prio - MAX_RT_PRIO;
3604}
3605
3606/**
3607 * idle_cpu - is a given cpu idle currently?
3608 * @cpu: the processor in question.
3609 *
3610 * Return: 1 if the CPU is currently idle. 0 otherwise.
3611 */
3612int idle_cpu(int cpu)
3613{
3614        struct rq *rq = cpu_rq(cpu);
3615
3616        if (rq->curr != rq->idle)
3617                return 0;
3618
3619        if (rq->nr_running)
3620                return 0;
3621
3622#ifdef CONFIG_SMP
3623        if (!llist_empty(&rq->wake_list))
3624                return 0;
3625#endif
3626
3627        return 1;
3628}
3629
3630/**
3631 * idle_task - return the idle task for a given cpu.
3632 * @cpu: the processor in question.
3633 *
3634 * Return: The idle task for the cpu @cpu.
3635 */
3636struct task_struct *idle_task(int cpu)
3637{
3638        return cpu_rq(cpu)->idle;
3639}
3640
3641/**
3642 * find_process_by_pid - find a process with a matching PID value.
3643 * @pid: the pid in question.
3644 *
3645 * Return: The task of @pid, if found. %NULL otherwise.
3646 */
3647static struct task_struct *find_process_by_pid(pid_t pid)
3648{
3649        return pid ? find_task_by_vpid(pid) : current;
3650}
3651
3652/*
3653 * This function initializes the sched_dl_entity of a task that is
3654 * becoming a SCHED_DEADLINE task.
3655 *
3656 * Only the static values are considered here, the actual runtime and the
3657 * absolute deadline will be properly calculated when the task is enqueued
3658 * for the first time with its new policy.
3659 */
3660static void
3661__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3662{
3663        struct sched_dl_entity *dl_se = &p->dl;
3664
3665        dl_se->dl_runtime = attr->sched_runtime;
3666        dl_se->dl_deadline = attr->sched_deadline;
3667        dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3668        dl_se->flags = attr->sched_flags;
3669        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3670
3671        /*
3672         * Changing the parameters of a task is 'tricky' and we're not doing
3673         * the correct thing -- also see task_dead_dl() and switched_from_dl().
3674         *
3675         * What we SHOULD do is delay the bandwidth release until the 0-lag
3676         * point. This would include retaining the task_struct until that time
3677         * and change dl_overflow() to not immediately decrement the current
3678         * amount.
3679         *
3680         * Instead we retain the current runtime/deadline and let the new
3681         * parameters take effect after the current reservation period lapses.
3682         * This is safe (albeit pessimistic) because the 0-lag point is always
3683         * before the current scheduling deadline.
3684         *
3685         * We can still have temporary overloads because we do not delay the
3686         * change in bandwidth until that time; so admission control is
3687         * not on the safe side. It does however guarantee tasks will never
3688         * consume more than promised.
3689         */
3690}
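/*
 * Worked example for the bandwidth computed above: to_ratio() (defined
 * elsewhere in this file) returns runtime/period in 2^20 fixed point,
 * so sched_runtime = 10ms with sched_period = 100ms - a utilization of
 * 0.1 - yields dl_bw ~= 0.1 * (1 << 20) ~= 104857.
 */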
3691
3692/*
3693 * sched_setparam() passes in -1 for its policy, to let the functions
3694 * it calls know not to change it.
3695 */
3696#define SETPARAM_POLICY -1
3697
3698static void __setscheduler_params(struct task_struct *p,
3699                const struct sched_attr *attr)
3700{
3701        int policy = attr->sched_policy;
3702
3703        if (policy == SETPARAM_POLICY)
3704                policy = p->policy;
3705
3706        p->policy = policy;
3707
3708        if (dl_policy(policy))
3709                __setparam_dl(p, attr);
3710        else if (fair_policy(policy))
3711                p->static_prio = NICE_TO_PRIO(attr->sched_nice);
3712
3713        /*
3714         * __sched_setscheduler() ensures attr->sched_priority == 0 when
3715         * !rt_policy. Always setting this ensures that things like
3716         * getparam()/getattr() don't report silly values for !rt tasks.
3717         */
3718        p->rt_priority = attr->sched_priority;
3719        p->normal_prio = normal_prio(p);
3720        set_load_weight(p);
3721}
3722
3723/* Actually do priority change: must hold pi & rq lock. */
3724static void __setscheduler(struct rq *rq, struct task_struct *p,
3725                           const struct sched_attr *attr, bool keep_boost)
3726{
3727        __setscheduler_params(p, attr);
3728
3729        /*
3730         * Keep a potential priority boosting if called from
3731         * sched_setscheduler().
3732         */
3733        if (keep_boost)
3734                p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
3735        else
3736                p->prio = normal_prio(p);
3737
3738        if (dl_prio(p->prio))
3739                p->sched_class = &dl_sched_class;
3740        else if (rt_prio(p->prio))
3741                p->sched_class = &rt_sched_class;
3742        else
3743                p->sched_class = &fair_sched_class;
3744}
3745
3746static void
3747__getparam_dl(struct task_struct *p, struct sched_attr *attr)
3748{
3749        struct sched_dl_entity *dl_se = &p->dl;
3750
3751        attr->sched_priority = p->rt_priority;
3752        attr->sched_runtime = dl_se->dl_runtime;
3753        attr->sched_deadline = dl_se->dl_deadline;
3754        attr->sched_period = dl_se->dl_period;
3755        attr->sched_flags = dl_se->flags;
3756}
3757
3758/*
3759 * This function validates the new parameters of a -deadline task.
3760 * We require the deadline to be non-zero and greater than or equal
3761 * to the runtime, and the period to be either zero or greater than
3762 * or equal to the deadline. Furthermore, we have to be sure that
3763 * user parameters are above the internal resolution of 1us (we
3764 * check sched_runtime only, since it is always the smallest one) and
3765 * below 2^63 ns (we have to check both sched_deadline and
3766 * sched_period, as the latter can be zero).
3767 */
3768static bool
3769__checkparam_dl(const struct sched_attr *attr)
3770{
3771        /* deadline != 0 */
3772        if (attr->sched_deadline == 0)
3773                return false;
3774
3775        /*
3776         * Since we truncate DL_SCALE bits, make sure we're at least
3777         * that big.
3778         */
3779        if (attr->sched_runtime < (1ULL << DL_SCALE))
3780                return false;
3781
3782        /*
3783         * Since we use the MSB for wrap-around and sign issues, make
3784         * sure it's not set (mind that period can be equal to zero).
3785         */
3786        if (attr->sched_deadline & (1ULL << 63) ||
3787            attr->sched_period & (1ULL << 63))
3788                return false;
3789
3790        /* runtime <= deadline <= period (if period != 0) */
3791        if ((attr->sched_period != 0 &&
3792             attr->sched_period < attr->sched_deadline) ||
3793            attr->sched_deadline < attr->sched_runtime)
3794                return false;
3795
3796        return true;
3797}
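/*
 * Illustrative parameter sets for the checks above (values are in
 * nanoseconds): runtime = 10ms, deadline = 50ms, period = 100ms is
 * accepted (runtime <= deadline <= period, runtime >= ~1us). A runtime
 * of 500ns fails the DL_SCALE check; deadline = 100ms with
 * period = 50ms fails because period < deadline; and
 * sched_deadline == 0 is always rejected.
 */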
3798
3799/*
3800 * Check whether the target process has a UID that matches the current process's.
3801 */
3802static bool check_same_owner(struct task_struct *p)
3803{
3804        const struct cred *cred = current_cred(), *pcred;
3805        bool match;
3806
3807        rcu_read_lock();
3808        pcred = __task_cred(p);
3809        match = (uid_eq(cred->euid, pcred->euid) ||
3810                 uid_eq(cred->euid, pcred->uid));
3811        rcu_read_unlock();
3812        return match;
3813}
3814
3815static bool dl_param_changed(struct task_struct *p,
3816                const struct sched_attr *attr)
3817{
3818        struct sched_dl_entity *dl_se = &p->dl;
3819
3820        if (dl_se->dl_runtime != attr->sched_runtime ||
3821                dl_se->dl_deadline != attr->sched_deadline ||
3822                dl_se->dl_period != attr->sched_period ||
3823                dl_se->flags != attr->sched_flags)
3824                return true;
3825
3826        return false;
3827}
3828
3829static int __sched_setscheduler(struct task_struct *p,
3830                                const struct sched_attr *attr,
3831                                bool user, bool pi)
3832{
3833        int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
3834                      MAX_RT_PRIO - 1 - attr->sched_priority;
3835        int retval, oldprio, oldpolicy = -1, queued, running;
3836        int new_effective_prio, policy = attr->sched_policy;
3837        unsigned long flags;
3838        const struct sched_class *prev_class;
3839        struct rq *rq;
3840        int reset_on_fork;
3841        int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
3842
3843        /* may grab non-irq protected spin_locks */
3844        BUG_ON(in_interrupt());
3845recheck:
3846        /* double check policy once rq lock held */
3847        if (policy < 0) {
3848                reset_on_fork = p->sched_reset_on_fork;
3849                policy = oldpolicy = p->policy;
3850        } else {
3851                reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
3852
3853                if (!valid_policy(policy))
3854                        return -EINVAL;
3855        }
3856
3857        if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
3858                return -EINVAL;
3859
3860        /*
3861         * Valid priorities for SCHED_FIFO and SCHED_RR are
3862         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3863         * SCHED_BATCH and SCHED_IDLE is 0.
3864         */
3865        if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
3866            (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
3867                return -EINVAL;
3868        if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
3869            (rt_policy(policy) != (attr->sched_priority != 0)))
3870                return -EINVAL;
3871
3872        /*
3873         * Allow unprivileged RT tasks to decrease priority:
3874         */
3875        if (user && !capable(CAP_SYS_NICE)) {
3876                if (fair_policy(policy)) {
3877                        if (attr->sched_nice < task_nice(p) &&
3878                            !can_nice(p, attr->sched_nice))
3879                                return -EPERM;
3880                }
3881
3882                if (rt_policy(policy)) {
3883                        unsigned long rlim_rtprio =
3884                                        task_rlimit(p, RLIMIT_RTPRIO);
3885
3886                        /* can't set/change the rt policy */
3887                        if (policy != p->policy && !rlim_rtprio)
3888                                return -EPERM;
3889
3890                        /* can't increase priority */
3891                        if (attr->sched_priority > p->rt_priority &&
3892                            attr->sched_priority > rlim_rtprio)
3893                                return -EPERM;
3894                }
3895
3896                /*
3897                 * Can't set/change SCHED_DEADLINE policy at all for now
3898                 * (safest behavior); in the future we would like to allow
3899                 * unprivileged DL tasks to increase their relative deadline
3900                 * or reduce their runtime (both of which reduce utilization).
3901                 */
3902                if (dl_policy(policy))
3903                        return -EPERM;
3904
3905                /*
3906                 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3907                 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
3908                 */
3909                if (idle_policy(p->policy) && !idle_policy(policy)) {
3910                        if (!can_nice(p, task_nice(p)))
3911                                return -EPERM;
3912                }
3913
3914                /* can't change other user's priorities */
3915                if (!check_same_owner(p))
3916                        return -EPERM;
3917
3918                /* Normal users shall not reset the sched_reset_on_fork flag */
3919                if (p->sched_reset_on_fork && !reset_on_fork)
3920                        return -EPERM;
3921        }
3922
3923        if (user) {
3924                retval = security_task_setscheduler(p);
3925                if (retval)
3926                        return retval;
3927        }
3928
3929        /*
3930         * make sure no PI-waiters arrive (or leave) while we are
3931         * changing the priority of the task:
3932         *
3933         * To be able to change p->policy safely, the appropriate
3934         * runqueue lock must be held.
3935         */
3936        rq = task_rq_lock(p, &flags);
3937
3938        /*
3939         * Changing the policy of the stop threads is a very bad idea.
3940         */
3941        if (p == rq->stop) {
3942                task_rq_unlock(rq, p, &flags);
3943                return -EINVAL;
3944        }
3945
3946        /*
3947         * If not changing anything there's no need to proceed further,
3948         * but store a possible modification of reset_on_fork.
3949         */
3950        if (unlikely(policy == p->policy)) {
3951                if (fair_policy(policy) && attr->sched_nice != task_nice(p))
3952                        goto change;
3953                if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
3954                        goto change;
3955                if (dl_policy(policy) && dl_param_changed(p, attr))
3956                        goto change;
3957
3958                p->sched_reset_on_fork = reset_on_fork;
3959                task_rq_unlock(rq, p, &flags);
3960                return 0;
3961        }
3962change:
3963
3964        if (user) {
3965#ifdef CONFIG_RT_GROUP_SCHED
3966                /*
3967                 * Do not allow realtime tasks into groups that have no runtime
3968                 * assigned.
3969                 */
3970                if (rt_bandwidth_enabled() && rt_policy(policy) &&
3971                                task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3972                                !task_group_is_autogroup(task_group(p))) {
3973                        task_rq_unlock(rq, p, &flags);
3974                        return -EPERM;
3975                }
3976#endif
3977#ifdef CONFIG_SMP
3978                if (dl_bandwidth_enabled() && dl_policy(policy)) {
3979                        cpumask_t *span = rq->rd->span;
3980
3981                        /*
3982                         * Don't allow tasks with an affinity mask smaller than
3983                         * the entire root_domain to become SCHED_DEADLINE. We
3984                         * will also fail if there's no bandwidth available.
3985                         */
3986                        if (!cpumask_subset(span, &p->cpus_allowed) ||
3987                            rq->rd->dl_bw.bw == 0) {
3988                                task_rq_unlock(rq, p, &flags);
3989                                return -EPERM;
3990                        }
3991                }
3992#endif
3993        }
3994
3995        /* recheck policy now with rq lock held */
3996        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3997                policy = oldpolicy = -1;
3998                task_rq_unlock(rq, p, &flags);
3999                goto recheck;
4000        }
4001
4002        /*
4003         * If setscheduling to SCHED_DEADLINE (or changing the parameters
4004         * of a SCHED_DEADLINE task) we need to check if enough bandwidth
4005         * is available.
4006         */
4007        if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
4008                task_rq_unlock(rq, p, &flags);
4009                return -EBUSY;
4010        }
4011
4012        p->sched_reset_on_fork = reset_on_fork;
4013        oldprio = p->prio;
4014
4015        if (pi) {
4016                /*
4017                 * Take priority boosted tasks into account. If the new
4018                 * effective priority is unchanged, we just store the new
4019                 * normal parameters and do not touch the scheduler class and
4020                 * the runqueue. This will be done when the task deboosts
4021                 * itself.
4022                 */
4023                new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
4024                if (new_effective_prio == oldprio)
4025                        queue_flags &= ~DEQUEUE_MOVE;
4026        }
4027
4028        queued = task_on_rq_queued(p);
4029        running = task_current(rq, p);
4030        if (queued)
4031                dequeue_task(rq, p, queue_flags);
4032        if (running)
4033                put_prev_task(rq, p);
4034
4035        prev_class = p->sched_class;
4036        __setscheduler(rq, p, attr, pi);
4037
4038        if (running)
4039                p->sched_class->set_curr_task(rq);
4040        if (queued) {
4041                /*
4042                 * We enqueue to tail when the priority of a task is
4043                 * increased (user space view).
4044                 */
4045                if (oldprio < p->prio)
4046                        queue_flags |= ENQUEUE_HEAD;
4047
4048                enqueue_task(rq, p, queue_flags);
4049        }
4050
4051        check_class_changed(rq, p, prev_class, oldprio);
4052        preempt_disable(); /* avoid rq from going away on us */
4053        task_rq_unlock(rq, p, &flags);
4054
4055        if (pi)
4056                rt_mutex_adjust_pi(p);
4057
4058        /*
4059         * Run balance callbacks after we've adjusted the PI chain.
4060         */
4061        balance_callback(rq);
4062        preempt_enable();
4063
4064        return 0;
4065}
4066
4067static int _sched_setscheduler(struct task_struct *p, int policy,
4068                               const struct sched_param *param, bool check)
4069{
4070        struct sched_attr attr = {
4071                .sched_policy   = policy,
4072                .sched_priority = param->sched_priority,
4073                .sched_nice     = PRIO_TO_NICE(p->static_prio),
4074        };
4075
4076        /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
4077        if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
4078                attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4079                policy &= ~SCHED_RESET_ON_FORK;
4080                attr.sched_policy = policy;
4081        }
4082
4083        return __sched_setscheduler(p, &attr, check, true);
4084}
4085/**
4086 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4087 * @p: the task in question.
4088 * @policy: new policy.
4089 * @param: structure containing the new RT priority.
4090 *
4091 * Return: 0 on success. An error code otherwise.
4092 *
4093 * NOTE that the task may be already dead.
4094 */
4095int sched_setscheduler(struct task_struct *p, int policy,
4096                       const struct sched_param *param)
4097{
4098        return _sched_setscheduler(p, policy, param, true);
4099}
4100EXPORT_SYMBOL_GPL(sched_setscheduler);
4101
4102int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
4103{
4104        return __sched_setscheduler(p, attr, true, true);
4105}
4106EXPORT_SYMBOL_GPL(sched_setattr);
4107
4108/**
4109 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4110 * @p: the task in question.
4111 * @policy: new policy.
4112 * @param: structure containing the new RT priority.
4113 *
4114 * Just like sched_setscheduler, only don't bother checking if the
4115 * current context has permission.  For example, this is needed in
4116 * stop_machine(): we create temporary high priority worker threads,
4117 * but our caller might not have that capability.
4118 *
4119 * Return: 0 on success. An error code otherwise.
4120 */
4121int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4122                               const struct sched_param *param)
4123{
4124        return _sched_setscheduler(p, policy, param, false);
4125}
4126EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
4127
4128static int
4129do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4130{
4131        struct sched_param lparam;
4132        struct task_struct *p;
4133        int retval;
4134
4135        if (!param || pid < 0)
4136                return -EINVAL;
4137        if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4138                return -EFAULT;
4139
4140        rcu_read_lock();
4141        retval = -ESRCH;
4142        p = find_process_by_pid(pid);
4143        if (p != NULL)
4144                retval = sched_setscheduler(p, policy, &lparam);
4145        rcu_read_unlock();
4146
4147        return retval;
4148}
4149
4150/*
4151 * Mimics kernel/events/core.c perf_copy_attr().
4152 */
4153static int sched_copy_attr(struct sched_attr __user *uattr,
4154                           struct sched_attr *attr)
4155{
4156        u32 size;
4157        int ret;
4158
4159        if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
4160                return -EFAULT;
4161
4162        /*
4163         * Zero the full structure, so that a short copy leaves the tail zeroed.
4164         */
4165        memset(attr, 0, sizeof(*attr));
4166
4167        ret = get_user(size, &uattr->size);
4168        if (ret)
4169                return ret;
4170
4171        if (size > PAGE_SIZE)   /* silly large */
4172                goto err_size;
4173
4174        if (!size)              /* abi compat */
4175                size = SCHED_ATTR_SIZE_VER0;
4176
4177        if (size < SCHED_ATTR_SIZE_VER0)
4178                goto err_size;
4179
4180        /*
4181         * If we're handed a bigger struct than we know of,
4182         * ensure all the unknown bits are 0 - i.e. new
4183         * user-space does not rely on any kernel feature
4184         * extensions we don't know about yet.
4185         */
4186        if (size > sizeof(*attr)) {
4187                unsigned char __user *addr;
4188                unsigned char __user *end;
4189                unsigned char val;
4190
4191                addr = (void __user *)uattr + sizeof(*attr);
4192                end  = (void __user *)uattr + size;
4193
4194                for (; addr < end; addr++) {
4195                        ret = get_user(val, addr);
4196                        if (ret)
4197                                return ret;
4198                        if (val)
4199                                goto err_size;
4200                }
4201                size = sizeof(*attr);
4202        }
4203
4204        ret = copy_from_user(attr, uattr, size);
4205        if (ret)
4206                return -EFAULT;
4207
4208        /*
4209         * XXX: do we want to be lenient like existing syscalls; or do we want
4210         * to be strict and return an error on out-of-bounds values?
4211         */
4212        attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
4213
4214        return 0;
4215
4216err_size:
4217        put_user(sizeof(*attr), &uattr->size);
4218        return -E2BIG;
4219}
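/*
 * Sketch of the size handshake above as seen from userspace
 * (illustrative, not part of the original source; the extended struct
 * and field are hypothetical): a newer userspace may pass a bigger
 * struct sched_attr, provided every byte the kernel does not know
 * about is zero. Otherwise it gets -E2BIG, with the kernel's
 * sizeof(struct sched_attr) written back into uattr->size.
 */
#if 0	/* userspace example, never compiled here */
struct sched_attr_v2 {
	struct sched_attr v1;
	__u64 sched_future;	/* hypothetical new field */
};

int example_setattr_compat(pid_t pid, struct sched_attr_v2 *a)
{
	a->v1.size = sizeof(*a);	/* bigger than this kernel knows */
	a->sched_future = 0;		/* unknown tail must be zero */

	/*
	 * Accepted because the extra bytes are zero; with a non-zero
	 * sched_future this would fail with E2BIG and a->v1.size would
	 * hold the kernel's struct size on return.
	 */
	return syscall(__NR_sched_setattr, pid, a, 0);
}
#endif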
4220
4221/**
4222 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4223 * @pid: the pid in question.
4224 * @policy: new policy.
4225 * @param: structure containing the new RT priority.
4226 *
4227 * Return: 0 on success. An error code otherwise.
4228 */
4229SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4230                struct sched_param __user *, param)
4231{
4232        /* negative values for policy are not valid */
4233        if (policy < 0)
4234                return -EINVAL;
4235
4236        return do_sched_setscheduler(pid, policy, param);
4237}
4238
4239/**
4240 * sys_sched_setparam - set/change the RT priority of a thread
4241 * @pid: the pid in question.
4242 * @param: structure containing the new RT priority.
4243 *
4244 * Return: 0 on success. An error code otherwise.
4245 */
4246SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
4247{
4248        return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
4249}
4250
4251/**
4252 * sys_sched_setattr - same as above, but with extended sched_attr
4253 * @pid: the pid in question.
4254 * @uattr: structure containing the extended parameters.
4255 * @flags: for future extension.
4256 */
4257SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
4258                               unsigned int, flags)
4259{
4260        struct sched_attr attr;
4261        struct task_struct *p;
4262        int retval;
4263
4264        if (!uattr || pid < 0 || flags)
4265                return -EINVAL;
4266
4267        retval = sched_copy_attr(uattr, &attr);
4268        if (retval)
4269                return retval;
4270
4271        if ((int)attr.sched_policy < 0)
4272                return -EINVAL;
4273
4274        rcu_read_lock();
4275        retval = -ESRCH;
4276        p = find_process_by_pid(pid);
4277        if (p != NULL)
4278                retval = sched_setattr(p, &attr);
4279        rcu_read_unlock();
4280
4281        return retval;
4282}
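/*
 * Userspace sketch, illustrative and not part of the original source
 * (see also Documentation/scheduler/sched-deadline.txt): making the
 * calling thread SCHED_DEADLINE with 10ms of runtime every 100ms.
 * There is no glibc wrapper, so syscall(2) is used directly, @flags
 * must be 0, and struct sched_attr may need to be declared by hand on
 * older userspace headers. This typically requires CAP_SYS_NICE.
 */
#if 0	/* example only, never compiled here */
#include <unistd.h>
#include <sys/syscall.h>

int example_become_deadline(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	=  10 * 1000 * 1000,	/*  10ms, in ns */
		.sched_deadline	= 100 * 1000 * 1000,	/* 100ms */
		.sched_period	= 100 * 1000 * 1000,	/* 100ms */
	};

	return syscall(__NR_sched_setattr, 0, &attr, 0);	/* 0 == self */
}
#endif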
4283
4284/**
4285 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4286 * @pid: the pid in question.
4287 *
4288 * Return: On success, the policy of the thread. Otherwise, a negative error
4289 * code.
4290 */
4291SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
4292{
4293        struct task_struct *p;
4294        int retval;
4295
4296        if (pid < 0)
4297                return -EINVAL;
4298
4299        retval = -ESRCH;
4300        rcu_read_lock();
4301        p = find_process_by_pid(pid);
4302        if (p) {
4303                retval = security_task_getscheduler(p);
4304                if (!retval)
4305                        retval = p->policy
4306                                | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
4307        }
4308        rcu_read_unlock();
4309        return retval;
4310}
4311
4312/**
4313 * sys_sched_getparam - get the RT priority of a thread
4314 * @pid: the pid in question.
4315 * @param: structure containing the RT priority.
4316 *
4317 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
4318 * code.
4319 */
4320SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
4321{
4322        struct sched_param lp = { .sched_priority = 0 };
4323        struct task_struct *p;
4324        int retval;
4325
4326        if (!param || pid < 0)
4327                return -EINVAL;
4328
4329        rcu_read_lock();
4330        p = find_process_by_pid(pid);
4331        retval = -ESRCH;
4332        if (!p)
4333                goto out_unlock;
4334
4335        retval = security_task_getscheduler(p);
4336        if (retval)
4337                goto out_unlock;
4338
4339        if (task_has_rt_policy(p))
4340                lp.sched_priority = p->rt_priority;
4341        rcu_read_unlock();
4342
4343        /*
4344         * This one might sleep; we cannot do it with a spinlock held ...
4345         */
4346        retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4347
4348        return retval;
4349
4350out_unlock:
4351        rcu_read_unlock();
4352        return retval;
4353}
4354
4355static int sched_read_attr(struct sched_attr __user *uattr,
4356                           struct sched_attr *attr,
4357                           unsigned int usize)
4358{
4359        int ret;
4360
4361        if (!access_ok(VERIFY_WRITE, uattr, usize))
4362                return -EFAULT;
4363
4364        /*
4365         * If we're handed a smaller struct than we know of,
4366         * ensure all the unknown bits are 0 - i.e. old
4367         * user-space does not get incomplete information.
4368         */
4369        if (usize < sizeof(*attr)) {
4370                unsigned char *addr;
4371                unsigned char *end;
4372
4373                addr = (void *)attr + usize;
4374                end  = (void *)attr + sizeof(*attr);
4375
4376                for (; addr < end; addr++) {
4377                        if (*addr)
4378                                return -EFBIG;
4379                }
4380
4381                attr->size = usize;
4382        }
4383
4384        ret = copy_to_user(uattr, attr, attr->size);
4385        if (ret)
4386                return -EFAULT;
4387
4388        return 0;
4389}
4390
4391/**
4392 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
4393 * @pid: the pid in question.
4394 * @uattr: structure containing the extended parameters.
4395 * @size: sizeof(attr) for fwd/bwd comp.
4396 * @flags: for future extension.
4397 */
4398SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
4399                unsigned int, size, unsigned int, flags)
4400{
4401        struct sched_attr attr = {
4402                .size = sizeof(struct sched_attr),
4403        };
4404        struct task_struct *p;
4405        int retval;
4406
4407        if (!uattr || pid < 0 || size > PAGE_SIZE ||
4408            size < SCHED_ATTR_SIZE_VER0 || flags)
4409                return -EINVAL;
4410
4411        rcu_read_lock();
4412        p = find_process_by_pid(pid);
4413        retval = -ESRCH;
4414        if (!p)
4415                goto out_unlock;
4416
4417        retval = security_task_getscheduler(p);
4418        if (retval)
4419                goto out_unlock;
4420
4421        attr.sched_policy = p->policy;
4422        if (p->sched_reset_on_fork)
4423                attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4424        if (task_has_dl_policy(p))
4425                __getparam_dl(p, &attr);
4426        else if (task_has_rt_policy(p))
4427                attr.sched_priority = p->rt_priority;
4428        else
4429                attr.sched_nice = task_nice(p);
4430
4431        rcu_read_unlock();
4432
4433        retval = sched_read_attr(uattr, &attr, size);
4434        return retval;
4435
4436out_unlock:
4437        rcu_read_unlock();
4438        return retval;
4439}
4440
4441long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4442{
4443        cpumask_var_t cpus_allowed, new_mask;
4444        struct task_struct *p;
4445        int retval;
4446
4447        rcu_read_lock();
4448
4449        p = find_process_by_pid(pid);
4450        if (!p) {
4451                rcu_read_unlock();
4452                return -ESRCH;
4453        }
4454
4455        /* Prevent p going away */
4456        get_task_struct(p);
4457        rcu_read_unlock();
4458
4459        if (p->flags & PF_NO_SETAFFINITY) {
4460                retval = -EINVAL;
4461                goto out_put_task;
4462        }
4463        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4464                retval = -ENOMEM;
4465                goto out_put_task;
4466        }
4467        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4468                retval = -ENOMEM;
4469                goto out_free_cpus_allowed;
4470        }
4471        retval = -EPERM;
4472        if (!check_same_owner(p)) {
4473                rcu_read_lock();
4474                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4475                        rcu_read_unlock();
4476                        goto out_free_new_mask;
4477                }
4478                rcu_read_unlock();
4479        }
4480
4481        retval = security_task_setscheduler(p);
4482        if (retval)
4483                goto out_free_new_mask;
4484
4485
4486        cpuset_cpus_allowed(p, cpus_allowed);
4487        cpumask_and(new_mask, in_mask, cpus_allowed);
4488
4489        /*
4490         * Since bandwidth control happens on a root_domain basis,
4491         * if the admission test is enabled we only admit -deadline
4492         * tasks that are allowed to run on all the CPUs in the task's
4493         * root_domain.
4494         */
4495#ifdef CONFIG_SMP
4496        if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
4497                rcu_read_lock();
4498                if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
4499                        retval = -EBUSY;
4500                        rcu_read_unlock();
4501                        goto out_free_new_mask;
4502                }
4503                rcu_read_unlock();
4504        }
4505#endif
4506again:
4507        retval = __set_cpus_allowed_ptr(p, new_mask, true);
4508
4509        if (!retval) {
4510                cpuset_cpus_allowed(p, cpus_allowed);
4511                if (!cpumask_subset(new_mask, cpus_allowed)) {
4512                        /*
4513                         * We must have raced with a concurrent cpuset
4514                         * update. Just reset the cpus_allowed to the
4515                         * cpuset's cpus_allowed.
4516                         */
4517                        cpumask_copy(new_mask, cpus_allowed);
4518                        goto again;
4519                }
4520        }
4521out_free_new_mask:
4522        free_cpumask_var(new_mask);
4523out_free_cpus_allowed:
4524        free_cpumask_var(cpus_allowed);
4525out_put_task:
4526        put_task_struct(p);
4527        return retval;
4528}
4529
4530static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4531                             struct cpumask *new_mask)
4532{
4533        if (len < cpumask_size())
4534                cpumask_clear(new_mask);
4535        else if (len > cpumask_size())
4536                len = cpumask_size();
4537
4538        return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4539}
4540
4541/**
4542 * sys_sched_setaffinity - set the cpu affinity of a process
4543 * @pid: pid of the process
4544 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4545 * @user_mask_ptr: user-space pointer to the new cpu mask
4546 *
4547 * Return: 0 on success. An error code otherwise.
4548 */
4549SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4550                unsigned long __user *, user_mask_ptr)
4551{
4552        cpumask_var_t new_mask;
4553        int retval;
4554
4555        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4556                return -ENOMEM;
4557
4558        retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4559        if (retval == 0)
4560                retval = sched_setaffinity(pid, new_mask);
4561        free_cpumask_var(new_mask);
4562        return retval;
4563}
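/*
 * Userspace sketch, illustrative and not part of the original source:
 * pinning the calling thread to CPU 0 through the glibc wrapper, which
 * marshals a cpu_set_t into the bitmask this syscall consumes:
 */
#if 0	/* example only, never compiled here */
#define _GNU_SOURCE
#include <sched.h>

int example_pin_to_cpu0(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	return sched_setaffinity(0, sizeof(set), &set);	/* 0 == self */
}
#endif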
4564
4565long sched_getaffinity(pid_t pid, struct cpumask *mask)
4566{
4567        struct task_struct *p;
4568        unsigned long flags;
4569        int retval;
4570
4571        rcu_read_lock();
4572
4573        retval = -ESRCH;
4574        p = find_process_by_pid(pid);
4575        if (!p)
4576                goto out_unlock;
4577
4578        retval = security_task_getscheduler(p);
4579        if (retval)
4580                goto out_unlock;
4581
4582        raw_spin_lock_irqsave(&p->pi_lock, flags);
4583        cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
4584        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4585
4586out_unlock:
4587        rcu_read_unlock();
4588
4589        return retval;
4590}
4591
4592/**
4593 * sys_sched_getaffinity - get the cpu affinity of a process
4594 * @pid: pid of the process
4595 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4596 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4597 *
4598 * Return: 0 on success. An error code otherwise.
4599 */
4600SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4601                unsigned long __user *, user_mask_ptr)
4602{
4603        int ret;
4604        cpumask_var_t mask;
4605
4606        if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4607                return -EINVAL;
4608        if (len & (sizeof(unsigned long)-1))
4609                return -EINVAL;
4610
4611        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4612                return -ENOMEM;
4613
4614        ret = sched_getaffinity(pid, mask);
4615        if (ret == 0) {
4616                size_t retlen = min_t(size_t, len, cpumask_size());
4617
4618                if (copy_to_user(user_mask_ptr, mask, retlen))
4619                        ret = -EFAULT;
4620                else
4621                        ret = retlen;
4622        }
4623        free_cpumask_var(mask);
4624
4625        return ret;
4626}
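/*
 * Note for userspace callers of the syscall above: unlike the glibc
 * sched_getaffinity() wrapper, which returns 0 on success, the raw
 * syscall returns the number of bytes copied into the user buffer
 * (retlen above), so callers can tell how much of the buffer was
 * filled.
 */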
4627
4628/**
4629 * sys_sched_yield - yield the current processor to other threads.
4630 *
4631 * This function yields the current CPU to other tasks. If there are no
4632 * other threads running on this CPU then this function will return.
4633 *
4634 * Return: 0.
4635 */
4636SYSCALL_DEFINE0(sched_yield)
4637{
4638        struct rq *rq = this_rq_lock();
4639
4640        schedstat_inc(rq, yld_count);
4641        current->sched_class->yield_task(rq);
4642
4643        /*
4644         * Since we are going to call schedule() anyway, there's
4645         * no need to preempt or enable interrupts:
4646         */
4647        __release(rq->lock);
4648        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4649        do_raw_spin_unlock(&rq->lock);
4650        sched_preempt_enable_no_resched();
4651
4652        schedule();
4653
4654        return 0;
4655}
4656
4657int __sched _cond_resched(void)
4658{
4659        if (should_resched(0)) {
4660                preempt_schedule_common();
4661                return 1;
4662        }
4663        return 0;
4664}
4665EXPORT_SYMBOL(_cond_resched);
4666
4667/*
4668 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
4669 * call schedule, and on return reacquire the lock.
4670 *
4671 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
4672 * operations here to prevent schedule() from being called twice (once via
4673 * spin_unlock(), once by hand).
4674 */
4675int __cond_resched_lock(spinlock_t *lock)
4676{
4677        int resched = should_resched(PREEMPT_LOCK_OFFSET);
4678        int ret = 0;
4679
4680        lockdep_assert_held(lock);
4681
4682        if (spin_needbreak(lock) || resched) {
4683                spin_unlock(lock);
4684                if (resched)
4685                        preempt_schedule_common();
4686                else
4687                        cpu_relax();
4688                ret = 1;
4689                spin_lock(lock);
4690        }
4691        return ret;
4692}
4693EXPORT_SYMBOL(__cond_resched_lock);
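/*
 * Usage sketch, illustrative and not part of the original source (the
 * lock, list and helpers are hypothetical): a long scan under a
 * spinlock can stay latency friendly by offering to drop the lock at
 * each step. When cond_resched_lock() returns 1 the lock was dropped
 * and retaken, so anything it protects must be revalidated; the sketch
 * simply bails out.
 */
#if 0	/* example only, never compiled */
static void example_long_scan(void)
{
	struct example_item *pos;

	spin_lock(&example_lock);
	list_for_each_entry(pos, &example_list, node) {
		example_process(pos);
		if (cond_resched_lock(&example_lock))
			break;	/* lock was dropped: list may have changed */
	}
	spin_unlock(&example_lock);
}
#endif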
4694
4695int __sched __cond_resched_softirq(void)
4696{
4697        BUG_ON(!in_softirq());
4698
4699        if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
4700                local_bh_enable();
4701                preempt_schedule_common();
4702                local_bh_disable();
4703                return 1;
4704        }
4705        return 0;
4706}
4707EXPORT_SYMBOL(__cond_resched_softirq);
4708
4709/**
4710 * yield - yield the current processor to other threads.
4711 *
4712 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4713 *
4714 * The scheduler is at all times free to pick the calling task as the most
4715 * eligible task to run; if removing the yield() call from your code breaks
4716 * it, it's already broken.
4717 *
4718 * Typical broken usage is:
4719 *
4720 * while (!event)
4721 *      yield();
4722 *
4723 * where one assumes that yield() will let 'the other' process run that will
4724 * make event true. If the current task is a SCHED_FIFO task that will never
4725 * happen. Never use yield() as a progress guarantee!!
4726 *
4727 * If you want to use yield() to wait for something, use wait_event().
4728 * If you want to use yield() to be 'nice' for others, use cond_resched().
4729 * If you still want to use yield(), do not!
4730 */
4731void __sched yield(void)
4732{
4733        set_current_state(TASK_RUNNING);
4734        sys_sched_yield();
4735}
4736EXPORT_SYMBOL(yield);
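/*
 * Sketch of the recommended alternatives above (illustrative, not part
 * of the original source; the wait queue, condition and loop body are
 * hypothetical):
 */
#if 0	/* example only, never compiled */
	/* Instead of: while (!event) yield(); */
	wait_event(example_wq, event);	/* sleep until event becomes true */

	/* Or, if polling really is required: */
	while (!event)
		cond_resched();		/* be nice while staying runnable */
#endif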
4737
4738/**
4739 * yield_to - yield the current processor to another thread in
4740 * your thread group, or accelerate that thread toward the
4741 * processor it's on.
4742 * @p: target task
4743 * @preempt: whether task preemption is allowed or not
4744 *
4745 * It's the caller's job to ensure that the target task struct
4746 * can't go away on us before we can do any checks.
4747 *
4748 * Return:
4749 *      true (>0) if we indeed boosted the target task.
4750 *      false (0) if we failed to boost the target.
4751 *      -ESRCH if there's no task to yield to.
4752 */
4753int __sched yield_to(struct task_struct *p, bool preempt)
4754{
4755        struct task_struct *curr = current;
4756        struct rq *rq, *p_rq;
4757        unsigned long flags;
4758        int yielded = 0;
4759
4760        local_irq_save(flags);
4761        rq = this_rq();
4762
4763again:
4764        p_rq = task_rq(p);
4765        /*
4766         * If we're the only runnable task on the rq and target rq also
4767         * has only one task, there's absolutely no point in yielding.
4768         */
4769        if (rq->nr_running == 1 && p_rq->nr_running == 1) {
4770                yielded = -ESRCH;
4771                goto out_irq;
4772        }
4773
4774        double_rq_lock(rq, p_rq);
4775        if (task_rq(p) != p_rq) {
4776                double_rq_unlock(rq, p_rq);
4777                goto again;
4778        }
4779
4780        if (!curr->sched_class->yield_to_task)
4781                goto out_unlock;
4782
4783        if (curr->sched_class != p->sched_class)
4784                goto out_unlock;
4785
4786        if (task_running(p_rq, p) || p->state)
4787                goto out_unlock;
4788
4789        yielded = curr->sched_class->yield_to_task(rq, p, preempt);
4790        if (yielded) {
4791                schedstat_inc(rq, yld_count);
4792                /*
4793                 * Make p's CPU reschedule; pick_next_entity takes care of
4794                 * fairness.
4795                 */
4796                if (preempt && rq != p_rq)
4797                        resched_curr(p_rq);
4798        }
4799
4800out_unlock:
4801        double_rq_unlock(rq, p_rq);
4802out_irq:
4803        local_irq_restore(flags);
4804
4805        if (yielded > 0)
4806                schedule();
4807
4808        return yielded;
4809}
4810EXPORT_SYMBOL_GPL(yield_to);
4811
4812/*
4813 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
4814 * that process accounting knows that this is a task in IO wait state.
4815 */
4816long __sched io_schedule_timeout(long timeout)
4817{
4818        int old_iowait = current->in_iowait;
4819        struct rq *rq;
4820        long ret;
4821
4822        current->in_iowait = 1;
4823        blk_schedule_flush_plug(current);
4824
4825        delayacct_blkio_start();
4826        rq = raw_rq();
4827        atomic_inc(&rq->nr_iowait);
4828        ret = schedule_timeout(timeout);
4829        current->in_iowait = old_iowait;
4830        atomic_dec(&rq->nr_iowait);
4831        delayacct_blkio_end();
4832
4833        return ret;
4834}
4835EXPORT_SYMBOL(io_schedule_timeout);
4836
4837/**
4838 * sys_sched_get_priority_max - return maximum RT priority.
4839 * @policy: scheduling class.
4840 *
4841 * Return: On success, this syscall returns the maximum
4842 * rt_priority that can be used by a given scheduling class.
4843 * On failure, a negative error code is returned.
4844 */
4845SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
4846{
4847        int ret = -EINVAL;
4848
4849        switch (policy) {
4850        case SCHED_FIFO:
4851        case SCHED_RR:
4852                ret = MAX_USER_RT_PRIO-1;
4853                break;
4854        case SCHED_DEADLINE:
4855        case SCHED_NORMAL:
4856        case SCHED_BATCH:
4857        case SCHED_IDLE:
4858                ret = 0;
4859                break;
4860        }
4861        return ret;
4862}
4863
4864/**
4865 * sys_sched_get_priority_min - return minimum RT priority.
4866 * @policy: scheduling class.
4867 *
4868 * Return: On success, this syscall returns the minimum
4869 * rt_priority that can be used by a given scheduling class.
4870 * On failure, a negative error code is returned.
4871 */
4872SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4873{
4874        int ret = -EINVAL;
4875
4876        switch (policy) {
4877        case SCHED_FIFO:
4878        case SCHED_RR:
4879                ret = 1;
4880                break;
4881        case SCHED_DEADLINE:
4882        case SCHED_NORMAL:
4883        case SCHED_BATCH:
4884        case SCHED_IDLE:
4885                ret = 0;
4886        }
4887        return ret;
4888}
4889
4890/**
4891 * sys_sched_rr_get_interval - return the default timeslice of a process.
4892 * @pid: pid of the process.
4893 * @interval: userspace pointer to the timeslice value.
4894 *
4895 * this syscall writes the default timeslice value of a given process
4896 * into the user-space timespec buffer. A value of '0' means infinity.
4897 *
4898 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
4899 * an error code.
4900 */
4901SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4902                struct timespec __user *, interval)
4903{
4904        struct task_struct *p;
4905        unsigned int time_slice;
4906        unsigned long flags;
4907        struct rq *rq;
4908        int retval;
4909        struct timespec t;
4910
4911        if (pid < 0)
4912                return -EINVAL;
4913
4914        retval = -ESRCH;
4915        rcu_read_lock();
4916        p = find_process_by_pid(pid);
4917        if (!p)
4918                goto out_unlock;
4919
4920        retval = security_task_getscheduler(p);
4921        if (retval)
4922                goto out_unlock;
4923
4924        rq = task_rq_lock(p, &flags);
4925        time_slice = 0;
4926        if (p->sched_class->get_rr_interval)
4927                time_slice = p->sched_class->get_rr_interval(rq, p);
4928        task_rq_unlock(rq, p, &flags);
4929
4930        rcu_read_unlock();
4931        jiffies_to_timespec(time_slice, &t);
4932        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
4933        return retval;
4934
4935out_unlock:
4936        rcu_read_unlock();
4937        return retval;
4938}
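/*
 * Userspace sketch, illustrative and not part of the original source:
 * querying the calling thread's round-robin timeslice through the
 * glibc wrapper. A value of 0 means none/infinity, as noted above.
 */
#if 0	/* example only, never compiled here */
#include <sched.h>
#include <time.h>

long example_rr_interval_ns(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts))	/* 0 == calling thread */
		return -1;
	return ts.tv_sec * 1000000000L + ts.tv_nsec;
}
#endif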
4939
4940static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
4941
4942void sched_show_task(struct task_struct *p)
4943{
4944        unsigned long free = 0;
4945        int ppid;
4946        unsigned long state = p->state;
4947
4948        if (state)
4949                state = __ffs(state) + 1;
4950        printk(KERN_INFO "%-15.15s %c", p->comm,
4951                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4952#if BITS_PER_LONG == 32
4953        if (state == TASK_RUNNING)
4954                printk(KERN_CONT " running  ");
4955        else
4956                printk(KERN_CONT " %08lx ", thread_saved_pc(p));
4957#else
4958        if (state == TASK_RUNNING)
4959                printk(KERN_CONT "  running task    ");
4960        else
4961                printk(KERN_CONT " %016lx ", thread_saved_pc(p));
4962#endif
4963#ifdef CONFIG_DEBUG_STACK_USAGE
4964        free = stack_not_used(p);
4965#endif
4966        ppid = 0;
4967        rcu_read_lock();
4968        if (pid_alive(p))
4969                ppid = task_pid_nr(rcu_dereference(p->real_parent));
4970        rcu_read_unlock();
4971        printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4972                task_pid_nr(p), ppid,
4973                (unsigned long)task_thread_info(p)->flags);
4974
4975        print_worker_info(KERN_INFO, p);
4976        show_stack(p, NULL);
4977}
4978
4979void show_state_filter(unsigned long state_filter)
4980{
4981        struct task_struct *g, *p;
4982
4983#if BITS_PER_LONG == 32
4984        printk(KERN_INFO
4985                "  task                PC stack   pid father\n");
4986#else
4987        printk(KERN_INFO
4988                "  task                        PC stack   pid father\n");
4989#endif
4990        rcu_read_lock();
4991        for_each_process_thread(g, p) {
4992                /*
4993                 * Reset the NMI watchdog timeout; listing all tasks on a
4994                 * slow console might take a lot of time:
4995                 */
4996                touch_nmi_watchdog();
4997                if (!state_filter || (p->state & state_filter))
4998                        sched_show_task(p);
4999        }
5000
5001        touch_all_softlockup_watchdogs();
5002
5003#ifdef CONFIG_SCHED_DEBUG
5004        sysrq_sched_debug_show();
5005#endif
5006        rcu_read_unlock();
5007        /*
5008         * Only show locks if all tasks are dumped:
5009         */
5010        if (!state_filter)
5011                debug_show_all_locks();
5012}
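
/*
 * Note: the usual way into show_state_filter() is the magic SysRq
 * interface: 't' dumps every task (state_filter == 0, which also
 * triggers debug_show_all_locks() above) while 'w' dumps only
 * uninterruptible tasks (state_filter == TASK_UNINTERRUPTIBLE).
 * Assuming sysrq is enabled:
 *
 *	echo t > /proc/sysrq-trigger
 */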
5013
5014void init_idle_bootup_task(struct task_struct *idle)
5015{
5016        idle->sched_class = &idle_sched_class;
5017}
5018
5019/**
5020 * init_idle - set up an idle thread for a given CPU
5021 * @idle: task in question
5022 * @cpu: cpu the idle task belongs to
5023 *
5024 * NOTE: this function does not set the idle thread's NEED_RESCHED
5025 * flag, to make booting more robust.
5026 */
5027void init_idle(struct task_struct *idle, int cpu)
5028{
5029        struct rq *rq = cpu_rq(cpu);
5030        unsigned long flags;
5031
5032        raw_spin_lock_irqsave(&idle->pi_lock, flags);
5033        raw_spin_lock(&rq->lock);
5034
5035        __sched_fork(0, idle);
5036        idle->state = TASK_RUNNING;
5037        idle->se.exec_start = sched_clock();
5038
5039        kasan_unpoison_task_stack(idle);
5040
5041#ifdef CONFIG_SMP
5042        /*
5043         * It's possible that init_idle() gets called multiple times on a task;
5044         * in that case do_set_cpus_allowed() will not do the right thing.
5045         *
5046         * Since this is early boot we can forgo the serialization.
5047         */
5048        set_cpus_allowed_common(idle, cpumask_of(cpu));
5049#endif
5050        /*
5051         * We have a chicken-and-egg problem here: even though we are
5052         * holding rq->lock, the task's cpu isn't set to this cpu yet, so
5053         * the lockdep check in task_group() will fail.
5054         *
5055         * This is similar to the sched_fork() case; alternatively we could
5056         * use task_rq_lock() here and obtain the other rq->lock.
5057         *
5058         * The rcu_read_lock() below silences PROVE_RCU.
5059         */
5060        rcu_read_lock();
5061        __set_task_cpu(idle, cpu);
5062        rcu_read_unlock();
5063
5064        rq->curr = rq->idle = idle;
5065        idle->on_rq = TASK_ON_RQ_QUEUED;
5066#ifdef CONFIG_SMP
5067        idle->on_cpu = 1;
5068#endif
5069        raw_spin_unlock(&rq->lock);
5070        raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
5071
5072        /* Set the preempt count _outside_ the spinlocks! */
5073        init_idle_preempt_count(idle, cpu);
5074
5075        /*
5076         * The idle tasks have their own, simple scheduling class:
5077         */
5078        idle->sched_class = &idle_sched_class;
5079        ftrace_graph_init_idle_task(idle, cpu);
5080        vtime_init_idle(idle, cpu);
5081#ifdef CONFIG_SMP
5082        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
5083#endif
5084}
5085
5086int cpuset_cpumask_can_shrink(const struct cpumask *cur,
5087                              const struct cpumask *trial)
5088{
5089        int ret = 1, trial_cpus;
5090        struct dl_bw *cur_dl_b;
5091        unsigned long flags;
5092
5093        if (!cpumask_weight(cur))
5094                return ret;
5095
5096        rcu_read_lock_sched();
5097        cur_dl_b = dl_bw_of(cpumask_any(cur));
5098        trial_cpus = cpumask_weight(trial);
5099
5100        raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
5101        if (cur_dl_b->bw != -1 &&
5102            cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
5103                ret = 0;
5104        raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
5105        rcu_read_unlock_sched();
5106
5107        return ret;
5108}
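
/*
 * Worked example for the check above, using plain fractions instead of
 * the kernel's fixed-point bandwidth representation (numbers are
 * illustrative only): with a per-cpu deadline bandwidth limit of
 * bw = 0.95 and total_bw = 2.8 worth of admitted deadline tasks,
 * shrinking to trial_cpus = 2 gives 0.95 * 2 = 1.9 < 2.8 and is
 * rejected (ret = 0), while trial_cpus = 3 gives 2.85 >= 2.8 and is
 * allowed. bw == -1 means "no limit", so any shrink is accepted.
 */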
5109
5110int task_can_attach(struct task_struct *p,
5111                    const struct cpumask *cs_cpus_allowed)
5112{
5113        int ret = 0;
5114
5115        /*
5116         * Kthreads which disallow setaffinity shouldn't be moved
5117         * to a new cpuset; we don't want to change their cpu
5118         * affinity and isolating such threads by their set of
5119         * allowed nodes is unnecessary.  Thus, cpusets are not
5120         * applicable for such threads.  This also saves us from having
5121         * to check set_cpus_allowed_ptr() for success on all attached
5122         * tasks before cpus_allowed may be changed.
5123         */
5124        if (p->flags & PF_NO_SETAFFINITY) {
5125                ret = -EINVAL;
5126                goto out;
5127        }
5128
5129#ifdef CONFIG_SMP
5130        if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
5131                                              cs_cpus_allowed)) {
5132                unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
5133                                                        cs_cpus_allowed);
5134                struct dl_bw *dl_b;
5135                bool overflow;
5136                int cpus;
5137                unsigned long flags;
5138
5139                rcu_read_lock_sched();
5140                dl_b = dl_bw_of(dest_cpu);
5141                raw_spin_lock_irqsave(&dl_b->lock, flags);
5142                cpus = dl_bw_cpus(dest_cpu);
5143                overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
5144                if (overflow)
5145                        ret = -EBUSY;
5146                else {
5147                        /*
5148                         * We reserve space for this task in the destination
5149                         * root_domain, as we can't fail after this point.
5150                         * We will free resources in the source root_domain
5151                         * later on (see set_cpus_allowed_dl()).
5152                         */
5153                        __dl_add(dl_b, p->dl.dl_bw);
5154                }
5155                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
5156                rcu_read_unlock_sched();
5157
5158        }
5159#endif
5160out:
5161        return ret;
5162}
5163
5164#ifdef CONFIG_SMP
5165
5166#ifdef CONFIG_NUMA_BALANCING
5167/* Migrate current task p to target_cpu */
5168int migrate_task_to(struct task_struct *p, int target_cpu)
5169{
5170        struct migration_arg arg = { p, target_cpu };
5171        int curr_cpu = task_cpu(p);
5172
5173        if (curr_cpu == target_cpu)
5174                return 0;
5175
5176        if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
5177                return -EINVAL;
5178
5179        /* TODO: This is not properly updating schedstats */
5180
5181        trace_sched_move_numa(p, curr_cpu, target_cpu);
5182        return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
5183}
5184
5185/*
5186 * Requeue a task on a given node and accurately track the number of NUMA
5187 * tasks on the runqueues
5188 */
5189void sched_setnuma(struct task_struct *p, int nid)
5190{
5191        struct rq *rq;
5192        unsigned long flags;
5193        bool queued, running;
5194
5195        rq = task_rq_lock(p, &flags);
5196        queued = task_on_rq_queued(p);
5197        running = task_current(rq, p);
5198
5199        if (queued)
5200                dequeue_task(rq, p, DEQUEUE_SAVE);
5201        if (running)
5202                put_prev_task(rq, p);
5203
5204        p->numa_preferred_nid = nid;
5205
5206        if (running)
5207                p->sched_class->set_curr_task(rq);
5208        if (queued)
5209                enqueue_task(rq, p, ENQUEUE_RESTORE);
5210        task_rq_unlock(rq, p, &flags);
5211}
5212#endif /* CONFIG_NUMA_BALANCING */
5213
5214#ifdef CONFIG_HOTPLUG_CPU
5215/*
5216 * Ensures that the idle task is using init_mm right before its cpu goes
5217 * offline.
5218 */
5219void idle_task_exit(void)
5220{
5221        struct mm_struct *mm = current->active_mm;
5222
5223        BUG_ON(cpu_online(smp_processor_id()));
5224
5225        if (mm != &init_mm) {
5226                switch_mm(mm, &init_mm, current);
5227                finish_arch_post_lock_switch();
5228        }
5229        mmdrop(mm);
5230}
5231
5232/*
5233 * Since this CPU is going 'away' for a while, fold any nr_active delta
5234 * we might have. Assumes we're called after migrate_tasks() so that the
5235 * nr_active count is stable.
5236 *
5237 * Also see the comment "Global load-average calculations".
5238 */
5239static void calc_load_migrate(struct rq *rq)
5240{
5241        long delta = calc_load_fold_active(rq);
5242        if (delta)
5243                atomic_long_add(delta, &calc_load_tasks);
5244}
5245
5246static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
5247{
5248}
5249
5250static const struct sched_class fake_sched_class = {
5251        .put_prev_task = put_prev_task_fake,
5252};
5253
5254static struct task_struct fake_task = {
5255        /*
5256         * Avoid pull_{rt,dl}_task()
5257         */
5258        .prio = MAX_PRIO + 1,
5259        .sched_class = &fake_sched_class,
5260};
5261
5262/*
5263 * Migrate all tasks from the rq; sleeping tasks will be migrated by
5264 * try_to_wake_up()->select_task_rq().
5265 *
5266 * Called with rq->lock held. Even though we're in stop_machine() and
5267 * there's no concurrency possible, we hold the required locks anyway
5268 * to satisfy lock validation.
5269 */
5270static void migrate_tasks(struct rq *dead_rq)
5271{
5272        struct rq *rq = dead_rq;
5273        struct task_struct *next, *stop = rq->stop;
5274        int dest_cpu;
5275
5276        /*
5277         * Fudge the rq selection such that the below task selection loop
5278         * doesn't get stuck on the currently eligible stop task.
5279         *
5280         * We're currently inside stop_machine() and the rq is either stuck
5281         * in the stop_machine_cpu_stop() loop or executing this code;
5282         * either way we should never end up calling schedule() until we're
5283         * done here.
5284         */
5285        rq->stop = NULL;
5286
5287        /*
5288         * The put_prev_task() and pick_next_task() sched
5289         * class methods both need an up-to-date
5290         * value of rq->clock[_task].
5291         */
5292        update_rq_clock(rq);
5293
5294        for (;;) {
5295                /*
5296                 * This thread is running; bail when it's the only
5297                 * remaining thread.
5298                 */
5299                if (rq->nr_running == 1)
5300                        break;
5301
5302                /*
5303                 * pick_next_task() assumes a pinned rq->lock.
5304                 */
5305                lockdep_pin_lock(&rq->lock);
5306                next = pick_next_task(rq, &fake_task);
5307                BUG_ON(!next);
5308                next->sched_class->put_prev_task(rq, next);
5309
5310                /*
5311                 * The rules for changing task_struct::cpus_allowed require
5312                 * holding both pi_lock and rq->lock, such that holding either
5313                 * stabilizes the mask.
5314                 *
5315                 * Dropping rq->lock is not quite as disastrous as it usually
5316                 * is because the CPU is !cpu_active at this point, so
5317                 * load-balancing will not interfere. Also, we're in stop-machine.
5318                 */
5319                lockdep_unpin_lock(&rq->lock);
5320                raw_spin_unlock(&rq->lock);
5321                raw_spin_lock(&next->pi_lock);
5322                raw_spin_lock(&rq->lock);
5323
5324                /*
5325                 * Since we're inside stop-machine, _nothing_ should have
5326                 * changed the task; WARN if something did, because in that
5327                 * case the rq->lock drop above was unsafe as well.
5328                 */
5329                if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
5330                        raw_spin_unlock(&next->pi_lock);
5331                        continue;
5332                }
5333
5334                /* Find suitable destination for @next, with force if needed. */
5335                dest_cpu = select_fallback_rq(dead_rq->cpu, next);
5336
5337                rq = __migrate_task(rq, next, dest_cpu);
5338                if (rq != dead_rq) {
5339                        raw_spin_unlock(&rq->lock);
5340                        rq = dead_rq;
5341                        raw_spin_lock(&rq->lock);
5342                }
5343                raw_spin_unlock(&next->pi_lock);
5344        }
5345
5346        rq->stop = stop;
5347}
5348#endif /* CONFIG_HOTPLUG_CPU */
5349
5350static void set_rq_online(struct rq *rq)
5351{
5352        if (!rq->online) {
5353                const struct sched_class *class;
5354
5355                cpumask_set_cpu(rq->cpu, rq->rd->online);
5356                rq->online = 1;
5357
5358                for_each_class(class) {
5359                        if (class->rq_online)
5360                                class->rq_online(rq);
5361                }
5362        }
5363}
5364
5365static void set_rq_offline(struct rq *rq)
5366{
5367        if (rq->online) {
5368                const struct sched_class *class;
5369
5370                for_each_class(class) {
5371                        if (class->rq_offline)
5372                                class->rq_offline(rq);
5373                }
5374
5375                cpumask_clear_cpu(rq->cpu, rq->rd->online);
5376                rq->online = 0;
5377        }
5378}
5379
5380/*
5381 * migration_call - callback invoked on CPU hotplug notifications; sets up
5382 * runqueue state as a CPU comes online and tears it down as it goes away.
5383 */
5384static int
5385migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5386{
5387        int cpu = (long)hcpu;
5388        unsigned long flags;
5389        struct rq *rq = cpu_rq(cpu);
5390
5391        switch (action & ~CPU_TASKS_FROZEN) {
5392
5393        case CPU_UP_PREPARE:
5394                rq->calc_load_update = calc_load_update;
5395                account_reset_rq(rq);
5396                break;
5397
5398        case CPU_ONLINE:
5399                /* Update our root-domain */
5400                raw_spin_lock_irqsave(&rq->lock, flags);
5401                if (rq->rd) {
5402                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5403
5404                        set_rq_online(rq);
5405                }
5406                raw_spin_unlock_irqrestore(&rq->lock, flags);
5407                break;
5408
5409#ifdef CONFIG_HOTPLUG_CPU
5410        case CPU_DYING:
5411                sched_ttwu_pending();
5412                /* Update our root-domain */
5413                raw_spin_lock_irqsave(&rq->lock, flags);
5414                if (rq->rd) {
5415                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5416                        set_rq_offline(rq);
5417                }
5418                migrate_tasks(rq);
5419                BUG_ON(rq->nr_running != 1); /* the migration thread */
5420                raw_spin_unlock_irqrestore(&rq->lock, flags);
5421                break;
5422
5423        case CPU_DEAD:
5424                calc_load_migrate(rq);
5425                break;
5426#endif
5427        }
5428
5429        update_max_interval();
5430
5431        return NOTIFY_OK;
5432}
5433
5434/*
5435 * Register at high priority so that task migration (migrate_all_tasks)
5436 * happens before everything else.  This has to be lower priority than
5437 * the notifier in the perf_event subsystem, though.
5438 */
5439static struct notifier_block migration_notifier = {
5440        .notifier_call = migration_call,
5441        .priority = CPU_PRI_MIGRATION,
5442};
5443
5444static void set_cpu_rq_start_time(void)
5445{
5446        int cpu = smp_processor_id();
5447        struct rq *rq = cpu_rq(cpu);
5448        rq->age_stamp = sched_clock_cpu(cpu);
5449}
5450
5451static int sched_cpu_active(struct notifier_block *nfb,
5452                                      unsigned long action, void *hcpu)
5453{
5454        int cpu = (long)hcpu;
5455
5456        switch (action & ~CPU_TASKS_FROZEN) {
5457        case CPU_STARTING:
5458                set_cpu_rq_start_time();
5459                return NOTIFY_OK;
5460
5461        case CPU_DOWN_FAILED:
5462                set_cpu_active(cpu, true);
5463                return NOTIFY_OK;
5464
5465        default:
5466                return NOTIFY_DONE;
5467        }
5468}
5469
5470static int sched_cpu_inactive(struct notifier_block *nfb,
5471                                        unsigned long action, void *hcpu)
5472{
5473        switch (action & ~CPU_TASKS_FROZEN) {
5474        case CPU_DOWN_PREPARE:
5475                set_cpu_active((long)hcpu, false);
5476                return NOTIFY_OK;
5477        default:
5478                return NOTIFY_DONE;
5479        }
5480}
5481
5482static int __init migration_init(void)
5483{
5484        void *cpu = (void *)(long)smp_processor_id();
5485        int err;
5486
5487        /* Initialize migration for the boot CPU */
5488        err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5489        BUG_ON(err == NOTIFY_BAD);
5490        migration_call(&migration_notifier, CPU_ONLINE, cpu);
5491        register_cpu_notifier(&migration_notifier);
5492
5493        /* Register cpu active notifiers */
5494        cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5495        cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5496
5497        return 0;
5498}
5499early_initcall(migration_init);
5500
5501static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5502
5503#ifdef CONFIG_SCHED_DEBUG
5504
5505static __read_mostly int sched_debug_enabled;
5506
5507static int __init sched_debug_setup(char *str)
5508{
5509        sched_debug_enabled = 1;
5510
5511        return 0;
5512}
5513early_param("sched_debug", sched_debug_setup);
5514
5515static inline bool sched_debug(void)
5516{
5517        return sched_debug_enabled;
5518}
5519
5520static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5521                                  struct cpumask *groupmask)
5522{
5523        struct sched_group *group = sd->groups;
5524
5525        cpumask_clear(groupmask);
5526
5527        printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5528
5529        if (!(sd->flags & SD_LOAD_BALANCE)) {
5530                printk(KERN_CONT "does not load-balance\n");
5531                if (sd->parent)
5532                        printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5533                                        " has parent\n");
5534                return -1;
5535        }
5536
5537        printk(KERN_CONT "span %*pbl level %s\n",
5538               cpumask_pr_args(sched_domain_span(sd)), sd->name);
5539
5540        if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
5541                printk(KERN_ERR "ERROR: domain->span does not contain "
5542                                "CPU%d\n", cpu);
5543        }
5544        if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
5545                printk(KERN_ERR "ERROR: domain->groups does not contain"
5546                                " CPU%d\n", cpu);
5547        }
5548
5549        printk(KERN_DEBUG "%*s groups:", level + 1, "");
5550        do {
5551                if (!group) {
5552                        printk(KERN_CONT "\n");
5553                        printk(KERN_ERR "ERROR: group is NULL\n");
5554                        break;
5555                }
5556
5557                if (!cpumask_weight(sched_group_cpus(group))) {
5558                        printk(KERN_CONT "\n");
5559                        printk(KERN_ERR "ERROR: empty group\n");
5560                        break;
5561                }
5562
5563                if (!(sd->flags & SD_OVERLAP) &&
5564                    cpumask_intersects(groupmask, sched_group_cpus(group))) {
5565                        printk(KERN_CONT "\n");
5566                        printk(KERN_ERR "ERROR: repeated CPUs\n");
5567                        break;
5568                }
5569
5570                cpumask_or(groupmask, groupmask, sched_group_cpus(group));
5571
5572                printk(KERN_CONT " %*pbl",
5573                       cpumask_pr_args(sched_group_cpus(group)));
5574                if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
5575                        printk(KERN_CONT " (cpu_capacity = %d)",
5576                                group->sgc->capacity);
5577                }
5578
5579                group = group->next;
5580        } while (group != sd->groups);
5581        printk(KERN_CONT "\n");
5582
5583        if (!cpumask_equal(sched_domain_span(sd), groupmask))
5584                printk(KERN_ERR "ERROR: groups don't span domain->span\n");
5585
5586        if (sd->parent &&
5587            !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
5588                printk(KERN_ERR "ERROR: parent span is not a superset "
5589                        "of domain->span\n");
5590        return 0;
5591}
5592
5593static void sched_domain_debug(struct sched_domain *sd, int cpu)
5594{
5595        int level = 0;
5596
5597        if (!sched_debug_enabled)
5598                return;
5599
5600        if (!sd) {
5601                printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5602                return;
5603        }
5604
5605        printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5606
5607        for (;;) {
5608                if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
5609                        break;
5610                level++;
5611                sd = sd->parent;
5612                if (!sd)
5613                        break;
5614        }
5615}
5616#else /* !CONFIG_SCHED_DEBUG */
5617# define sched_domain_debug(sd, cpu) do { } while (0)
5618static inline bool sched_debug(void)
5619{
5620        return false;
5621}
5622#endif /* CONFIG_SCHED_DEBUG */
5623
5624static int sd_degenerate(struct sched_domain *sd)
5625{
5626        if (cpumask_weight(sched_domain_span(sd)) == 1)
5627                return 1;
5628
5629        /* Following flags need at least 2 groups */
5630        if (sd->flags & (SD_LOAD_BALANCE |
5631                         SD_BALANCE_NEWIDLE |
5632                         SD_BALANCE_FORK |
5633                         SD_BALANCE_EXEC |
5634                         SD_SHARE_CPUCAPACITY |
5635                         SD_SHARE_PKG_RESOURCES |
5636                         SD_SHARE_POWERDOMAIN)) {
5637                if (sd->groups != sd->groups->next)
5638                        return 0;
5639        }
5640
5641        /* Following flags don't use groups */
5642        if (sd->flags & (SD_WAKE_AFFINE))
5643                return 0;
5644
5645        return 1;
5646}
5647
5648static int
5649sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
5650{
5651        unsigned long cflags = sd->flags, pflags = parent->flags;
5652
5653        if (sd_degenerate(parent))
5654                return 1;
5655
5656        if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
5657                return 0;
5658
5659        /* Flags needing groups don't count if only 1 group in parent */
5660        if (parent->groups == parent->groups->next) {
5661                pflags &= ~(SD_LOAD_BALANCE |
5662                                SD_BALANCE_NEWIDLE |
5663                                SD_BALANCE_FORK |
5664                                SD_BALANCE_EXEC |
5665                                SD_SHARE_CPUCAPACITY |
5666                                SD_SHARE_PKG_RESOURCES |
5667                                SD_PREFER_SIBLING |
5668                                SD_SHARE_POWERDOMAIN);
5669                if (nr_node_ids == 1)
5670                        pflags &= ~SD_SERIALIZE;
5671        }
5672        if (~cflags & pflags)
5673                return 0;
5674
5675        return 1;
5676}
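
/*
 * Illustration of the '~cflags & pflags' test above (flag values are
 * made up): with cflags = SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE and
 * pflags = SD_LOAD_BALANCE, every remaining parent flag is already
 * present in the child, so ~cflags & pflags == 0 and the parent is
 * degenerate. Had pflags also carried SD_SERIALIZE, the expression
 * would be non-zero and the parent would have to be kept.
 */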
5677
5678static void free_rootdomain(struct rcu_head *rcu)
5679{
5680        struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
5681
5682        cpupri_cleanup(&rd->cpupri);
5683        cpudl_cleanup(&rd->cpudl);
5684        free_cpumask_var(rd->dlo_mask);
5685        free_cpumask_var(rd->rto_mask);
5686        free_cpumask_var(rd->online);
5687        free_cpumask_var(rd->span);
5688        kfree(rd);
5689}
5690
5691static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5692{
5693        struct root_domain *old_rd = NULL;
5694        unsigned long flags;
5695
5696        raw_spin_lock_irqsave(&rq->lock, flags);
5697
5698        if (rq->rd) {
5699                old_rd = rq->rd;
5700
5701                if (cpumask_test_cpu(rq->cpu, old_rd->online))
5702                        set_rq_offline(rq);
5703
5704                cpumask_clear_cpu(rq->cpu, old_rd->span);
5705
5706                /*
5707                 * If we don't want to free the old_rd yet then
5708                 * set old_rd to NULL to skip the freeing later
5709                 * in this function:
5710                 */
5711                if (!atomic_dec_and_test(&old_rd->refcount))
5712                        old_rd = NULL;
5713        }
5714
5715        atomic_inc(&rd->refcount);
5716        rq->rd = rd;
5717
5718        cpumask_set_cpu(rq->cpu, rd->span);
5719        if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
5720                set_rq_online(rq);
5721
5722        raw_spin_unlock_irqrestore(&rq->lock, flags);
5723
5724        if (old_rd)
5725                call_rcu_sched(&old_rd->rcu, free_rootdomain);
5726}
5727
5728static int init_rootdomain(struct root_domain *rd)
5729{
5730        memset(rd, 0, sizeof(*rd));
5731
5732        if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
5733                goto out;
5734        if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
5735                goto free_span;
5736        if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
5737                goto free_online;
5738        if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5739                goto free_dlo_mask;
5740
5741        init_dl_bw(&rd->dl_bw);
5742        if (cpudl_init(&rd->cpudl) != 0)
5743                goto free_dlo_mask;
5744
5745        if (cpupri_init(&rd->cpupri) != 0)
5746                goto free_rto_mask;
5747        return 0;
5748
5749free_rto_mask:
5750        free_cpumask_var(rd->rto_mask);
5751free_dlo_mask:
5752        free_cpumask_var(rd->dlo_mask);
5753free_online:
5754        free_cpumask_var(rd->online);
5755free_span:
5756        free_cpumask_var(rd->span);
5757out:
5758        return -ENOMEM;
5759}
5760
5761/*
5762 * By default the system creates a single root-domain with all cpus as
5763 * members (mimicking the global state we have today).
5764 */
5765struct root_domain def_root_domain;
5766
5767static void init_defrootdomain(void)
5768{
5769        init_rootdomain(&def_root_domain);
5770
5771        atomic_set(&def_root_domain.refcount, 1);
5772}
5773
5774static struct root_domain *alloc_rootdomain(void)
5775{
5776        struct root_domain *rd;
5777
5778        rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5779        if (!rd)
5780                return NULL;
5781
5782        if (init_rootdomain(rd) != 0) {
5783                kfree(rd);
5784                return NULL;
5785        }
5786
5787        return rd;
5788}
5789
5790static void free_sched_groups(struct sched_group *sg, int free_sgc)
5791{
5792        struct sched_group *tmp, *first;
5793
5794        if (!sg)
5795                return;
5796
5797        first = sg;
5798        do {
5799                tmp = sg->next;
5800
5801                if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
5802                        kfree(sg->sgc);
5803
5804                kfree(sg);
5805                sg = tmp;
5806        } while (sg != first);
5807}
5808
5809static void free_sched_domain(struct rcu_head *rcu)
5810{
5811        struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
5812
5813        /*
5814         * If it's an overlapping domain it has private groups; iterate
5815         * and free them all.
5816         */
5817        if (sd->flags & SD_OVERLAP) {
5818                free_sched_groups(sd->groups, 1);
5819        } else if (atomic_dec_and_test(&sd->groups->ref)) {
5820                kfree(sd->groups->sgc);
5821                kfree(sd->groups);
5822        }
5823        kfree(sd);
5824}
5825
5826static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5827{
5828        call_rcu(&sd->rcu, free_sched_domain);
5829}
5830
5831static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5832{
5833        for (; sd; sd = sd->parent)
5834                destroy_sched_domain(sd, cpu);
5835}
5836
5837/*
5838 * Keep a special pointer to the highest sched_domain that has
5839 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
5840 * allows us to avoid some pointer chasing in select_idle_sibling().
5841 *
5842 * Also keep a unique ID per domain (we use the first cpu number in
5843 * the cpumask of the domain); this allows us to quickly tell if
5844 * two cpus are in the same cache domain, see cpus_share_cache().
5845 */
5846DEFINE_PER_CPU(struct sched_domain *, sd_llc);
5847DEFINE_PER_CPU(int, sd_llc_size);
5848DEFINE_PER_CPU(int, sd_llc_id);
5849DEFINE_PER_CPU(struct sched_domain *, sd_numa);
5850DEFINE_PER_CPU(struct sched_domain *, sd_busy);
5851DEFINE_PER_CPU(struct sched_domain *, sd_asym);
5852
5853static void update_top_cache_domain(int cpu)
5854{
5855        struct sched_domain *sd;
5856        struct sched_domain *busy_sd = NULL;
5857        int id = cpu;
5858        int size = 1;
5859
5860        sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
5861        if (sd) {
5862                id = cpumask_first(sched_domain_span(sd));
5863                size = cpumask_weight(sched_domain_span(sd));
5864                busy_sd = sd->parent; /* sd_busy */
5865        }
5866        rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
5867
5868        rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
5869        per_cpu(sd_llc_size, cpu) = size;
5870        per_cpu(sd_llc_id, cpu) = id;
5871
5872        sd = lowest_flag_domain(cpu, SD_NUMA);
5873        rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
5874
5875        sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
5876        rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
5877}
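
/*
 * Example of what the above computes, for a hypothetical two-socket
 * machine with 8 cores / 16 threads per socket and a socket-wide L3:
 * the highest SD_SHARE_PKG_RESOURCES domain spans one socket, so every
 * cpu in socket 0 gets sd_llc_size == 16 and sd_llc_id == the first
 * cpu number in that socket. cpus_share_cache() then reduces to
 * comparing two sd_llc_id values.
 */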
5878
5879/*
5880 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
5881 * hold the hotplug lock.
5882 */
5883static void
5884cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
5885{
5886        struct rq *rq = cpu_rq(cpu);
5887        struct sched_domain *tmp;
5888
5889        /* Remove the sched domains which do not contribute to scheduling. */
5890        for (tmp = sd; tmp; ) {
5891                struct sched_domain *parent = tmp->parent;
5892                if (!parent)
5893                        break;
5894
5895                if (sd_parent_degenerate(tmp, parent)) {
5896                        tmp->parent = parent->parent;
5897                        if (parent->parent)
5898                                parent->parent->child = tmp;
5899                        /*
5900                         * Transfer SD_PREFER_SIBLING down in case of a
5901                         * degenerate parent; the spans match in this case,
5902                         * so the property transfers.
5903                         */
5904                        if (parent->flags & SD_PREFER_SIBLING)
5905                                tmp->flags |= SD_PREFER_SIBLING;
5906                        destroy_sched_domain(parent, cpu);
5907                } else
5908                        tmp = tmp->parent;
5909        }
5910
5911        if (sd && sd_degenerate(sd)) {
5912                tmp = sd;
5913                sd = sd->parent;
5914                destroy_sched_domain(tmp, cpu);
5915                if (sd)
5916                        sd->child = NULL;
5917        }
5918
5919        sched_domain_debug(sd, cpu);
5920
5921        rq_attach_root(rq, rd);
5922        tmp = rq->sd;
5923        rcu_assign_pointer(rq->sd, sd);
5924        destroy_sched_domains(tmp, cpu);
5925
5926        update_top_cache_domain(cpu);
5927}
5928
5929/* Set up the mask of cpus configured for isolated domains */
5930static int __init isolated_cpu_setup(char *str)
5931{
5932        int ret;
5933
5934        alloc_bootmem_cpumask_var(&cpu_isolated_map);
5935        ret = cpulist_parse(str, cpu_isolated_map);
5936        if (ret) {
5937                pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids - 1);
5938                return 0;
5939        }
5940        return 1;
5941}
5942__setup("isolcpus=", isolated_cpu_setup);
5943
5944struct s_data {
5945        struct sched_domain ** __percpu sd;
5946        struct root_domain      *rd;
5947};
5948
5949enum s_alloc {
5950        sa_rootdomain,
5951        sa_sd,
5952        sa_sd_storage,
5953        sa_none,
5954};
5955
5956/*
5957 * Build an iteration mask that can exclude certain CPUs from the upwards
5958 * domain traversal.
5959 *
5960 * Asymmetric node setups can result in situations where the domain tree is of
5961 * unequal depth; make sure to skip domains that already cover the entire
5962 * range.
5963 *
5964 * In that case build_sched_domains() will have terminated the iteration early
5965 * and our sibling sd spans will be empty. Domains should always include the
5966 * cpu they're built on, so check that.
5967 *
5968 */
5969static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
5970{
5971        const struct cpumask *span = sched_domain_span(sd);
5972        struct sd_data *sdd = sd->private;
5973        struct sched_domain *sibling;
5974        int i;
5975
5976        for_each_cpu(i, span) {
5977                sibling = *per_cpu_ptr(sdd->sd, i);
5978                if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
5979                        continue;
5980
5981                cpumask_set_cpu(i, sched_group_mask(sg));
5982        }
5983}
5984
5985/*
5986 * Return the canonical balance cpu for this group; this is the first cpu
5987 * of this group that's also in the iteration mask.
5988 */
5989int group_balance_cpu(struct sched_group *sg)
5990{
5991        return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
5992}
5993
5994static int
5995build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5996{
5997        struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5998        const struct cpumask *span = sched_domain_span(sd);
5999        struct cpumask *covered = sched_domains_tmpmask;
6000        struct sd_data *sdd = sd->private;
6001        struct sched_domain *sibling;
6002        int i;
6003
6004        cpumask_clear(covered);
6005
6006        for_each_cpu(i, span) {
6007                struct cpumask *sg_span;
6008
6009                if (cpumask_test_cpu(i, covered))
6010                        continue;
6011
6012                sibling = *per_cpu_ptr(sdd->sd, i);
6013
6014                /* See the comment near build_group_mask(). */
6015                if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6016                        continue;
6017
6018                sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6019                                GFP_KERNEL, cpu_to_node(cpu));
6020
6021                if (!sg)
6022                        goto fail;
6023
6024                sg_span = sched_group_cpus(sg);
6025                if (sibling->child)
6026                        cpumask_copy(sg_span, sched_domain_span(sibling->child));
6027                else
6028                        cpumask_set_cpu(i, sg_span);
6029
6030                cpumask_or(covered, covered, sg_span);
6031
6032                sg->sgc = *per_cpu_ptr(sdd->sgc, i);
6033                if (atomic_inc_return(&sg->sgc->ref) == 1)
6034                        build_group_mask(sd, sg);
6035
6036                /*
6037                 * Initialize sgc->capacity such that even if we mess up the
6038                 * domains and no iteration ever gets around to updating
6039                 * it, we won't die on a /0 trap.
6040                 */
6041                sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
6042
6043                /*
6044                 * Make sure the first group of this domain contains the
6045                 * canonical balance cpu. Otherwise the sched_domain iteration
6046                 * breaks. See update_sg_lb_stats().
6047                 */
6048                if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
6049                    group_balance_cpu(sg) == cpu)
6050                        groups = sg;
6051
6052                if (!first)
6053                        first = sg;
6054                if (last)
6055                        last->next = sg;
6056                last = sg;
6057                last->next = first;
6058        }
6059        sd->groups = groups;
6060
6061        return 0;
6062
6063fail:
6064        free_sched_groups(first, 0);
6065
6066        return -ENOMEM;
6067}
6068
6069static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
6070{
6071        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6072        struct sched_domain *child = sd->child;
6073
6074        if (child)
6075                cpu = cpumask_first(sched_domain_span(child));
6076
6077        if (sg) {
6078                *sg = *per_cpu_ptr(sdd->sg, cpu);
6079                (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
6080                atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
6081        }
6082
6083        return cpu;
6084}
6085
6086/*
6087 * build_sched_groups will build a circular linked list of the groups
6088 * covered by the given span, set each group's ->cpumask correctly,
6089 * and initialize ->cpu_capacity to 0.
6090 *
6091 * Assumes the sched_domain tree is fully constructed.
6092 */
6093static int
6094build_sched_groups(struct sched_domain *sd, int cpu)
6095{
6096        struct sched_group *first = NULL, *last = NULL;
6097        struct sd_data *sdd = sd->private;
6098        const struct cpumask *span = sched_domain_span(sd);
6099        struct cpumask *covered;
6100        int i;
6101
6102        get_group(cpu, sdd, &sd->groups);
6103        atomic_inc(&sd->groups->ref);
6104
6105        if (cpu != cpumask_first(span))
6106                return 0;
6107
6108        lockdep_assert_held(&sched_domains_mutex);
6109        covered = sched_domains_tmpmask;
6110
6111        cpumask_clear(covered);
6112
6113        for_each_cpu(i, span) {
6114                struct sched_group *sg;
6115                int group, j;
6116
6117                if (cpumask_test_cpu(i, covered))
6118                        continue;
6119
6120                group = get_group(i, sdd, &sg);
6121                cpumask_setall(sched_group_mask(sg));
6122
6123                for_each_cpu(j, span) {
6124                        if (get_group(j, sdd, NULL) != group)
6125                                continue;
6126
6127                        cpumask_set_cpu(j, covered);
6128                        cpumask_set_cpu(j, sched_group_cpus(sg));
6129                }
6130
6131                if (!first)
6132                        first = sg;
6133                if (last)
6134                        last->next = sg;
6135                last = sg;
6136        }
6137        last->next = first;
6138
6139        return 0;
6140}
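
/*
 * Worked example for a hypothetical 4-cpu box (2 cores, 2 SMT siblings
 * each): at the SMT level the domain of cpu0 spans {0,1} and gets the
 * single-cpu groups {0} -> {1} -> {0}. At the DIE level the span is
 * {0,1,2,3}; get_group() maps each cpu to the first cpu of its child
 * domain, yielding the circular list {0,1} -> {2,3} -> {0,1}. Only the
 * first cpu of a span builds the list; all other cpus bail out early
 * after taking a reference on the already-built groups.
 */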
6141
6142/*
6143 * Initialize sched groups cpu_capacity.
6144 *
6145 * cpu_capacity indicates the capacity of a sched group, which is used while
6146 * distributing the load between different sched groups in a sched domain.
6147 * Typically cpu_capacity will be the same for all groups in a sched domain
6148 * unless there are asymmetries in the topology. If there are asymmetries, a
6149 * group having more cpu_capacity will pick up more load compared to a
6150 * group having less cpu_capacity.
6151 */
6152static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
6153{
6154        struct sched_group *sg = sd->groups;
6155
6156        WARN_ON(!sg);
6157
6158        do {
6159                sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6160                sg = sg->next;
6161        } while (sg != sd->groups);
6162
6163        if (cpu != group_balance_cpu(sg))
6164                return;
6165
6166        update_group_capacity(sd, cpu);
6167        atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
6168}
6169
6170/*
6171 * Initializers for sched domains.
6172 * Non-inlined to reduce accumulated stack pressure in build_sched_domains().
6173 */
6174
6175static int default_relax_domain_level = -1;
6176int sched_domain_level_max;
6177
6178static int __init setup_relax_domain_level(char *str)
6179{
6180        if (kstrtoint(str, 0, &default_relax_domain_level))
6181                pr_warn("Unable to set relax_domain_level\n");
6182
6183        return 1;
6184}
6185__setup("relax_domain_level=", setup_relax_domain_level);
6186
6187static void set_domain_attribute(struct sched_domain *sd,
6188                                 struct sched_domain_attr *attr)
6189{
6190        int request;
6191
6192        if (!attr || attr->relax_domain_level < 0) {
6193                if (default_relax_domain_level < 0)
6194                        return;
6195                else
6196                        request = default_relax_domain_level;
6197        } else
6198                request = attr->relax_domain_level;
6199        if (request < sd->level) {
6200                /* turn off idle balance on this domain */
6201                sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6202        } else {
6203                /* turn on idle balance on this domain */
6204                sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6205        }
6206}
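
/*
 * Example of the effect of the check above (domain levels are
 * illustrative and depend on the machine's topology): booting with
 * "relax_domain_level=1" leaves SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE
 * enabled on domains with sd->level <= 1 (e.g. SMT and MC) and clears
 * both on every higher domain, confining idle/wakeup balancing to the
 * cheaper, lower levels.
 */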
6207
6208static void __sdt_free(const struct cpumask *cpu_map);
6209static int __sdt_alloc(const struct cpumask *cpu_map);
6210
6211static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6212                                 const struct cpumask *cpu_map)
6213{
6214        switch (what) {
6215        case sa_rootdomain:
6216                if (!atomic_read(&d->rd->refcount))
6217                        free_rootdomain(&d->rd->rcu); /* fall through */
6218        case sa_sd:
6219                free_percpu(d->sd); /* fall through */
6220        case sa_sd_storage:
6221                __sdt_free(cpu_map); /* fall through */
6222        case sa_none:
6223                break;
6224        }
6225}
6226
6227static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6228                                                   const struct cpumask *cpu_map)
6229{
6230        memset(d, 0, sizeof(*d));
6231
6232        if (__sdt_alloc(cpu_map))
6233                return sa_sd_storage;
6234        d->sd = alloc_percpu(struct sched_domain *);
6235        if (!d->sd)
6236                return sa_sd_storage;
6237        d->rd = alloc_rootdomain();
6238        if (!d->rd)
6239                return sa_sd;
6240        return sa_rootdomain;
6241}
6242
6243/*
6244 * NULL the sd_data elements we've used to build the sched_domain and
6245 * sched_group structure so that the subsequent __free_domain_allocs()
6246 * will not free the data we're using.
6247 */
6248static void claim_allocations(int cpu, struct sched_domain *sd)
6249{
6250        struct sd_data *sdd = sd->private;
6251
6252        WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6253        *per_cpu_ptr(sdd->sd, cpu) = NULL;
6254
6255        if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
6256                *per_cpu_ptr(sdd->sg, cpu) = NULL;
6257
6258        if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
6259                *per_cpu_ptr(sdd->sgc, cpu) = NULL;
6260}
6261
6262#ifdef CONFIG_NUMA
6263static int sched_domains_numa_levels;
6264enum numa_topology_type sched_numa_topology_type;
6265static int *sched_domains_numa_distance;
6266int sched_max_numa_distance;
6267static struct cpumask ***sched_domains_numa_masks;
6268static int sched_domains_curr_level;
6269#endif
6270
6271/*
6272 * SD_flags allowed in topology descriptions.
6273 *
6274 * SD_SHARE_CPUCAPACITY   - describes SMT topologies
6275 * SD_SHARE_PKG_RESOURCES - describes shared caches
6276 * SD_NUMA                - describes NUMA topologies
6277 * SD_SHARE_POWERDOMAIN   - describes shared power domain
6278 *
6279 * Odd one out:
6280 * SD_ASYM_PACKING        - describes SMT quirks
6281 */
6282#define TOPOLOGY_SD_FLAGS               \
6283        (SD_SHARE_CPUCAPACITY |         \
6284         SD_SHARE_PKG_RESOURCES |       \
6285         SD_NUMA |                      \
6286         SD_ASYM_PACKING |              \
6287         SD_SHARE_POWERDOMAIN)
6288
6289static struct sched_domain *
6290sd_init(struct sched_domain_topology_level *tl, int cpu)
6291{
6292        struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
6293        int sd_weight, sd_flags = 0;
6294
6295#ifdef CONFIG_NUMA
6296        /*
6297         * Ugly hack to pass state to sd_numa_mask()...
6298         */
6299        sched_domains_curr_level = tl->numa_level;
6300#endif
6301
6302        sd_weight = cpumask_weight(tl->mask(cpu));
6303
6304        if (tl->sd_flags)
6305                sd_flags = (*tl->sd_flags)();
6306        if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
6307                        "wrong sd_flags in topology description\n"))
6308                sd_flags &= ~TOPOLOGY_SD_FLAGS;
6309
6310        *sd = (struct sched_domain){
6311                .min_interval           = sd_weight,
6312                .max_interval           = 2*sd_weight,
6313                .busy_factor            = 32,
6314                .imbalance_pct          = 125,
6315
6316                .cache_nice_tries       = 0,
6317                .busy_idx               = 0,
6318                .idle_idx               = 0,
6319                .newidle_idx            = 0,
6320                .wake_idx               = 0,
6321                .forkexec_idx           = 0,
6322
6323                .flags                  = 1*SD_LOAD_BALANCE
6324                                        | 1*SD_BALANCE_NEWIDLE
6325                                        | 1*SD_BALANCE_EXEC
6326                                        | 1*SD_BALANCE_FORK
6327                                        | 0*SD_BALANCE_WAKE
6328                                        | 1*SD_WAKE_AFFINE
6329                                        | 0*SD_SHARE_CPUCAPACITY
6330                                        | 0*SD_SHARE_PKG_RESOURCES
6331                                        | 0*SD_SERIALIZE
6332                                        | 0*SD_PREFER_SIBLING
6333                                        | 0*SD_NUMA
6334                                        | sd_flags
6335                                        ,
6336
6337                .last_balance           = jiffies,
6338                .balance_interval       = sd_weight,
6339                .smt_gain               = 0,
6340                .max_newidle_lb_cost    = 0,
6341                .next_decay_max_lb_cost = jiffies,
6342#ifdef CONFIG_SCHED_DEBUG
6343                .name                   = tl->name,
6344#endif
6345        };
6346
6347        /*
6348         * Convert topological properties into behaviour.
6349         */
6350
6351        if (sd->flags & SD_SHARE_CPUCAPACITY) {
6352                sd->flags |= SD_PREFER_SIBLING;
6353                sd->imbalance_pct = 110;
6354                sd->smt_gain = 1178; /* ~15% */
6355
6356        } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
6357                sd->imbalance_pct = 117;
6358                sd->cache_nice_tries = 1;
6359                sd->busy_idx = 2;
6360
6361#ifdef CONFIG_NUMA
6362        } else if (sd->flags & SD_NUMA) {
6363                sd->cache_nice_tries = 2;
6364                sd->busy_idx = 3;
6365                sd->idle_idx = 2;
6366
6367                sd->flags |= SD_SERIALIZE;
6368                if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
6369                        sd->flags &= ~(SD_BALANCE_EXEC |
6370                                       SD_BALANCE_FORK |
6371                                       SD_WAKE_AFFINE);
6372                }
6373
6374#endif
6375        } else {
6376                sd->flags |= SD_PREFER_SIBLING;
6377                sd->cache_nice_tries = 1;
6378                sd->busy_idx = 2;
6379                sd->idle_idx = 1;
6380        }
6381
6382        sd->private = &tl->data;
6383
6384        return sd;
6385}
6386
6387/*
6388 * Topology list, bottom-up.
6389 */
6390static struct sched_domain_topology_level default_topology[] = {
6391#ifdef CONFIG_SCHED_SMT
6392        { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
6393#endif
6394#ifdef CONFIG_SCHED_MC
6395        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
6396#endif
6397        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
6398        { NULL, },
6399};
6400
6401static struct sched_domain_topology_level *sched_domain_topology =
6402        default_topology;
6403
6404#define for_each_sd_topology(tl)                        \
6405        for (tl = sched_domain_topology; tl->mask; tl++)
6406
6407void set_sched_topology(struct sched_domain_topology_level *tl)
6408{
6409        sched_domain_topology = tl;
6410}
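
/*
 * Sketch of how an architecture might override the default table via
 * set_sched_topology() (the table name here is hypothetical; real
 * callers supply masks and flags matching their hardware). The table
 * must remain bottom-up and NULL-terminated:
 *
 *	static struct sched_domain_topology_level arch_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(arch_topology);	// early, before SMP bringup
 */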
6411
6412#ifdef CONFIG_NUMA
6413
6414static const struct cpumask *sd_numa_mask(int cpu)
6415{
6416        return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6417}
6418
6419static void sched_numa_warn(const char *str)
6420{
6421        static bool done = false;
6422        int i, j;
6423
6424        if (done)
6425                return;
6426
6427        done = true;
6428
6429        printk(KERN_WARNING "ERROR: %s\n\n", str);
6430
6431        for (i = 0; i < nr_node_ids; i++) {
6432                printk(KERN_WARNING "  ");
6433                for (j = 0; j < nr_node_ids; j++)
6434                        printk(KERN_CONT "%02d ", node_distance(i,j));
6435                printk(KERN_CONT "\n");
6436        }
6437        printk(KERN_WARNING "\n");
6438}
6439
6440bool find_numa_distance(int distance)
6441{
6442        int i;
6443
6444        if (distance == node_distance(0, 0))
6445                return true;
6446
6447        for (i = 0; i < sched_domains_numa_levels; i++) {
6448                if (sched_domains_numa_distance[i] == distance)
6449                        return true;
6450        }
6451
6452        return false;
6453}
6454
6455/*
6456 * A system can have three types of NUMA topology:
6457 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
6458 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
6459 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
6460 *
6461 * The difference between a glueless mesh topology and a backplane
6462 * topology lies in whether communication between not directly
6463 * connected nodes goes through intermediary nodes (where programs
6464 * could run), or through backplane controllers. This affects
6465 * placement of programs.
6466 *
6467 * The type of topology can be discerned with the following tests:
6468 * - If the maximum distance between any nodes is 1 hop, the system
6469 *   is directly connected.
6470 * - If for two nodes A and B, located N > 1 hops away from each other,
6471 *   there is an intermediary node C, which is < N hops away from both
6472 *   nodes A and B, the system is a glueless mesh.
6473 */
6474static void init_numa_topology_type(void)
6475{
6476        int a, b, c, n;
6477
6478        n = sched_max_numa_distance;
6479
6480        if (sched_domains_numa_levels <= 1) {
6481                sched_numa_topology_type = NUMA_DIRECT;
6482                return;
6483        }
6484
6485        for_each_online_node(a) {
6486                for_each_online_node(b) {
6487                        /* Find two nodes furthest removed from each other. */
6488                        if (node_distance(a, b) < n)
6489                                continue;
6490
6491                        /* Is there an intermediary node between a and b? */
6492                        for_each_online_node(c) {
6493                                if (node_distance(a, c) < n &&
6494                                    node_distance(b, c) < n) {
6495                                        sched_numa_topology_type =
6496                                                        NUMA_GLUELESS_MESH;
6497                                        return;
6498                                }
6499                        }
6500
6501                        sched_numa_topology_type = NUMA_BACKPLANE;
6502                        return;
6503                }
6504        }
6505}
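
/*
 * Illustration of the classification above with made-up node_distance()
 * tables (n == sched_max_numa_distance):
 *
 *	10 20		two nodes, a single hop apart	-> NUMA_DIRECT
 *	20 10		(only one distance level)
 *
 *	10 20 30	node 1 lies < n hops from both
 *	20 10 20	node 0 and node 2		-> NUMA_GLUELESS_MESH
 *	30 20 10
 *
 * If some furthest pair has no such intermediary node, the system is
 * classified as NUMA_BACKPLANE.
 */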
6506
6507static void sched_init_numa(void)
6508{
6509        int next_distance, curr_distance = node_distance(0, 0);
6510        struct sched_domain_topology_level *tl;
6511        int level = 0;
6512        int i, j, k;
6513
6514        sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6515        if (!sched_domains_numa_distance)
6516                return;
6517
6518        /*
6519         * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6520         * unique distances in the node_distance() table.
6521         *
6522         * Assumes node_distance(0,j) includes all distances in
6523         * node_distance(i,j) in order to avoid cubic time.
6524         */
6525        next_distance = curr_distance;
6526        for (i = 0; i < nr_node_ids; i++) {
6527                for (j = 0; j < nr_node_ids; j++) {
6528                        for (k = 0; k < nr_node_ids; k++) {
6529                                int distance = node_distance(i, k);
6530
6531                                if (distance > curr_distance &&
6532                                    (distance < next_distance ||
6533                                     next_distance == curr_distance))
6534                                        next_distance = distance;
6535
6536                                /*
6537                                 * While not a strong assumption, it would be nice to know
6538                                 * about cases where node A is connected to B but B is not
6539                                 * equally connected to A.
6540                                 */
6541                                if (sched_debug() && node_distance(k, i) != distance)
6542                                        sched_numa_warn("Node-distance not symmetric");
6543
6544                                if (sched_debug() && i && !find_numa_distance(distance))
6545                                        sched_numa_warn("Node-0 not representative");
6546                        }
6547                        if (next_distance != curr_distance) {
6548                                sched_domains_numa_distance[level++] = next_distance;
6549                                sched_domains_numa_levels = level;
6550                                curr_distance = next_distance;
6551                        } else break;
6552                }
6553
6554                /*
6555                 * When sched_debug() is enabled we verify the above assumption.
6556                 */
6557                if (!sched_debug())
6558                        break;
6559        }
6560
6561        if (!level)
6562                return;
6563
6564        /*
6565         * 'level' contains the number of unique distances, excluding the
6566         * identity distance node_distance(i,i).
6567         *
6568         * The sched_domains_numa_distance[] array includes the actual distance
6569         * numbers.
6570         */
6571
6572        /*
6573         * Here we must temporarily reset sched_domains_numa_levels to 0.
6574         * If an allocation for the sched_domains_numa_masks[][] array fails
6575         * below, the array will contain fewer than 'level' members, which
6576         * is dangerous for anything that iterates it based on
6577         * sched_domains_numa_levels in other functions.
6578         *
6579         * We set it back to 'level' at the end of this function.
6580         */
6581        sched_domains_numa_levels = 0;
6582
6583        sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6584        if (!sched_domains_numa_masks)
6585                return;
6586
6587        /*
6588         * Now for each level, construct a mask per node which contains all
6589         * cpus of nodes that are that many hops away from us.
6590         */
6591        for (i = 0; i < level; i++) {
6592                sched_domains_numa_masks[i] =
6593                        kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6594                if (!sched_domains_numa_masks[i])
6595                        return;
6596
6597                for (j = 0; j < nr_node_ids; j++) {
6598                        struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
6599                        if (!mask)
6600                                return;
6601
6602                        sched_domains_numa_masks[i][j] = mask;
6603
6604                        for_each_node(k) {
6605                                if (node_distance(j, k) > sched_domains_numa_distance[i])
6606                                        continue;
6607
6608                                cpumask_or(mask, mask, cpumask_of_node(k));
6609                        }
6610                }
6611        }
6612
6613        /* Compute default topology size */
6614        for (i = 0; sched_domain_topology[i].mask; i++);
6615
6616        tl = kzalloc((i + level + 1) *
6617                        sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6618        if (!tl)
6619                return;
6620
6621        /*
6622         * Copy the default topology bits..
6623         */
6624        for (i = 0; sched_domain_topology[i].mask; i++)
6625                tl[i] = sched_domain_topology[i];
6626
6627        /*
6628         * .. and append 'j' levels of NUMA goodness.
6629         */
6630        for (j = 0; j < level; i++, j++) {
6631                tl[i] = (struct sched_domain_topology_level){
6632                        .mask = sd_numa_mask,
6633                        .sd_flags = cpu_numa_flags,
6634                        .flags = SDTL_OVERLAP,
6635                        .numa_level = j,
6636                        SD_INIT_NAME(NUMA)
6637                };
6638        }
6639
6640        sched_domain_topology = tl;
6641
6642        sched_domains_numa_levels = level;
6643        sched_max_numa_distance = sched_domains_numa_distance[level - 1];
6644
6645        init_numa_topology_type();
6646}
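
/*
 * Purely illustrative sketch of the deduplicating selection sort used
 * by sched_init_numa() above, run over a made-up 3-node distance table
 * instead of node_distance(). The function name and the table are
 * invented for the example; only the scan logic mirrors the real code.
 */
static inline int example_unique_distances(int *out)
{
	static const int dist[3][3] = {
		{ 10, 20, 30 },
		{ 20, 10, 30 },
		{ 30, 30, 10 },
	};
	int curr = dist[0][0], next, level = 0;
	int i, j;

	for (;;) {
		/* Find the smallest distance strictly greater than 'curr'. */
		next = curr;
		for (i = 0; i < 3; i++)
			for (j = 0; j < 3; j++)
				if (dist[i][j] > curr &&
				    (dist[i][j] < next || next == curr))
					next = dist[i][j];
		if (next == curr)
			break;			/* no new distance found */
		out[level++] = next;		/* records 20, then 30 */
		curr = next;
	}
	return level;				/* 2 non-identity distances */
}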
6647
6648static void sched_domains_numa_masks_set(int cpu)
6649{
6650        int i, j;
6651        int node = cpu_to_node(cpu);
6652
6653        for (i = 0; i < sched_domains_numa_levels; i++) {
6654                for (j = 0; j < nr_node_ids; j++) {
6655                        if (node_distance(j, node) <= sched_domains_numa_distance[i])
6656                                cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
6657                }
6658        }
6659}
6660
6661static void sched_domains_numa_masks_clear(int cpu)
6662{
6663        int i, j;
6664        for (i = 0; i < sched_domains_numa_levels; i++) {
6665                for (j = 0; j < nr_node_ids; j++)
6666                        cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
6667        }
6668}
6669
6670/*
6671 * Update the sched_domains_numa_masks[level][node] array when cpus
6672 * come online or go offline.
6673 */
6674static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6675                                           unsigned long action,
6676                                           void *hcpu)
6677{
6678        int cpu = (long)hcpu;
6679
6680        switch (action & ~CPU_TASKS_FROZEN) {
6681        case CPU_ONLINE:
6682                sched_domains_numa_masks_set(cpu);
6683                break;
6684
6685        case CPU_DEAD:
6686                sched_domains_numa_masks_clear(cpu);
6687                break;
6688
6689        default:
6690                return NOTIFY_DONE;
6691        }
6692
6693        return NOTIFY_OK;
6694}
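
/*
 * Illustrative decoding of the action mask above, with a concrete
 * case: during a suspend cycle the notifier receives CPU_DEAD_FROZEN,
 * i.e. CPU_DEAD | CPU_TASKS_FROZEN. Masking with ~CPU_TASKS_FROZEN
 * folds that onto the plain CPU_DEAD case, so the NUMA masks are
 * updated the same way whether the hotplug event comes from the
 * suspend/resume path or from a regular online/offline operation.
 */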
6695#else
6696static inline void sched_init_numa(void)
6697{
6698}
6699
6700static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6701                                           unsigned long action,
6702                                           void *hcpu)
6703{
6704        return 0;
6705}
6706#endif /* CONFIG_NUMA */
6707
6708static int __sdt_alloc(const struct cpumask *cpu_map)
6709{
6710        struct sched_domain_topology_level *tl;
6711        int j;
6712
6713        for_each_sd_topology(tl) {
6714                struct sd_data *sdd = &tl->data;
6715
6716                sdd->sd = alloc_percpu(struct sched_domain *);
6717                if (!sdd->sd)
6718                        return -ENOMEM;
6719
6720                sdd->sg = alloc_percpu(struct sched_group *);
6721                if (!sdd->sg)
6722                        return -ENOMEM;
6723
6724                sdd->sgc = alloc_percpu(struct sched_group_capacity *);
6725                if (!sdd->sgc)
6726                        return -ENOMEM;
6727
6728                for_each_cpu(j, cpu_map) {
6729                        struct sched_domain *sd;
6730                        struct sched_group *sg;
6731                        struct sched_group_capacity *sgc;
6732
6733                        sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6734                                        GFP_KERNEL, cpu_to_node(j));
6735                        if (!sd)
6736                                return -ENOMEM;
6737
6738                        *per_cpu_ptr(sdd->sd, j) = sd;
6739
6740                        sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6741                                        GFP_KERNEL, cpu_to_node(j));
6742                        if (!sg)
6743                                return -ENOMEM;
6744
6745                        sg->next = sg;
6746
6747                        *per_cpu_ptr(sdd->sg, j) = sg;
6748
6749                        sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
6750                                        GFP_KERNEL, cpu_to_node(j));
6751                        if (!sgc)
6752                                return -ENOMEM;
6753
6754                        *per_cpu_ptr(sdd->sgc, j) = sgc;
6755                }
6756        }
6757
6758        return 0;
6759}
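
/*
 * A minimal, hypothetical sketch of the alloc_percpu()/per_cpu_ptr()
 * pattern that __sdt_alloc() above builds on: allocate one slot per
 * cpu, then fill each slot with a node-local value. The function name
 * and the seed value are invented for illustration only.
 */
static inline int __percpu *example_percpu_alloc(const struct cpumask *cpu_map)
{
	int __percpu *val;
	int j;

	val = alloc_percpu(int);
	if (!val)
		return NULL;

	for_each_cpu(j, cpu_map)
		*per_cpu_ptr(val, j) = cpu_to_node(j);	/* node-local seed */

	return val;	/* caller releases with free_percpu() */
}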
6760
6761static void __sdt_free(const struct cpumask *cpu_map)
6762{
6763        struct sched_domain_topology_level *tl;
6764        int j;
6765
6766        for_each_sd_topology(tl) {
6767                struct sd_data *sdd = &tl->data;
6768
6769                for_each_cpu(j, cpu_map) {
6770                        struct sched_domain *sd;
6771
6772                        if (sdd->sd) {
6773                                sd = *per_cpu_ptr(sdd->sd, j);
6774                                if (sd && (sd->flags & SD_OVERLAP))
6775                                        free_sched_groups(sd->groups, 0);
6776                                kfree(*per_cpu_ptr(sdd->sd, j));
6777                        }
6778
6779                        if (sdd->sg)
6780                                kfree(*per_cpu_ptr(sdd->sg, j));
6781                        if (sdd->sgc)
6782                                kfree(*per_cpu_ptr(sdd->sgc, j));
6783                }
6784                free_percpu(sdd->sd);
6785                sdd->sd = NULL;
6786                free_percpu(sdd->sg);
6787                sdd->sg = NULL;
6788                free_percpu(sdd->sgc);
6789                sdd->sgc = NULL;
6790        }
6791}
6792
6793struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6794                const struct cpumask *cpu_map, struct sched_domain_attr *attr,
6795                struct sched_domain *child, int cpu)
6796{
6797        struct sched_domain *sd = sd_init(tl, cpu);
6798        if (!sd)
6799                return child;
6800
6801        cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
6802        if (child) {
6803                sd->level = child->level + 1;
6804                sched_domain_level_max = max(sched_domain_level_max, sd->level);
6805                child->parent = sd;
6806                sd->child = child;
6807
6808                if (!cpumask_subset(sched_domain_span(child),
6809                                    sched_domain_span(sd))) {
6810                        pr_err("BUG: arch topology borken\n");
6811#ifdef CONFIG_SCHED_DEBUG
6812                        pr_err("     the %s domain not a subset of the %s domain\n",
6813                                        child->name, sd->name);
6814#endif
6815                        /* Fixup, ensure @sd has at least @child cpus. */
6816                        cpumask_or(sched_domain_span(sd),
6817                                   sched_domain_span(sd),
6818                                   sched_domain_span(child));
6819                }
6820
6821        }
6822        set_domain_attribute(sd, attr);
6823
6824        return sd;
6825}
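
/*
 * Worked example (hypothetical numbers): if a broken firmware table
 * makes an MC-level child span cpus 0-3 while its DIE-level parent
 * spans only cpus 0-1, the subset check above fires and the
 * cpumask_or() fixup widens the parent's span to cpus 0-3, keeping
 * the domain hierarchy well formed.
 */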
6826
6827/*
6828 * Build sched domains for a given set of cpus and attach the sched domains
6829 * to the individual cpus
6830 */
6831static int build_sched_domains(const struct cpumask *cpu_map,
6832                               struct sched_domain_attr *attr)
6833{
6834        enum s_alloc alloc_state;
6835        struct sched_domain *sd;
6836        struct s_data d;
6837        int i, ret = -ENOMEM;
6838
6839        alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6840        if (alloc_state != sa_rootdomain)
6841                goto error;
6842
6843        /* Set up domains for cpus specified by the cpu_map. */
6844        for_each_cpu(i, cpu_map) {
6845                struct sched_domain_topology_level *tl;
6846
6847                sd = NULL;
6848                for_each_sd_topology(tl) {
6849                        sd = build_sched_domain(tl, cpu_map, attr, sd, i);
6850                        if (tl == sched_domain_topology)
6851                                *per_cpu_ptr(d.sd, i) = sd;
6852                        if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6853                                sd->flags |= SD_OVERLAP;
6854                        if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6855                                break;
6856                }
6857        }
6858
6859        /* Build the groups for the domains */
6860        for_each_cpu(i, cpu_map) {
6861                for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6862                        sd->span_weight = cpumask_weight(sched_domain_span(sd));
6863                        if (sd->flags & SD_OVERLAP) {
6864                                if (build_overlap_sched_groups(sd, i))
6865                                        goto error;
6866                        } else {
6867                                if (build_sched_groups(sd, i))
6868                                        goto error;
6869                        }
6870                }
6871        }
6872
6873        /* Calculate CPU capacity for physical packages and nodes */
6874        for (i = nr_cpumask_bits-1; i >= 0; i--) {
6875                if (!cpumask_test_cpu(i, cpu_map))
6876                        continue;
6877
6878                for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6879                        claim_allocations(i, sd);
6880                        init_sched_groups_capacity(i, sd);
6881                }
6882        }
6883
6884        /* Attach the domains */
6885        rcu_read_lock();
6886        for_each_cpu(i, cpu_map) {
6887                sd = *per_cpu_ptr(d.sd, i);
6888                cpu_attach_domain(sd, d.rd, i);
6889        }
6890        rcu_read_unlock();
6891
6892        ret = 0;
6893error:
6894        __free_domain_allocs(&d, alloc_state, cpu_map);
6895        return ret;
6896}
6897
6898static cpumask_var_t *doms_cur; /* current sched domains */
6899static int ndoms_cur;           /* number of sched domains in 'doms_cur' */
6900static struct sched_domain_attr *dattr_cur;
6901                                /* attributes of custom domains in 'doms_cur' */
6902
6903/*
6904 * Special case: If a kmalloc of a doms_cur partition (array of
6905 * cpumask) fails, then fall back to a single sched domain,
6906 * as determined by the single cpumask fallback_doms.
6907 */
6908static cpumask_var_t fallback_doms;
6909
6910/*
6911 * arch_update_cpu_topology lets virtualized architectures update the
6912 * cpu core maps. It is supposed to return 1 if the topology changed
6913 * or 0 if it stayed the same.
6914 */
6915int __weak arch_update_cpu_topology(void)
6916{
6917        return 0;
6918}
6919
6920cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6921{
6922        int i;
6923        cpumask_var_t *doms;
6924
6925        doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6926        if (!doms)
6927                return NULL;
6928        for (i = 0; i < ndoms; i++) {
6929                if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6930                        free_sched_domains(doms, i);
6931                        return NULL;
6932                }
6933        }
6934        return doms;
6935}
6936
6937void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6938{
6939        unsigned int i;
6940        for (i = 0; i < ndoms; i++)
6941                free_cpumask_var(doms[i]);
6942        kfree(doms);
6943}
6944
6945/*
6946 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
6947 * For now this just excludes isolated cpus, but could be used to
6948 * exclude other special cases in the future.
6949 */
6950static int init_sched_domains(const struct cpumask *cpu_map)
6951{
6952        int err;
6953
6954        arch_update_cpu_topology();
6955        ndoms_cur = 1;
6956        doms_cur = alloc_sched_domains(ndoms_cur);
6957        if (!doms_cur)
6958                doms_cur = &fallback_doms;
6959        cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
6960        err = build_sched_domains(doms_cur[0], NULL);
6961        register_sched_domain_sysctl();
6962
6963        return err;
6964}
6965
6966/*
6967 * Detach sched domains from a group of cpus specified in cpu_map.
6968 * These cpus will now be attached to the NULL domain.
6969 */
6970static void detach_destroy_domains(const struct cpumask *cpu_map)
6971{
6972        int i;
6973
6974        rcu_read_lock();
6975        for_each_cpu(i, cpu_map)
6976                cpu_attach_domain(NULL, &def_root_domain, i);
6977        rcu_read_unlock();
6978}
6979
6980/* handle null as "default" */
6981static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6982                        struct sched_domain_attr *new, int idx_new)
6983{
6984        struct sched_domain_attr tmp;
6985
6986        /* fast path */
6987        if (!new && !cur)
6988                return 1;
6989
6990        tmp = SD_ATTR_INIT;
6991        return !memcmp(cur ? (cur + idx_cur) : &tmp,
6992                        new ? (new + idx_new) : &tmp,
6993                        sizeof(struct sched_domain_attr));
6994}
6995
6996/*
6997 * Partition sched domains as specified by the 'ndoms_new'
6998 * cpumasks in the array doms_new[] of cpumasks. This compares
6999 * doms_new[] to the current sched domain partitioning, doms_cur[].
7000 * It destroys each deleted domain and builds each new domain.
7001 *
7002 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
7003 * The masks must not intersect (must not overlap); we set up one
7004 * sched domain for each mask. CPUs not in any of the cpumasks will
7005 * not be load balanced. If the same cpumask appears both in the
7006 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7007 * it as it is.
7008 *
7009 * The passed in 'doms_new' should be allocated using
7010 * alloc_sched_domains.  This routine takes ownership of it and will
7011 * free_sched_domains it when done with it. If the caller failed the
7012 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7013 * and partition_sched_domains() will fall back to the single partition
7014 * 'fallback_doms'; this also forces the domains to be rebuilt.
7015 *
7016 * If doms_new == NULL it will be replaced with cpu_online_mask.
7017 * ndoms_new == 0 is a special case for destroying existing domains,
7018 * and it will not create the default domain.
7019 *
7020 * Call with hotplug lock held
7021 */
7022void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
7023                             struct sched_domain_attr *dattr_new)
7024{
7025        int i, j, n;
7026        int new_topology;
7027
7028        mutex_lock(&sched_domains_mutex);
7029
7030        /* always unregister in case we don't destroy any domains */
7031        unregister_sched_domain_sysctl();
7032
7033        /* Let architecture update cpu core mappings. */
7034        new_topology = arch_update_cpu_topology();
7035
7036        n = doms_new ? ndoms_new : 0;
7037
7038        /* Destroy deleted domains */
7039        for (i = 0; i < ndoms_cur; i++) {
7040                for (j = 0; j < n && !new_topology; j++) {
7041                        if (cpumask_equal(doms_cur[i], doms_new[j])
7042                            && dattrs_equal(dattr_cur, i, dattr_new, j))
7043                                goto match1;
7044                }
7045                /* no match - a current sched domain not in new doms_new[] */
7046                detach_destroy_domains(doms_cur[i]);
7047match1:
7048                ;
7049        }
7050
7051        n = ndoms_cur;
7052        if (doms_new == NULL) {
7053                n = 0;
7054                doms_new = &fallback_doms;
7055                cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
7056                WARN_ON_ONCE(dattr_new);
7057        }
7058
7059        /* Build new domains */
7060        for (i = 0; i < ndoms_new; i++) {
7061                for (j = 0; j < n && !new_topology; j++) {
7062                        if (cpumask_equal(doms_new[i], doms_cur[j])
7063                            && dattrs_equal(dattr_new, i, dattr_cur, j))
7064                                goto match2;
7065                }
7066                /* no match - add a new doms_new */
7067                build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
7068match2:
7069                ;
7070        }
7071
7072        /* Remember the new sched domains */
7073        if (doms_cur != &fallback_doms)
7074                free_sched_domains(doms_cur, ndoms_cur);
7075        kfree(dattr_cur);       /* kfree(NULL) is safe */
7076        doms_cur = doms_new;
7077        dattr_cur = dattr_new;
7078        ndoms_cur = ndoms_new;
7079
7080        register_sched_domain_sysctl();
7081
7082        mutex_unlock(&sched_domains_mutex);
7083}
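
/*
 * Illustrative usage sketch with invented cpu numbers: a caller that
 * wants two partitions pairs alloc_sched_domains() with
 * partition_sched_domains() like this, holding the hotplug lock as
 * required above. Ownership of 'doms' passes to the callee.
 */
static inline void example_two_partitions(void)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms) {
		/* Allocation failed: force the single fallback partition. */
		partition_sched_domains(1, NULL, NULL);
		return;
	}

	cpumask_copy(doms[0], cpumask_of(0));			/* cpu 0 alone */
	cpumask_andnot(doms[1], cpu_active_mask, doms[0]);	/* the rest */

	partition_sched_domains(2, doms, NULL);	/* takes ownership of doms */
}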
7084
7085static int num_cpus_frozen;     /* used to mark begin/end of suspend/resume */
7086
7087/*
7088 * Update cpusets according to cpu_active mask.  If cpusets are
7089 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7090 * around partition_sched_domains().
7091 *
7092 * If we come here as part of a suspend/resume, don't touch cpusets because we
7093 * want to restore it back to its original state upon resume anyway.
7094 */
7095static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7096                             void *hcpu)
7097{
7098        switch (action) {
7099        case CPU_ONLINE_FROZEN:
7100        case CPU_DOWN_FAILED_FROZEN:
7101
7102                /*
7103                 * num_cpus_frozen tracks how many CPUs are involved in the
7104                 * suspend/resume sequence. As long as this is not the last online
7105                 * operation in the resume sequence, just build a single sched
7106                 * domain, ignoring cpusets.
7107                 */
7108                num_cpus_frozen--;
7109                if (likely(num_cpus_frozen)) {
7110                        partition_sched_domains(1, NULL, NULL);
7111                        break;
7112                }
7113
7114                /*
7115                 * This is the last CPU online operation. So fall through and
7116                 * restore the original sched domains by considering the
7117                 * cpuset configurations.
7118                 */
7119
7120        case CPU_ONLINE:
7121                cpuset_update_active_cpus(true);
7122                break;
7123        default:
7124                return NOTIFY_DONE;
7125        }
7126        return NOTIFY_OK;
7127}
7128
7129static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7130                               void *hcpu)
7131{
7132        unsigned long flags;
7133        long cpu = (long)hcpu;
7134        struct dl_bw *dl_b;
7135        bool overflow;
7136        int cpus;
7137
7138        switch (action) {
7139        case CPU_DOWN_PREPARE:
7140                rcu_read_lock_sched();
7141                dl_b = dl_bw_of(cpu);
7142
7143                raw_spin_lock_irqsave(&dl_b->lock, flags);
7144                cpus = dl_bw_cpus(cpu);
7145                overflow = __dl_overflow(dl_b, cpus, 0, 0);
7146                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7147
7148                rcu_read_unlock_sched();
7149
7150                if (overflow)
7151                        return notifier_from_errno(-EBUSY);
7152                cpuset_update_active_cpus(false);
7153                break;
7154        case CPU_DOWN_PREPARE_FROZEN:
7155                num_cpus_frozen++;
7156                partition_sched_domains(1, NULL, NULL);
7157                break;
7158        default:
7159                return NOTIFY_DONE;
7160        }
7161        return NOTIFY_OK;
7162}
7163
7164void __init sched_init_smp(void)
7165{
7166        cpumask_var_t non_isolated_cpus;
7167
7168        alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
7169        alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
7170
7171        sched_init_numa();
7172
7173        /*
7174         * There's no userspace yet to cause hotplug operations; hence all the
7175         * cpu masks are stable and all blatant races in the below code cannot
7176         * happen.
7177         */
7178        mutex_lock(&sched_domains_mutex);
7179        init_sched_domains(cpu_active_mask);
7180        cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7181        if (cpumask_empty(non_isolated_cpus))
7182                cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
7183        mutex_unlock(&sched_domains_mutex);
7184
7185        hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
7186        hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7187        hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
7188
7189        init_hrtick();
7190
7191        /* Move init over to a non-isolated CPU */
7192        if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
7193                BUG();
7194        sched_init_granularity();
7195        free_cpumask_var(non_isolated_cpus);
7196
7197        init_sched_rt_class();
7198        init_sched_dl_class();
7199}
7200#else
7201void __init sched_init_smp(void)
7202{
7203        sched_init_granularity();
7204}
7205#endif /* CONFIG_SMP */
7206
7207int in_sched_functions(unsigned long addr)
7208{
7209        return in_lock_functions(addr) ||
7210                (addr >= (unsigned long)__sched_text_start
7211                && addr < (unsigned long)__sched_text_end);
7212}
7213
7214#ifdef CONFIG_CGROUP_SCHED
7215/*
7216 * Default task group.
7217 * Every task in the system belongs to this group at bootup.
7218 */
7219struct task_group root_task_group;
7220LIST_HEAD(task_groups);
7221
7222/* Cacheline aligned slab cache for task_group */
7223static struct kmem_cache *task_group_cache __read_mostly;
7224#endif
7225
7226DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
7227
7228void __init sched_init(void)
7229{
7230        int i, j;
7231        unsigned long alloc_size = 0, ptr;
7232
7233#ifdef CONFIG_FAIR_GROUP_SCHED
7234        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7235#endif
7236#ifdef CONFIG_RT_GROUP_SCHED
7237        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7238#endif
7239        if (alloc_size) {
7240                ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
7241
7242#ifdef CONFIG_FAIR_GROUP_SCHED
7243                root_task_group.se = (struct sched_entity **)ptr;
7244                ptr += nr_cpu_ids * sizeof(void **);
7245
7246                root_task_group.cfs_rq = (struct cfs_rq **)ptr;
7247                ptr += nr_cpu_ids * sizeof(void **);
7248
7249#endif /* CONFIG_FAIR_GROUP_SCHED */
7250#ifdef CONFIG_RT_GROUP_SCHED
7251                root_task_group.rt_se = (struct sched_rt_entity **)ptr;
7252                ptr += nr_cpu_ids * sizeof(void **);
7253
7254                root_task_group.rt_rq = (struct rt_rq **)ptr;
7255                ptr += nr_cpu_ids * sizeof(void **);
7256
7257#endif /* CONFIG_RT_GROUP_SCHED */
7258        }
7259#ifdef CONFIG_CPUMASK_OFFSTACK
7260        for_each_possible_cpu(i) {
7261                per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
7262                        cpumask_size(), GFP_KERNEL, cpu_to_node(i));
7263        }
7264#endif /* CONFIG_CPUMASK_OFFSTACK */
7265
7266        init_rt_bandwidth(&def_rt_bandwidth,
7267                        global_rt_period(), global_rt_runtime());
7268        init_dl_bandwidth(&def_dl_bandwidth,
7269                        global_rt_period(), global_rt_runtime());
7270
7271#ifdef CONFIG_SMP
7272        init_defrootdomain();
7273#endif
7274
7275#ifdef CONFIG_RT_GROUP_SCHED
7276        init_rt_bandwidth(&root_task_group.rt_bandwidth,
7277                        global_rt_period(), global_rt_runtime());
7278#endif /* CONFIG_RT_GROUP_SCHED */
7279
7280#ifdef CONFIG_CGROUP_SCHED
7281        task_group_cache = KMEM_CACHE(task_group, 0);
7282
7283        list_add(&root_task_group.list, &task_groups);
7284        INIT_LIST_HEAD(&root_task_group.children);
7285        INIT_LIST_HEAD(&root_task_group.siblings);
7286        autogroup_init(&init_task);
7287#endif /* CONFIG_CGROUP_SCHED */
7288
7289        for_each_possible_cpu(i) {
7290                struct rq *rq;
7291
7292                rq = cpu_rq(i);
7293                raw_spin_lock_init(&rq->lock);
7294                rq->nr_running = 0;
7295                rq->calc_load_active = 0;
7296                rq->calc_load_update = jiffies + LOAD_FREQ;
7297                init_cfs_rq(&rq->cfs);
7298                init_rt_rq(&rq->rt);
7299                init_dl_rq(&rq->dl);
7300#ifdef CONFIG_FAIR_GROUP_SCHED
7301                root_task_group.shares = ROOT_TASK_GROUP_LOAD;
7302                INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
7303                /*
7304                 * How much cpu bandwidth does root_task_group get?
7305                 *
7306                 * In case of task-groups formed through the cgroup filesystem, it
7307                 * gets 100% of the cpu resources in the system. This overall
7308                 * system cpu resource is divided among the tasks of
7309                 * root_task_group and its child task-groups in a fair manner,
7310                 * based on each entity's (task or task-group's) weight
7311                 * (se->load.weight).
7312                 *
7313                 * In other words, if root_task_group has 10 tasks of weight
7314                 * 1024 and two child groups A0 and A1 (of weight 1024 each),
7315                 * then A0's share of the cpu resource is:
7316                 *
7317                 *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
7318                 *
7319                 * We achieve this by letting root_task_group's tasks sit
7320                 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
7321                 */
7322                init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
7323                init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
7324#endif /* CONFIG_FAIR_GROUP_SCHED */
7325
7326                rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
7327#ifdef CONFIG_RT_GROUP_SCHED
7328                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
7329#endif
7330
7331                for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7332                        rq->cpu_load[j] = 0;
7333
7334                rq->last_load_update_tick = jiffies;
7335
7336#ifdef CONFIG_SMP
7337                rq->sd = NULL;
7338                rq->rd = NULL;
7339                rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
7340                rq->balance_callback = NULL;
7341                rq->active_balance = 0;
7342                rq->next_balance = jiffies;
7343                rq->push_cpu = 0;
7344                rq->cpu = i;
7345                rq->online = 0;
7346                rq->idle_stamp = 0;
7347                rq->avg_idle = 2*sysctl_sched_migration_cost;
7348                rq->max_idle_balance_cost = sysctl_sched_migration_cost;
7349
7350                INIT_LIST_HEAD(&rq->cfs_tasks);
7351
7352                rq_attach_root(rq, &def_root_domain);
7353#ifdef CONFIG_NO_HZ_COMMON
7354                rq->nohz_flags = 0;
7355#endif
7356#ifdef CONFIG_NO_HZ_FULL
7357                rq->last_sched_tick = 0;
7358#endif
7359#endif
7360                init_rq_hrtick(rq);
7361                atomic_set(&rq->nr_iowait, 0);
7362        }
7363
7364        set_load_weight(&init_task);
7365
7366#ifdef CONFIG_PREEMPT_NOTIFIERS
7367        INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7368#endif
7369
7370        /*
7371         * The boot idle thread does lazy MMU switching as well:
7372         */
7373        atomic_inc(&init_mm.mm_count);
7374        enter_lazy_tlb(&init_mm, current);
7375
7376        /*
7377         * During early bootup we pretend to be a normal task:
7378         */
7379        current->sched_class = &fair_sched_class;
7380
7381        /*
7382         * Make us the idle thread. Technically, schedule() should not be
7383         * called from this thread; however, somewhere below it might be.
7384         * Because we are the idle thread, we just pick up running again
7385         * when this runqueue becomes "idle".
7386         */
7387        init_idle(current, smp_processor_id());
7388
7389        calc_load_update = jiffies + LOAD_FREQ;
7390
7391#ifdef CONFIG_SMP
7392        zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
7393        /* May be allocated at isolcpus cmdline parse time */
7394        if (cpu_isolated_map == NULL)
7395                zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
7396        idle_thread_set_boot_cpu();
7397        set_cpu_rq_start_time();
7398#endif
7399        init_sched_fair_class();
7400
7401        scheduler_running = 1;
7402}
7403
7404#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
7405static inline int preempt_count_equals(int preempt_offset)
7406{
7407        int nested = preempt_count() + rcu_preempt_depth();
7408
7409        return (nested == preempt_offset);
7410}
7411
7412void __might_sleep(const char *file, int line, int preempt_offset)
7413{
7414        /*
7415         * Blocking primitives will set (and therefore destroy) current->state.
7416         * Since we will exit with TASK_RUNNING, make sure we enter with it;
7417         * otherwise we will destroy state.
7418         */
7419        WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
7420                        "do not call blocking ops when !TASK_RUNNING; "
7421                        "state=%lx set at [<%p>] %pS\n",
7422                        current->state,
7423                        (void *)current->task_state_change,
7424                        (void *)current->task_state_change);
7425
7426        ___might_sleep(file, line, preempt_offset);
7427}
7428EXPORT_SYMBOL(__might_sleep);
7429
7430void ___might_sleep(const char *file, int line, int preempt_offset)
7431{
7432        static unsigned long prev_jiffy;        /* ratelimiting */
7433
7434        rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
7435        if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
7436             !is_idle_task(current)) ||
7437            system_state != SYSTEM_RUNNING || oops_in_progress)
7438                return;
7439        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7440                return;
7441        prev_jiffy = jiffies;
7442
7443        printk(KERN_ERR
7444                "BUG: sleeping function called from invalid context at %s:%d\n",
7445                        file, line);
7446        printk(KERN_ERR
7447                "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7448                        in_atomic(), irqs_disabled(),
7449                        current->pid, current->comm);
7450
7451        if (task_stack_end_corrupted(current))
7452                printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
7453
7454        debug_show_held_locks(current);
7455        if (irqs_disabled())
7456                print_irqtrace_events(current);
7457#ifdef CONFIG_DEBUG_PREEMPT
7458        if (!preempt_count_equals(preempt_offset)) {
7459                pr_err("Preemption disabled at:");
7460                print_ip_sym(current->preempt_disable_ip);
7461                pr_cont("\n");
7462        }
7463#endif
7464        dump_stack();
7465}
7466EXPORT_SYMBOL(___might_sleep);
7467#endif
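
/*
 * A classic (hypothetical) way to trigger the ___might_sleep() splat
 * above is to call a sleeping allocation in atomic context:
 *
 *	spin_lock(&some_lock);
 *	p = kzalloc(size, GFP_KERNEL);	<-- may sleep, warns here
 *	spin_unlock(&some_lock);
 *
 * In such a context GFP_ATOMIC would be the non-sleeping choice.
 */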
7468
7469#ifdef CONFIG_MAGIC_SYSRQ
7470void normalize_rt_tasks(void)
7471{
7472        struct task_struct *g, *p;
7473        struct sched_attr attr = {
7474                .sched_policy = SCHED_NORMAL,
7475        };
7476
7477        read_lock(&tasklist_lock);
7478        for_each_process_thread(g, p) {
7479                /*
7480                 * Only normalize user tasks:
7481                 */
7482                if (p->flags & PF_KTHREAD)
7483                        continue;
7484
7485                p->se.exec_start                = 0;
7486#ifdef CONFIG_SCHEDSTATS
7487                p->se.statistics.wait_start     = 0;
7488                p->se.statistics.sleep_start    = 0;
7489                p->se.statistics.block_start    = 0;
7490#endif
7491
7492                if (!dl_task(p) && !rt_task(p)) {
7493                        /*
7494                         * Renice negative nice level userspace
7495                         * tasks back to 0:
7496                         */
7497                        if (task_nice(p) < 0)
7498                                set_user_nice(p, 0);
7499                        continue;
7500                }
7501
7502                __sched_setscheduler(p, &attr, false, false);
7503        }
7504        read_unlock(&tasklist_lock);
7505}
7506
7507#endif /* CONFIG_MAGIC_SYSRQ */
7508
7509#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
7510/*
7511 * These functions are only useful for the IA64 MCA handling, or kdb.
7512 *
7513 * They can only be called when the whole system has been
7514 * stopped - every CPU needs to be quiescent, and no scheduling
7515 * activity can take place. Using them for anything else would
7516 * be a serious bug, and as a result, they aren't even visible
7517 * under any other configuration.
7518 */
7519
7520/**
7521 * curr_task - return the current task for a given cpu.
7522 * @cpu: the processor in question.
7523 *
7524 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7525 *
7526 * Return: The current task for @cpu.
7527 */
7528struct task_struct *curr_task(int cpu)
7529{
7530        return cpu_curr(cpu);
7531}
7532
7533#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7534
7535#ifdef CONFIG_IA64
7536/**
7537 * set_curr_task - set the current task for a given cpu.
7538 * @cpu: the processor in question.
7539 * @p: the task pointer to set.
7540 *
7541 * Description: This function must only be used when non-maskable interrupts
7542 * are serviced on a separate stack. It allows the architecture to switch the
7543 * notion of the current task on a cpu in a non-blocking manner. This function
7544 * must be called with all CPUs synchronized and interrupts disabled; the
7545 * caller must save the original value of the current task (see
7546 * curr_task() above) and restore that value before reenabling interrupts and
7547 * re-starting the system.
7548 *
7549 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7550 */
7551void set_curr_task(int cpu, struct task_struct *p)
7552{
7553        cpu_curr(cpu) = p;
7554}
7555
7556#endif
7557
7558#ifdef CONFIG_CGROUP_SCHED
7559/* task_group_lock serializes the addition/removal of task groups */
7560static DEFINE_SPINLOCK(task_group_lock);
7561
7562static void sched_free_group(struct task_group *tg)
7563{
7564        free_fair_sched_group(tg);
7565        free_rt_sched_group(tg);
7566        autogroup_free(tg);
7567        kmem_cache_free(task_group_cache, tg);
7568}
7569
7570/* allocate runqueue etc for a new task group */
7571struct task_group *sched_create_group(struct task_group *parent)
7572{
7573        struct task_group *tg;
7574
7575        tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
7576        if (!tg)
7577                return ERR_PTR(-ENOMEM);
7578
7579        if (!alloc_fair_sched_group(tg, parent))
7580                goto err;
7581
7582        if (!alloc_rt_sched_group(tg, parent))
7583                goto err;
7584
7585        return tg;
7586
7587err:
7588        sched_free_group(tg);
7589        return ERR_PTR(-ENOMEM);
7590}
7591
7592void sched_online_group(struct task_group *tg, struct task_group *parent)
7593{
7594        unsigned long flags;
7595
7596        spin_lock_irqsave(&task_group_lock, flags);
7597        list_add_rcu(&tg->list, &task_groups);
7598
7599        WARN_ON(!parent); /* root should already exist */
7600
7601        tg->parent = parent;
7602        INIT_LIST_HEAD(&tg->children);
7603        list_add_rcu(&tg->siblings, &parent->children);
7604        spin_unlock_irqrestore(&task_group_lock, flags);
7605}
7606
7607/* rcu callback to free various structures associated with a task group */
7608static void sched_free_group_rcu(struct rcu_head *rhp)
7609{
7610        /* now it should be safe to free those cfs_rqs */
7611        sched_free_group(container_of(rhp, struct task_group, rcu));
7612}
7613
7614void sched_destroy_group(struct task_group *tg)
7615{
7616        /* wait for possible concurrent references to cfs_rqs to complete */
7617        call_rcu(&tg->rcu, sched_free_group_rcu);
7618}
7619
7620void sched_offline_group(struct task_group *tg)
7621{
7622        unsigned long flags;
7623
7624        /* end participation in shares distribution */
7625        unregister_fair_sched_group(tg);
7626
7627        spin_lock_irqsave(&task_group_lock, flags);
7628        list_del_rcu(&tg->list);
7629        list_del_rcu(&tg->siblings);
7630        spin_unlock_irqrestore(&task_group_lock, flags);
7631}
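
/*
 * Hypothetical lifecycle sketch tying the helpers above together; the
 * cgroup code is the real user, but a caller would do roughly this:
 */
static inline int example_group_lifecycle(void)
{
	struct task_group *tg = sched_create_group(&root_task_group);

	if (IS_ERR(tg))
		return PTR_ERR(tg);

	sched_online_group(tg, &root_task_group);	/* make it visible */

	/* ... move tasks in via sched_move_task(), run the workload ... */

	sched_offline_group(tg);	/* end shares participation */
	sched_destroy_group(tg);	/* freed after an RCU grace period */
	return 0;
}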
7632
7633/*
7634 * Change task's runqueue when it moves between groups. The caller should
7635 * have put the task in its new group by now; this function just updates
7636 * tsk->se.cfs_rq and tsk->se.parent to reflect the new group.
7637 */
7638void sched_move_task(struct task_struct *tsk)
7639{
7640        struct task_group *tg;
7641        int queued, running;
7642        unsigned long flags;
7643        struct rq *rq;
7644
7645        rq = task_rq_lock(tsk, &flags);
7646
7647        running = task_current(rq, tsk);
7648        queued = task_on_rq_queued(tsk);
7649
7650        if (queued)
7651                dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
7652        if (unlikely(running))
7653                put_prev_task(rq, tsk);
7654
7655        /*
7656         * All callers are synchronized by task_rq_lock(); we do not use RCU,
7657         * which would be pointless here. Thus, we pass "true" to task_css_check()
7658         * to prevent lockdep warnings.
7659         */
7660        tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
7661                          struct task_group, css);
7662        tg = autogroup_task_group(tsk, tg);
7663        tsk->sched_task_group = tg;
7664
7665#ifdef CONFIG_FAIR_GROUP_SCHED
7666        if (tsk->sched_class->task_move_group)
7667                tsk->sched_class->task_move_group(tsk);
7668        else
7669#endif
7670                set_task_rq(tsk, task_cpu(tsk));
7671
7672        if (unlikely(running))
7673                tsk->sched_class->set_curr_task(rq);
7674        if (queued)
7675                enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
7676
7677        task_rq_unlock(rq, tsk, &flags);
7678}
7679#endif /* CONFIG_CGROUP_SCHED */
7680
7681#ifdef CONFIG_RT_GROUP_SCHED
7682/*
7683 * Ensure that the real time constraints are schedulable.
7684 */
7685static DEFINE_MUTEX(rt_constraints_mutex);
7686
7687/* Must be called with tasklist_lock held */
7688static inline int tg_has_rt_tasks(struct task_group *tg)
7689{
7690        struct task_struct *g, *p;
7691
7692        /*
7693         * Autogroups do not have RT tasks; see autogroup_create().
7694         */
7695        if (task_group_is_autogroup(tg))
7696                return 0;
7697
7698        for_each_process_thread(g, p) {
7699                if (rt_task(p) && task_group(p) == tg)
7700                        return 1;
7701        }
7702
7703        return 0;
7704}
7705
7706struct rt_schedulable_data {
7707        struct task_group *tg;
7708        u64 rt_period;
7709        u64 rt_runtime;
7710};
7711
7712static int tg_rt_schedulable(struct task_group *tg, void *data)
7713{
7714        struct rt_schedulable_data *d = data;
7715        struct task_group *child;
7716        unsigned long total, sum = 0;
7717        u64 period, runtime;
7718
7719        period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7720        runtime = tg->rt_bandwidth.rt_runtime;
7721
7722        if (tg == d->tg) {
7723                period = d->rt_period;
7724                runtime = d->rt_runtime;
7725        }
7726
7727        /*
7728         * Cannot have more runtime than the period.
7729         */
7730        if (runtime > period && runtime != RUNTIME_INF)
7731                return -EINVAL;
7732
7733        /*
7734         * Ensure we don't starve existing RT tasks.
7735         */
7736        if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7737                return -EBUSY;
7738
7739        total = to_ratio(period, runtime);
7740
7741        /*
7742         * Nobody can have more than the global setting allows.
7743         */
7744        if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7745                return -EINVAL;
7746
7747        /*
7748         * The sum of our children's runtime should not exceed our own.
7749         */
7750        list_for_each_entry_rcu(child, &tg->children, siblings) {
7751                period = ktime_to_ns(child->rt_bandwidth.rt_period);
7752                runtime = child->rt_bandwidth.rt_runtime;
7753
7754                if (child == d->tg) {
7755                        period = d->rt_period;
7756                        runtime = d->rt_runtime;
7757                }
7758
7759                sum += to_ratio(period, runtime);
7760        }
7761
7762        if (sum > total)
7763                return -EINVAL;
7764
7765        return 0;
7766}
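
/*
 * Worked example with made-up numbers: under the default global limits
 * (rt_period = 1s, rt_runtime = 0.95s) the global cap is 95%. A group
 * asking for runtime = 600ms of every 1s period passes the global
 * check (60% <= 95%), but two of its children each asking for 600ms/1s
 * would sum to 120% against the parent's 60% and fail the children-sum
 * check above with -EINVAL.
 */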
7767
7768static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
7769{
7770        int ret;
7771
7772        struct rt_schedulable_data data = {
7773                .tg = tg,
7774                .rt_period = period,
7775                .rt_runtime = runtime,
7776        };
7777
7778        rcu_read_lock();
7779        ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7780        rcu_read_unlock();
7781
7782        return ret;
7783}
7784
7785static int tg_set_rt_bandwidth(struct task_group *tg,
7786                u64 rt_period, u64 rt_runtime)
7787{
7788        int i, err = 0;
7789
7790        /*
7791         * Disallowing the root group RT runtime is BAD; it would prevent the
7792         * kernel from creating (and/or operating) RT threads.
7793         */
7794        if (tg == &root_task_group && rt_runtime == 0)
7795                return -EINVAL;
7796
7797        /* A zero period doesn't make any sense. */
7798        if (rt_period == 0)
7799                return -EINVAL;
7800
7801        mutex_lock(&rt_constraints_mutex);
7802        read_lock(&tasklist_lock);
7803        err = __rt_schedulable(tg, rt_period, rt_runtime);
7804        if (err)
7805                goto unlock;
7806
7807        raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7808        tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7809        tg->rt_bandwidth.rt_runtime = rt_runtime;
7810
7811        for_each_possible_cpu(i) {
7812                struct rt_rq *rt_rq = tg->rt_rq[i];
7813
7814                raw_spin_lock(&rt_rq->rt_runtime_lock);
7815                rt_rq->rt_runtime = rt_runtime;
7816                raw_spin_unlock(&rt_rq->rt_runtime_lock);
7817        }
7818        raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7819unlock:
7820        read_unlock(&tasklist_lock);
7821        mutex_unlock(&rt_constraints_mutex);
7822
7823        return err;
7824}
7825
7826static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
7827{
7828        u64 rt_runtime, rt_period;
7829
7830        rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7831        rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7832        if (rt_runtime_us < 0)
7833                rt_runtime = RUNTIME_INF;
7834
7835        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7836}
7837
7838static long sched_group_rt_runtime(struct task_group *tg)
7839{
7840        u64 rt_runtime_us;
7841
7842        if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
7843                return -1;
7844
7845        rt_runtime_us = tg->rt_bandwidth.rt_runtime;
7846        do_div(rt_runtime_us, NSEC_PER_USEC);
7847        return rt_runtime_us;
7848}
7849
7850static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
7851{
7852        u64 rt_runtime, rt_period;
7853
7854        rt_period = rt_period_us * NSEC_PER_USEC;
7855        rt_runtime = tg->rt_bandwidth.rt_runtime;
7856
7857        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7858}
7859
7860static long sched_group_rt_period(struct task_group *tg)
7861{
7862        u64 rt_period_us;
7863
7864        rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7865        do_div(rt_period_us, NSEC_PER_USEC);
7866        return rt_period_us;
7867}
7868#endif /* CONFIG_RT_GROUP_SCHED */
7869
7870#ifdef CONFIG_RT_GROUP_SCHED
7871static int sched_rt_global_constraints(void)
7872{
7873        int ret = 0;
7874
7875        mutex_lock(&rt_constraints_mutex);
7876        read_lock(&tasklist_lock);
7877        ret = __rt_schedulable(NULL, 0, 0);
7878        read_unlock(&tasklist_lock);
7879        mutex_unlock(&rt_constraints_mutex);
7880
7881        return ret;
7882}
7883
7884static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7885{
7886        /* Don't accept realtime tasks when there is no way for them to run */
7887        if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7888                return 0;
7889
7890        return 1;
7891}
7892
7893#else /* !CONFIG_RT_GROUP_SCHED */
7894static int sched_rt_global_constraints(void)
7895{
7896        unsigned long flags;
7897        int i, ret = 0;
7898
7899        raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
7900        for_each_possible_cpu(i) {
7901                struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7902
7903                raw_spin_lock(&rt_rq->rt_runtime_lock);
7904                rt_rq->rt_runtime = global_rt_runtime();
7905                raw_spin_unlock(&rt_rq->rt_runtime_lock);
7906        }
7907        raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
7908
7909        return ret;
7910}
7911#endif /* CONFIG_RT_GROUP_SCHED */
7912
7913static int sched_dl_global_validate(void)
7914{
7915        u64 runtime = global_rt_runtime();
7916        u64 period = global_rt_period();
7917        u64 new_bw = to_ratio(period, runtime);
7918        struct dl_bw *dl_b;
7919        int cpu, ret = 0;
7920        unsigned long flags;
7921
7922        /*
7923         * Here we want to check that the bandwidth is not being set to a
7924         * value smaller than the bandwidth currently allocated in any of
7925         * the root_domains.
7926         *
7927         * FIXME: Cycling over all the CPUs is overkill, but simpler than
7928         * cycling over the root_domains... Discussion on different/better
7929         * solutions is welcome!
7930         */
7931        for_each_possible_cpu(cpu) {
7932                rcu_read_lock_sched();
7933                dl_b = dl_bw_of(cpu);
7934
7935                raw_spin_lock_irqsave(&dl_b->lock, flags);
7936                if (new_bw < dl_b->total_bw)
7937                        ret = -EBUSY;
7938                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7939
7940                rcu_read_unlock_sched();
7941
7942                if (ret)
7943                        break;
7944        }
7945
7946        return ret;
7947}
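
/*
 * Hypothetical example: shrinking sched_rt_runtime_us from 950000 to
 * 400000 with sched_rt_period_us = 1000000 proposes new_bw =
 * to_ratio(1s, 0.4s), i.e. 40%. If any root_domain already has, say,
 * 55% of deadline bandwidth allocated, the check above returns -EBUSY
 * and sched_rt_handler() undoes the sysctl write.
 */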
7948
7949static void sched_dl_do_global(void)
7950{
7951        u64 new_bw = -1;
7952        struct dl_bw *dl_b;
7953        int cpu;
7954        unsigned long flags;
7955
7956        def_dl_bandwidth.dl_period = global_rt_period();
7957        def_dl_bandwidth.dl_runtime = global_rt_runtime();
7958
7959        if (global_rt_runtime() != RUNTIME_INF)
7960                new_bw = to_ratio(global_rt_period(), global_rt_runtime());
7961
7962        /*
7963         * FIXME: As above...
7964         */
7965        for_each_possible_cpu(cpu) {
7966                rcu_read_lock_sched();
7967                dl_b = dl_bw_of(cpu);
7968
7969                raw_spin_lock_irqsave(&dl_b->lock, flags);
7970                dl_b->bw = new_bw;
7971                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7972
7973                rcu_read_unlock_sched();
7974        }
7975}
7976
7977static int sched_rt_global_validate(void)
7978{
7979        if (sysctl_sched_rt_period <= 0)
7980                return -EINVAL;
7981
7982        if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
7983                (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
7984                return -EINVAL;
7985
7986        return 0;
7987}
7988
7989static void sched_rt_do_global(void)
7990{
7991        def_rt_bandwidth.rt_runtime = global_rt_runtime();
7992        def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
7993}
7994
7995int sched_rt_handler(struct ctl_table *table, int write,
7996                void __user *buffer, size_t *lenp,
7997                loff_t *ppos)
7998{
7999        int old_period, old_runtime;
8000        static DEFINE_MUTEX(mutex);
8001        int ret;
8002
8003        mutex_lock(&mutex);
8004        old_period = sysctl_sched_rt_period;
8005        old_runtime = sysctl_sched_rt_runtime;
8006
8007        ret = proc_dointvec(table, write, buffer, lenp, ppos);
8008
8009        if (!ret && write) {
8010                ret = sched_rt_global_validate();
8011                if (ret)
8012                        goto undo;
8013
8014                ret = sched_dl_global_validate();
8015                if (ret)
8016                        goto undo;
8017
8018                ret = sched_rt_global_constraints();
8019                if (ret)
8020                        goto undo;
8021
8022                sched_rt_do_global();
8023                sched_dl_do_global();
8024        }
8025        if (0) {
8026undo:
8027                sysctl_sched_rt_period = old_period;
8028                sysctl_sched_rt_runtime = old_runtime;
8029        }
8030        mutex_unlock(&mutex);
8031
8032        return ret;
8033}
8034
8035int sched_rr_handler(struct ctl_table *table, int write,
8036                void __user *buffer, size_t *lenp,
8037                loff_t *ppos)
8038{
8039        int ret;
8040        static DEFINE_MUTEX(mutex);
8041
8042        mutex_lock(&mutex);
8043        ret = proc_dointvec(table, write, buffer, lenp, ppos);
8044        /* Make sure that internally we keep jiffies. */
8045        /* Also, writing zero resets the timeslice to the default. */
8046        if (!ret && write) {
8047                sched_rr_timeslice = sched_rr_timeslice <= 0 ?
8048                        RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
8049        }
8050        mutex_unlock(&mutex);
8051        return ret;
8052}
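
/*
 * Illustrative numbers: on a HZ=250 kernel, writing 100 (ms) to the
 * sched_rr timeslice sysctl stores msecs_to_jiffies(100) == 25 jiffies
 * above, while writing zero or a negative value restores RR_TIMESLICE,
 * the 100ms default expressed in jiffies.
 */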
8053
8054#ifdef CONFIG_CGROUP_SCHED
8055
8056static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
8057{
8058        return css ? container_of(css, struct task_group, css) : NULL;
8059}
8060
8061static struct cgroup_subsys_state *
8062cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8063{
8064        struct task_group *parent = css_tg(parent_css);
8065        struct task_group *tg;
8066
8067        if (!parent) {
8068                /* This is early initialization for the top cgroup */
8069                return &root_task_group.css;
8070        }
8071
8072        tg = sched_create_group(parent);
8073        if (IS_ERR(tg))
8074                return ERR_PTR(-ENOMEM);
8075
8076        sched_online_group(tg, parent);
8077
8078        return &tg->css;
8079}
8080
8081static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
8082{
8083        struct task_group *tg = css_tg(css);
8084
8085        sched_offline_group(tg);
8086}
8087
8088static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
8089{
8090        struct task_group *tg = css_tg(css);
8091
8092        /*
8093         * Relies on the RCU grace period between css_released() and this.
8094         */
8095        sched_free_group(tg);
8096}
8097
8098static void cpu_cgroup_fork(struct task_struct *task)
8099{
8100        sched_move_task(task);
8101}
8102
8103static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
8104{
8105        struct task_struct *task;
8106        struct cgroup_subsys_state *css;
8107
8108        cgroup_taskset_for_each(task, css, tset) {
8109#ifdef CONFIG_RT_GROUP_SCHED
8110                if (!sched_rt_can_attach(css_tg(css), task))
8111                        return -EINVAL;
8112#else
8113                /* We don't support RT-tasks being in separate groups */
8114                if (task->sched_class != &fair_sched_class)
8115                        return -EINVAL;
8116#endif
8117        }
8118        return 0;
8119}
8120
8121static void cpu_cgroup_attach(struct cgroup_taskset *tset)
8122{
8123        struct task_struct *task;
8124        struct cgroup_subsys_state *css;
8125
8126        cgroup_taskset_for_each(task, css, tset)
8127                sched_move_task(task);
8128}
8129
8130#ifdef CONFIG_FAIR_GROUP_SCHED
8131static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8132                                struct cftype *cftype, u64 shareval)
8133{
8134        return sched_group_set_shares(css_tg(css), scale_load(shareval));
8135}
8136
8137static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8138                               struct cftype *cft)
8139{
8140        struct task_group *tg = css_tg(css);
8141
8142        return (u64) scale_load_down(tg->shares);
8143}
8144
8145#ifdef CONFIG_CFS_BANDWIDTH
8146static DEFINE_MUTEX(cfs_constraints_mutex);
8147
8148const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8149const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8150
8151static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8152
8153static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8154{
8155        int i, ret = 0, runtime_enabled, runtime_was_enabled;
8156        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8157
8158        if (tg == &root_task_group)
8159                return -EINVAL;
8160
8161        /*
8162         * Ensure we have at least some bandwidth every period.  This is
8163         * to prevent reaching a state of large arrears when throttled via
8164         * entity_tick() resulting in prolonged exit starvation.
8165         */
8166        if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8167                return -EINVAL;
8168
8169        /*
8170         * Likewise, bound things on the other side by preventing insane quota
8171         * periods.  This also allows us to normalize in computing quota
8172         * feasibility.
8173         */
8174        if (period > max_cfs_quota_period)
8175                return -EINVAL;
8176
8177        /*
8178         * Prevent race between setting of cfs_rq->runtime_enabled and
8179         * unthrottle_offline_cfs_rqs().
8180         */
8181        get_online_cpus();
8182        mutex_lock(&cfs_constraints_mutex);
8183        ret = __cfs_schedulable(tg, period, quota);
8184        if (ret)
8185                goto out_unlock;
8186
8187        runtime_enabled = quota != RUNTIME_INF;
8188        runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
8189        /*
8190         * If we need to toggle cfs_bandwidth_used, off->on must occur
8191         * before making related changes, and on->off must occur afterwards
8192         */
8193        if (runtime_enabled && !runtime_was_enabled)
8194                cfs_bandwidth_usage_inc();
8195        raw_spin_lock_irq(&cfs_b->lock);
8196        cfs_b->period = ns_to_ktime(period);
8197        cfs_b->quota = quota;
8198
8199        __refill_cfs_bandwidth_runtime(cfs_b);
8200        /* restart the period timer (if active) to handle new period expiry */
8201        if (runtime_enabled)
8202                start_cfs_bandwidth(cfs_b);
8203        raw_spin_unlock_irq(&cfs_b->lock);
8204
8205        for_each_online_cpu(i) {
8206                struct cfs_rq *cfs_rq = tg->cfs_rq[i];
8207                struct rq *rq = cfs_rq->rq;
8208
8209                raw_spin_lock_irq(&rq->lock);
8210                cfs_rq->runtime_enabled = runtime_enabled;
8211                cfs_rq->runtime_remaining = 0;
8212
8213                if (cfs_rq->throttled)
8214                        unthrottle_cfs_rq(cfs_rq);
8215                raw_spin_unlock_irq(&rq->lock);
8216        }
8217        if (runtime_was_enabled && !runtime_enabled)
8218                cfs_bandwidth_usage_dec();
8219out_unlock:
8220        mutex_unlock(&cfs_constraints_mutex);
8221        put_online_cpus();
8222
8223        return ret;
8224}
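
/*
 * Worked example (illustrative, not upstream text): period = 100ms with
 * quota = 50ms lets the group consume at most 50ms of CPU time per 100ms
 * window, i.e. half of one CPU; quota = 200ms over the same period allows
 * up to two CPUs' worth of runtime, since the quota is a group-wide pool
 * drawn on by all of the group's cfs_rqs.
 */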
8225
8226int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8227{
8228        u64 quota, period;
8229
8230        period = ktime_to_ns(tg->cfs_bandwidth.period);
8231        if (cfs_quota_us < 0)
8232                quota = RUNTIME_INF;
8233        else
8234                quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8235
8236        return tg_set_cfs_bandwidth(tg, period, quota);
8237}
8238
8239long tg_get_cfs_quota(struct task_group *tg)
8240{
8241        u64 quota_us;
8242
8243        if (tg->cfs_bandwidth.quota == RUNTIME_INF)
8244                return -1;
8245
8246        quota_us = tg->cfs_bandwidth.quota;
8247        do_div(quota_us, NSEC_PER_USEC);
8248
8249        return quota_us;
8250}
8251
8252int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8253{
8254        u64 quota, period;
8255
8256        period = (u64)cfs_period_us * NSEC_PER_USEC;
8257        quota = tg->cfs_bandwidth.quota;
8258
8259        return tg_set_cfs_bandwidth(tg, period, quota);
8260}
8261
8262long tg_get_cfs_period(struct task_group *tg)
8263{
8264        u64 cfs_period_us;
8265
8266        cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
8267        do_div(cfs_period_us, NSEC_PER_USEC);
8268
8269        return cfs_period_us;
8270}
8271
8272static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
8273                                  struct cftype *cft)
8274{
8275        return tg_get_cfs_quota(css_tg(css));
8276}
8277
8278static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
8279                                   struct cftype *cftype, s64 cfs_quota_us)
8280{
8281        return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
8282}
8283
8284static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
8285                                   struct cftype *cft)
8286{
8287        return tg_get_cfs_period(css_tg(css));
8288}
8289
8290static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
8291                                    struct cftype *cftype, u64 cfs_period_us)
8292{
8293        return tg_set_cfs_period(css_tg(css), cfs_period_us);
8294}
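
/*
 * The four handlers above back the cgroup-v1 files cpu.cfs_quota_us and
 * cpu.cfs_period_us.  A minimal userspace sketch (illustrative only; the
 * mount point /sys/fs/cgroup/cpu and the group name "mygrp" are
 * assumptions, not something this file defines):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int cap_group(void)
 *	{
 *		// Cap "mygrp" at 50% of one CPU: 50ms quota per the
 *		// default 100ms period.
 *		int fd = open("/sys/fs/cgroup/cpu/mygrp/cpu.cfs_quota_us",
 *			      O_WRONLY);
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, "50000", 5) != 5) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */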
8295
8296struct cfs_schedulable_data {
8297        struct task_group *tg;
8298        u64 period, quota;
8299};
8300
8301/*
8302 * normalize group quota/period to be quota/max_period
8303 * note: units are usecs
8304 */
8305static u64 normalize_cfs_quota(struct task_group *tg,
8306                               struct cfs_schedulable_data *d)
8307{
8308        u64 quota, period;
8309
8310        if (tg == d->tg) {
8311                period = d->period;
8312                quota = d->quota;
8313        } else {
8314                period = tg_get_cfs_period(tg);
8315                quota = tg_get_cfs_quota(tg);
8316        }
8317
8318        /* note: these should typically be equivalent */
8319        if (quota == RUNTIME_INF || quota == -1)
8320                return RUNTIME_INF;
8321
8322        return to_ratio(period, quota);
8323}
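
/*
 * Worked example (assuming to_ratio()'s 20-bit fixed point, where
 * BW_UNIT == 1 << 20): quota = 25000us over period = 50000us yields
 * to_ratio(50000, 25000) == BW_UNIT / 2, i.e. half a CPU, so groups
 * using different periods can be compared on a common scale.
 */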
8324
8325static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8326{
8327        struct cfs_schedulable_data *d = data;
8328        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8329        s64 quota = 0, parent_quota = -1;
8330
8331        if (!tg->parent) {
8332                quota = RUNTIME_INF;
8333        } else {
8334                struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
8335
8336                quota = normalize_cfs_quota(tg, d);
8337                parent_quota = parent_b->hierarchical_quota;
8338
8339                /*
8340                 * ensure max(child_quota) <= parent_quota, inherit when no
8341                 * limit is set
8342                 */
8343                if (quota == RUNTIME_INF)
8344                        quota = parent_quota;
8345                else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8346                        return -EINVAL;
8347        }
8348        cfs_b->hierarchical_quota = quota;
8349
8350        return 0;
8351}
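
/*
 * Illustrative hierarchy (not upstream text): if parent P has a
 * hierarchical_quota equivalent to 0.5 CPU, a child requesting 0.25 CPU
 * passes and records 0.25 as its own hierarchical_quota, a child
 * requesting 0.75 CPU makes the walk fail with -EINVAL, and a child with
 * no limit set inherits P's 0.5 when its own children are checked.
 */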
8352
8353static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8354{
8355        int ret;
8356        struct cfs_schedulable_data data = {
8357                .tg = tg,
8358                .period = period,
8359                .quota = quota,
8360        };
8361
8362        if (quota != RUNTIME_INF) {
8363                do_div(data.period, NSEC_PER_USEC);
8364                do_div(data.quota, NSEC_PER_USEC);
8365        }
8366
8367        rcu_read_lock();
8368        ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8369        rcu_read_unlock();
8370
8371        return ret;
8372}
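
/*
 * Note (illustrative): the ns->us division above puts the candidate
 * period/quota into the same microsecond units that tg_get_cfs_period()
 * and tg_get_cfs_quota() report for every other group, so
 * tg_cfs_schedulable_down() compares like with like across the tree.
 */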
8373
8374static int cpu_stats_show(struct seq_file *sf, void *v)
8375{
8376        struct task_group *tg = css_tg(seq_css(sf));
8377        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8378
8379        seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
8380        seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
8381        seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
8382
8383        return 0;
8384}
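
/*
 * Example cpu.stat output (values are illustrative):
 *
 *	nr_periods 1024
 *	nr_throttled 37
 *	throttled_time 183724951
 *
 * nr_periods counts elapsed enforcement windows, nr_throttled how many of
 * them throttled the group, and throttled_time the cumulative time, in
 * nanoseconds, the group's runqueues spent throttled.
 */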
8385#endif /* CONFIG_CFS_BANDWIDTH */
8386#endif /* CONFIG_FAIR_GROUP_SCHED */
8387
8388#ifdef CONFIG_RT_GROUP_SCHED
8389static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
8390                                struct cftype *cft, s64 val)
8391{
8392        return sched_group_set_rt_runtime(css_tg(css), val);
8393}
8394
8395static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
8396                               struct cftype *cft)
8397{
8398        return sched_group_rt_runtime(css_tg(css));
8399}
8400
8401static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
8402                                    struct cftype *cftype, u64 rt_period_us)
8403{
8404        return sched_group_set_rt_period(css_tg(css), rt_period_us);
8405}
8406
8407static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
8408                                   struct cftype *cft)
8409{
8410        return sched_group_rt_period(css_tg(css));
8411}
8412#endif /* CONFIG_RT_GROUP_SCHED */
8413
8414static struct cftype cpu_files[] = {
8415#ifdef CONFIG_FAIR_GROUP_SCHED
8416        {
8417                .name = "shares",
8418                .read_u64 = cpu_shares_read_u64,
8419                .write_u64 = cpu_shares_write_u64,
8420        },
8421#endif
8422#ifdef CONFIG_CFS_BANDWIDTH
8423        {
8424                .name = "cfs_quota_us",
8425                .read_s64 = cpu_cfs_quota_read_s64,
8426                .write_s64 = cpu_cfs_quota_write_s64,
8427        },
8428        {
8429                .name = "cfs_period_us",
8430                .read_u64 = cpu_cfs_period_read_u64,
8431                .write_u64 = cpu_cfs_period_write_u64,
8432        },
8433        {
8434                .name = "stat",
8435                .seq_show = cpu_stats_show,
8436        },
8437#endif
8438#ifdef CONFIG_RT_GROUP_SCHED
8439        {
8440                .name = "rt_runtime_us",
8441                .read_s64 = cpu_rt_runtime_read,
8442                .write_s64 = cpu_rt_runtime_write,
8443        },
8444        {
8445                .name = "rt_period_us",
8446                .read_u64 = cpu_rt_period_read_uint,
8447                .write_u64 = cpu_rt_period_write_uint,
8448        },
8449#endif
8450        { }     /* terminate */
8451};
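
/*
 * Note (illustrative): as legacy cftypes of the "cpu" controller these
 * appear in cgroupfs with the subsystem prefix, e.g. cpu.shares,
 * cpu.cfs_quota_us, cpu.cfs_period_us, cpu.stat, cpu.rt_runtime_us and
 * cpu.rt_period_us, each present only when its config option is enabled.
 */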
8452
8453struct cgroup_subsys cpu_cgrp_subsys = {
8454        .css_alloc      = cpu_cgroup_css_alloc,
8455        .css_released   = cpu_cgroup_css_released,
8456        .css_free       = cpu_cgroup_css_free,
8457        .fork           = cpu_cgroup_fork,
8458        .can_attach     = cpu_cgroup_can_attach,
8459        .attach         = cpu_cgroup_attach,
8460        .legacy_cftypes = cpu_files,
8461        .early_init     = true,
8462};
8463
8464#endif  /* CONFIG_CGROUP_SCHED */
8465
8466void dump_cpu_task(int cpu)
8467{
8468        pr_info("Task dump for CPU %d:\n", cpu);
8469        sched_show_task(cpu_curr(cpu));
8470}
8471
8472/*
8473 * Nice levels are multiplicative, with a gentle 10% change for every
8474 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
8475 * nice 1, it will get ~10% less CPU time than another CPU-bound task
8476 * that remained on nice 0.
8477 *
8478 * The "10% effect" is relative and cumulative: from _any_ nice level,
8479 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
8480 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
8481 * If a task goes up by ~10% and another task goes down by ~10% then
8482 * the relative distance between them is ~25%.)
8483 */
8484const int sched_prio_to_weight[40] = {
8485 /* -20 */     88761,     71755,     56483,     46273,     36291,
8486 /* -15 */     29154,     23254,     18705,     14949,     11916,
8487 /* -10 */      9548,      7620,      6100,      4904,      3906,
8488 /*  -5 */      3121,      2501,      1991,      1586,      1277,
8489 /*   0 */      1024,       820,       655,       526,       423,
8490 /*   5 */       335,       272,       215,       172,       137,
8491 /*  10 */       110,        87,        70,        56,        45,
8492 /*  15 */        36,        29,        23,        18,        15,
8493};
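
/*
 * Worked example (illustrative): two CPU-bound tasks at nice 0 and nice 1
 * have weights 1024 and 820, so they receive 1024 / 1844 ~= 55.5% and
 * 820 / 1844 ~= 44.5% of the CPU: the nice 1 task gets roughly 10% of
 * total CPU less, and each one-step change multiplies or divides the
 * weight by ~1.25 (1024 / 820 ~= 1.25).
 */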
8494
8495/*
8496 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
8497 *
8498 * In cases where the weight does not change often, we can use the
8499 * precalculated inverse to speed up arithmetics by turning divisions
8500 * into multiplications:
8501 */
8502const u32 sched_prio_to_wmult[40] = {
8503 /* -20 */     48388,     59856,     76040,     92818,    118348,
8504 /* -15 */    147320,    184698,    229616,    287308,    360437,
8505 /* -10 */    449829,    563644,    704093,    875809,   1099582,
8506 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
8507 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
8508 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
8509 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
8510 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
8511};
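
/*
 * Illustrative use of the inverse (a sketch, not the scheduler's exact
 * code path): dividing by the nice 5 weight 335 can be replaced by a
 * multiply and a shift,
 *
 *	u64 q = (x * (u64)sched_prio_to_wmult[25]) >> 32;	// ~= x / 335
 *
 * valid while the product fits in 64 bits, since
 * sched_prio_to_wmult[25] == 12820798 ~= 2^32 / 335.  CFS applies the
 * same trick when scaling delta_exec by NICE_0_LOAD / weight.
 */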
8512