   1/*
   2 *  kernel/sched/core.c
   3 *
   4 *  Core kernel scheduler code and related syscalls
   5 *
   6 *  Copyright (C) 1991-2002  Linus Torvalds
   7 */
   8#include <linux/sched.h>
   9#include <linux/sched/clock.h>
  10#include <uapi/linux/sched/types.h>
  11#include <linux/sched/loadavg.h>
  12#include <linux/sched/hotplug.h>
  13#include <linux/cpuset.h>
  14#include <linux/delayacct.h>
  15#include <linux/init_task.h>
  16#include <linux/context_tracking.h>
  17#include <linux/rcupdate_wait.h>
  18
  19#include <linux/blkdev.h>
  20#include <linux/kprobes.h>
  21#include <linux/mmu_context.h>
  22#include <linux/module.h>
  23#include <linux/nmi.h>
  24#include <linux/prefetch.h>
  25#include <linux/profile.h>
  26#include <linux/security.h>
  27#include <linux/syscalls.h>
  28
  29#include <asm/switch_to.h>
  30#include <asm/tlb.h>
  31#ifdef CONFIG_PARAVIRT
  32#include <asm/paravirt.h>
  33#endif
  34
  35#include "sched.h"
  36#include "../workqueue_internal.h"
  37#include "../smpboot.h"
  38
  39#define CREATE_TRACE_POINTS
  40#include <trace/events/sched.h>
  41
  42DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
  43
  44/*
  45 * Debugging: various feature bits
  46 */
  47
  48#define SCHED_FEAT(name, enabled)       \
  49        (1UL << __SCHED_FEAT_##name) * enabled |
  50
  51const_debug unsigned int sysctl_sched_features =
  52#include "features.h"
  53        0;
  54
  55#undef SCHED_FEAT
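
/*
 * Illustrative expansion of the construct above (assuming features.h
 * contains an entry such as SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)):
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
 *		...
 *		0;
 *
 * Enabled features contribute their bit; disabled entries multiply by 0
 * and contribute nothing, and the trailing 0 terminates the OR chain.
 */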
  56
  57/*
  58 * Number of tasks to iterate in a single balance run.
  59 * Limited because this is done with IRQs disabled.
  60 */
  61const_debug unsigned int sysctl_sched_nr_migrate = 32;
  62
  63/*
  64 * period over which we average the RT time consumption, measured
  65 * in ms.
  66 *
  67 * default: 1s
  68 */
  69const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
  70
  71/*
  72 * period over which we measure -rt task CPU usage in us.
  73 * default: 1s
  74 */
  75unsigned int sysctl_sched_rt_period = 1000000;
  76
  77__read_mostly int scheduler_running;
  78
  79/*
  80 * part of the period that we allow rt tasks to run in us.
  81 * default: 0.95s
  82 */
  83int sysctl_sched_rt_runtime = 950000;
  84
  85/* CPUs with isolated domains */
  86cpumask_var_t cpu_isolated_map;
  87
  88/*
  89 * __task_rq_lock - lock the rq @p resides on.
  90 */
  91struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
  92        __acquires(rq->lock)
  93{
  94        struct rq *rq;
  95
  96        lockdep_assert_held(&p->pi_lock);
  97
  98        for (;;) {
  99                rq = task_rq(p);
 100                raw_spin_lock(&rq->lock);
 101                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
 102                        rq_pin_lock(rq, rf);
 103                        return rq;
 104                }
 105                raw_spin_unlock(&rq->lock);
 106
 107                while (unlikely(task_on_rq_migrating(p)))
 108                        cpu_relax();
 109        }
 110}
 111
 112/*
 113 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 114 */
 115struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 116        __acquires(p->pi_lock)
 117        __acquires(rq->lock)
 118{
 119        struct rq *rq;
 120
 121        for (;;) {
 122                raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
 123                rq = task_rq(p);
 124                raw_spin_lock(&rq->lock);
 125                /*
 126                 *      move_queued_task()              task_rq_lock()
 127                 *
 128                 *      ACQUIRE (rq->lock)
 129                 *      [S] ->on_rq = MIGRATING         [L] rq = task_rq()
 130                 *      WMB (__set_task_cpu())          ACQUIRE (rq->lock);
 131                 *      [S] ->cpu = new_cpu             [L] task_rq()
 132                 *                                      [L] ->on_rq
 133                 *      RELEASE (rq->lock)
 134                 *
 135                 * If we observe the old cpu in task_rq_lock, the acquire of
 136                 * the old rq->lock will fully serialize against the stores.
 137                 *
 138                 * If we observe the new CPU in task_rq_lock, the acquire will
 139                 * pair with the WMB to ensure we must then also see migrating.
 140                 */
 141                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
 142                        rq_pin_lock(rq, rf);
 143                        return rq;
 144                }
 145                raw_spin_unlock(&rq->lock);
 146                raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 147
 148                while (unlikely(task_on_rq_migrating(p)))
 149                        cpu_relax();
 150        }
 151}
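
/*
 * Minimal usage sketch; task_rq_unlock() (used further down in this
 * file) is the counterpart that drops both locks again:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	... p cannot be migrated or enqueued elsewhere here ...
 *	task_rq_unlock(rq, p, &rf);
 */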
 152
 153/*
 154 * RQ-clock updating methods:
 155 */
 156
 157static void update_rq_clock_task(struct rq *rq, s64 delta)
 158{
 159/*
 160 * In theory, the compile should just see 0 here, and optimize out the call
 161 * to sched_rt_avg_update. But I don't trust it...
 162 */
 163#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
 164        s64 steal = 0, irq_delta = 0;
 165#endif
 166#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 167        irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
 168
 169        /*
 170         * Since irq_time is only updated on {soft,}irq_exit, we might run into
 171         * this case when a previous update_rq_clock() happened inside a
 172         * {soft,}irq region.
 173         *
 174         * When this happens, we stop ->clock_task and only update the
 175         * prev_irq_time stamp to account for the part that fit, so that a next
 176         * update will consume the rest. This ensures ->clock_task is
 177         * monotonic.
 178         *
 179         * It does, however, cause some slight misattribution of {soft,}irq
 180         * time; a more accurate solution would be to update the irq_time using
 181         * the current rq->clock timestamp, except that would require using
 182         * atomic ops.
 183         */
 184        if (irq_delta > delta)
 185                irq_delta = delta;
 186
 187        rq->prev_irq_time += irq_delta;
 188        delta -= irq_delta;
 189#endif
 190#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
 191        if (static_key_false((&paravirt_steal_rq_enabled))) {
 192                steal = paravirt_steal_clock(cpu_of(rq));
 193                steal -= rq->prev_steal_time_rq;
 194
 195                if (unlikely(steal > delta))
 196                        steal = delta;
 197
 198                rq->prev_steal_time_rq += steal;
 199                delta -= steal;
 200        }
 201#endif
 202
 203        rq->clock_task += delta;
 204
 205#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
 206        if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
 207                sched_rt_avg_update(rq, irq_delta + steal);
 208#endif
 209}
 210
 211void update_rq_clock(struct rq *rq)
 212{
 213        s64 delta;
 214
 215        lockdep_assert_held(&rq->lock);
 216
 217        if (rq->clock_update_flags & RQCF_ACT_SKIP)
 218                return;
 219
 220#ifdef CONFIG_SCHED_DEBUG
 221        if (sched_feat(WARN_DOUBLE_CLOCK))
 222                SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
 223        rq->clock_update_flags |= RQCF_UPDATED;
 224#endif
 225
 226        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
 227        if (delta < 0)
 228                return;
 229        rq->clock += delta;
 230        update_rq_clock_task(rq, delta);
 231}
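
/*
 * Note on the two clocks maintained above: rq->clock advances by the raw
 * sched_clock_cpu() delta, while rq->clock_task (maintained by
 * update_rq_clock_task()) advances by that delta minus any IRQ and/or
 * paravirt steal time, so that task runtime accounting only charges time
 * the task could actually have used.
 */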
 232
 233
 234#ifdef CONFIG_SCHED_HRTICK
 235/*
 236 * Use HR-timers to deliver accurate preemption points.
 237 */
 238
 239static void hrtick_clear(struct rq *rq)
 240{
 241        if (hrtimer_active(&rq->hrtick_timer))
 242                hrtimer_cancel(&rq->hrtick_timer);
 243}
 244
 245/*
 246 * High-resolution timer tick.
 247 * Runs from hardirq context with interrupts disabled.
 248 */
 249static enum hrtimer_restart hrtick(struct hrtimer *timer)
 250{
 251        struct rq *rq = container_of(timer, struct rq, hrtick_timer);
 252        struct rq_flags rf;
 253
 254        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 255
 256        rq_lock(rq, &rf);
 257        update_rq_clock(rq);
 258        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
 259        rq_unlock(rq, &rf);
 260
 261        return HRTIMER_NORESTART;
 262}
 263
 264#ifdef CONFIG_SMP
 265
 266static void __hrtick_restart(struct rq *rq)
 267{
 268        struct hrtimer *timer = &rq->hrtick_timer;
 269
 270        hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 271}
 272
 273/*
 274 * called from hardirq (IPI) context
 275 */
 276static void __hrtick_start(void *arg)
 277{
 278        struct rq *rq = arg;
 279        struct rq_flags rf;
 280
 281        rq_lock(rq, &rf);
 282        __hrtick_restart(rq);
 283        rq->hrtick_csd_pending = 0;
 284        rq_unlock(rq, &rf);
 285}
 286
 287/*
 288 * Called to set the hrtick timer state.
 289 *
 290 * called with rq->lock held and irqs disabled
 291 */
 292void hrtick_start(struct rq *rq, u64 delay)
 293{
 294        struct hrtimer *timer = &rq->hrtick_timer;
 295        ktime_t time;
 296        s64 delta;
 297
 298        /*
 299         * Don't schedule slices shorter than 10000ns; that just
 300         * doesn't make sense and can cause timer DoS.
 301         */
 302        delta = max_t(s64, delay, 10000LL);
 303        time = ktime_add_ns(timer->base->get_time(), delta);
 304
 305        hrtimer_set_expires(timer, time);
 306
 307        if (rq == this_rq()) {
 308                __hrtick_restart(rq);
 309        } else if (!rq->hrtick_csd_pending) {
 310                smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
 311                rq->hrtick_csd_pending = 1;
 312        }
 313}
 314
 315#else
 316/*
 317 * Called to set the hrtick timer state.
 318 *
 319 * called with rq->lock held and irqs disabled
 320 */
 321void hrtick_start(struct rq *rq, u64 delay)
 322{
 323        /*
 324         * Don't schedule slices shorter than 10000ns; that just
 325         * doesn't make sense. Rely on vruntime for fairness.
 326         */
 327        delay = max_t(u64, delay, 10000LL);
 328        hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
 329                      HRTIMER_MODE_REL_PINNED);
 330}
 331#endif /* CONFIG_SMP */
 332
 333static void init_rq_hrtick(struct rq *rq)
 334{
 335#ifdef CONFIG_SMP
 336        rq->hrtick_csd_pending = 0;
 337
 338        rq->hrtick_csd.flags = 0;
 339        rq->hrtick_csd.func = __hrtick_start;
 340        rq->hrtick_csd.info = rq;
 341#endif
 342
 343        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 344        rq->hrtick_timer.function = hrtick;
 345}
 346#else   /* CONFIG_SCHED_HRTICK */
 347static inline void hrtick_clear(struct rq *rq)
 348{
 349}
 350
 351static inline void init_rq_hrtick(struct rq *rq)
 352{
 353}
 354#endif  /* CONFIG_SCHED_HRTICK */
 355
 356/*
 357 * cmpxchg-based fetch_or(), written as a macro so it works for different integer types
 358 */
 359#define fetch_or(ptr, mask)                                             \
 360        ({                                                              \
 361                typeof(ptr) _ptr = (ptr);                               \
 362                typeof(mask) _mask = (mask);                            \
 363                typeof(*_ptr) _old, _val = *_ptr;                       \
 364                                                                        \
 365                for (;;) {                                              \
 366                        _old = cmpxchg(_ptr, _val, _val | _mask);       \
 367                        if (_old == _val)                               \
 368                                break;                                  \
 369                        _val = _old;                                    \
 370                }                                                       \
 371        _old;                                                           \
 372})
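
/*
 * Semantically, fetch_or(ptr, mask) behaves as if the following were done
 * atomically (a sketch of the contract, not the implementation):
 *
 *	old = *ptr;
 *	*ptr = old | mask;
 *	return old;
 *
 * set_nr_and_not_polling() below relies on the returned old value to
 * decide whether an IPI is needed.
 */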
 373
 374#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 375/*
 376 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG;
 377 * this avoids any races wrt polling state changes and thereby avoids
 378 * spurious IPIs.
 379 */
 380static bool set_nr_and_not_polling(struct task_struct *p)
 381{
 382        struct thread_info *ti = task_thread_info(p);
 383        return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
 384}
 385
 386/*
 387 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 388 *
 389 * If this returns true, then the idle task promises to call
 390 * sched_ttwu_pending() and reschedule soon.
 391 */
 392static bool set_nr_if_polling(struct task_struct *p)
 393{
 394        struct thread_info *ti = task_thread_info(p);
 395        typeof(ti->flags) old, val = READ_ONCE(ti->flags);
 396
 397        for (;;) {
 398                if (!(val & _TIF_POLLING_NRFLAG))
 399                        return false;
 400                if (val & _TIF_NEED_RESCHED)
 401                        return true;
 402                old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
 403                if (old == val)
 404                        break;
 405                val = old;
 406        }
 407        return true;
 408}
 409
 410#else
 411static bool set_nr_and_not_polling(struct task_struct *p)
 412{
 413        set_tsk_need_resched(p);
 414        return true;
 415}
 416
 417#ifdef CONFIG_SMP
 418static bool set_nr_if_polling(struct task_struct *p)
 419{
 420        return false;
 421}
 422#endif
 423#endif
 424
 425void wake_q_add(struct wake_q_head *head, struct task_struct *task)
 426{
 427        struct wake_q_node *node = &task->wake_q;
 428
 429        /*
 430         * Atomically grab the task; if ->wake_q is already !nil, it means
 431         * it's already queued (either by us or someone else) and will get the
 432         * wakeup due to that.
 433         *
 434         * This cmpxchg() implies a full barrier, which pairs with the write
 435         * barrier implied by the wakeup in wake_up_q().
 436         */
 437        if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
 438                return;
 439
 440        get_task_struct(task);
 441
 442        /*
 443         * The head is context local; there can be no concurrency.
 444         */
 445        *head->lastp = node;
 446        head->lastp = &node->next;
 447}
 448
 449void wake_up_q(struct wake_q_head *head)
 450{
 451        struct wake_q_node *node = head->first;
 452
 453        while (node != WAKE_Q_TAIL) {
 454                struct task_struct *task;
 455
 456                task = container_of(node, struct task_struct, wake_q);
 457                BUG_ON(!task);
 458                /* Task can safely be re-inserted now: */
 459                node = node->next;
 460                task->wake_q.next = NULL;
 461
 462                /*
 463                 * wake_up_process() implies a wmb() to pair with the queueing
 464                 * in wake_q_add() so as not to miss wakeups.
 465                 */
 466                wake_up_process(task);
 467                put_task_struct(task);
 468        }
 469}
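
/*
 * Typical usage sketch for the wake-queue API above (DEFINE_WAKE_Q() is
 * assumed to be the on-stack initializer that accompanies
 * struct wake_q_head; "some_lock" is a stand-in for whatever lock the
 * caller holds):
 *
 *	DEFINE_WAKE_Q(wakeq);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wakeq, task);	// collect wakeups while holding the lock
 *	raw_spin_unlock(&some_lock);
 *	wake_up_q(&wakeq);		// issue them after dropping the lock
 */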
 470
 471/*
 472 * resched_curr - mark rq's current task 'to be rescheduled now'.
 473 *
 474 * On UP this means the setting of the need_resched flag; on SMP it
 475 * might also involve a cross-CPU call to trigger the scheduler on
 476 * the target CPU.
 477 */
 478void resched_curr(struct rq *rq)
 479{
 480        struct task_struct *curr = rq->curr;
 481        int cpu;
 482
 483        lockdep_assert_held(&rq->lock);
 484
 485        if (test_tsk_need_resched(curr))
 486                return;
 487
 488        cpu = cpu_of(rq);
 489
 490        if (cpu == smp_processor_id()) {
 491                set_tsk_need_resched(curr);
 492                set_preempt_need_resched();
 493                return;
 494        }
 495
 496        if (set_nr_and_not_polling(curr))
 497                smp_send_reschedule(cpu);
 498        else
 499                trace_sched_wake_idle_without_ipi(cpu);
 500}
 501
 502void resched_cpu(int cpu)
 503{
 504        struct rq *rq = cpu_rq(cpu);
 505        unsigned long flags;
 506
 507        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
 508                return;
 509        resched_curr(rq);
 510        raw_spin_unlock_irqrestore(&rq->lock, flags);
 511}
 512
 513#ifdef CONFIG_SMP
 514#ifdef CONFIG_NO_HZ_COMMON
 515/*
 516 * In the semi-idle case, use the nearest busy CPU for migrating timers
 517 * from an idle CPU.  This is good for power savings.
 518 *
 519 * We don't do a similar optimization for a completely idle system, as
 520 * selecting an idle CPU will add more delays to the timers than intended
 521 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 522 */
 523int get_nohz_timer_target(void)
 524{
 525        int i, cpu = smp_processor_id();
 526        struct sched_domain *sd;
 527
 528        if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
 529                return cpu;
 530
 531        rcu_read_lock();
 532        for_each_domain(cpu, sd) {
 533                for_each_cpu(i, sched_domain_span(sd)) {
 534                        if (cpu == i)
 535                                continue;
 536
 537                        if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
 538                                cpu = i;
 539                                goto unlock;
 540                        }
 541                }
 542        }
 543
 544        if (!is_housekeeping_cpu(cpu))
 545                cpu = housekeeping_any_cpu();
 546unlock:
 547        rcu_read_unlock();
 548        return cpu;
 549}
 550
 551/*
 552 * When add_timer_on() enqueues a timer into the timer wheel of an
 553 * idle CPU then this timer might expire before the next timer event
 554 * which is scheduled to wake up that CPU. In case of a completely
 555 * idle system the next event might even be infinite time into the
 556 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 557 * leaves the inner idle loop so the newly added timer is taken into
 558 * account when the CPU goes back to idle and evaluates the timer
 559 * wheel for the next timer event.
 560 */
 561static void wake_up_idle_cpu(int cpu)
 562{
 563        struct rq *rq = cpu_rq(cpu);
 564
 565        if (cpu == smp_processor_id())
 566                return;
 567
 568        if (set_nr_and_not_polling(rq->idle))
 569                smp_send_reschedule(cpu);
 570        else
 571                trace_sched_wake_idle_without_ipi(cpu);
 572}
 573
 574static bool wake_up_full_nohz_cpu(int cpu)
 575{
 576        /*
 577         * We just need the target to call irq_exit() and re-evaluate
 578         * the next tick. The nohz full kick at least implies that.
 579         * If needed we can still optimize that later with an
 580         * empty IRQ.
 581         */
 582        if (cpu_is_offline(cpu))
 583                return true;  /* Don't try to wake offline CPUs. */
 584        if (tick_nohz_full_cpu(cpu)) {
 585                if (cpu != smp_processor_id() ||
 586                    tick_nohz_tick_stopped())
 587                        tick_nohz_full_kick_cpu(cpu);
 588                return true;
 589        }
 590
 591        return false;
 592}
 593
 594/*
 595 * Wake up the specified CPU.  If the CPU is going offline, it is the
 596 * caller's responsibility to deal with the lost wakeup, for example,
 597 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 598 */
 599void wake_up_nohz_cpu(int cpu)
 600{
 601        if (!wake_up_full_nohz_cpu(cpu))
 602                wake_up_idle_cpu(cpu);
 603}
 604
 605static inline bool got_nohz_idle_kick(void)
 606{
 607        int cpu = smp_processor_id();
 608
 609        if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
 610                return false;
 611
 612        if (idle_cpu(cpu) && !need_resched())
 613                return true;
 614
 615        /*
 616         * We can't run the Idle Load Balance on this CPU this time, so we
 617         * cancel it and clear NOHZ_BALANCE_KICK.
 618         */
 619        clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
 620        return false;
 621}
 622
 623#else /* CONFIG_NO_HZ_COMMON */
 624
 625static inline bool got_nohz_idle_kick(void)
 626{
 627        return false;
 628}
 629
 630#endif /* CONFIG_NO_HZ_COMMON */
 631
 632#ifdef CONFIG_NO_HZ_FULL
 633bool sched_can_stop_tick(struct rq *rq)
 634{
 635        int fifo_nr_running;
 636
 637        /* Deadline tasks, even if single, need the tick */
 638        if (rq->dl.dl_nr_running)
 639                return false;
 640
 641        /*
 642         * If there is more than one RR task, we need the tick to effect the
 643         * actual RR behaviour.
 644         */
 645        if (rq->rt.rr_nr_running) {
 646                if (rq->rt.rr_nr_running == 1)
 647                        return true;
 648                else
 649                        return false;
 650        }
 651
 652        /*
 653         * If there are no RR tasks but there are FIFO tasks, we can skip the
 654         * tick: there is no forced preemption between FIFO tasks.
 655         */
 656        fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
 657        if (fifo_nr_running)
 658                return true;
 659
 660        /*
 661         * If there are no DL, RR or FIFO tasks, there must only be CFS tasks left;
 662         * if there's more than one, we need the tick for involuntary
 663         * preemption.
 664         */
 665        if (rq->nr_running > 1)
 666                return false;
 667
 668        return true;
 669}
 670#endif /* CONFIG_NO_HZ_FULL */
 671
 672void sched_avg_update(struct rq *rq)
 673{
 674        s64 period = sched_avg_period();
 675
 676        while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
 677                /*
 678                 * Inline assembly required to prevent the compiler
 679                 * optimising this loop into a divmod call.
 680                 * See __iter_div_u64_rem() for another example of this.
 681                 */
 682                asm("" : "+rm" (rq->age_stamp));
 683                rq->age_stamp += period;
 684                rq->rt_avg /= 2;
 685        }
 686}
 687
 688#endif /* CONFIG_SMP */
 689
 690#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
 691                        (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
 692/*
 693 * Iterate task_group tree rooted at *from, calling @down when first entering a
 694 * node and @up when leaving it for the final time.
 695 *
 696 * Caller must hold rcu_lock or sufficient equivalent.
 697 */
 698int walk_tg_tree_from(struct task_group *from,
 699                             tg_visitor down, tg_visitor up, void *data)
 700{
 701        struct task_group *parent, *child;
 702        int ret;
 703
 704        parent = from;
 705
 706down:
 707        ret = (*down)(parent, data);
 708        if (ret)
 709                goto out;
 710        list_for_each_entry_rcu(child, &parent->children, siblings) {
 711                parent = child;
 712                goto down;
 713
 714up:
 715                continue;
 716        }
 717        ret = (*up)(parent, data);
 718        if (ret || parent == from)
 719                goto out;
 720
 721        child = parent;
 722        parent = parent->parent;
 723        if (parent)
 724                goto up;
 725out:
 726        return ret;
 727}
 728
 729int tg_nop(struct task_group *tg, void *data)
 730{
 731        return 0;
 732}
 733#endif
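
/*
 * Usage sketch for the tree walk above: visit every task_group below the
 * root, doing work on the way down and nothing on the way up (my_down()
 * is a hypothetical tg_visitor; tg_nop() is the no-op defined above):
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, my_down, tg_nop, data);
 *	rcu_read_unlock();
 */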
 734
 735static void set_load_weight(struct task_struct *p)
 736{
 737        int prio = p->static_prio - MAX_RT_PRIO;
 738        struct load_weight *load = &p->se.load;
 739
 740        /*
 741         * SCHED_IDLE tasks get minimal weight:
 742         */
 743        if (idle_policy(p->policy)) {
 744                load->weight = scale_load(WEIGHT_IDLEPRIO);
 745                load->inv_weight = WMULT_IDLEPRIO;
 746                return;
 747        }
 748
 749        load->weight = scale_load(sched_prio_to_weight[prio]);
 750        load->inv_weight = sched_prio_to_wmult[prio];
 751}
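
/*
 * Worked example of the mapping above: a nice-0 task has static_prio 120,
 * so prio = 120 - MAX_RT_PRIO = 20 and the weight becomes
 * scale_load(sched_prio_to_weight[20]) == scale_load(1024), i.e.
 * NICE_0_LOAD.  Each nice level away from 0 scales the table weight by
 * roughly 1.25x.
 */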
 752
 753static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 754{
 755        if (!(flags & ENQUEUE_NOCLOCK))
 756                update_rq_clock(rq);
 757
 758        if (!(flags & ENQUEUE_RESTORE))
 759                sched_info_queued(rq, p);
 760
 761        p->sched_class->enqueue_task(rq, p, flags);
 762}
 763
 764static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 765{
 766        if (!(flags & DEQUEUE_NOCLOCK))
 767                update_rq_clock(rq);
 768
 769        if (!(flags & DEQUEUE_SAVE))
 770                sched_info_dequeued(rq, p);
 771
 772        p->sched_class->dequeue_task(rq, p, flags);
 773}
 774
 775void activate_task(struct rq *rq, struct task_struct *p, int flags)
 776{
 777        if (task_contributes_to_load(p))
 778                rq->nr_uninterruptible--;
 779
 780        enqueue_task(rq, p, flags);
 781}
 782
 783void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 784{
 785        if (task_contributes_to_load(p))
 786                rq->nr_uninterruptible++;
 787
 788        dequeue_task(rq, p, flags);
 789}
 790
 791void sched_set_stop_task(int cpu, struct task_struct *stop)
 792{
 793        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 794        struct task_struct *old_stop = cpu_rq(cpu)->stop;
 795
 796        if (stop) {
 797                /*
 798                 * Make it appear like a SCHED_FIFO task; it's something
 799                 * userspace knows about and won't get confused about.
 800                 *
 801                 * Also, it will make PI more or less work without too
 802                 * much confusion -- but then, stop work should not
 803                 * rely on PI working anyway.
 804                 */
 805                sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
 806
 807                stop->sched_class = &stop_sched_class;
 808        }
 809
 810        cpu_rq(cpu)->stop = stop;
 811
 812        if (old_stop) {
 813                /*
 814                 * Reset it back to a normal scheduling class so that
 815                 * it can die in pieces.
 816                 */
 817                old_stop->sched_class = &rt_sched_class;
 818        }
 819}
 820
 821/*
 822 * __normal_prio - return the priority that is based on the static prio
 823 */
 824static inline int __normal_prio(struct task_struct *p)
 825{
 826        return p->static_prio;
 827}
 828
 829/*
 830 * Calculate the expected normal priority: i.e. priority
 831 * without taking RT-inheritance into account. Might be
 832 * boosted by interactivity modifiers. Changes upon fork,
 833 * setprio syscalls, and whenever the interactivity
 834 * estimator recalculates.
 835 */
 836static inline int normal_prio(struct task_struct *p)
 837{
 838        int prio;
 839
 840        if (task_has_dl_policy(p))
 841                prio = MAX_DL_PRIO-1;
 842        else if (task_has_rt_policy(p))
 843                prio = MAX_RT_PRIO-1 - p->rt_priority;
 844        else
 845                prio = __normal_prio(p);
 846        return prio;
 847}
 848
 849/*
 850 * Calculate the current priority, i.e. the priority
 851 * taken into account by the scheduler. This value might
 852 * be boosted by RT tasks, or might be boosted by
 853 * interactivity modifiers. Will be RT if the task got
 854 * RT-boosted. If not then it returns p->normal_prio.
 855 */
 856static int effective_prio(struct task_struct *p)
 857{
 858        p->normal_prio = normal_prio(p);
 859        /*
 860         * If we are RT tasks or we were boosted to RT priority,
 861         * keep the priority unchanged. Otherwise, update priority
 862         * to the normal priority:
 863         */
 864        if (!rt_prio(p->prio))
 865                return p->normal_prio;
 866        return p->prio;
 867}
 868
 869/**
 870 * task_curr - is this task currently executing on a CPU?
 871 * @p: the task in question.
 872 *
 873 * Return: 1 if the task is currently executing. 0 otherwise.
 874 */
 875inline int task_curr(const struct task_struct *p)
 876{
 877        return cpu_curr(task_cpu(p)) == p;
 878}
 879
 880/*
 881 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock;
 882 * use the balance_callback list if you want balancing.
 883 *
 884 * This means any call to check_class_changed() must be followed by a call to
 885 * balance_callback().
 886 */
 887static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 888                                       const struct sched_class *prev_class,
 889                                       int oldprio)
 890{
 891        if (prev_class != p->sched_class) {
 892                if (prev_class->switched_from)
 893                        prev_class->switched_from(rq, p);
 894
 895                p->sched_class->switched_to(rq, p);
 896        } else if (oldprio != p->prio || dl_task(p))
 897                p->sched_class->prio_changed(rq, p, oldprio);
 898}
 899
 900void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 901{
 902        const struct sched_class *class;
 903
 904        if (p->sched_class == rq->curr->sched_class) {
 905                rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 906        } else {
 907                for_each_class(class) {
 908                        if (class == rq->curr->sched_class)
 909                                break;
 910                        if (class == p->sched_class) {
 911                                resched_curr(rq);
 912                                break;
 913                        }
 914                }
 915        }
 916
 917        /*
 918         * A queue event has occurred, and we're going to schedule.  In
 919         * this case, we can save a useless back-to-back clock update.
 920         */
 921        if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
 922                rq_clock_skip_update(rq, true);
 923}
 924
 925#ifdef CONFIG_SMP
 926/*
 927 * This is how migration works:
 928 *
 929 * 1) we invoke migration_cpu_stop() on the target CPU using
 930 *    stop_one_cpu().
 931 * 2) stopper starts to run (implicitly forcing the migrated thread
 932 *    off the CPU)
 933 * 3) it checks whether the migrated task is still in the wrong runqueue.
 934 * 4) if it's in the wrong runqueue then the migration thread removes
 935 *    it and puts it into the right queue.
 936 * 5) stopper completes and stop_one_cpu() returns and the migration
 937 *    is done.
 938 */
 939
 940/*
 941 * move_queued_task - move a queued task to new rq.
 942 *
 943 * Returns (locked) new rq. Old rq's lock is released.
 944 */
 945static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
 946                                   struct task_struct *p, int new_cpu)
 947{
 948        lockdep_assert_held(&rq->lock);
 949
 950        p->on_rq = TASK_ON_RQ_MIGRATING;
 951        dequeue_task(rq, p, DEQUEUE_NOCLOCK);
 952        set_task_cpu(p, new_cpu);
 953        rq_unlock(rq, rf);
 954
 955        rq = cpu_rq(new_cpu);
 956
 957        rq_lock(rq, rf);
 958        BUG_ON(task_cpu(p) != new_cpu);
 959        enqueue_task(rq, p, 0);
 960        p->on_rq = TASK_ON_RQ_QUEUED;
 961        check_preempt_curr(rq, p, 0);
 962
 963        return rq;
 964}
 965
 966struct migration_arg {
 967        struct task_struct *task;
 968        int dest_cpu;
 969};
 970
 971/*
 972 * Move (not current) task off this CPU, onto the destination CPU. We're doing
 973 * this because either it can't run here any more (set_cpus_allowed()
 974 * away from this CPU, or CPU going down), or because we're
 975 * attempting to rebalance this task on exec (sched_exec).
 976 *
 977 * So we race with normal scheduler movements, but that's OK, as long
 978 * as the task is no longer on this CPU.
 979 */
 980static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
 981                                 struct task_struct *p, int dest_cpu)
 982{
 983        if (unlikely(!cpu_active(dest_cpu)))
 984                return rq;
 985
 986        /* Affinity changed (again). */
 987        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
 988                return rq;
 989
 990        update_rq_clock(rq);
 991        rq = move_queued_task(rq, rf, p, dest_cpu);
 992
 993        return rq;
 994}
 995
 996/*
 997 * migration_cpu_stop - this will be executed by a high-prio stopper thread
 998 * and performs thread migration by bumping the thread off the CPU and then
 999 * 'pushing' it onto another runqueue.
1000 */
1001static int migration_cpu_stop(void *data)
1002{
1003        struct migration_arg *arg = data;
1004        struct task_struct *p = arg->task;
1005        struct rq *rq = this_rq();
1006        struct rq_flags rf;
1007
1008        /*
1009         * The original target CPU might have gone down and we might
1010         * be on another CPU but it doesn't matter.
1011         */
1012        local_irq_disable();
1013        /*
1014         * We need to explicitly wake pending tasks before running
1015         * __migrate_task() such that we will not miss enforcing cpus_allowed
1016         * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
1017         */
1018        sched_ttwu_pending();
1019
1020        raw_spin_lock(&p->pi_lock);
1021        rq_lock(rq, &rf);
1022        /*
1023         * If task_rq(p) != rq, it cannot be migrated here, because we're
1024         * holding rq->lock; if p->on_rq == 0 it cannot get enqueued because
1025         * we're holding p->pi_lock.
1026         */
1027        if (task_rq(p) == rq) {
1028                if (task_on_rq_queued(p))
1029                        rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
1030                else
1031                        p->wake_cpu = arg->dest_cpu;
1032        }
1033        rq_unlock(rq, &rf);
1034        raw_spin_unlock(&p->pi_lock);
1035
1036        local_irq_enable();
1037        return 0;
1038}
1039
1040/*
1041 * sched_class::set_cpus_allowed must do the below, but is not required to
1042 * actually call this function.
1043 */
1044void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
1045{
1046        cpumask_copy(&p->cpus_allowed, new_mask);
1047        p->nr_cpus_allowed = cpumask_weight(new_mask);
1048}
1049
1050void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1051{
1052        struct rq *rq = task_rq(p);
1053        bool queued, running;
1054
1055        lockdep_assert_held(&p->pi_lock);
1056
1057        queued = task_on_rq_queued(p);
1058        running = task_current(rq, p);
1059
1060        if (queued) {
1061                /*
1062                 * Because __kthread_bind() calls this on blocked tasks without
1063                 * holding rq->lock.
1064                 */
1065                lockdep_assert_held(&rq->lock);
1066                dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
1067        }
1068        if (running)
1069                put_prev_task(rq, p);
1070
1071        p->sched_class->set_cpus_allowed(p, new_mask);
1072
1073        if (queued)
1074                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
1075        if (running)
1076                set_curr_task(rq, p);
1077}
1078
1079/*
1080 * Change a given task's CPU affinity. Migrate the thread to a
1081 * proper CPU and schedule it away if the CPU it's executing on
1082 * is removed from the allowed bitmask.
1083 *
1084 * NOTE: the caller must have a valid reference to the task, the
1085 * task must not exit() & deallocate itself prematurely. The
1086 * call is not atomic; no spinlocks may be held.
1087 */
1088static int __set_cpus_allowed_ptr(struct task_struct *p,
1089                                  const struct cpumask *new_mask, bool check)
1090{
1091        const struct cpumask *cpu_valid_mask = cpu_active_mask;
1092        unsigned int dest_cpu;
1093        struct rq_flags rf;
1094        struct rq *rq;
1095        int ret = 0;
1096
1097        rq = task_rq_lock(p, &rf);
1098        update_rq_clock(rq);
1099
1100        if (p->flags & PF_KTHREAD) {
1101                /*
1102                 * Kernel threads are allowed on online && !active CPUs
1103                 */
1104                cpu_valid_mask = cpu_online_mask;
1105        }
1106
1107        /*
1108         * Must re-check here, to close a race against __kthread_bind();
1109         * sched_setaffinity() is not guaranteed to observe the flag.
1110         */
1111        if (check && (p->flags & PF_NO_SETAFFINITY)) {
1112                ret = -EINVAL;
1113                goto out;
1114        }
1115
1116        if (cpumask_equal(&p->cpus_allowed, new_mask))
1117                goto out;
1118
1119        if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
1120                ret = -EINVAL;
1121                goto out;
1122        }
1123
1124        do_set_cpus_allowed(p, new_mask);
1125
1126        if (p->flags & PF_KTHREAD) {
1127                /*
1128                 * For kernel threads that do indeed end up on online &&
1129                 * !active we want to ensure they are strict per-CPU threads.
1130                 */
1131                WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
1132                        !cpumask_intersects(new_mask, cpu_active_mask) &&
1133                        p->nr_cpus_allowed != 1);
1134        }
1135
1136        /* Can the task run on the task's current CPU? If so, we're done */
1137        if (cpumask_test_cpu(task_cpu(p), new_mask))
1138                goto out;
1139
1140        dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
1141        if (task_running(rq, p) || p->state == TASK_WAKING) {
1142                struct migration_arg arg = { p, dest_cpu };
1143                /* Need help from migration thread: drop lock and wait. */
1144                task_rq_unlock(rq, p, &rf);
1145                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1146                tlb_migrate_finish(p->mm);
1147                return 0;
1148        } else if (task_on_rq_queued(p)) {
1149                /*
1150                 * OK, since we're going to drop the lock immediately
1151                 * afterwards anyway.
1152                 */
1153                rq = move_queued_task(rq, &rf, p, dest_cpu);
1154        }
1155out:
1156        task_rq_unlock(rq, p, &rf);
1157
1158        return ret;
1159}
1160
1161int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1162{
1163        return __set_cpus_allowed_ptr(p, new_mask, false);
1164}
1165EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
1166
1167void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1168{
1169#ifdef CONFIG_SCHED_DEBUG
1170        /*
1171         * We should never call set_task_cpu() on a blocked task;
1172         * ttwu() will sort out the placement.
1173         */
1174        WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
1175                        !p->on_rq);
1176
1177        /*
1178         * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
1179         * because schedstat_wait_{start,end} rebase migrating task's wait_start
1180         * time relying on p->on_rq.
1181         */
1182        WARN_ON_ONCE(p->state == TASK_RUNNING &&
1183                     p->sched_class == &fair_sched_class &&
1184                     (p->on_rq && !task_on_rq_migrating(p)));
1185
1186#ifdef CONFIG_LOCKDEP
1187        /*
1188         * The caller should hold either p->pi_lock or rq->lock, when changing
1189         * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
1190         *
1191         * sched_move_task() holds both and thus holding either pins the cgroup,
1192         * see task_group().
1193         *
1194         * Furthermore, all task_rq users should acquire both locks, see
1195         * task_rq_lock().
1196         */
1197        WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1198                                      lockdep_is_held(&task_rq(p)->lock)));
1199#endif
1200#endif
1201
1202        trace_sched_migrate_task(p, new_cpu);
1203
1204        if (task_cpu(p) != new_cpu) {
1205                if (p->sched_class->migrate_task_rq)
1206                        p->sched_class->migrate_task_rq(p);
1207                p->se.nr_migrations++;
1208                perf_event_task_migrate(p);
1209        }
1210
1211        __set_task_cpu(p, new_cpu);
1212}
1213
1214static void __migrate_swap_task(struct task_struct *p, int cpu)
1215{
1216        if (task_on_rq_queued(p)) {
1217                struct rq *src_rq, *dst_rq;
1218                struct rq_flags srf, drf;
1219
1220                src_rq = task_rq(p);
1221                dst_rq = cpu_rq(cpu);
1222
1223                rq_pin_lock(src_rq, &srf);
1224                rq_pin_lock(dst_rq, &drf);
1225
1226                p->on_rq = TASK_ON_RQ_MIGRATING;
1227                deactivate_task(src_rq, p, 0);
1228                set_task_cpu(p, cpu);
1229                activate_task(dst_rq, p, 0);
1230                p->on_rq = TASK_ON_RQ_QUEUED;
1231                check_preempt_curr(dst_rq, p, 0);
1232
1233                rq_unpin_lock(dst_rq, &drf);
1234                rq_unpin_lock(src_rq, &srf);
1235
1236        } else {
1237                /*
1238                 * Task isn't running anymore; make it appear like we migrated
1239                 * it before it went to sleep. This means on wakeup we make the
1240                 * previous CPU our target instead of where it really is.
1241                 */
1242                p->wake_cpu = cpu;
1243        }
1244}
1245
1246struct migration_swap_arg {
1247        struct task_struct *src_task, *dst_task;
1248        int src_cpu, dst_cpu;
1249};
1250
1251static int migrate_swap_stop(void *data)
1252{
1253        struct migration_swap_arg *arg = data;
1254        struct rq *src_rq, *dst_rq;
1255        int ret = -EAGAIN;
1256
1257        if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
1258                return -EAGAIN;
1259
1260        src_rq = cpu_rq(arg->src_cpu);
1261        dst_rq = cpu_rq(arg->dst_cpu);
1262
1263        double_raw_lock(&arg->src_task->pi_lock,
1264                        &arg->dst_task->pi_lock);
1265        double_rq_lock(src_rq, dst_rq);
1266
1267        if (task_cpu(arg->dst_task) != arg->dst_cpu)
1268                goto unlock;
1269
1270        if (task_cpu(arg->src_task) != arg->src_cpu)
1271                goto unlock;
1272
1273        if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))
1274                goto unlock;
1275
1276        if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))
1277                goto unlock;
1278
1279        __migrate_swap_task(arg->src_task, arg->dst_cpu);
1280        __migrate_swap_task(arg->dst_task, arg->src_cpu);
1281
1282        ret = 0;
1283
1284unlock:
1285        double_rq_unlock(src_rq, dst_rq);
1286        raw_spin_unlock(&arg->dst_task->pi_lock);
1287        raw_spin_unlock(&arg->src_task->pi_lock);
1288
1289        return ret;
1290}
1291
1292/*
1293 * Cross migrate two tasks
1294 */
1295int migrate_swap(struct task_struct *cur, struct task_struct *p)
1296{
1297        struct migration_swap_arg arg;
1298        int ret = -EINVAL;
1299
1300        arg = (struct migration_swap_arg){
1301                .src_task = cur,
1302                .src_cpu = task_cpu(cur),
1303                .dst_task = p,
1304                .dst_cpu = task_cpu(p),
1305        };
1306
1307        if (arg.src_cpu == arg.dst_cpu)
1308                goto out;
1309
1310        /*
1311         * These three tests are all lockless; this is OK since all of them
1312         * will be re-checked with proper locks held further down the line.
1313         */
1314        if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
1315                goto out;
1316
1317        if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))
1318                goto out;
1319
1320        if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))
1321                goto out;
1322
1323        trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
1324        ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
1325
1326out:
1327        return ret;
1328}
1329
1330/*
1331 * wait_task_inactive - wait for a thread to unschedule.
1332 *
1333 * If @match_state is nonzero, it's the @p->state value just checked and
1334 * not expected to change.  If it changes, i.e. @p might have woken up,
1335 * then return zero.  When we succeed in waiting for @p to be off its CPU,
1336 * we return a positive number (its total switch count).  If a second call
1337 * a short while later returns the same number, the caller can be sure that
1338 * @p has remained unscheduled the whole time.
1339 *
1340 * The caller must ensure that the task *will* unschedule sometime soon,
1341 * else this function might spin for a *long* time. This function can't
1342 * be called with interrupts off, or it may introduce deadlock with
1343 * smp_call_function() if an IPI is sent by the same process we are
1344 * waiting to become inactive.
1345 */
1346unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1347{
1348        int running, queued;
1349        struct rq_flags rf;
1350        unsigned long ncsw;
1351        struct rq *rq;
1352
1353        for (;;) {
1354                /*
1355                 * We do the initial early heuristics without holding
1356                 * any task-queue locks at all. We'll only try to get
1357                 * the runqueue lock when things look like they will
1358                 * work out!
1359                 */
1360                rq = task_rq(p);
1361
1362                /*
1363                 * If the task is actively running on another CPU
1364                 * still, just relax and busy-wait without holding
1365                 * any locks.
1366                 *
1367                 * NOTE! Since we don't hold any locks, it's not
1368                 * even sure that "rq" stays as the right runqueue!
1369                 * But we don't care, since "task_running()" will
1370                 * return false if the runqueue has changed and p
1371                 * is actually now running somewhere else!
1372                 */
1373                while (task_running(rq, p)) {
1374                        if (match_state && unlikely(p->state != match_state))
1375                                return 0;
1376                        cpu_relax();
1377                }
1378
1379                /*
1380                 * Ok, time to look more closely! We need the rq
1381                 * lock now, to be *sure*. If we're wrong, we'll
1382                 * just go back and repeat.
1383                 */
1384                rq = task_rq_lock(p, &rf);
1385                trace_sched_wait_task(p);
1386                running = task_running(rq, p);
1387                queued = task_on_rq_queued(p);
1388                ncsw = 0;
1389                if (!match_state || p->state == match_state)
1390                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1391                task_rq_unlock(rq, p, &rf);
1392
1393                /*
1394                 * If it changed from the expected state, bail out now.
1395                 */
1396                if (unlikely(!ncsw))
1397                        break;
1398
1399                /*
1400                 * Was it really running after all now that we
1401                 * checked with the proper locks actually held?
1402                 *
1403                 * Oops. Go back and try again..
1404                 */
1405                if (unlikely(running)) {
1406                        cpu_relax();
1407                        continue;
1408                }
1409
1410                /*
1411                 * It's not enough that it's not actively running,
1412                 * it must be off the runqueue _entirely_, and not
1413                 * preempted!
1414                 *
1415                 * So if it was still runnable (but just not actively
1416                 * running right now), it's preempted, and we should
1417                 * yield - it could be a while.
1418                 */
1419                if (unlikely(queued)) {
1420                        ktime_t to = NSEC_PER_SEC / HZ;
1421
1422                        set_current_state(TASK_UNINTERRUPTIBLE);
1423                        schedule_hrtimeout(&to, HRTIMER_MODE_REL);
1424                        continue;
1425                }
1426
1427                /*
1428                 * Ahh, all good. It wasn't running, and it wasn't
1429                 * runnable, which means that it will never become
1430                 * running in the future either. We're all done!
1431                 */
1432                break;
1433        }
1434
1435        return ncsw;
1436}
1437
1438/***
1439 * kick_process - kick a running thread to enter/exit the kernel
1440 * @p: the to-be-kicked thread
1441 *
1442 * Cause a process which is running on another CPU to enter
1443 * kernel-mode, without any delay. (to get signals handled.)
1444 *
1445 * NOTE: this function doesn't have to take the runqueue lock,
1446 * because all it wants to ensure is that the remote task enters
1447 * the kernel. If the IPI races and the task has been migrated
1448 * to another CPU then no harm is done and the purpose has been
1449 * achieved as well.
1450 */
1451void kick_process(struct task_struct *p)
1452{
1453        int cpu;
1454
1455        preempt_disable();
1456        cpu = task_cpu(p);
1457        if ((cpu != smp_processor_id()) && task_curr(p))
1458                smp_send_reschedule(cpu);
1459        preempt_enable();
1460}
1461EXPORT_SYMBOL_GPL(kick_process);
1462
1463/*
1464 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
1465 *
1466 * A few notes on cpu_active vs cpu_online:
1467 *
1468 *  - cpu_active must be a subset of cpu_online
1469 *
1470 *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
1471 *    see __set_cpus_allowed_ptr(). At this point the newly online
1472 *    CPU isn't yet part of the sched domains, and balancing will not
1473 *    see it.
1474 *
1475 *  - on CPU-down we clear cpu_active() to mask the sched domains and
1476 *    prevent the load balancer from placing new tasks on the CPU being
1477 *    removed. Existing tasks will remain running there and will be taken
1478 *    off.
1479 *
1480 * This means that fallback selection must not select !active CPUs,
1481 * and can assume that any active CPU must be online. Conversely,
1482 * select_task_rq() below may allow selection of !active CPUs in order
1483 * to satisfy the above rules.
1484 */
1485static int select_fallback_rq(int cpu, struct task_struct *p)
1486{
1487        int nid = cpu_to_node(cpu);
1488        const struct cpumask *nodemask = NULL;
1489        enum { cpuset, possible, fail } state = cpuset;
1490        int dest_cpu;
1491
1492        /*
1493         * If the node that the CPU is on has been offlined, cpu_to_node()
1494         * will return -1. There is no CPU on the node, and we should
1495         * select a CPU on another node.
1496         */
1497        if (nid != -1) {
1498                nodemask = cpumask_of_node(nid);
1499
1500                /* Look for allowed, online CPU in same node. */
1501                for_each_cpu(dest_cpu, nodemask) {
1502                        if (!cpu_active(dest_cpu))
1503                                continue;
1504                        if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
1505                                return dest_cpu;
1506                }
1507        }
1508
1509        for (;;) {
1510                /* Any allowed, online CPU? */
1511                for_each_cpu(dest_cpu, &p->cpus_allowed) {
1512                        if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
1513                                continue;
1514                        if (!cpu_online(dest_cpu))
1515                                continue;
1516                        goto out;
1517                }
1518
1519                /* No more Mr. Nice Guy. */
1520                switch (state) {
1521                case cpuset:
1522                        if (IS_ENABLED(CONFIG_CPUSETS)) {
1523                                cpuset_cpus_allowed_fallback(p);
1524                                state = possible;
1525                                break;
1526                        }
1527                        /* Fall-through */
1528                case possible:
1529                        do_set_cpus_allowed(p, cpu_possible_mask);
1530                        state = fail;
1531                        break;
1532
1533                case fail:
1534                        BUG();
1535                        break;
1536                }
1537        }
1538
1539out:
1540        if (state != cpuset) {
1541                /*
1542                 * Don't tell them about moving exiting tasks or
1543                 * kernel threads (both mm NULL), since they never
1544                 * leave the kernel.
1545                 */
1546                if (p->mm && printk_ratelimit()) {
1547                        printk_deferred("process %d (%s) no longer affine to cpu%d\n",
1548                                        task_pid_nr(p), p->comm, cpu);
1549                }
1550        }
1551
1552        return dest_cpu;
1553}
1554
1555/*
1556 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
1557 */
1558static inline
1559int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
1560{
1561        lockdep_assert_held(&p->pi_lock);
1562
1563        if (p->nr_cpus_allowed > 1)
1564                cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
1565        else
1566                cpu = cpumask_any(&p->cpus_allowed);
1567
1568        /*
1569         * In order not to call set_task_cpu() on a blocking task we need
1570         * to rely on ttwu() to place the task on a valid ->cpus_allowed
1571         * CPU.
1572         *
1573         * Since this is common to all placement strategies, this lives here.
1574         *
1575         * [ this allows ->select_task() to simply return task_cpu(p) and
1576         *   not worry about this generic constraint ]
1577         */
1578        if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
1579                     !cpu_online(cpu)))
1580                cpu = select_fallback_rq(task_cpu(p), p);
1581
1582        return cpu;
1583}
1584
1585static void update_avg(u64 *avg, u64 sample)
1586{
1587        s64 diff = sample - *avg;
1588        *avg += diff >> 3;
1589}
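
/*
 * I.e. *avg += (sample - *avg) / 8: an exponentially weighted moving
 * average that gives each new sample a 1/8 weight; used below to track
 * rq->avg_idle.
 */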
1590
1591#else
1592
1593static inline int __set_cpus_allowed_ptr(struct task_struct *p,
1594                                         const struct cpumask *new_mask, bool check)
1595{
1596        return set_cpus_allowed_ptr(p, new_mask);
1597}
1598
1599#endif /* CONFIG_SMP */
1600
1601static void
1602ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
1603{
1604        struct rq *rq;
1605
1606        if (!schedstat_enabled())
1607                return;
1608
1609        rq = this_rq();
1610
1611#ifdef CONFIG_SMP
1612        if (cpu == rq->cpu) {
1613                schedstat_inc(rq->ttwu_local);
1614                schedstat_inc(p->se.statistics.nr_wakeups_local);
1615        } else {
1616                struct sched_domain *sd;
1617
1618                schedstat_inc(p->se.statistics.nr_wakeups_remote);
1619                rcu_read_lock();
1620                for_each_domain(rq->cpu, sd) {
1621                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1622                                schedstat_inc(sd->ttwu_wake_remote);
1623                                break;
1624                        }
1625                }
1626                rcu_read_unlock();
1627        }
1628
1629        if (wake_flags & WF_MIGRATED)
1630                schedstat_inc(p->se.statistics.nr_wakeups_migrate);
1631#endif /* CONFIG_SMP */
1632
1633        schedstat_inc(rq->ttwu_count);
1634        schedstat_inc(p->se.statistics.nr_wakeups);
1635
1636        if (wake_flags & WF_SYNC)
1637                schedstat_inc(p->se.statistics.nr_wakeups_sync);
1638}
1639
1640static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1641{
1642        activate_task(rq, p, en_flags);
1643        p->on_rq = TASK_ON_RQ_QUEUED;
1644
1645        /* If a worker is waking up, notify the workqueue: */
1646        if (p->flags & PF_WQ_WORKER)
1647                wq_worker_waking_up(p, cpu_of(rq));
1648}
1649
1650/*
1651 * Mark the task runnable and perform wakeup-preemption.
1652 */
1653static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
1654                           struct rq_flags *rf)
1655{
1656        check_preempt_curr(rq, p, wake_flags);
1657        p->state = TASK_RUNNING;
1658        trace_sched_wakeup(p);
1659
1660#ifdef CONFIG_SMP
1661        if (p->sched_class->task_woken) {
1662                /*
1663                 * Our task @p is fully woken up and running; so it's safe to
1664                 * drop the rq->lock, hereafter rq is only used for statistics.
1665                 */
1666                rq_unpin_lock(rq, rf);
1667                p->sched_class->task_woken(rq, p);
1668                rq_repin_lock(rq, rf);
1669        }
1670
1671        if (rq->idle_stamp) {
1672                u64 delta = rq_clock(rq) - rq->idle_stamp;
1673                u64 max = 2*rq->max_idle_balance_cost;
1674
1675                update_avg(&rq->avg_idle, delta);
1676
1677                if (rq->avg_idle > max)
1678                        rq->avg_idle = max;
1679
1680                rq->idle_stamp = 0;
1681        }
1682#endif
1683}
1684
1685static void
1686ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
1687                 struct rq_flags *rf)
1688{
1689        int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
1690
1691        lockdep_assert_held(&rq->lock);
1692
1693#ifdef CONFIG_SMP
1694        if (p->sched_contributes_to_load)
1695                rq->nr_uninterruptible--;
1696
1697        if (wake_flags & WF_MIGRATED)
1698                en_flags |= ENQUEUE_MIGRATED;
1699#endif
1700
1701        ttwu_activate(rq, p, en_flags);
1702        ttwu_do_wakeup(rq, p, wake_flags, rf);
1703}
1704
1705/*
1706 * Called in case the task @p isn't fully descheduled from its runqueue;
1707 * in this case we must do a remote wakeup. It's a 'light' wakeup though:
1708 * all we need to do is flip p->state to TASK_RUNNING, because
1709 * the task is still ->on_rq.
1710 */
1711static int ttwu_remote(struct task_struct *p, int wake_flags)
1712{
1713        struct rq_flags rf;
1714        struct rq *rq;
1715        int ret = 0;
1716
1717        rq = __task_rq_lock(p, &rf);
1718        if (task_on_rq_queued(p)) {
1719                /* check_preempt_curr() may use rq clock */
1720                update_rq_clock(rq);
1721                ttwu_do_wakeup(rq, p, wake_flags, &rf);
1722                ret = 1;
1723        }
1724        __task_rq_unlock(rq, &rf);
1725
1726        return ret;
1727}
1728
1729#ifdef CONFIG_SMP
1730void sched_ttwu_pending(void)
1731{
1732        struct rq *rq = this_rq();
1733        struct llist_node *llist = llist_del_all(&rq->wake_list);
1734        struct task_struct *p;
1735        struct rq_flags rf;
1736
1737        if (!llist)
1738                return;
1739
1740        rq_lock_irqsave(rq, &rf);
1741        update_rq_clock(rq);
1742
1743        while (llist) {
1744                int wake_flags = 0;
1745
1746                p = llist_entry(llist, struct task_struct, wake_entry);
1747                llist = llist_next(llist);
1748
1749                if (p->sched_remote_wakeup)
1750                        wake_flags = WF_MIGRATED;
1751
1752                ttwu_do_activate(rq, p, wake_flags, &rf);
1753        }
1754
1755        rq_unlock_irqrestore(rq, &rf);
1756}
1757
1758void scheduler_ipi(void)
1759{
1760        /*
1761         * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1762         * TIF_NEED_RESCHED remotely (for the first time) will also send
1763         * this IPI.
1764         */
1765        preempt_fold_need_resched();
1766
1767        if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
1768                return;
1769
1770        /*
1771         * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1772         * traditionally all their work was done from the interrupt return
1773         * path. Now that we actually do some work, we need to make sure
1774         * we do call them.
1775         *
1776         * Some archs already do call them, luckily irq_enter/exit nest
1777         * properly.
1778         *
1779         * Arguably we should visit all archs and update all handlers,
1780         * however, a fair share of IPIs are still resched-only, so this would
1781         * somewhat pessimize the simple resched case.
1782         */
1783        irq_enter();
1784        sched_ttwu_pending();
1785
1786        /*
1787         * Check if someone kicked us for doing the nohz idle load balance.
1788         */
1789        if (unlikely(got_nohz_idle_kick())) {
1790                this_rq()->idle_balance = 1;
1791                raise_softirq_irqoff(SCHED_SOFTIRQ);
1792        }
1793        irq_exit();
1794}
1795
1796static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
1797{
1798        struct rq *rq = cpu_rq(cpu);
1799
1800        p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
1801
1802        if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
1803                if (!set_nr_if_polling(rq->idle))
1804                        smp_send_reschedule(cpu);
1805                else
1806                        trace_sched_wake_idle_without_ipi(cpu);
1807        }
1808}
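/*
 * A reduced sketch of the lock-free hand-off used above, with a made-up item
 * type: producers push onto a per-CPU llist with llist_add() and only send
 * the (comparatively expensive) IPI when the list was previously empty; the
 * target CPU then grabs the whole chain at once with llist_del_all() and
 * walks it, just as sched_ttwu_pending() does with rq->wake_list.
 */
#if 0	/* illustrative sketch, not built as part of this file */
struct wake_item {
	struct llist_node	node;
	int			payload;
};

static DEFINE_PER_CPU(struct llist_head, wake_items);

static void wake_item_queue(int cpu, struct wake_item *item)
{
	/* First entry on an empty list: the remote CPU needs a kick. */
	if (llist_add(&item->node, &per_cpu(wake_items, cpu)))
		smp_send_reschedule(cpu);
}

/* Runs on the target CPU, e.g. from its reschedule IPI handler. */
static void wake_item_drain(void)
{
	struct llist_node *llist = llist_del_all(this_cpu_ptr(&wake_items));
	struct wake_item *item;

	while (llist) {
		item = llist_entry(llist, struct wake_item, node);
		llist = llist_next(llist);
		/* process item->payload */
	}
}
#endif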
1809
1810void wake_up_if_idle(int cpu)
1811{
1812        struct rq *rq = cpu_rq(cpu);
1813        struct rq_flags rf;
1814
1815        rcu_read_lock();
1816
1817        if (!is_idle_task(rcu_dereference(rq->curr)))
1818                goto out;
1819
1820        if (set_nr_if_polling(rq->idle)) {
1821                trace_sched_wake_idle_without_ipi(cpu);
1822        } else {
1823                rq_lock_irqsave(rq, &rf);
1824                if (is_idle_task(rq->curr))
1825                        smp_send_reschedule(cpu);
1826                /* Else CPU is not idle, do nothing here: */
1827                rq_unlock_irqrestore(rq, &rf);
1828        }
1829
1830out:
1831        rcu_read_unlock();
1832}
1833
1834bool cpus_share_cache(int this_cpu, int that_cpu)
1835{
1836        return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1837}
1838#endif /* CONFIG_SMP */
1839
1840static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
1841{
1842        struct rq *rq = cpu_rq(cpu);
1843        struct rq_flags rf;
1844
1845#if defined(CONFIG_SMP)
1846        if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
1847                sched_clock_cpu(cpu); /* Sync clocks across CPUs */
1848                ttwu_queue_remote(p, cpu, wake_flags);
1849                return;
1850        }
1851#endif
1852
1853        rq_lock(rq, &rf);
1854        update_rq_clock(rq);
1855        ttwu_do_activate(rq, p, wake_flags, &rf);
1856        rq_unlock(rq, &rf);
1857}
1858
1859/*
1860 * Notes on Program-Order guarantees on SMP systems.
1861 *
1862 *  MIGRATION
1863 *
1864 * The basic program-order guarantee on SMP systems is that when a task [t]
1865 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
1866 * execution on its new CPU [c1].
1867 *
1868 * For migration (of runnable tasks) this is provided by the following means:
1869 *
1870 *  A) UNLOCK of the rq(c0)->lock scheduling out task t
1871 *  B) migration for t is required to synchronize *both* rq(c0)->lock and
1872 *     rq(c1)->lock (if not at the same time, then in that order).
1873 *  C) LOCK of the rq(c1)->lock scheduling in task
1874 *
1875 * Transitivity guarantees that B happens after A and C after B.
1876 * Note: we only require RCpc transitivity.
1877 * Note: the CPU doing B need not be c0 or c1
1878 *
1879 * Example:
1880 *
1881 *   CPU0            CPU1            CPU2
1882 *
1883 *   LOCK rq(0)->lock
1884 *   sched-out X
1885 *   sched-in Y
1886 *   UNLOCK rq(0)->lock
1887 *
1888 *                                   LOCK rq(0)->lock // orders against CPU0
1889 *                                   dequeue X
1890 *                                   UNLOCK rq(0)->lock
1891 *
1892 *                                   LOCK rq(1)->lock
1893 *                                   enqueue X
1894 *                                   UNLOCK rq(1)->lock
1895 *
1896 *                   LOCK rq(1)->lock // orders against CPU2
1897 *                   sched-out Z
1898 *                   sched-in X
1899 *                   UNLOCK rq(1)->lock
1900 *
1901 *
1902 *  BLOCKING -- aka. SLEEP + WAKEUP
1903 *
1904 * For blocking we (obviously) need to provide the same guarantee as for
1905 * migration. However the means are completely different as there is no lock
1906 * chain to provide order. Instead we do:
1907 *
1908 *   1) smp_store_release(X->on_cpu, 0)
1909 *   2) smp_cond_load_acquire(!X->on_cpu)
1910 *
1911 * Example:
1912 *
1913 *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
1914 *
1915 *   LOCK rq(0)->lock LOCK X->pi_lock
1916 *   dequeue X
1917 *   sched-out X
1918 *   smp_store_release(X->on_cpu, 0);
1919 *
1920 *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
1921 *                    X->state = WAKING
1922 *                    set_task_cpu(X,2)
1923 *
1924 *                    LOCK rq(2)->lock
1925 *                    enqueue X
1926 *                    X->state = RUNNING
1927 *                    UNLOCK rq(2)->lock
1928 *
1929 *                                          LOCK rq(2)->lock // orders against CPU1
1930 *                                          sched-out Z
1931 *                                          sched-in X
1932 *                                          UNLOCK rq(2)->lock
1933 *
1934 *                    UNLOCK X->pi_lock
1935 *   UNLOCK rq(0)->lock
1936 *
1937 *
1938 * However, for wakeups there is a second guarantee we must provide, namely we
1939 * must observe the state that led to our wakeup. That is, not only must our
1940 * task observe its own prior state, it must also observe the stores prior to
1941 * its wakeup.
1942 *
1943 * This means that any means of doing remote wakeups must order the CPU doing
1944 * the wakeup against the CPU the task is going to end up running on. This,
1945 * however, is already required for the regular Program-Order guarantee above,
1946 * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
1947 *
1948 */
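/*
 * The release/acquire pairing in points 1) and 2) can be sketched outside the
 * kernel with C11 atomics; the type and field names below are made up, only
 * the ordering pattern is the point: the releasing store of on_cpu publishes
 * everything the task wrote before it, and the spinning acquire load on the
 * waker's side is guaranteed to observe those writes.
 */
#if 0	/* illustrative userspace sketch, not built as part of this file */
#include <stdatomic.h>

struct fake_task {
	int		state;		/* written before the release */
	atomic_int	on_cpu;
};

static void sched_out_side(struct fake_task *t)
{
	t->state = 42;			/* prior activity on the old CPU */
	/* 1) smp_store_release(X->on_cpu, 0) */
	atomic_store_explicit(&t->on_cpu, 0, memory_order_release);
}

static int waker_side(struct fake_task *t)
{
	/* 2) smp_cond_load_acquire(!X->on_cpu) */
	while (atomic_load_explicit(&t->on_cpu, memory_order_acquire))
		;
	return t->state;		/* guaranteed to read 42 */
}
#endif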
1949
1950/**
1951 * try_to_wake_up - wake up a thread
1952 * @p: the thread to be awakened
1953 * @state: the mask of task states that can be woken
1954 * @wake_flags: wake modifier flags (WF_*)
1955 *
1956 * If (@state & @p->state) @p->state = TASK_RUNNING.
1957 *
1958 * If the task was not queued/runnable, also place it back on a runqueue.
1959 *
1960 * Atomic against schedule() which would dequeue a task, also see
1961 * set_current_state().
1962 *
1963 * Return: %true if @p->state changes (an actual wakeup was done),
1964 *         %false otherwise.
1965 */
1966static int
1967try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1968{
1969        unsigned long flags;
1970        int cpu, success = 0;
1971
1972        /*
1973         * If we are going to wake up a thread waiting for CONDITION we
1974         * need to ensure that CONDITION=1 done by the caller can not be
1975         * reordered with p->state check below. This pairs with mb() in
1976         * set_current_state() the waiting thread does.
1977         */
1978        smp_mb__before_spinlock();
1979        raw_spin_lock_irqsave(&p->pi_lock, flags);
1980        if (!(p->state & state))
1981                goto out;
1982
1983        trace_sched_waking(p);
1984
1985        /* We're going to change ->state: */
1986        success = 1;
1987        cpu = task_cpu(p);
1988
1989        /*
1990         * Ensure we load p->on_rq _after_ p->state, otherwise it would
1991         * be possible to, falsely, observe p->on_rq == 0 and get stuck
1992         * in smp_cond_load_acquire() below.
1993         *
1994         * sched_ttwu_pending()                 try_to_wake_up()
1995         *   [S] p->on_rq = 1;                  [L] P->state
1996         *       UNLOCK rq->lock  -----.
1997         *                              \
1998         *                               +---   RMB
1999         * schedule()                   /
2000         *       LOCK rq->lock    -----'
2001         *       UNLOCK rq->lock
2002         *
2003         * [task p]
2004         *   [S] p->state = UNINTERRUPTIBLE     [L] p->on_rq
2005         *
2006         * Pairs with the UNLOCK+LOCK on rq->lock from the
2007         * last wakeup of our task and the schedule that got our task
2008         * current.
2009         */
2010        smp_rmb();
2011        if (p->on_rq && ttwu_remote(p, wake_flags))
2012                goto stat;
2013
2014#ifdef CONFIG_SMP
2015        /*
2016         * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
2017         * possible to, falsely, observe p->on_cpu == 0.
2018         *
2019         * One must be running (->on_cpu == 1) in order to remove oneself
2020         * from the runqueue.
2021         *
2022         *  [S] ->on_cpu = 1;   [L] ->on_rq
2023         *      UNLOCK rq->lock
2024         *                      RMB
2025         *      LOCK   rq->lock
2026         *  [S] ->on_rq = 0;    [L] ->on_cpu
2027         *
2028         * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
2029         * from the consecutive calls to schedule(); the first switching to our
2030         * task, the second putting it to sleep.
2031         */
2032        smp_rmb();
2033
2034        /*
2035         * If the owning (remote) CPU is still in the middle of schedule() with
2036         * this task as prev, wait until it's done referencing the task.
2037         *
2038         * Pairs with the smp_store_release() in finish_lock_switch().
2039         *
2040         * This ensures that tasks getting woken will be fully ordered against
2041         * their previous state and preserve Program Order.
2042         */
2043        smp_cond_load_acquire(&p->on_cpu, !VAL);
2044
2045        p->sched_contributes_to_load = !!task_contributes_to_load(p);
2046        p->state = TASK_WAKING;
2047
2048        if (p->in_iowait) {
2049                delayacct_blkio_end();
2050                atomic_dec(&task_rq(p)->nr_iowait);
2051        }
2052
2053        cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
2054        if (task_cpu(p) != cpu) {
2055                wake_flags |= WF_MIGRATED;
2056                set_task_cpu(p, cpu);
2057        }
2058
2059#else /* CONFIG_SMP */
2060
2061        if (p->in_iowait) {
2062                delayacct_blkio_end();
2063                atomic_dec(&task_rq(p)->nr_iowait);
2064        }
2065
2066#endif /* CONFIG_SMP */
2067
2068        ttwu_queue(p, cpu, wake_flags);
2069stat:
2070        ttwu_stat(p, cpu, wake_flags);
2071out:
2072        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2073
2074        return success;
2075}
2076
2077/**
2078 * try_to_wake_up_local - try to wake up a local task with rq lock held
2079 * @p: the thread to be awakened
2080 * @rf: rq_flags holding the rq-lock pin cookie
2081 *
2082 * Put @p on the run-queue if it's not already there. The caller must
2083 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2084 * the current task.
2085 */
2086static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
2087{
2088        struct rq *rq = task_rq(p);
2089
2090        if (WARN_ON_ONCE(rq != this_rq()) ||
2091            WARN_ON_ONCE(p == current))
2092                return;
2093
2094        lockdep_assert_held(&rq->lock);
2095
2096        if (!raw_spin_trylock(&p->pi_lock)) {
2097                /*
2098                 * This is OK, because current is on_cpu, which avoids it being
2099                 * picked for load-balance and preemption/IRQs are still
2100                 * disabled avoiding further scheduler activity on it and we've
2101                 * not yet picked a replacement task.
2102                 */
2103                rq_unlock(rq, rf);
2104                raw_spin_lock(&p->pi_lock);
2105                rq_relock(rq, rf);
2106        }
2107
2108        if (!(p->state & TASK_NORMAL))
2109                goto out;
2110
2111        trace_sched_waking(p);
2112
2113        if (!task_on_rq_queued(p)) {
2114                if (p->in_iowait) {
2115                        delayacct_blkio_end();
2116                        atomic_dec(&rq->nr_iowait);
2117                }
2118                ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
2119        }
2120
2121        ttwu_do_wakeup(rq, p, 0, rf);
2122        ttwu_stat(p, smp_processor_id(), 0);
2123out:
2124        raw_spin_unlock(&p->pi_lock);
2125}
2126
2127/**
2128 * wake_up_process - Wake up a specific process
2129 * @p: The process to be woken up.
2130 *
2131 * Attempt to wake up the nominated process and move it to the set of runnable
2132 * processes.
2133 *
2134 * Return: 1 if the process was woken up, 0 if it was already running.
2135 *
2136 * It may be assumed that this function implies a write memory barrier before
2137 * changing the task state if and only if any tasks are woken up.
2138 */
2139int wake_up_process(struct task_struct *p)
2140{
2141        return try_to_wake_up(p, TASK_NORMAL, 0);
2142}
2143EXPORT_SYMBOL(wake_up_process);
2144
2145int wake_up_state(struct task_struct *p, unsigned int state)
2146{
2147        return try_to_wake_up(p, state, 0);
2148}
2149
2150/*
2151 * This function clears the sched_dl_entity static params.
2152 */
2153void __dl_clear_params(struct task_struct *p)
2154{
2155        struct sched_dl_entity *dl_se = &p->dl;
2156
2157        dl_se->dl_runtime = 0;
2158        dl_se->dl_deadline = 0;
2159        dl_se->dl_period = 0;
2160        dl_se->flags = 0;
2161        dl_se->dl_bw = 0;
2162
2163        dl_se->dl_throttled = 0;
2164        dl_se->dl_yielded = 0;
2165}
2166
2167/*
2168 * Perform scheduler related setup for a newly forked process p.
2169 * p is forked by current.
2170 *
2171 * __sched_fork() is basic setup used by init_idle() too:
2172 */
2173static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
2174{
2175        p->on_rq                        = 0;
2176
2177        p->se.on_rq                     = 0;
2178        p->se.exec_start                = 0;
2179        p->se.sum_exec_runtime          = 0;
2180        p->se.prev_sum_exec_runtime     = 0;
2181        p->se.nr_migrations             = 0;
2182        p->se.vruntime                  = 0;
2183        INIT_LIST_HEAD(&p->se.group_node);
2184
2185#ifdef CONFIG_FAIR_GROUP_SCHED
2186        p->se.cfs_rq                    = NULL;
2187#endif
2188
2189#ifdef CONFIG_SCHEDSTATS
2190        /* Even if schedstat is disabled, there should not be garbage */
2191        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2192#endif
2193
2194        RB_CLEAR_NODE(&p->dl.rb_node);
2195        init_dl_task_timer(&p->dl);
2196        __dl_clear_params(p);
2197
2198        INIT_LIST_HEAD(&p->rt.run_list);
2199        p->rt.timeout           = 0;
2200        p->rt.time_slice        = sched_rr_timeslice;
2201        p->rt.on_rq             = 0;
2202        p->rt.on_list           = 0;
2203
2204#ifdef CONFIG_PREEMPT_NOTIFIERS
2205        INIT_HLIST_HEAD(&p->preempt_notifiers);
2206#endif
2207
2208#ifdef CONFIG_NUMA_BALANCING
2209        if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
2210                p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2211                p->mm->numa_scan_seq = 0;
2212        }
2213
2214        if (clone_flags & CLONE_VM)
2215                p->numa_preferred_nid = current->numa_preferred_nid;
2216        else
2217                p->numa_preferred_nid = -1;
2218
2219        p->node_stamp = 0ULL;
2220        p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
2221        p->numa_scan_period = sysctl_numa_balancing_scan_delay;
2222        p->numa_work.next = &p->numa_work;
2223        p->numa_faults = NULL;
2224        p->last_task_numa_placement = 0;
2225        p->last_sum_exec_runtime = 0;
2226
2227        p->numa_group = NULL;
2228#endif /* CONFIG_NUMA_BALANCING */
2229}
2230
2231DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
2232
2233#ifdef CONFIG_NUMA_BALANCING
2234
2235void set_numabalancing_state(bool enabled)
2236{
2237        if (enabled)
2238                static_branch_enable(&sched_numa_balancing);
2239        else
2240                static_branch_disable(&sched_numa_balancing);
2241}
2242
2243#ifdef CONFIG_PROC_SYSCTL
2244int sysctl_numa_balancing(struct ctl_table *table, int write,
2245                         void __user *buffer, size_t *lenp, loff_t *ppos)
2246{
2247        struct ctl_table t;
2248        int err;
2249        int state = static_branch_likely(&sched_numa_balancing);
2250
2251        if (write && !capable(CAP_SYS_ADMIN))
2252                return -EPERM;
2253
2254        t = *table;
2255        t.data = &state;
2256        err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2257        if (err < 0)
2258                return err;
2259        if (write)
2260                set_numabalancing_state(state);
2261        return err;
2262}
2263#endif
2264#endif
2265
2266#ifdef CONFIG_SCHEDSTATS
2267
2268DEFINE_STATIC_KEY_FALSE(sched_schedstats);
2269static bool __initdata __sched_schedstats = false;
2270
2271static void set_schedstats(bool enabled)
2272{
2273        if (enabled)
2274                static_branch_enable(&sched_schedstats);
2275        else
2276                static_branch_disable(&sched_schedstats);
2277}
2278
2279void force_schedstat_enabled(void)
2280{
2281        if (!schedstat_enabled()) {
2282                pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
2283                static_branch_enable(&sched_schedstats);
2284        }
2285}
2286
2287static int __init setup_schedstats(char *str)
2288{
2289        int ret = 0;
2290        if (!str)
2291                goto out;
2292
2293        /*
2294         * This code is called before jump labels have been set up, so we can't
2295         * change the static branch directly just yet.  Instead set a temporary
2296         * variable so init_schedstats() can do it later.
2297         */
2298        if (!strcmp(str, "enable")) {
2299                __sched_schedstats = true;
2300                ret = 1;
2301        } else if (!strcmp(str, "disable")) {
2302                __sched_schedstats = false;
2303                ret = 1;
2304        }
2305out:
2306        if (!ret)
2307                pr_warn("Unable to parse schedstats=\n");
2308
2309        return ret;
2310}
2311__setup("schedstats=", setup_schedstats);
2312
2313static void __init init_schedstats(void)
2314{
2315        set_schedstats(__sched_schedstats);
2316}
2317
2318#ifdef CONFIG_PROC_SYSCTL
2319int sysctl_schedstats(struct ctl_table *table, int write,
2320                         void __user *buffer, size_t *lenp, loff_t *ppos)
2321{
2322        struct ctl_table t;
2323        int err;
2324        int state = static_branch_likely(&sched_schedstats);
2325
2326        if (write && !capable(CAP_SYS_ADMIN))
2327                return -EPERM;
2328
2329        t = *table;
2330        t.data = &state;
2331        err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2332        if (err < 0)
2333                return err;
2334        if (write)
2335                set_schedstats(state);
2336        return err;
2337}
2338#endif /* CONFIG_PROC_SYSCTL */
2339#else  /* !CONFIG_SCHEDSTATS */
2340static inline void init_schedstats(void) {}
2341#endif /* CONFIG_SCHEDSTATS */
2342
2343/*
2344 * fork()/clone()-time setup:
2345 */
2346int sched_fork(unsigned long clone_flags, struct task_struct *p)
2347{
2348        unsigned long flags;
2349        int cpu = get_cpu();
2350
2351        __sched_fork(clone_flags, p);
2352        /*
2353         * We mark the process as NEW here. This guarantees that
2354         * nobody will actually run it, and a signal or other external
2355         * event cannot wake it up and insert it on the runqueue either.
2356         */
2357        p->state = TASK_NEW;
2358
2359        /*
2360         * Make sure we do not leak PI boosting priority to the child.
2361         */
2362        p->prio = current->normal_prio;
2363
2364        /*
2365         * Revert to default priority/policy on fork if requested.
2366         */
2367        if (unlikely(p->sched_reset_on_fork)) {
2368                if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
2369                        p->policy = SCHED_NORMAL;
2370                        p->static_prio = NICE_TO_PRIO(0);
2371                        p->rt_priority = 0;
2372                } else if (PRIO_TO_NICE(p->static_prio) < 0)
2373                        p->static_prio = NICE_TO_PRIO(0);
2374
2375                p->prio = p->normal_prio = __normal_prio(p);
2376                set_load_weight(p);
2377
2378                /*
2379                 * We don't need the reset flag anymore after the fork. It has
2380                 * fulfilled its duty:
2381                 */
2382                p->sched_reset_on_fork = 0;
2383        }
2384
2385        if (dl_prio(p->prio)) {
2386                put_cpu();
2387                return -EAGAIN;
2388        } else if (rt_prio(p->prio)) {
2389                p->sched_class = &rt_sched_class;
2390        } else {
2391                p->sched_class = &fair_sched_class;
2392        }
2393
2394        init_entity_runnable_average(&p->se);
2395
2396        /*
2397         * The child is not yet in the pid-hash so no cgroup attach races,
2398         * and the cgroup is pinned to this child because cgroup_fork()
2399         * is run before sched_fork().
2400         *
2401         * Silence PROVE_RCU.
2402         */
2403        raw_spin_lock_irqsave(&p->pi_lock, flags);
2404        /*
2405         * We're setting the CPU for the first time, we don't migrate,
2406         * so use __set_task_cpu().
2407         */
2408        __set_task_cpu(p, cpu);
2409        if (p->sched_class->task_fork)
2410                p->sched_class->task_fork(p);
2411        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2412
2413#ifdef CONFIG_SCHED_INFO
2414        if (likely(sched_info_on()))
2415                memset(&p->sched_info, 0, sizeof(p->sched_info));
2416#endif
2417#if defined(CONFIG_SMP)
2418        p->on_cpu = 0;
2419#endif
2420        init_task_preempt_count(p);
2421#ifdef CONFIG_SMP
2422        plist_node_init(&p->pushable_tasks, MAX_PRIO);
2423        RB_CLEAR_NODE(&p->pushable_dl_tasks);
2424#endif
2425
2426        put_cpu();
2427        return 0;
2428}
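/*
 * The sched_reset_on_fork handling above is driven from userspace through
 * the SCHED_RESET_ON_FORK policy flag. A rough sketch (error handling and
 * the privilege requirement for SCHED_FIFO omitted): the caller runs
 * SCHED_FIFO, but any child forked afterwards starts as plain SCHED_NORMAL
 * with the default nice value, exactly as the reset block above enforces.
 */
#if 0	/* illustrative userspace sketch, not built as part of this file */
#include <sched.h>
#include <unistd.h>

#ifndef SCHED_RESET_ON_FORK
#define SCHED_RESET_ON_FORK	0x40000000
#endif

static void become_rt_but_not_children(void)
{
	struct sched_param param = { .sched_priority = 10 };

	/* RT for us, default policy/priority for every future child. */
	sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &param);

	if (fork() == 0) {
		/* child: SCHED_NORMAL, static_prio back at NICE_TO_PRIO(0) */
		_exit(0);
	}
}
#endif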
2429
2430unsigned long to_ratio(u64 period, u64 runtime)
2431{
2432        if (runtime == RUNTIME_INF)
2433                return 1ULL << 20;
2434
2435        /*
2436         * Doing this here saves a lot of checks in all
2437         * the calling paths, and returning zero seems
2438         * safe for them anyway.
2439         */
2440        if (period == 0)
2441                return 0;
2442
2443        return div64_u64(runtime << 20, period);
2444}
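/*
 * to_ratio() returns the runtime/period ratio in fixed point with 20
 * fractional bits, so 1ULL << 20 stands for 100% of a CPU. A worked example
 * with made-up deadline parameters:
 *
 *   runtime = 10 ms, period = 100 ms
 *   to_ratio(100 * NSEC_PER_MSEC, 10 * NSEC_PER_MSEC)
 *     = (10000000 << 20) / 100000000
 *     = 104857 ~= 0.1 * (1 << 20)
 *
 * i.e. roughly 10% of a CPU, which is what dl_overflow() below compares
 * against the bandwidth already allocated in the root domain.
 */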
2445
2446#ifdef CONFIG_SMP
2447inline struct dl_bw *dl_bw_of(int i)
2448{
2449        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2450                         "sched RCU must be held");
2451        return &cpu_rq(i)->rd->dl_bw;
2452}
2453
2454static inline int dl_bw_cpus(int i)
2455{
2456        struct root_domain *rd = cpu_rq(i)->rd;
2457        int cpus = 0;
2458
2459        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2460                         "sched RCU must be held");
2461        for_each_cpu_and(i, rd->span, cpu_active_mask)
2462                cpus++;
2463
2464        return cpus;
2465}
2466#else
2467inline struct dl_bw *dl_bw_of(int i)
2468{
2469        return &cpu_rq(i)->dl.dl_bw;
2470}
2471
2472static inline int dl_bw_cpus(int i)
2473{
2474        return 1;
2475}
2476#endif
2477
2478/*
2479 * We must be sure that accepting a new task (or allowing changing the
2480 * parameters of an existing one) is consistent with the bandwidth
2481 * constraints. If yes, this function also accordingly updates the currently
2482 * allocated bandwidth to reflect the new situation.
2483 *
2484 * This function is called while holding p's rq->lock.
2485 *
2486 * XXX we should delay bw change until the task's 0-lag point, see
2487 * __setparam_dl().
2488 */
2489static int dl_overflow(struct task_struct *p, int policy,
2490                       const struct sched_attr *attr)
2491{
2492
2493        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2494        u64 period = attr->sched_period ?: attr->sched_deadline;
2495        u64 runtime = attr->sched_runtime;
2496        u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2497        int cpus, err = -1;
2498
2499        /* !deadline task may carry old deadline bandwidth */
2500        if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2501                return 0;
2502
2503        /*
2504         * Whether a task enters, leaves, or stays -deadline but changes
2505         * its parameters, we may need to update accordingly the total
2506         * allocated bandwidth of the container.
2507         */
2508        raw_spin_lock(&dl_b->lock);
2509        cpus = dl_bw_cpus(task_cpu(p));
2510        if (dl_policy(policy) && !task_has_dl_policy(p) &&
2511            !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2512                __dl_add(dl_b, new_bw);
2513                err = 0;
2514        } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2515                   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2516                __dl_clear(dl_b, p->dl.dl_bw);
2517                __dl_add(dl_b, new_bw);
2518                err = 0;
2519        } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2520                __dl_clear(dl_b, p->dl.dl_bw);
2521                err = 0;
2522        }
2523        raw_spin_unlock(&dl_b->lock);
2524
2525        return err;
2526}
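/*
 * The parameters checked here arrive from userspace via sched_setattr(). A
 * rough sketch of the calling side, reusing the made-up 10 ms / 100 ms
 * numbers from the to_ratio() example above; there is no glibc wrapper, so
 * the raw syscall is used and the local struct mirrors the layout in
 * uapi/linux/sched/types.h:
 */
#if 0	/* illustrative userspace sketch, not built as part of this file */
#define _GNU_SOURCE
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>	/* SCHED_DEADLINE */

struct sched_attr_example {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* ns */
	uint64_t sched_deadline;	/* ns */
	uint64_t sched_period;		/* ns */
};

int main(void)
{
	struct sched_attr_example attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/*  10 ms */
		.sched_deadline	= 100 * 1000 * 1000,	/* 100 ms */
		.sched_period	= 100 * 1000 * 1000,	/* 100 ms */
	};

	/* Rejected (-EBUSY) if dl_overflow() finds no spare bandwidth. */
	return syscall(SYS_sched_setattr, 0, &attr, 0);
}
#endif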
2527
2528extern void init_dl_bw(struct dl_bw *dl_b);
2529
2530/*
2531 * wake_up_new_task - wake up a newly created task for the first time.
2532 *
2533 * This function will do some initial scheduler statistics housekeeping
2534 * that must be done for every newly created context, then puts the task
2535 * on the runqueue and wakes it.
2536 */
2537void wake_up_new_task(struct task_struct *p)
2538{
2539        struct rq_flags rf;
2540        struct rq *rq;
2541
2542        raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2543        p->state = TASK_RUNNING;
2544#ifdef CONFIG_SMP
2545        /*
2546         * Fork balancing, do it here and not earlier because:
2547         *  - cpus_allowed can change in the fork path
2548         *  - any previously selected CPU might disappear through hotplug
2549         *
2550         * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
2551         * as we're not fully set-up yet.
2552         */
2553        __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
2554#endif
2555        rq = __task_rq_lock(p, &rf);
2556        update_rq_clock(rq);
2557        post_init_entity_util_avg(&p->se);
2558
2559        activate_task(rq, p, ENQUEUE_NOCLOCK);
2560        p->on_rq = TASK_ON_RQ_QUEUED;
2561        trace_sched_wakeup_new(p);
2562        check_preempt_curr(rq, p, WF_FORK);
2563#ifdef CONFIG_SMP
2564        if (p->sched_class->task_woken) {
2565                /*
2566                 * Nothing relies on rq->lock after this, so it's fine to
2567                 * drop it.
2568                 */
2569                rq_unpin_lock(rq, &rf);
2570                p->sched_class->task_woken(rq, p);
2571                rq_repin_lock(rq, &rf);
2572        }
2573#endif
2574        task_rq_unlock(rq, p, &rf);
2575}
2576
2577#ifdef CONFIG_PREEMPT_NOTIFIERS
2578
2579static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
2580
2581void preempt_notifier_inc(void)
2582{
2583        static_key_slow_inc(&preempt_notifier_key);
2584}
2585EXPORT_SYMBOL_GPL(preempt_notifier_inc);
2586
2587void preempt_notifier_dec(void)
2588{
2589        static_key_slow_dec(&preempt_notifier_key);
2590}
2591EXPORT_SYMBOL_GPL(preempt_notifier_dec);
2592
2593/**
2594 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2595 * @notifier: notifier struct to register
2596 */
2597void preempt_notifier_register(struct preempt_notifier *notifier)
2598{
2599        if (!static_key_false(&preempt_notifier_key))
2600                WARN(1, "registering preempt_notifier while notifiers disabled\n");
2601
2602        hlist_add_head(&notifier->link, &current->preempt_notifiers);
2603}
2604EXPORT_SYMBOL_GPL(preempt_notifier_register);
2605
2606/**
2607 * preempt_notifier_unregister - no longer interested in preemption notifications
2608 * @notifier: notifier struct to unregister
2609 *
2610 * This is *not* safe to call from within a preemption notifier.
2611 */
2612void preempt_notifier_unregister(struct preempt_notifier *notifier)
2613{
2614        hlist_del(&notifier->link);
2615}
2616EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
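/*
 * A rough usage sketch with made-up callbacks: a user such as KVM embeds a
 * struct preempt_notifier, initializes it with preempt_notifier_init() and
 * registers it on the current task; sched_out/sched_in then get called from
 * the switch paths below via fire_sched_out/in_preempt_notifiers().
 */
#if 0	/* illustrative sketch, not built as part of this file */
static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current was just scheduled back in on @cpu */
}

static void my_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
	/* current is being switched out in favour of @next */
}

static struct preempt_ops my_preempt_ops = {
	.sched_in	= my_sched_in,
	.sched_out	= my_sched_out,
};

static void hook_current_task(void)
{
	static struct preempt_notifier pn;

	preempt_notifier_inc();			/* enable the static key */
	preempt_notifier_init(&pn, &my_preempt_ops);
	preempt_notifier_register(&pn);		/* must run as the task itself */
}
#endif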
2617
2618static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
2619{
2620        struct preempt_notifier *notifier;
2621
2622        hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2623                notifier->ops->sched_in(notifier, raw_smp_processor_id());
2624}
2625
2626static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2627{
2628        if (static_key_false(&preempt_notifier_key))
2629                __fire_sched_in_preempt_notifiers(curr);
2630}
2631
2632static void
2633__fire_sched_out_preempt_notifiers(struct task_struct *curr,
2634                                   struct task_struct *next)
2635{
2636        struct preempt_notifier *notifier;
2637
2638        hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2639                notifier->ops->sched_out(notifier, next);
2640}
2641
2642static __always_inline void
2643fire_sched_out_preempt_notifiers(struct task_struct *curr,
2644                                 struct task_struct *next)
2645{
2646        if (static_key_false(&preempt_notifier_key))
2647                __fire_sched_out_preempt_notifiers(curr, next);
2648}
2649
2650#else /* !CONFIG_PREEMPT_NOTIFIERS */
2651
2652static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2653{
2654}
2655
2656static inline void
2657fire_sched_out_preempt_notifiers(struct task_struct *curr,
2658                                 struct task_struct *next)
2659{
2660}
2661
2662#endif /* CONFIG_PREEMPT_NOTIFIERS */
2663
2664/**
2665 * prepare_task_switch - prepare to switch tasks
2666 * @rq: the runqueue preparing to switch
2667 * @prev: the current task that is being switched out
2668 * @next: the task we are going to switch to.
2669 *
2670 * This is called with the rq lock held and interrupts off. It must
2671 * be paired with a subsequent finish_task_switch after the context
2672 * switch.
2673 *
2674 * prepare_task_switch sets up locking and calls architecture specific
2675 * hooks.
2676 */
2677static inline void
2678prepare_task_switch(struct rq *rq, struct task_struct *prev,
2679                    struct task_struct *next)
2680{
2681        sched_info_switch(rq, prev, next);
2682        perf_event_task_sched_out(prev, next);
2683        fire_sched_out_preempt_notifiers(prev, next);
2684        prepare_lock_switch(rq, next);
2685        prepare_arch_switch(next);
2686}
2687
2688/**
2689 * finish_task_switch - clean up after a task-switch
2690 * @prev: the thread we just switched away from.
2691 *
2692 * finish_task_switch must be called after the context switch, paired
2693 * with a prepare_task_switch call before the context switch.
2694 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2695 * and do any other architecture-specific cleanup actions.
2696 *
2697 * Note that we may have delayed dropping an mm in context_switch(). If
2698 * so, we finish that here outside of the runqueue lock. (Doing it
2699 * with the lock held can cause deadlocks; see schedule() for
2700 * details.)
2701 *
2702 * The context switch has flipped the stack from under us and restored the
2703 * local variables which were saved when this task called schedule() in the
2704 * past. prev == current is still correct but we need to recalculate this_rq
2705 * because prev may have moved to another CPU.
2706 */
2707static struct rq *finish_task_switch(struct task_struct *prev)
2708        __releases(rq->lock)
2709{
2710        struct rq *rq = this_rq();
2711        struct mm_struct *mm = rq->prev_mm;
2712        long prev_state;
2713
2714        /*
2715         * The previous task will have left us with a preempt_count of 2
2716         * because it left us after:
2717         *
2718         *      schedule()
2719         *        preempt_disable();                    // 1
2720         *        __schedule()
2721         *          raw_spin_lock_irq(&rq->lock)        // 2
2722         *
2723         * Also, see FORK_PREEMPT_COUNT.
2724         */
2725        if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
2726                      "corrupted preempt_count: %s/%d/0x%x\n",
2727                      current->comm, current->pid, preempt_count()))
2728                preempt_count_set(FORK_PREEMPT_COUNT);
2729
2730        rq->prev_mm = NULL;
2731
2732        /*
2733         * A task struct has one reference for the use as "current".
2734         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2735         * schedule one last time. The schedule call will never return, and
2736         * the scheduled task must drop that reference.
2737         *
2738         * We must observe prev->state before clearing prev->on_cpu (in
2739         * finish_lock_switch), otherwise a concurrent wakeup can get prev
2740         * running on another CPU and we could race with its RUNNING -> DEAD
2741         * transition, resulting in a double drop.
2742         */
2743        prev_state = prev->state;
2744        vtime_task_switch(prev);
2745        perf_event_task_sched_in(prev, current);
2746        finish_lock_switch(rq, prev);
2747        finish_arch_post_lock_switch();
2748
2749        fire_sched_in_preempt_notifiers(current);
2750        if (mm)
2751                mmdrop(mm);
2752        if (unlikely(prev_state == TASK_DEAD)) {
2753                if (prev->sched_class->task_dead)
2754                        prev->sched_class->task_dead(prev);
2755
2756                /*
2757                 * Remove function-return probe instances associated with this
2758                 * task and put them back on the free list.
2759                 */
2760                kprobe_flush_task(prev);
2761
2762                /* Task is done with its stack. */
2763                put_task_stack(prev);
2764
2765                put_task_struct(prev);
2766        }
2767
2768        tick_nohz_task_switch();
2769        return rq;
2770}
2771
2772#ifdef CONFIG_SMP
2773
2774/* rq->lock is NOT held, but preemption is disabled */
2775static void __balance_callback(struct rq *rq)
2776{
2777        struct callback_head *head, *next;
2778        void (*func)(struct rq *rq);
2779        unsigned long flags;
2780
2781        raw_spin_lock_irqsave(&rq->lock, flags);
2782        head = rq->balance_callback;
2783        rq->balance_callback = NULL;
2784        while (head) {
2785                func = (void (*)(struct rq *))head->func;
2786                next = head->next;
2787                head->next = NULL;
2788                head = next;
2789
2790                func(rq);
2791        }
2792        raw_spin_unlock_irqrestore(&rq->lock, flags);
2793}
2794
2795static inline void balance_callback(struct rq *rq)
2796{
2797        if (unlikely(rq->balance_callback))
2798                __balance_callback(rq);
2799}
2800
2801#else
2802
2803static inline void balance_callback(struct rq *rq)
2804{
2805}
2806
2807#endif
2808
2809/**
2810 * schedule_tail - first thing a freshly forked thread must call.
2811 * @prev: the thread we just switched away from.
2812 */
2813asmlinkage __visible void schedule_tail(struct task_struct *prev)
2814        __releases(rq->lock)
2815{
2816        struct rq *rq;
2817
2818        /*
2819         * New tasks start with FORK_PREEMPT_COUNT, see there and
2820         * finish_task_switch() for details.
2821         *
2822         * finish_task_switch() will drop rq->lock() and lower preempt_count
2823         * and the preempt_enable() will end up enabling preemption (on
2824         * PREEMPT_COUNT kernels).
2825         */
2826
2827        rq = finish_task_switch(prev);
2828        balance_callback(rq);
2829        preempt_enable();
2830
2831        if (current->set_child_tid)
2832                put_user(task_pid_vnr(current), current->set_child_tid);
2833}
2834
2835/*
2836 * context_switch - switch to the new MM and the new thread's register state.
2837 */
2838static __always_inline struct rq *
2839context_switch(struct rq *rq, struct task_struct *prev,
2840               struct task_struct *next, struct rq_flags *rf)
2841{
2842        struct mm_struct *mm, *oldmm;
2843
2844        prepare_task_switch(rq, prev, next);
2845
2846        mm = next->mm;
2847        oldmm = prev->active_mm;
2848        /*
2849         * For paravirt, this is coupled with an exit in switch_to to
2850         * combine the page table reload and the switch backend into
2851         * one hypercall.
2852         */
2853        arch_start_context_switch(prev);
2854
2855        if (!mm) {
2856                next->active_mm = oldmm;
2857                mmgrab(oldmm);
2858                enter_lazy_tlb(oldmm, next);
2859        } else
2860                switch_mm_irqs_off(oldmm, mm, next);
2861
2862        if (!prev->mm) {
2863                prev->active_mm = NULL;
2864                rq->prev_mm = oldmm;
2865        }
2866
2867        rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
2868
2869        /*
2870         * The runqueue lock will be released by the next
2871         * task (which is an invalid locking op but in the case
2872         * of the scheduler it's an obvious special-case), so we
2873         * do an early lockdep release here:
2874         */
2875        rq_unpin_lock(rq, rf);
2876        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2877
2878        /* Here we just switch the register state and the stack. */
2879        switch_to(prev, next, prev);
2880        barrier();
2881
2882        return finish_task_switch(prev);
2883}
2884
2885/*
2886 * nr_running and nr_context_switches:
2887 *
2888 * externally visible scheduler statistics: current number of runnable
2889 * threads, total number of context switches performed since bootup.
2890 */
2891unsigned long nr_running(void)
2892{
2893        unsigned long i, sum = 0;
2894
2895        for_each_online_cpu(i)
2896                sum += cpu_rq(i)->nr_running;
2897
2898        return sum;
2899}
2900
2901/*
2902 * Check if only the current task is running on the CPU.
2903 *
2904 * Caution: this function does not check that the caller has disabled
2905 * preemption, thus the result might have a time-of-check-to-time-of-use
2906 * race.  The caller is responsible to use it correctly, for example:
2907 *
2908 * - from a non-preemptable section (of course)
2909 *
2910 * - from a thread that is bound to a single CPU
2911 *
2912 * - in a loop with very short iterations (e.g. a polling loop)
2913 */
2914bool single_task_running(void)
2915{
2916        return raw_rq()->nr_running == 1;
2917}
2918EXPORT_SYMBOL(single_task_running);
2919
2920unsigned long long nr_context_switches(void)
2921{
2922        int i;
2923        unsigned long long sum = 0;
2924
2925        for_each_possible_cpu(i)
2926                sum += cpu_rq(i)->nr_switches;
2927
2928        return sum;
2929}
2930
2931/*
2932 * IO-wait accounting, and how it's mostly bollocks (on SMP).
2933 *
2934 * The idea behind IO-wait accounting is to account the idle time that we could
2935 * have spent running if it were not for IO. That is, if we were to improve the
2936 * storage performance, we'd have a proportional reduction in IO-wait time.
2937 *
2938 * This all works nicely on UP, where, when a task blocks on IO, we account
2939 * idle time as IO-wait, because if the storage were faster, it could've been
2940 * running and we'd not be idle.
2941 *
2942 * This has been extended to SMP, by doing the same for each CPU. This however
2943 * is broken.
2944 *
2945 * Imagine for instance the case where two tasks block on one CPU; only that
2946 * CPU will have IO-wait accounted, while the other has regular idle. Even
2947 * though, if the storage were faster, both could've run at the same time,
2948 * utilising both CPUs.
2949 *
2950 * This means, that when looking globally, the current IO-wait accounting on
2951 * SMP is a lower bound, by reason of under accounting.
2952 *
2953 * Worse, since the numbers are provided per CPU, they are sometimes
2954 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
2955 * associated with any one particular CPU; it can wake up on a different CPU
2956 * than the one it blocked on. This means the per-CPU IO-wait number is meaningless.
2957 *
2958 * Task CPU affinities can make all that even more 'interesting'.
2959 */
2960
2961unsigned long nr_iowait(void)
2962{
2963        unsigned long i, sum = 0;
2964
2965        for_each_possible_cpu(i)
2966                sum += atomic_read(&cpu_rq(i)->nr_iowait);
2967
2968        return sum;
2969}
2970
2971/*
2972 * Consumers of these two interfaces, like for example the cpufreq menu
2973 * governor, are using nonsensical data: they boost the frequency of a CPU
2974 * that has IO-wait pending but might not even end up running the task when
2975 * it does become runnable.
2976 */
2977
2978unsigned long nr_iowait_cpu(int cpu)
2979{
2980        struct rq *this = cpu_rq(cpu);
2981        return atomic_read(&this->nr_iowait);
2982}
2983
2984void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
2985{
2986        struct rq *rq = this_rq();
2987        *nr_waiters = atomic_read(&rq->nr_iowait);
2988        *load = rq->load.weight;
2989}
2990
2991#ifdef CONFIG_SMP
2992
2993/*
2994 * sched_exec - execve() is a valuable balancing opportunity, because at
2995 * this point the task has the smallest effective memory and cache footprint.
2996 */
2997void sched_exec(void)
2998{
2999        struct task_struct *p = current;
3000        unsigned long flags;
3001        int dest_cpu;
3002
3003        raw_spin_lock_irqsave(&p->pi_lock, flags);
3004        dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
3005        if (dest_cpu == smp_processor_id())
3006                goto unlock;
3007
3008        if (likely(cpu_active(dest_cpu))) {
3009                struct migration_arg arg = { p, dest_cpu };
3010
3011                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3012                stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
3013                return;
3014        }
3015unlock:
3016        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3017}
3018
3019#endif
3020
3021DEFINE_PER_CPU(struct kernel_stat, kstat);
3022DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
3023
3024EXPORT_PER_CPU_SYMBOL(kstat);
3025EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
3026
3027/*
3028 * The function fair_sched_class.update_curr accesses the struct curr
3029 * and its field curr->exec_start; when called from task_sched_runtime(),
3030 * we observe a high rate of cache misses in practice.
3031 * Prefetching this data results in improved performance.
3032 */
3033static inline void prefetch_curr_exec_start(struct task_struct *p)
3034{
3035#ifdef CONFIG_FAIR_GROUP_SCHED
3036        struct sched_entity *curr = (&p->se)->cfs_rq->curr;
3037#else
3038        struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
3039#endif
3040        prefetch(curr);
3041        prefetch(&curr->exec_start);
3042}
3043
3044/*
3045 * Return accounted runtime for the task.
3046 * In case the task is currently running, return the runtime plus current's
3047 * pending runtime that has not been accounted yet.
3048 */
3049unsigned long long task_sched_runtime(struct task_struct *p)
3050{
3051        struct rq_flags rf;
3052        struct rq *rq;
3053        u64 ns;
3054
3055#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
3056        /*
3057         * 64-bit doesn't need locks to atomically read a 64-bit value.
3058         * So we have an optimization chance when the task's delta_exec is 0.
3059         * Reading ->on_cpu is racy, but this is ok.
3060         *
3061         * If we race with it leaving CPU, we'll take a lock. So we're correct.
3062         * If we race with it entering CPU, unaccounted time is 0. This is
3063         * indistinguishable from the read occurring a few cycles earlier.
3064         * If we see ->on_cpu without ->on_rq, the task is leaving, and has
3065         * been accounted, so we're correct here as well.
3066         */
3067        if (!p->on_cpu || !task_on_rq_queued(p))
3068                return p->se.sum_exec_runtime;
3069#endif
3070
3071        rq = task_rq_lock(p, &rf);
3072        /*
3073         * Must be ->curr _and_ ->on_rq.  If dequeued, we would
3074         * project cycles that may never be accounted to this
3075         * thread, breaking clock_gettime().
3076         */
3077        if (task_current(rq, p) && task_on_rq_queued(p)) {
3078                prefetch_curr_exec_start(p);
3079                update_rq_clock(rq);
3080                p->sched_class->update_curr(rq);
3081        }
3082        ns = p->se.sum_exec_runtime;
3083        task_rq_unlock(rq, p, &rf);
3084
3085        return ns;
3086}
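/*
 * From userspace this accounting is what the per-thread CPU clock reports;
 * CLOCK_THREAD_CPUTIME_ID reaches task_sched_runtime() through the
 * posix-cpu-timers code, which is why the comment above worries about
 * breaking clock_gettime(). A minimal sketch of the reader side:
 */
#if 0	/* illustrative userspace sketch, not built as part of this file */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
	printf("this thread has run for %ld.%09ld s\n",
	       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
#endif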
3087
3088/*
3089 * This function gets called by the timer code, with HZ frequency.
3090 * We call it with interrupts disabled.
3091 */
3092void scheduler_tick(void)
3093{
3094        int cpu = smp_processor_id();
3095        struct rq *rq = cpu_rq(cpu);
3096        struct task_struct *curr = rq->curr;
3097        struct rq_flags rf;
3098
3099        sched_clock_tick();
3100
3101        rq_lock(rq, &rf);
3102
3103        update_rq_clock(rq);
3104        curr->sched_class->task_tick(rq, curr, 0);
3105        cpu_load_update_active(rq);
3106        calc_global_load_tick(rq);
3107
3108        rq_unlock(rq, &rf);
3109
3110        perf_event_task_tick();
3111
3112#ifdef CONFIG_SMP
3113        rq->idle_balance = idle_cpu(cpu);
3114        trigger_load_balance(rq);
3115#endif
3116        rq_last_tick_reset(rq);
3117}
3118
3119#ifdef CONFIG_NO_HZ_FULL
3120/**
3121 * scheduler_tick_max_deferment
3122 *
3123 * Keep at least one tick per second when a single
3124 * active task is running because the scheduler doesn't
3125 * yet completely support a full dynticks environment.
3126 *
3127 * This makes sure that uptime, CFS vruntime, load
3128 * balancing, etc... continue to move forward, even
3129 * with a very low granularity.
3130 *
3131 * Return: Maximum deferment in nanoseconds.
3132 */
3133u64 scheduler_tick_max_deferment(void)
3134{
3135        struct rq *rq = this_rq();
3136        unsigned long next, now = READ_ONCE(jiffies);
3137
3138        next = rq->last_sched_tick + HZ;
3139
3140        if (time_before_eq(next, now))
3141                return 0;
3142
3143        return jiffies_to_nsecs(next - now);
3144}
3145#endif
3146
3147#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3148                                defined(CONFIG_PREEMPT_TRACER))
3149/*
3150 * If the value passed in is equal to the current preempt count
3151 * then we just disabled preemption. Start timing the latency.
3152 */
3153static inline void preempt_latency_start(int val)
3154{
3155        if (preempt_count() == val) {
3156                unsigned long ip = get_lock_parent_ip();
3157#ifdef CONFIG_DEBUG_PREEMPT
3158                current->preempt_disable_ip = ip;
3159#endif
3160                trace_preempt_off(CALLER_ADDR0, ip);
3161        }
3162}
3163
3164void preempt_count_add(int val)
3165{
3166#ifdef CONFIG_DEBUG_PREEMPT
3167        /*
3168         * Underflow?
3169         */
3170        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3171                return;
3172#endif
3173        __preempt_count_add(val);
3174#ifdef CONFIG_DEBUG_PREEMPT
3175        /*
3176         * Spinlock count overflowing soon?
3177         */
3178        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3179                                PREEMPT_MASK - 10);
3180#endif
3181        preempt_latency_start(val);
3182}
3183EXPORT_SYMBOL(preempt_count_add);
3184NOKPROBE_SYMBOL(preempt_count_add);
3185
3186/*
3187 * If the value passed in is equal to the current preempt count
3188 * then we just enabled preemption. Stop timing the latency.
3189 */
3190static inline void preempt_latency_stop(int val)
3191{
3192        if (preempt_count() == val)
3193                trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
3194}
3195
3196void preempt_count_sub(int val)
3197{
3198#ifdef CONFIG_DEBUG_PREEMPT
3199        /*
3200         * Underflow?
3201         */
3202        if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3203                return;
3204        /*
3205         * Is the spinlock portion underflowing?
3206         */
3207        if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3208                        !(preempt_count() & PREEMPT_MASK)))
3209                return;
3210#endif
3211
3212        preempt_latency_stop(val);
3213        __preempt_count_sub(val);
3214}
3215EXPORT_SYMBOL(preempt_count_sub);
3216NOKPROBE_SYMBOL(preempt_count_sub);
3217
3218#else
3219static inline void preempt_latency_start(int val) { }
3220static inline void preempt_latency_stop(int val) { }
3221#endif
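/*
 * A rough sketch of a caller sitting on top of the hooks above (the critical
 * section contents are made up): on kernels with CONFIG_DEBUG_PREEMPT or
 * CONFIG_PREEMPT_TRACER, preempt_disable() ends up in preempt_count_add(1)
 * and starts the latency timing, and the matching preempt_enable() goes
 * through preempt_count_sub(1), which is where preempt_latency_stop() fires.
 */
#if 0	/* illustrative sketch, not built as part of this file */
static DEFINE_PER_CPU(int, my_counter);

static void touch_per_cpu_state(void)
{
	preempt_disable();		/* latency timing starts here */

	/* Safe to touch per-CPU data: we cannot be migrated. */
	__this_cpu_inc(my_counter);

	preempt_enable();		/* latency timing stops here */
}
#endif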
3222
3223static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
3224{
3225#ifdef CONFIG_DEBUG_PREEMPT
3226        return p->preempt_disable_ip;
3227#else
3228        return 0;
3229#endif
3230}
3231
3232/*
3233 * Print scheduling while atomic bug:
3234 */
3235static noinline void __schedule_bug(struct task_struct *prev)
3236{
3237        /* Save this before calling printk(), since that will clobber it */
3238        unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
3239
3240        if (oops_in_progress)
3241                return;
3242
3243        printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3244                prev->comm, prev->pid, preempt_count());
3245
3246        debug_show_held_locks(prev);
3247        print_modules();
3248        if (irqs_disabled())
3249                print_irqtrace_events(prev);
3250        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
3251            && in_atomic_preempt_off()) {
3252                pr_err("Preemption disabled at:");
3253                print_ip_sym(preempt_disable_ip);
3254                pr_cont("\n");
3255        }
3256        if (panic_on_warn)
3257                panic("scheduling while atomic\n");
3258
3259        dump_stack();
3260        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
3261}
3262
3263/*
3264 * Various schedule()-time debugging checks and statistics:
3265 */
3266static inline void schedule_debug(struct task_struct *prev)
3267{
3268#ifdef CONFIG_SCHED_STACK_END_CHECK
3269        if (task_stack_end_corrupted(prev))
3270                panic("corrupted stack end detected inside scheduler\n");
3271#endif
3272
3273        if (unlikely(in_atomic_preempt_off())) {
3274                __schedule_bug(prev);
3275                preempt_count_set(PREEMPT_DISABLED);
3276        }
3277        rcu_sleep_check();
3278
3279        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3280
3281        schedstat_inc(this_rq()->sched_count);
3282}
3283
3284/*
3285 * Pick up the highest-prio task:
3286 */
3287static inline struct task_struct *
3288pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
3289{
3290        const struct sched_class *class;
3291        struct task_struct *p;
3292
3293        /*
3294         * Optimization: we know that if all tasks are in the fair class we can
3295         * call that function directly, but only if the @prev task wasn't of a
3296         * higher scheduling class, because otherwise those classes lose the
3297         * opportunity to pull in more work from other CPUs.
3298         */
3299        if (likely((prev->sched_class == &idle_sched_class ||
3300                    prev->sched_class == &fair_sched_class) &&
3301                   rq->nr_running == rq->cfs.h_nr_running)) {
3302
3303                p = fair_sched_class.pick_next_task(rq, prev, rf);
3304                if (unlikely(p == RETRY_TASK))
3305                        goto again;
3306
3307                /* Assumes fair_sched_class->next == idle_sched_class */
3308                if (unlikely(!p))
3309                        p = idle_sched_class.pick_next_task(rq, prev, rf);
3310
3311                return p;
3312        }
3313
3314again:
3315        for_each_class(class) {
3316                p = class->pick_next_task(rq, prev, rf);
3317                if (p) {
3318                        if (unlikely(p == RETRY_TASK))
3319                                goto again;
3320                        return p;
3321                }
3322        }
3323
3324        /* The idle class should always have a runnable task: */
3325        BUG();
3326}
3327
3328/*
3329 * __schedule() is the main scheduler function.
3330 *
3331 * The main means of driving the scheduler and thus entering this function are:
3332 *
3333 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
3334 *
3335 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
3336 *      paths. For example, see arch/x86/entry_64.S.
3337 *
3338 *      To drive preemption between tasks, the scheduler sets the flag in timer
3339 *      interrupt handler scheduler_tick().
3340 *
3341 *   3. Wakeups don't really cause entry into schedule(). They add a
3342 *      task to the run-queue and that's it.
3343 *
3344 *      Now, if the new task added to the run-queue preempts the current
3345 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
3346 *      called on the nearest possible occasion:
3347 *
3348 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
3349 *
3350 *         - in syscall or exception context, at the next outermost
3351 *           preempt_enable(). (this might be as soon as the wake_up()'s
3352 *           spin_unlock()!)
3353 *
3354 *         - in IRQ context, return from interrupt-handler to
3355 *           preemptible context
3356 *
3357 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
3358 *         then at the next:
3359 *
3360 *          - cond_resched() call
3361 *          - explicit schedule() call
3362 *          - return from syscall or exception to user-space
3363 *          - return from interrupt-handler to user-space
3364 *
3365 * WARNING: must be called with preemption disabled!
3366 */
3367static void __sched notrace __schedule(bool preempt)
3368{
3369        struct task_struct *prev, *next;
3370        unsigned long *switch_count;
3371        struct rq_flags rf;
3372        struct rq *rq;
3373        int cpu;
3374
3375        cpu = smp_processor_id();
3376        rq = cpu_rq(cpu);
3377        prev = rq->curr;
3378
3379        schedule_debug(prev);
3380
3381        if (sched_feat(HRTICK))
3382                hrtick_clear(rq);
3383
3384        local_irq_disable();
3385        rcu_note_context_switch(preempt);
3386
3387        /*
3388         * Make sure that signal_pending_state()->signal_pending() below
3389         * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
3390         * done by the caller to avoid the race with signal_wake_up().
3391         */
3392        smp_mb__before_spinlock();
3393        rq_lock(rq, &rf);
3394
3395        /* Promote REQ to ACT */
3396        rq->clock_update_flags <<= 1;
3397        update_rq_clock(rq);
3398
3399        switch_count = &prev->nivcsw;
3400        if (!preempt && prev->state) {
3401                if (unlikely(signal_pending_state(prev->state, prev))) {
3402                        prev->state = TASK_RUNNING;
3403                } else {
3404                        deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
3405                        prev->on_rq = 0;
3406
3407                        if (prev->in_iowait) {
3408                                atomic_inc(&rq->nr_iowait);
3409                                delayacct_blkio_start();
3410                        }
3411
3412                        /*
3413                         * If a worker went to sleep, notify and ask workqueue
3414                         * whether it wants to wake up a task to maintain
3415                         * concurrency.
3416                         */
3417                        if (prev->flags & PF_WQ_WORKER) {
3418                                struct task_struct *to_wakeup;
3419
3420                                to_wakeup = wq_worker_sleeping(prev);
3421                                if (to_wakeup)
3422                                        try_to_wake_up_local(to_wakeup, &rf);
3423                        }
3424                }
3425                switch_count = &prev->nvcsw;
3426        }
3427
3428        next = pick_next_task(rq, prev, &rf);
3429        clear_tsk_need_resched(prev);
3430        clear_preempt_need_resched();
3431
3432        if (likely(prev != next)) {
3433                rq->nr_switches++;
3434                rq->curr = next;
3435                ++*switch_count;
3436
3437                trace_sched_switch(preempt, prev, next);
3438
3439                /* Also unlocks the rq: */
3440                rq = context_switch(rq, prev, next, &rf);
3441        } else {
3442                rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
3443                rq_unlock_irq(rq, &rf);
3444        }
3445
3446        balance_callback(rq);
3447}
3448
3449void __noreturn do_task_dead(void)
3450{
3451        /*
3452         * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
3453         * when the following two conditions become true.
3454         *   - there is a race condition on mmap_sem (it is acquired by
3455         *     exit_mm()), and
3456         *   - an SMI occurs before TASK_RUNNING is set (or the hypervisor
3457         *     of a virtual machine switches to another guest).
3458         *  As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
3459         *
3460         * To avoid this, we have to wait until tsk->pi_lock, which is held
3461         * by try_to_wake_up(), is released.
3462         */
3463        smp_mb();
3464        raw_spin_unlock_wait(&current->pi_lock);
3465
3466        /* Causes final put_task_struct in finish_task_switch(): */
3467        __set_current_state(TASK_DEAD);
3468
3469        /* Tell freezer to ignore us: */
3470        current->flags |= PF_NOFREEZE;
3471
3472        __schedule(false);
3473        BUG();
3474
3475        /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
3476        for (;;)
3477                cpu_relax();
3478}
3479
3480static inline void sched_submit_work(struct task_struct *tsk)
3481{
3482        if (!tsk->state || tsk_is_pi_blocked(tsk))
3483                return;
3484        /*
3485         * If we are going to sleep and we have plugged IO queued,
3486         * make sure to submit it to avoid deadlocks.
3487         */
3488        if (blk_needs_flush_plug(tsk))
3489                blk_schedule_flush_plug(tsk);
3490}
3491
3492asmlinkage __visible void __sched schedule(void)
3493{
3494        struct task_struct *tsk = current;
3495
3496        sched_submit_work(tsk);
3497        do {
3498                preempt_disable();
3499                __schedule(false);
3500                sched_preempt_enable_no_resched();
3501        } while (need_resched());
3502}
3503EXPORT_SYMBOL(schedule);
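/*
 * Illustrative sketch (editor addition, not part of the original source):
 * the "explicit blocking" entry into schedule() described above usually
 * takes the classic wait-loop form below. The wait-queue head 'my_wq' and
 * the condition 'my_condition' are made-up names for the example.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 *
 * prepare_to_wait() sets the task state before the condition is tested, so
 * a wake-up racing with the test is not lost; the smp_mb__before_spinlock()
 * comment in __schedule() above describes the matching barrier on the
 * scheduler side.
 */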
3504
3505/*
3506 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
3507 * state (i.e. has scheduled out non-voluntarily) by making sure that all
3508 * tasks have either left the run queue or have gone into user space.
3509 * As idle tasks do not do either, they must not ever be preempted
3510 * (schedule out non-voluntarily).
3511 *
3512 * schedule_idle() is similar to schedule_preempt_disabled() except that it
3513 * never enables preemption because it does not call sched_submit_work().
3514 */
3515void __sched schedule_idle(void)
3516{
3517        /*
3518         * As this skips calling sched_submit_work(), which is a nop anyway
3519         * when the task is in the TASK_RUNNING state, make sure this is
3520         * never used someplace where the current task can be in any other
3521         * state. Note that the idle task is always in the
3522         * TASK_RUNNING state.
3523         */
3524        WARN_ON_ONCE(current->state);
3525        do {
3526                __schedule(false);
3527        } while (need_resched());
3528}
3529
3530#ifdef CONFIG_CONTEXT_TRACKING
3531asmlinkage __visible void __sched schedule_user(void)
3532{
3533        /*
3534         * If we come here after a random call to set_need_resched(),
3535         * or we have been woken up remotely but the IPI has not yet arrived,
3536         * we haven't yet exited the RCU idle mode. Do it here manually until
3537         * we find a better solution.
3538         *
3539         * NB: There are buggy callers of this function.  Ideally we
3540         * should warn if prev_state != CONTEXT_USER, but that will trigger
3541         * too frequently to make sense yet.
3542         */
3543        enum ctx_state prev_state = exception_enter();
3544        schedule();
3545        exception_exit(prev_state);
3546}
3547#endif
3548
3549/**
3550 * schedule_preempt_disabled - called with preemption disabled
3551 *
3552 * Returns with preemption disabled. Note: preempt_count must be 1
3553 */
3554void __sched schedule_preempt_disabled(void)
3555{
3556        sched_preempt_enable_no_resched();
3557        schedule();
3558        preempt_disable();
3559}
3560
3561static void __sched notrace preempt_schedule_common(void)
3562{
3563        do {
3564                /*
3565                 * Because the function tracer can trace preempt_count_sub()
3566                 * and it also uses preempt_enable/disable_notrace(), if
3567                 * NEED_RESCHED is set, the preempt_enable_notrace() called
3568                 * by the function tracer will call this function again and
3569                 * cause infinite recursion.
3570                 *
3571                 * Preemption must be disabled here before the function
3572                 * tracer can trace. Break up preempt_disable() into two
3573                 * calls. One to disable preemption without fear of being
3574                 * traced. The other to still record the preemption latency,
3575                 * which can also be traced by the function tracer.
3576                 */
3577                preempt_disable_notrace();
3578                preempt_latency_start(1);
3579                __schedule(true);
3580                preempt_latency_stop(1);
3581                preempt_enable_no_resched_notrace();
3582
3583                /*
3584                 * Check again in case we missed a preemption opportunity
3585                 * between schedule and now.
3586                 */
3587        } while (need_resched());
3588}
3589
3590#ifdef CONFIG_PREEMPT
3591/*
3592 * This is the entry point to schedule() from in-kernel preemption
3593 * off of preempt_enable(). Kernel preemption off the return-from-interrupt
3594 * path is handled by preempt_schedule_irq(), which calls __schedule() directly.
3595 */
3596asmlinkage __visible void __sched notrace preempt_schedule(void)
3597{
3598        /*
3599         * If there is a non-zero preempt_count or interrupts are disabled,
3600         * we do not want to preempt the current task. Just return.
3601         */
3602        if (likely(!preemptible()))
3603                return;
3604
3605        preempt_schedule_common();
3606}
3607NOKPROBE_SYMBOL(preempt_schedule);
3608EXPORT_SYMBOL(preempt_schedule);
3609
3610/**
3611 * preempt_schedule_notrace - preempt_schedule called by tracing
3612 *
3613 * The tracing infrastructure uses preempt_enable_notrace to prevent
3614 * recursion and tracing preempt enabling caused by the tracing
3615 * infrastructure itself. But as tracing can happen in areas coming
3616 * from userspace or just about to enter userspace, a preempt enable
3617 * can occur before user_exit() is called. This will cause the scheduler
3618 * to be called when the system is still in usermode.
3619 *
3620 * To prevent this, the preempt_enable_notrace will use this function
3621 * instead of preempt_schedule() to exit user context if needed before
3622 * calling the scheduler.
3623 */
3624asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
3625{
3626        enum ctx_state prev_ctx;
3627
3628        if (likely(!preemptible()))
3629                return;
3630
3631        do {
3632                /*
3633                 * Because the function tracer can trace preempt_count_sub()
3634                 * and it also uses preempt_enable/disable_notrace(), if
3635                 * NEED_RESCHED is set, the preempt_enable_notrace() called
3636                 * by the function tracer will call this function again and
3637                 * cause infinite recursion.
3638                 *
3639                 * Preemption must be disabled here before the function
3640                 * tracer can trace. Break up preempt_disable() into two
3641                 * calls. One to disable preemption without fear of being
3642                 * traced. The other to still record the preemption latency,
3643                 * which can also be traced by the function tracer.
3644                 */
3645                preempt_disable_notrace();
3646                preempt_latency_start(1);
3647                /*
3648                 * Needs preempt disabled in case user_exit() is traced
3649                 * and the tracer calls preempt_enable_notrace() causing
3650                 * an infinite recursion.
3651                 */
3652                prev_ctx = exception_enter();
3653                __schedule(true);
3654                exception_exit(prev_ctx);
3655
3656                preempt_latency_stop(1);
3657                preempt_enable_no_resched_notrace();
3658        } while (need_resched());
3659}
3660EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
3661
3662#endif /* CONFIG_PREEMPT */
3663
3664/*
3665 * This is the entry point to schedule() from kernel preemption
3666 * off of irq context.
3667 * Note that this is called and returns with irqs disabled. This
3668 * protects us against recursive calls from irq context.
3669 */
3670asmlinkage __visible void __sched preempt_schedule_irq(void)
3671{
3672        enum ctx_state prev_state;
3673
3674        /* Catch callers which need to be fixed */
3675        BUG_ON(preempt_count() || !irqs_disabled());
3676
3677        prev_state = exception_enter();
3678
3679        do {
3680                preempt_disable();
3681                local_irq_enable();
3682                __schedule(true);
3683                local_irq_disable();
3684                sched_preempt_enable_no_resched();
3685        } while (need_resched());
3686
3687        exception_exit(prev_state);
3688}
3689
3690int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
3691                          void *key)
3692{
3693        return try_to_wake_up(curr->private, mode, wake_flags);
3694}
3695EXPORT_SYMBOL(default_wake_function);
3696
3697#ifdef CONFIG_RT_MUTEXES
3698
3699static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
3700{
3701        if (pi_task)
3702                prio = min(prio, pi_task->prio);
3703
3704        return prio;
3705}
3706
3707static inline int rt_effective_prio(struct task_struct *p, int prio)
3708{
3709        struct task_struct *pi_task = rt_mutex_get_top_task(p);
3710
3711        return __rt_effective_prio(pi_task, prio);
3712}
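/*
 * Worked example (editor addition): priorities are "lower value == higher
 * priority". A nice-0 CFS task has p->prio == 120; if the top waiter on a
 * mutex it holds is a SCHED_FIFO task with rt_priority 50 (prio 49), then
 * rt_effective_prio() returns min(120, 49) == 49 and the lock owner runs
 * boosted in the RT range until it releases the mutex.
 */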
3713
3714/*
3715 * rt_mutex_setprio - set the current priority of a task
3716 * @p: task to boost
3717 * @pi_task: donor task
3718 *
3719 * This function changes the 'effective' priority of a task. It does
3720 * not touch ->normal_prio like __setscheduler().
3721 *
3722 * Used by the rt_mutex code to implement priority inheritance
3723 * logic. The call site only calls this if the priority of the task changed.
3724 */
3725void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
3726{
3727        int prio, oldprio, queued, running, queue_flag =
3728                DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
3729        const struct sched_class *prev_class;
3730        struct rq_flags rf;
3731        struct rq *rq;
3732
3733        /* XXX used to be waiter->prio, not waiter->task->prio */
3734        prio = __rt_effective_prio(pi_task, p->normal_prio);
3735
3736        /*
3737         * If nothing changed; bail early.
3738         */
3739        if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
3740                return;
3741
3742        rq = __task_rq_lock(p, &rf);
3743        update_rq_clock(rq);
3744        /*
3745         * Set under pi_lock && rq->lock, such that the value can be used under
3746         * either lock.
3747         *
3748         * Note that there is a lot of trickiness in making this pointer cache work
3749         * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
3750         * ensure a task is de-boosted (pi_task is set to NULL) before the
3751         * task is allowed to run again (and can exit). This ensures the pointer
3752         * points to a blocked task -- which guarantees the task is present.
3753         */
3754        p->pi_top_task = pi_task;
3755
3756        /*
3757         * For FIFO/RR we only need to set prio, if that matches we're done.
3758         */
3759        if (prio == p->prio && !dl_prio(prio))
3760                goto out_unlock;
3761
3762        /*
3763         * Idle task boosting is a no-no in general. There is one
3764         * exception, when PREEMPT_RT and NOHZ is active:
3765         *
3766         * The idle task calls get_next_timer_interrupt() and holds
3767         * the timer wheel base->lock on the CPU and another CPU wants
3768         * to access the timer (probably to cancel it). We can safely
3769         * ignore the boosting request, as the idle CPU runs this code
3770         * with interrupts disabled and will complete the lock
3771         * protected section without being interrupted. So there is no
3772         * real need to boost.
3773         */
3774        if (unlikely(p == rq->idle)) {
3775                WARN_ON(p != rq->curr);
3776                WARN_ON(p->pi_blocked_on);
3777                goto out_unlock;
3778        }
3779
3780        trace_sched_pi_setprio(p, pi_task);
3781        oldprio = p->prio;
3782
3783        if (oldprio == prio)
3784                queue_flag &= ~DEQUEUE_MOVE;
3785
3786        prev_class = p->sched_class;
3787        queued = task_on_rq_queued(p);
3788        running = task_current(rq, p);
3789        if (queued)
3790                dequeue_task(rq, p, queue_flag);
3791        if (running)
3792                put_prev_task(rq, p);
3793
3794        /*
3795         * Boosting conditions are:
3796         * 1. -rt task is running and holds mutex A
3797         *      --> -dl task blocks on mutex A
3798         *
3799         * 2. -dl task is running and holds mutex A
3800         *      --> -dl task blocks on mutex A and could preempt the
3801         *          running task
3802         */
3803        if (dl_prio(prio)) {
3804                if (!dl_prio(p->normal_prio) ||
3805                    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
3806                        p->dl.dl_boosted = 1;
3807                        queue_flag |= ENQUEUE_REPLENISH;
3808                } else
3809                        p->dl.dl_boosted = 0;
3810                p->sched_class = &dl_sched_class;
3811        } else if (rt_prio(prio)) {
3812                if (dl_prio(oldprio))
3813                        p->dl.dl_boosted = 0;
3814                if (oldprio < prio)
3815                        queue_flag |= ENQUEUE_HEAD;
3816                p->sched_class = &rt_sched_class;
3817        } else {
3818                if (dl_prio(oldprio))
3819                        p->dl.dl_boosted = 0;
3820                if (rt_prio(oldprio))
3821                        p->rt.timeout = 0;
3822                p->sched_class = &fair_sched_class;
3823        }
3824
3825        p->prio = prio;
3826
3827        if (queued)
3828                enqueue_task(rq, p, queue_flag);
3829        if (running)
3830                set_curr_task(rq, p);
3831
3832        check_class_changed(rq, p, prev_class, oldprio);
3833out_unlock:
3834        /* Prevent rq from going away on us: */
3835        preempt_disable();
3836        __task_rq_unlock(rq, &rf);
3837
3838        balance_callback(rq);
3839        preempt_enable();
3840}
3841#else
3842static inline int rt_effective_prio(struct task_struct *p, int prio)
3843{
3844        return prio;
3845}
3846#endif
3847
3848void set_user_nice(struct task_struct *p, long nice)
3849{
3850        bool queued, running;
3851        int old_prio, delta;
3852        struct rq_flags rf;
3853        struct rq *rq;
3854
3855        if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
3856                return;
3857        /*
3858         * We have to be careful, if called from sys_setpriority(),
3859         * the task might be in the middle of scheduling on another CPU.
3860         */
3861        rq = task_rq_lock(p, &rf);
3862        update_rq_clock(rq);
3863
3864        /*
3865         * The RT priorities are set via sched_setscheduler(), but we still
3866         * allow the 'normal' nice value to be set - but as expected
3867         * it won't have any effect on scheduling as long as the task is
3868         * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
3869         */
3870        if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
3871                p->static_prio = NICE_TO_PRIO(nice);
3872                goto out_unlock;
3873        }
3874        queued = task_on_rq_queued(p);
3875        running = task_current(rq, p);
3876        if (queued)
3877                dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
3878        if (running)
3879                put_prev_task(rq, p);
3880
3881        p->static_prio = NICE_TO_PRIO(nice);
3882        set_load_weight(p);
3883        old_prio = p->prio;
3884        p->prio = effective_prio(p);
3885        delta = p->prio - old_prio;
3886
3887        if (queued) {
3888                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
3889                /*
3890                 * If the task increased its priority or is running and
3891                 * lowered its priority, then reschedule its CPU:
3892                 */
3893                if (delta < 0 || (delta > 0 && task_running(rq, p)))
3894                        resched_curr(rq);
3895        }
3896        if (running)
3897                set_curr_task(rq, p);
3898out_unlock:
3899        task_rq_unlock(rq, p, &rf);
3900}
3901EXPORT_SYMBOL(set_user_nice);
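/*
 * Illustrative sketch (editor addition): an in-kernel user of this export
 * might lower the priority of a background kthread as below. The pointer
 * 'my_kthread' is a made-up name; nice values outside [MIN_NICE, MAX_NICE],
 * i.e. [-20, 19], are ignored by the early check above.
 *
 *	static void example_deprioritize(struct task_struct *my_kthread)
 *	{
 *		set_user_nice(my_kthread, 10);
 *	}
 */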
3902
3903/*
3904 * can_nice - check if a task can reduce its nice value
3905 * @p: task
3906 * @nice: nice value
3907 */
3908int can_nice(const struct task_struct *p, const int nice)
3909{
3910        /* Convert nice value [19,-20] to rlimit style value [1,40]: */
3911        int nice_rlim = nice_to_rlimit(nice);
3912
3913        return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
3914                capable(CAP_SYS_NICE));
3915}
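/*
 * Worked example (editor addition): nice_to_rlimit() maps nice 19..-20 onto
 * 1..40, i.e. nice_to_rlimit(n) == 20 - n. An unprivileged task with
 * RLIMIT_NICE == 30 may therefore lower its nice value to -10
 * (20 - (-10) == 30), while reaching -11 needs a limit of 31 or CAP_SYS_NICE.
 */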
3916
3917#ifdef __ARCH_WANT_SYS_NICE
3918
3919/*
3920 * sys_nice - change the priority of the current process.
3921 * @increment: priority increment
3922 *
3923 * sys_setpriority is a more generic, but much slower function that
3924 * does similar things.
3925 */
3926SYSCALL_DEFINE1(nice, int, increment)
3927{
3928        long nice, retval;
3929
3930        /*
3931         * Setpriority might change our priority at the same moment.
3932         * We don't have to worry. Conceptually one call occurs first
3933         * and we have a single winner.
3934         */
3935        increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
3936        nice = task_nice(current) + increment;
3937
3938        nice = clamp_val(nice, MIN_NICE, MAX_NICE);
3939        if (increment < 0 && !can_nice(current, nice))
3940                return -EPERM;
3941
3942        retval = security_task_setnice(current, nice);
3943        if (retval)
3944                return retval;
3945
3946        set_user_nice(current, nice);
3947        return 0;
3948}
3949
3950#endif
3951
3952/**
3953 * task_prio - return the priority value of a given task.
3954 * @p: the task in question.
3955 *
3956 * Return: The priority value as seen by users in /proc:
3957 * normal tasks map to nice + 20 (0..39), RT tasks map to
3958 * -1 - rt_priority (-2..-100) and SCHED_DEADLINE tasks to -101.
3959 */
3960int task_prio(const struct task_struct *p)
3961{
3962        return p->prio - MAX_RT_PRIO;
3963}
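/*
 * Worked examples (editor addition): a nice +5 CFS task has p->prio == 125,
 * so task_prio() returns 25; a SCHED_FIFO task with rt_priority 30 has
 * p->prio == 69, so task_prio() returns -31.
 */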
3964
3965/**
3966 * idle_cpu - is a given CPU idle currently?
3967 * @cpu: the processor in question.
3968 *
3969 * Return: 1 if the CPU is currently idle. 0 otherwise.
3970 */
3971int idle_cpu(int cpu)
3972{
3973        struct rq *rq = cpu_rq(cpu);
3974
3975        if (rq->curr != rq->idle)
3976                return 0;
3977
3978        if (rq->nr_running)
3979                return 0;
3980
3981#ifdef CONFIG_SMP
3982        if (!llist_empty(&rq->wake_list))
3983                return 0;
3984#endif
3985
3986        return 1;
3987}
3988
3989/**
3990 * idle_task - return the idle task for a given CPU.
3991 * @cpu: the processor in question.
3992 *
3993 * Return: The idle task for the CPU @cpu.
3994 */
3995struct task_struct *idle_task(int cpu)
3996{
3997        return cpu_rq(cpu)->idle;
3998}
3999
4000/**
4001 * find_process_by_pid - find a process with a matching PID value.
4002 * @pid: the pid in question.
4003 *
4004 * The task of @pid, if found. %NULL otherwise.
4005 */
4006static struct task_struct *find_process_by_pid(pid_t pid)
4007{
4008        return pid ? find_task_by_vpid(pid) : current;
4009}
4010
4011/*
4012 * This function initializes the sched_dl_entity of a task that is
4013 * becoming SCHED_DEADLINE.
4014 *
4015 * Only the static values are considered here, the actual runtime and the
4016 * absolute deadline will be properly calculated when the task is enqueued
4017 * for the first time with its new policy.
4018 */
4019static void
4020__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
4021{
4022        struct sched_dl_entity *dl_se = &p->dl;
4023
4024        dl_se->dl_runtime = attr->sched_runtime;
4025        dl_se->dl_deadline = attr->sched_deadline;
4026        dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
4027        dl_se->flags = attr->sched_flags;
4028        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
4029
4030        /*
4031         * Changing the parameters of a task is 'tricky' and we're not doing
4032         * the correct thing -- also see task_dead_dl() and switched_from_dl().
4033         *
4034         * What we SHOULD do is delay the bandwidth release until the 0-lag
4035         * point. This would include retaining the task_struct until that time
4036         * and change dl_overflow() to not immediately decrement the current
4037         * amount.
4038         *
4039         * Instead we retain the current runtime/deadline and let the new
4040         * parameters take effect after the current reservation period lapses.
4041         * This is safe (albeit pessimistic) because the 0-lag point is always
4042         * before the current scheduling deadline.
4043         *
4044         * We can still have temporary overloads because we do not delay the
4045         * change in bandwidth until that time; so admission control is
4046         * not on the safe side. It does however guarantee tasks will never
4047         * consume more than promised.
4048         */
4049}
4050
4051/*
4052 * sched_setparam() passes in -1 for its policy, to let the functions
4053 * it calls know not to change it.
4054 */
4055#define SETPARAM_POLICY -1
4056
4057static void __setscheduler_params(struct task_struct *p,
4058                const struct sched_attr *attr)
4059{
4060        int policy = attr->sched_policy;
4061
4062        if (policy == SETPARAM_POLICY)
4063                policy = p->policy;
4064
4065        p->policy = policy;
4066
4067        if (dl_policy(policy))
4068                __setparam_dl(p, attr);
4069        else if (fair_policy(policy))
4070                p->static_prio = NICE_TO_PRIO(attr->sched_nice);
4071
4072        /*
4073         * __sched_setscheduler() ensures attr->sched_priority == 0 when
4074         * !rt_policy. Always setting this ensures that things like
4075         * getparam()/getattr() don't report silly values for !rt tasks.
4076         */
4077        p->rt_priority = attr->sched_priority;
4078        p->normal_prio = normal_prio(p);
4079        set_load_weight(p);
4080}
4081
4082/* Actually do priority change: must hold pi & rq lock. */
4083static void __setscheduler(struct rq *rq, struct task_struct *p,
4084                           const struct sched_attr *attr, bool keep_boost)
4085{
4086        __setscheduler_params(p, attr);
4087
4088        /*
4089         * Keep a potential priority boosting if called from
4090         * sched_setscheduler().
4091         */
4092        p->prio = normal_prio(p);
4093        if (keep_boost)
4094                p->prio = rt_effective_prio(p, p->prio);
4095
4096        if (dl_prio(p->prio))
4097                p->sched_class = &dl_sched_class;
4098        else if (rt_prio(p->prio))
4099                p->sched_class = &rt_sched_class;
4100        else
4101                p->sched_class = &fair_sched_class;
4102}
4103
4104static void
4105__getparam_dl(struct task_struct *p, struct sched_attr *attr)
4106{
4107        struct sched_dl_entity *dl_se = &p->dl;
4108
4109        attr->sched_priority = p->rt_priority;
4110        attr->sched_runtime = dl_se->dl_runtime;
4111        attr->sched_deadline = dl_se->dl_deadline;
4112        attr->sched_period = dl_se->dl_period;
4113        attr->sched_flags = dl_se->flags;
4114}
4115
4116/*
4117 * This function validates the new parameters of a -deadline task.
4118 * We require the deadline to be non-zero and greater than or equal
4119 * to the runtime, and the period to be either zero or greater than
4120 * or equal to the deadline. Furthermore, we have to be sure that
4121 * user parameters are above the internal resolution of 1us (we
4122 * check sched_runtime only since it is always the smaller one) and
4123 * below 2^63 ns (we have to check both sched_deadline and
4124 * sched_period, as the latter can be zero).
4125 */
4126static bool
4127__checkparam_dl(const struct sched_attr *attr)
4128{
4129        /* deadline != 0 */
4130        if (attr->sched_deadline == 0)
4131                return false;
4132
4133        /*
4134         * Since we truncate DL_SCALE bits, make sure we're at least
4135         * that big.
4136         */
4137        if (attr->sched_runtime < (1ULL << DL_SCALE))
4138                return false;
4139
4140        /*
4141         * Since we use the MSB for wrap-around and sign issues, make
4142         * sure it's not set (mind that period can be equal to zero).
4143         */
4144        if (attr->sched_deadline & (1ULL << 63) ||
4145            attr->sched_period & (1ULL << 63))
4146                return false;
4147
4148        /* runtime <= deadline <= period (if period != 0) */
4149        if ((attr->sched_period != 0 &&
4150             attr->sched_period < attr->sched_deadline) ||
4151            attr->sched_deadline < attr->sched_runtime)
4152                return false;
4153
4154        return true;
4155}
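/*
 * Worked example (editor addition): a reservation of 10ms every 100ms with
 * a 30ms relative deadline passes the checks above. In nanoseconds,
 * sched_runtime = 10000000, sched_deadline = 30000000 and
 * sched_period = 100000000 satisfy runtime >= (1 << DL_SCALE),
 * runtime <= deadline <= period, and neither deadline nor period has the
 * most significant bit set.
 */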
4156
4157/*
4158 * Check the target process has a UID that matches the current process's:
4159 */
4160static bool check_same_owner(struct task_struct *p)
4161{
4162        const struct cred *cred = current_cred(), *pcred;
4163        bool match;
4164
4165        rcu_read_lock();
4166        pcred = __task_cred(p);
4167        match = (uid_eq(cred->euid, pcred->euid) ||
4168                 uid_eq(cred->euid, pcred->uid));
4169        rcu_read_unlock();
4170        return match;
4171}
4172
4173static bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
4174{
4175        struct sched_dl_entity *dl_se = &p->dl;
4176
4177        if (dl_se->dl_runtime != attr->sched_runtime ||
4178                dl_se->dl_deadline != attr->sched_deadline ||
4179                dl_se->dl_period != attr->sched_period ||
4180                dl_se->flags != attr->sched_flags)
4181                return true;
4182
4183        return false;
4184}
4185
4186static int __sched_setscheduler(struct task_struct *p,
4187                                const struct sched_attr *attr,
4188                                bool user, bool pi)
4189{
4190        int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
4191                      MAX_RT_PRIO - 1 - attr->sched_priority;
4192        int retval, oldprio, oldpolicy = -1, queued, running;
4193        int new_effective_prio, policy = attr->sched_policy;
4194        const struct sched_class *prev_class;
4195        struct rq_flags rf;
4196        int reset_on_fork;
4197        int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
4198        struct rq *rq;
4199
4200        /* May grab non-irq protected spin_locks: */
4201        BUG_ON(in_interrupt());
4202recheck:
4203        /* Double check policy once rq lock held: */
4204        if (policy < 0) {
4205                reset_on_fork = p->sched_reset_on_fork;
4206                policy = oldpolicy = p->policy;
4207        } else {
4208                reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
4209
4210                if (!valid_policy(policy))
4211                        return -EINVAL;
4212        }
4213
4214        if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
4215                return -EINVAL;
4216
4217        /*
4218         * Valid priorities for SCHED_FIFO and SCHED_RR are
4219         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4220         * SCHED_BATCH and SCHED_IDLE is 0.
4221         */
4222        if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
4223            (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
4224                return -EINVAL;
4225        if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
4226            (rt_policy(policy) != (attr->sched_priority != 0)))
4227                return -EINVAL;
4228
4229        /*
4230         * Allow unprivileged RT tasks to decrease priority:
4231         */
4232        if (user && !capable(CAP_SYS_NICE)) {
4233                if (fair_policy(policy)) {
4234                        if (attr->sched_nice < task_nice(p) &&
4235                            !can_nice(p, attr->sched_nice))
4236                                return -EPERM;
4237                }
4238
4239                if (rt_policy(policy)) {
4240                        unsigned long rlim_rtprio =
4241                                        task_rlimit(p, RLIMIT_RTPRIO);
4242
4243                        /* Can't set/change the rt policy: */
4244                        if (policy != p->policy && !rlim_rtprio)
4245                                return -EPERM;
4246
4247                        /* Can't increase priority: */
4248                        if (attr->sched_priority > p->rt_priority &&
4249                            attr->sched_priority > rlim_rtprio)
4250                                return -EPERM;
4251                }
4252
4253                 /*
4254                  * Can't set/change SCHED_DEADLINE policy at all for now
4255                  * (safest behavior); in the future we would like to allow
4256                  * unprivileged DL tasks to increase their relative deadline
4257                  * or reduce their runtime (both ways reducing utilization)
4258                  */
4259                if (dl_policy(policy))
4260                        return -EPERM;
4261
4262                /*
4263                 * Treat SCHED_IDLE as nice 20. Only allow a switch to
4264                 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
4265                 */
4266                if (idle_policy(p->policy) && !idle_policy(policy)) {
4267                        if (!can_nice(p, task_nice(p)))
4268                                return -EPERM;
4269                }
4270
4271                /* Can't change other user's priorities: */
4272                if (!check_same_owner(p))
4273                        return -EPERM;
4274
4275                /* Normal users shall not reset the sched_reset_on_fork flag: */
4276                if (p->sched_reset_on_fork && !reset_on_fork)
4277                        return -EPERM;
4278        }
4279
4280        if (user) {
4281                retval = security_task_setscheduler(p);
4282                if (retval)
4283                        return retval;
4284        }
4285
4286        /*
4287         * Make sure no PI-waiters arrive (or leave) while we are
4288         * changing the priority of the task:
4289         *
4290         * To be able to change p->policy safely, the appropriate
4291         * runqueue lock must be held.
4292         */
4293        rq = task_rq_lock(p, &rf);
4294        update_rq_clock(rq);
4295
4296        /*
4297         * Changing the policy of the stop threads is a very bad idea:
4298         */
4299        if (p == rq->stop) {
4300                task_rq_unlock(rq, p, &rf);
4301                return -EINVAL;
4302        }
4303
4304        /*
4305         * If not changing anything there's no need to proceed further,
4306         * but store a possible modification of reset_on_fork.
4307         */
4308        if (unlikely(policy == p->policy)) {
4309                if (fair_policy(policy) && attr->sched_nice != task_nice(p))
4310                        goto change;
4311                if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
4312                        goto change;
4313                if (dl_policy(policy) && dl_param_changed(p, attr))
4314                        goto change;
4315
4316                p->sched_reset_on_fork = reset_on_fork;
4317                task_rq_unlock(rq, p, &rf);
4318                return 0;
4319        }
4320change:
4321
4322        if (user) {
4323#ifdef CONFIG_RT_GROUP_SCHED
4324                /*
4325                 * Do not allow realtime tasks into groups that have no runtime
4326                 * assigned.
4327                 */
4328                if (rt_bandwidth_enabled() && rt_policy(policy) &&
4329                                task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4330                                !task_group_is_autogroup(task_group(p))) {
4331                        task_rq_unlock(rq, p, &rf);
4332                        return -EPERM;
4333                }
4334#endif
4335#ifdef CONFIG_SMP
4336                if (dl_bandwidth_enabled() && dl_policy(policy)) {
4337                        cpumask_t *span = rq->rd->span;
4338
4339                        /*
4340                         * Don't allow tasks with an affinity mask smaller than
4341                         * the entire root_domain to become SCHED_DEADLINE. We
4342                         * will also fail if there's no bandwidth available.
4343                         */
4344                        if (!cpumask_subset(span, &p->cpus_allowed) ||
4345                            rq->rd->dl_bw.bw == 0) {
4346                                task_rq_unlock(rq, p, &rf);
4347                                return -EPERM;
4348                        }
4349                }
4350#endif
4351        }
4352
4353        /* Re-check policy now with rq lock held: */
4354        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4355                policy = oldpolicy = -1;
4356                task_rq_unlock(rq, p, &rf);
4357                goto recheck;
4358        }
4359
4360        /*
4361         * If setscheduling to SCHED_DEADLINE (or changing the parameters
4362         * of a SCHED_DEADLINE task) we need to check if enough bandwidth
4363         * is available.
4364         */
4365        if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
4366                task_rq_unlock(rq, p, &rf);
4367                return -EBUSY;
4368        }
4369
4370        p->sched_reset_on_fork = reset_on_fork;
4371        oldprio = p->prio;
4372
4373        if (pi) {
4374                /*
4375                 * Take priority boosted tasks into account. If the new
4376                 * effective priority is unchanged, we just store the new
4377                 * normal parameters and do not touch the scheduler class and
4378         * the runqueue. This will be done when the task deboosts
4379         * itself.
4380                 */
4381                new_effective_prio = rt_effective_prio(p, newprio);
4382                if (new_effective_prio == oldprio)
4383                        queue_flags &= ~DEQUEUE_MOVE;
4384        }
4385
4386        queued = task_on_rq_queued(p);
4387        running = task_current(rq, p);
4388        if (queued)
4389                dequeue_task(rq, p, queue_flags);
4390        if (running)
4391                put_prev_task(rq, p);
4392
4393        prev_class = p->sched_class;
4394        __setscheduler(rq, p, attr, pi);
4395
4396        if (queued) {
4397                /*
4398                 * We enqueue to tail when the priority of a task is
4399                 * increased (user space view).
4400                 */
4401                if (oldprio < p->prio)
4402                        queue_flags |= ENQUEUE_HEAD;
4403
4404                enqueue_task(rq, p, queue_flags);
4405        }
4406        if (running)
4407                set_curr_task(rq, p);
4408
4409        check_class_changed(rq, p, prev_class, oldprio);
4410
4411        /* Prevent rq from going away on us: */
4412        preempt_disable();
4413        task_rq_unlock(rq, p, &rf);
4414
4415        if (pi)
4416                rt_mutex_adjust_pi(p);
4417
4418        /* Run balance callbacks after we've adjusted the PI chain: */
4419        balance_callback(rq);
4420        preempt_enable();
4421
4422        return 0;
4423}
4424
4425static int _sched_setscheduler(struct task_struct *p, int policy,
4426                               const struct sched_param *param, bool check)
4427{
4428        struct sched_attr attr = {
4429                .sched_policy   = policy,
4430                .sched_priority = param->sched_priority,
4431                .sched_nice     = PRIO_TO_NICE(p->static_prio),
4432        };
4433
4434        /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
4435        if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
4436                attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4437                policy &= ~SCHED_RESET_ON_FORK;
4438                attr.sched_policy = policy;
4439        }
4440
4441        return __sched_setscheduler(p, &attr, check, true);
4442}
4443/**
4444 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4445 * @p: the task in question.
4446 * @policy: new policy.
4447 * @param: structure containing the new RT priority.
4448 *
4449 * Return: 0 on success. An error code otherwise.
4450 *
4451 * NOTE that the task may already be dead.
4452 */
4453int sched_setscheduler(struct task_struct *p, int policy,
4454                       const struct sched_param *param)
4455{
4456        return _sched_setscheduler(p, policy, param, true);
4457}
4458EXPORT_SYMBOL_GPL(sched_setscheduler);
4459
4460int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
4461{
4462        return __sched_setscheduler(p, attr, true, true);
4463}
4464EXPORT_SYMBOL_GPL(sched_setattr);
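/*
 * Illustrative sketch (editor addition): an in-kernel caller holding a
 * task reference could hand it a deadline reservation with sched_setattr()
 * as below. The 10ms/30ms/100ms numbers are example values only, and the
 * call is still subject to the bandwidth and capability checks in
 * __sched_setscheduler().
 *
 *	static int example_make_deadline(struct task_struct *p)
 *	{
 *		struct sched_attr attr = {
 *			.sched_policy	= SCHED_DEADLINE,
 *			.sched_runtime	= 10 * NSEC_PER_MSEC,
 *			.sched_deadline	= 30 * NSEC_PER_MSEC,
 *			.sched_period	= 100 * NSEC_PER_MSEC,
 *		};
 *
 *		return sched_setattr(p, &attr);
 *	}
 */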
4465
4466/**
4467 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4468 * @p: the task in question.
4469 * @policy: new policy.
4470 * @param: structure containing the new RT priority.
4471 *
4472 * Just like sched_setscheduler, only don't bother checking if the
4473 * current context has permission.  For example, this is needed in
4474 * stop_machine(): we create temporary high priority worker threads,
4475 * but our caller might not have that capability.
4476 *
4477 * Return: 0 on success. An error code otherwise.
4478 */
4479int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4480                               const struct sched_param *param)
4481{
4482        return _sched_setscheduler(p, policy, param, false);
4483}
4484EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
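/*
 * Illustrative sketch (editor addition): a typical in-kernel use of the
 * _nocheck variant is to promote a freshly created kthread to a realtime
 * class without permission checks, e.g.:
 *
 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
 *
 *	sched_setscheduler_nocheck(my_kthread, SCHED_FIFO, &param);
 *
 * 'my_kthread' and the chosen priority are assumptions for the example.
 */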
4485
4486static int
4487do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4488{
4489        struct sched_param lparam;
4490        struct task_struct *p;
4491        int retval;
4492
4493        if (!param || pid < 0)
4494                return -EINVAL;
4495        if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4496                return -EFAULT;
4497
4498        rcu_read_lock();
4499        retval = -ESRCH;
4500        p = find_process_by_pid(pid);
4501        if (p != NULL)
4502                retval = sched_setscheduler(p, policy, &lparam);
4503        rcu_read_unlock();
4504
4505        return retval;
4506}
4507
4508/*
4509 * Mimics kernel/events/core.c perf_copy_attr().
4510 */
4511static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
4512{
4513        u32 size;
4514        int ret;
4515
4516        if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
4517                return -EFAULT;
4518
4519        /* Zero the full structure, so that a short copy leaves the rest zeroed: */
4520        memset(attr, 0, sizeof(*attr));
4521
4522        ret = get_user(size, &uattr->size);
4523        if (ret)
4524                return ret;
4525
4526        /* Bail out on silly large: */
4527        if (size > PAGE_SIZE)
4528                goto err_size;
4529
4530        /* ABI compatibility quirk: */
4531        if (!size)
4532                size = SCHED_ATTR_SIZE_VER0;
4533
4534        if (size < SCHED_ATTR_SIZE_VER0)
4535                goto err_size;
4536
4537        /*
4538         * If we're handed a bigger struct than we know of,
4539         * ensure all the unknown bits are 0 - i.e. new
4540         * user-space does not rely on any kernel feature
4541         * extensions we don't know about yet.
4542         */
4543        if (size > sizeof(*attr)) {
4544                unsigned char __user *addr;
4545                unsigned char __user *end;
4546                unsigned char val;
4547
4548                addr = (void __user *)uattr + sizeof(*attr);
4549                end  = (void __user *)uattr + size;
4550
4551                for (; addr < end; addr++) {
4552                        ret = get_user(val, addr);
4553                        if (ret)
4554                                return ret;
4555                        if (val)
4556                                goto err_size;
4557                }
4558                size = sizeof(*attr);
4559        }
4560
4561        ret = copy_from_user(attr, uattr, size);
4562        if (ret)
4563                return -EFAULT;
4564
4565        /*
4566         * XXX: Do we want to be lenient like existing syscalls; or do we want
4567         * to be strict and return an error on out-of-bounds values?
4568         */
4569        attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
4570
4571        return 0;
4572
4573err_size:
4574        put_user(sizeof(*attr), &uattr->size);
4575        return -E2BIG;
4576}
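/*
 * Illustrative sketch (editor addition): from user space the size handshake
 * implemented above looks roughly like the snippet below (a raw syscall is
 * shown because glibc provided no sched_setattr() wrapper at the time of
 * writing). If the structure is larger than the kernel knows about and any
 * of the unknown bytes are non-zero, the kernel returns E2BIG and writes
 * the size it does understand back into attr.size.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_nice	= 5,
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */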
4577
4578/**
4579 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4580 * @pid: the pid in question.
4581 * @policy: new policy.
4582 * @param: structure containing the new RT priority.
4583 *
4584 * Return: 0 on success. An error code otherwise.
4585 */
4586SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
4587{
4588        if (policy < 0)
4589                return -EINVAL;
4590
4591        return do_sched_setscheduler(pid, policy, param);
4592}
4593
4594/**
4595 * sys_sched_setparam - set/change the RT priority of a thread
4596 * @pid: the pid in question.
4597 * @param: structure containing the new RT priority.
4598 *
4599 * Return: 0 on success. An error code otherwise.
4600 */
4601SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
4602{
4603        return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
4604}
4605
4606/**
4607 * sys_sched_setattr - same as above, but with extended sched_attr
4608 * @pid: the pid in question.
4609 * @uattr: structure containing the extended parameters.
4610 * @flags: for future extension.
4611 */
4612SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
4613                               unsigned int, flags)
4614{
4615        struct sched_attr attr;
4616        struct task_struct *p;
4617        int retval;
4618
4619        if (!uattr || pid < 0 || flags)
4620                return -EINVAL;
4621
4622        retval = sched_copy_attr(uattr, &attr);
4623        if (retval)
4624                return retval;
4625
4626        if ((int)attr.sched_policy < 0)
4627                return -EINVAL;
4628
4629        rcu_read_lock();
4630        retval = -ESRCH;
4631        p = find_process_by_pid(pid);
4632        if (p != NULL)
4633                retval = sched_setattr(p, &attr);
4634        rcu_read_unlock();
4635
4636        return retval;
4637}
4638
4639/**
4640 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4641 * @pid: the pid in question.
4642 *
4643 * Return: On success, the policy of the thread. Otherwise, a negative error
4644 * code.
4645 */
4646SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
4647{
4648        struct task_struct *p;
4649        int retval;
4650
4651        if (pid < 0)
4652                return -EINVAL;
4653
4654        retval = -ESRCH;
4655        rcu_read_lock();
4656        p = find_process_by_pid(pid);
4657        if (p) {
4658                retval = security_task_getscheduler(p);
4659                if (!retval)
4660                        retval = p->policy
4661                                | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
4662        }
4663        rcu_read_unlock();
4664        return retval;
4665}
4666
4667/**
4668 * sys_sched_getparam - get the RT priority of a thread
4669 * @pid: the pid in question.
4670 * @param: structure containing the RT priority.
4671 *
4672 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
4673 * code.
4674 */
4675SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
4676{
4677        struct sched_param lp = { .sched_priority = 0 };
4678        struct task_struct *p;
4679        int retval;
4680
4681        if (!param || pid < 0)
4682                return -EINVAL;
4683
4684        rcu_read_lock();
4685        p = find_process_by_pid(pid);
4686        retval = -ESRCH;
4687        if (!p)
4688                goto out_unlock;
4689
4690        retval = security_task_getscheduler(p);
4691        if (retval)
4692                goto out_unlock;
4693
4694        if (task_has_rt_policy(p))
4695                lp.sched_priority = p->rt_priority;
4696        rcu_read_unlock();
4697
4698        /*
4699         * This one might sleep, we cannot do it with a spinlock held ...
4700         */
4701        retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4702
4703        return retval;
4704
4705out_unlock:
4706        rcu_read_unlock();
4707        return retval;
4708}
4709
4710static int sched_read_attr(struct sched_attr __user *uattr,
4711                           struct sched_attr *attr,
4712                           unsigned int usize)
4713{
4714        int ret;
4715
4716        if (!access_ok(VERIFY_WRITE, uattr, usize))
4717                return -EFAULT;
4718
4719        /*
4720         * If we're handed a smaller struct than we know of,
4721         * ensure all the unknown bits are 0 - i.e. old
4722         * user-space does not get incomplete information.
4723         */
4724        if (usize < sizeof(*attr)) {
4725                unsigned char *addr;
4726                unsigned char *end;
4727
4728                addr = (void *)attr + usize;
4729                end  = (void *)attr + sizeof(*attr);
4730
4731                for (; addr < end; addr++) {
4732                        if (*addr)
4733                                return -EFBIG;
4734                }
4735
4736                attr->size = usize;
4737        }
4738
4739        ret = copy_to_user(uattr, attr, attr->size);
4740        if (ret)
4741                return -EFAULT;
4742
4743        return 0;
4744}
4745
4746/**
4747 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
4748 * @pid: the pid in question.
4749 * @uattr: structure containing the extended parameters.
4750 * @size: sizeof(attr) for fwd/bwd comp.
4751 * @flags: for future extension.
4752 */
4753SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
4754                unsigned int, size, unsigned int, flags)
4755{
4756        struct sched_attr attr = {
4757                .size = sizeof(struct sched_attr),
4758        };
4759        struct task_struct *p;
4760        int retval;
4761
4762        if (!uattr || pid < 0 || size > PAGE_SIZE ||
4763            size < SCHED_ATTR_SIZE_VER0 || flags)
4764                return -EINVAL;
4765
4766        rcu_read_lock();
4767        p = find_process_by_pid(pid);
4768        retval = -ESRCH;
4769        if (!p)
4770                goto out_unlock;
4771
4772        retval = security_task_getscheduler(p);
4773        if (retval)
4774                goto out_unlock;
4775
4776        attr.sched_policy = p->policy;
4777        if (p->sched_reset_on_fork)
4778                attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4779        if (task_has_dl_policy(p))
4780                __getparam_dl(p, &attr);
4781        else if (task_has_rt_policy(p))
4782                attr.sched_priority = p->rt_priority;
4783        else
4784                attr.sched_nice = task_nice(p);
4785
4786        rcu_read_unlock();
4787
4788        retval = sched_read_attr(uattr, &attr, size);
4789        return retval;
4790
4791out_unlock:
4792        rcu_read_unlock();
4793        return retval;
4794}
4795
4796long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4797{
4798        cpumask_var_t cpus_allowed, new_mask;
4799        struct task_struct *p;
4800        int retval;
4801
4802        rcu_read_lock();
4803
4804        p = find_process_by_pid(pid);
4805        if (!p) {
4806                rcu_read_unlock();
4807                return -ESRCH;
4808        }
4809
4810        /* Prevent p going away */
4811        get_task_struct(p);
4812        rcu_read_unlock();
4813
4814        if (p->flags & PF_NO_SETAFFINITY) {
4815                retval = -EINVAL;
4816                goto out_put_task;
4817        }
4818        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4819                retval = -ENOMEM;
4820                goto out_put_task;
4821        }
4822        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4823                retval = -ENOMEM;
4824                goto out_free_cpus_allowed;
4825        }
4826        retval = -EPERM;
4827        if (!check_same_owner(p)) {
4828                rcu_read_lock();
4829                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4830                        rcu_read_unlock();
4831                        goto out_free_new_mask;
4832                }
4833                rcu_read_unlock();
4834        }
4835
4836        retval = security_task_setscheduler(p);
4837        if (retval)
4838                goto out_free_new_mask;
4839
4840
4841        cpuset_cpus_allowed(p, cpus_allowed);
4842        cpumask_and(new_mask, in_mask, cpus_allowed);
4843
4844        /*
4845         * Since bandwidth control happens on a root_domain basis,
4846         * if the admission test is enabled, we only admit -deadline
4847         * tasks that are allowed to run on all the CPUs in the task's
4848         * root_domain.
4849         */
4850#ifdef CONFIG_SMP
4851        if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
4852                rcu_read_lock();
4853                if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
4854                        retval = -EBUSY;
4855                        rcu_read_unlock();
4856                        goto out_free_new_mask;
4857                }
4858                rcu_read_unlock();
4859        }
4860#endif
4861again:
4862        retval = __set_cpus_allowed_ptr(p, new_mask, true);
4863
4864        if (!retval) {
4865                cpuset_cpus_allowed(p, cpus_allowed);
4866                if (!cpumask_subset(new_mask, cpus_allowed)) {
4867                        /*
4868                         * We must have raced with a concurrent cpuset
4869                         * update. Just reset the cpus_allowed to the
4870                         * cpuset's cpus_allowed
4871                         */
4872                        cpumask_copy(new_mask, cpus_allowed);
4873                        goto again;
4874                }
4875        }
4876out_free_new_mask:
4877        free_cpumask_var(new_mask);
4878out_free_cpus_allowed:
4879        free_cpumask_var(cpus_allowed);
4880out_put_task:
4881        put_task_struct(p);
4882        return retval;
4883}
4884
4885static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4886                             struct cpumask *new_mask)
4887{
4888        if (len < cpumask_size())
4889                cpumask_clear(new_mask);
4890        else if (len > cpumask_size())
4891                len = cpumask_size();
4892
4893        return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4894}
4895
4896/**
4897 * sys_sched_setaffinity - set the CPU affinity of a process
4898 * @pid: pid of the process
4899 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4900 * @user_mask_ptr: user-space pointer to the new CPU mask
4901 *
4902 * Return: 0 on success. An error code otherwise.
4903 */
4904SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4905                unsigned long __user *, user_mask_ptr)
4906{
4907        cpumask_var_t new_mask;
4908        int retval;
4909
4910        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4911                return -ENOMEM;
4912
4913        retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4914        if (retval == 0)
4915                retval = sched_setaffinity(pid, new_mask);
4916        free_cpumask_var(new_mask);
4917        return retval;
4918}
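
/*
 * Editor's illustration (not part of the original file): the userspace
 * side of the syscall above, pinning the calling thread to CPU 2 via
 * the glibc wrapper, which packs a cpu_set_t bitmask and passes its
 * size as @len. Error handling is intentionally minimal.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set) == -1)
 *		perror("sched_setaffinity");
 */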
4919
4920long sched_getaffinity(pid_t pid, struct cpumask *mask)
4921{
4922        struct task_struct *p;
4923        unsigned long flags;
4924        int retval;
4925
4926        rcu_read_lock();
4927
4928        retval = -ESRCH;
4929        p = find_process_by_pid(pid);
4930        if (!p)
4931                goto out_unlock;
4932
4933        retval = security_task_getscheduler(p);
4934        if (retval)
4935                goto out_unlock;
4936
4937        raw_spin_lock_irqsave(&p->pi_lock, flags);
4938        cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
4939        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4940
4941out_unlock:
4942        rcu_read_unlock();
4943
4944        return retval;
4945}
4946
4947/**
4948 * sys_sched_getaffinity - get the CPU affinity of a process
4949 * @pid: pid of the process
4950 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4951 * @user_mask_ptr: user-space pointer to hold the current CPU mask
4952 *
4953 * Return: size of CPU mask copied to user_mask_ptr on success. An
4954 * error code otherwise.
4955 */
4956SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4957                unsigned long __user *, user_mask_ptr)
4958{
4959        int ret;
4960        cpumask_var_t mask;
4961
4962        if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4963                return -EINVAL;
4964        if (len & (sizeof(unsigned long)-1))
4965                return -EINVAL;
4966
4967        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4968                return -ENOMEM;
4969
4970        ret = sched_getaffinity(pid, mask);
4971        if (ret == 0) {
4972                size_t retlen = min_t(size_t, len, cpumask_size());
4973
4974                if (copy_to_user(user_mask_ptr, mask, retlen))
4975                        ret = -EFAULT;
4976                else
4977                        ret = retlen;
4978        }
4979        free_cpumask_var(mask);
4980
4981        return ret;
4982}
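
/*
 * Editor's note (illustrative, not part of the original file): unlike
 * most syscalls, the raw syscall above returns the number of bytes it
 * copied into the user buffer; the glibc sched_getaffinity() wrapper
 * hides that and returns 0 on success. A hedged sketch of a raw call,
 * assuming SYS_sched_getaffinity from <sys/syscall.h>:
 *
 *	cpu_set_t set;
 *	long n = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);
 *
 * On success only the first n bytes of the buffer are written, so
 * either zero it beforehand or trust no bits beyond byte n - 1.
 */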
4983
4984/**
4985 * sys_sched_yield - yield the current processor to other threads.
4986 *
4987 * This function yields the current CPU to other tasks. If there are no
4988 * other threads running on this CPU then this function will return.
4989 *
4990 * Return: 0.
4991 */
4992SYSCALL_DEFINE0(sched_yield)
4993{
4994        struct rq_flags rf;
4995        struct rq *rq;
4996
4997        local_irq_disable();
4998        rq = this_rq();
4999        rq_lock(rq, &rf);
5000
5001        schedstat_inc(rq->yld_count);
5002        current->sched_class->yield_task(rq);
5003
5004        /*
5005         * Since we are going to call schedule() anyway, there's
5006         * no need to preempt or enable interrupts:
5007         */
5008        preempt_disable();
5009        rq_unlock(rq, &rf);
5010        sched_preempt_enable_no_resched();
5011
5012        schedule();
5013
5014        return 0;
5015}
5016
5017#ifndef CONFIG_PREEMPT
5018int __sched _cond_resched(void)
5019{
5020        if (should_resched(0)) {
5021                preempt_schedule_common();
5022                return 1;
5023        }
5024        return 0;
5025}
5026EXPORT_SYMBOL(_cond_resched);
5027#endif
5028
5029/*
5030 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
5031 * call schedule, and on return reacquire the lock.
5032 *
5033 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
5034 * operations here to prevent schedule() from being called twice (once via
5035 * spin_unlock(), once by hand).
5036 */
5037int __cond_resched_lock(spinlock_t *lock)
5038{
5039        int resched = should_resched(PREEMPT_LOCK_OFFSET);
5040        int ret = 0;
5041
5042        lockdep_assert_held(lock);
5043
5044        if (spin_needbreak(lock) || resched) {
5045                spin_unlock(lock);
5046                if (resched)
5047                        preempt_schedule_common();
5048                else
5049                        cpu_relax();
5050                ret = 1;
5051                spin_lock(lock);
5052        }
5053        return ret;
5054}
5055EXPORT_SYMBOL(__cond_resched_lock);
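
/*
 * Editor's sketch (not part of the original file): long scans under a
 * spinlock usually reach the function above through the
 * cond_resched_lock() wrapper in <linux/sched.h>. more_work() and
 * do_one_unit() below are hypothetical placeholders:
 *
 *	spin_lock(&foo_lock);
 *	while (more_work()) {
 *		do_one_unit();
 *		cond_resched_lock(&foo_lock);
 *	}
 *	spin_unlock(&foo_lock);
 *
 * A non-zero return means the lock was dropped and re-acquired, so any
 * state derived while holding it must be revalidated.
 */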
5056
5057int __sched __cond_resched_softirq(void)
5058{
5059        BUG_ON(!in_softirq());
5060
5061        if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
5062                local_bh_enable();
5063                preempt_schedule_common();
5064                local_bh_disable();
5065                return 1;
5066        }
5067        return 0;
5068}
5069EXPORT_SYMBOL(__cond_resched_softirq);
5070
5071/**
5072 * yield - yield the current processor to other threads.
5073 *
5074 * Do not ever use this function, there's a 99% chance you're doing it wrong.
5075 *
5076 * The scheduler is at all times free to pick the calling task as the most
5077 * eligible task to run; if removing the yield() call from your code breaks
5078 * it, it's already broken.
5079 *
5080 * Typical broken usage is:
5081 *
5082 * while (!event)
5083 *      yield();
5084 *
5085 * where one assumes that yield() will let 'the other' process run that will
5086 * make event true. If the current task is a SCHED_FIFO task that will never
5087 * happen. Never use yield() as a progress guarantee!!
5088 *
5089 * If you want to use yield() to wait for something, use wait_event().
5090 * If you want to use yield() to be 'nice' for others, use cond_resched().
5091 * If you still want to use yield(), do not!
5092 */
5093void __sched yield(void)
5094{
5095        set_current_state(TASK_RUNNING);
5096        sys_sched_yield();
5097}
5098EXPORT_SYMBOL(yield);
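
/*
 * Editor's sketch of the recommended alternative mentioned above (not
 * part of the original file): waiting for a condition with
 * wait_event()/wake_up() instead of spinning on yield(). my_wq is a
 * hypothetical DECLARE_WAIT_QUEUE_HEAD() and my_flag a shared flag:
 *
 *	Waiter:
 *		wait_event(my_wq, my_flag);
 *
 *	Waker:
 *		my_flag = true;
 *		wake_up(&my_wq);
 */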
5099
5100/**
5101 * yield_to - yield the current processor to another thread in
5102 * your thread group, or accelerate that thread toward the
5103 * processor it's on.
5104 * @p: target task
5105 * @preempt: whether task preemption is allowed or not
5106 *
5107 * It's the caller's job to ensure that the target task struct
5108 * can't go away on us before we can do any checks.
5109 *
5110 * Return:
5111 *      true (>0) if we indeed boosted the target task.
5112 *      false (0) if we failed to boost the target.
5113 *      -ESRCH if there's no task to yield to.
5114 */
5115int __sched yield_to(struct task_struct *p, bool preempt)
5116{
5117        struct task_struct *curr = current;
5118        struct rq *rq, *p_rq;
5119        unsigned long flags;
5120        int yielded = 0;
5121
5122        local_irq_save(flags);
5123        rq = this_rq();
5124
5125again:
5126        p_rq = task_rq(p);
5127        /*
5128         * If we're the only runnable task on the rq and target rq also
5129         * has only one task, there's absolutely no point in yielding.
5130         */
5131        if (rq->nr_running == 1 && p_rq->nr_running == 1) {
5132                yielded = -ESRCH;
5133                goto out_irq;
5134        }
5135
5136        double_rq_lock(rq, p_rq);
5137        if (task_rq(p) != p_rq) {
5138                double_rq_unlock(rq, p_rq);
5139                goto again;
5140        }
5141
5142        if (!curr->sched_class->yield_to_task)
5143                goto out_unlock;
5144
5145        if (curr->sched_class != p->sched_class)
5146                goto out_unlock;
5147
5148        if (task_running(p_rq, p) || p->state)
5149                goto out_unlock;
5150
5151        yielded = curr->sched_class->yield_to_task(rq, p, preempt);
5152        if (yielded) {
5153                schedstat_inc(rq->yld_count);
5154                /*
5155                 * Make p's CPU reschedule; pick_next_entity takes care of
5156                 * fairness.
5157                 */
5158                if (preempt && rq != p_rq)
5159                        resched_curr(p_rq);
5160        }
5161
5162out_unlock:
5163        double_rq_unlock(rq, p_rq);
5164out_irq:
5165        local_irq_restore(flags);
5166
5167        if (yielded > 0)
5168                schedule();
5169
5170        return yielded;
5171}
5172EXPORT_SYMBOL_GPL(yield_to);
5173
5174int io_schedule_prepare(void)
5175{
5176        int old_iowait = current->in_iowait;
5177
5178        current->in_iowait = 1;
5179        blk_schedule_flush_plug(current);
5180
5181        return old_iowait;
5182}
5183
5184void io_schedule_finish(int token)
5185{
5186        current->in_iowait = token;
5187}
5188
5189/*
5190 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
5191 * that process accounting knows that this is a task in IO wait state.
5192 */
5193long __sched io_schedule_timeout(long timeout)
5194{
5195        int token;
5196        long ret;
5197
5198        token = io_schedule_prepare();
5199        ret = schedule_timeout(timeout);
5200        io_schedule_finish(token);
5201
5202        return ret;
5203}
5204EXPORT_SYMBOL(io_schedule_timeout);
5205
5206void io_schedule(void)
5207{
5208        int token;
5209
5210        token = io_schedule_prepare();
5211        schedule();
5212        io_schedule_finish(token);
5213}
5214EXPORT_SYMBOL(io_schedule);
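
/*
 * Editor's illustration (not part of the original file): the usual
 * calling pattern. As with schedule_timeout(), the task state must be
 * set before calling io_schedule_timeout(); the only difference from a
 * plain sleep is the iowait accounting described above. my_io_done is
 * a hypothetical completion flag:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!my_io_done)
 *		io_schedule_timeout(msecs_to_jiffies(100));
 *	__set_current_state(TASK_RUNNING);
 */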
5215
5216/**
5217 * sys_sched_get_priority_max - return maximum RT priority.
5218 * @policy: scheduling class.
5219 *
5220 * Return: On success, this syscall returns the maximum
5221 * rt_priority that can be used by a given scheduling class.
5222 * On failure, a negative error code is returned.
5223 */
5224SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
5225{
5226        int ret = -EINVAL;
5227
5228        switch (policy) {
5229        case SCHED_FIFO:
5230        case SCHED_RR:
5231                ret = MAX_USER_RT_PRIO-1;
5232                break;
5233        case SCHED_DEADLINE:
5234        case SCHED_NORMAL:
5235        case SCHED_BATCH:
5236        case SCHED_IDLE:
5237                ret = 0;
5238                break;
5239        }
5240        return ret;
5241}
5242
5243/**
5244 * sys_sched_get_priority_min - return minimum RT priority.
5245 * @policy: scheduling class.
5246 *
5247 * Return: On success, this syscall returns the minimum
5248 * rt_priority that can be used by a given scheduling class.
5249 * On failure, a negative error code is returned.
5250 */
5251SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
5252{
5253        int ret = -EINVAL;
5254
5255        switch (policy) {
5256        case SCHED_FIFO:
5257        case SCHED_RR:
5258                ret = 1;
5259                break;
5260        case SCHED_DEADLINE:
5261        case SCHED_NORMAL:
5262        case SCHED_BATCH:
5263        case SCHED_IDLE:
5264                ret = 0;
5265        }
5266        return ret;
5267}
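
/*
 * Editor's illustration (not part of the original file): userspace can
 * use the two syscalls above to discover the valid range, typically
 * 1..99 for SCHED_FIFO/SCHED_RR on Linux, instead of hard-coding it:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int max = sched_get_priority_max(SCHED_FIFO);
 *	int min = sched_get_priority_min(SCHED_FIFO);
 *	struct sched_param sp = { .sched_priority = (min + max) / 2 };
 *
 *	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
 *		perror("sched_setscheduler");
 */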
5268
5269/**
5270 * sys_sched_rr_get_interval - return the default timeslice of a process.
5271 * @pid: pid of the process.
5272 * @interval: userspace pointer to the timeslice value.
5273 *
5274 * This syscall writes the default timeslice value of a given process
5275 * into the user-space timespec buffer. A value of '0' means infinity.
5276 *
5277 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
5278 * an error code.
5279 */
5280SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
5281                struct timespec __user *, interval)
5282{
5283        struct task_struct *p;
5284        unsigned int time_slice;
5285        struct rq_flags rf;
5286        struct timespec t;
5287        struct rq *rq;
5288        int retval;
5289
5290        if (pid < 0)
5291                return -EINVAL;
5292
5293        retval = -ESRCH;
5294        rcu_read_lock();
5295        p = find_process_by_pid(pid);
5296        if (!p)
5297                goto out_unlock;
5298
5299        retval = security_task_getscheduler(p);
5300        if (retval)
5301                goto out_unlock;
5302
5303        rq = task_rq_lock(p, &rf);
5304        time_slice = 0;
5305        if (p->sched_class->get_rr_interval)
5306                time_slice = p->sched_class->get_rr_interval(rq, p);
5307        task_rq_unlock(rq, p, &rf);
5308
5309        rcu_read_unlock();
5310        jiffies_to_timespec(time_slice, &t);
5311        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
5312        return retval;
5313
5314out_unlock:
5315        rcu_read_unlock();
5316        return retval;
5317}
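
/*
 * Editor's sketch (not part of the original file): reading the
 * SCHED_RR timeslice of the calling thread through the glibc wrapper.
 * Per the comment above, an all-zero timespec means the policy has no
 * fixed timeslice:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */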
5318
5319static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
5320
5321void sched_show_task(struct task_struct *p)
5322{
5323        unsigned long free = 0;
5324        int ppid;
5325        unsigned long state = p->state;
5326
5327        /* Make sure the string lines up properly with the number of task states: */
5328        BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1);
5329
5330        if (!try_get_task_stack(p))
5331                return;
5332        if (state)
5333                state = __ffs(state) + 1;
5334        printk(KERN_INFO "%-15.15s %c", p->comm,
5335                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
5336        if (state == TASK_RUNNING)
5337                printk(KERN_CONT "  running task    ");
5338#ifdef CONFIG_DEBUG_STACK_USAGE
5339        free = stack_not_used(p);
5340#endif
5341        ppid = 0;
5342        rcu_read_lock();
5343        if (pid_alive(p))
5344                ppid = task_pid_nr(rcu_dereference(p->real_parent));
5345        rcu_read_unlock();
5346        printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
5347                task_pid_nr(p), ppid,
5348                (unsigned long)task_thread_info(p)->flags);
5349
5350        print_worker_info(KERN_INFO, p);
5351        show_stack(p, NULL);
5352        put_task_stack(p);
5353}
5354
5355void show_state_filter(unsigned long state_filter)
5356{
5357        struct task_struct *g, *p;
5358
5359#if BITS_PER_LONG == 32
5360        printk(KERN_INFO
5361                "  task                PC stack   pid father\n");
5362#else
5363        printk(KERN_INFO
5364                "  task                        PC stack   pid father\n");
5365#endif
5366        rcu_read_lock();
5367        for_each_process_thread(g, p) {
5368                /*
5369                 * Reset the NMI-timeout: listing all tasks on a slow
5370                 * console might take a lot of time.
5371                 * Also, reset softlockup watchdogs on all CPUs, because
5372                 * another CPU might be blocked waiting for us to process
5373                 * an IPI.
5374                 */
5375                touch_nmi_watchdog();
5376                touch_all_softlockup_watchdogs();
5377                if (!state_filter || (p->state & state_filter))
5378                        sched_show_task(p);
5379        }
5380
5381#ifdef CONFIG_SCHED_DEBUG
5382        if (!state_filter)
5383                sysrq_sched_debug_show();
5384#endif
5385        rcu_read_unlock();
5386        /*
5387         * Only show locks if all tasks are dumped:
5388         */
5389        if (!state_filter)
5390                debug_show_all_locks();
5391}
5392
5393void init_idle_bootup_task(struct task_struct *idle)
5394{
5395        idle->sched_class = &idle_sched_class;
5396}
5397
5398/**
5399 * init_idle - set up an idle thread for a given CPU
5400 * @idle: task in question
5401 * @cpu: CPU the idle task belongs to
5402 *
5403 * NOTE: this function does not set the idle thread's NEED_RESCHED
5404 * flag, to make booting more robust.
5405 */
5406void init_idle(struct task_struct *idle, int cpu)
5407{
5408        struct rq *rq = cpu_rq(cpu);
5409        unsigned long flags;
5410
5411        raw_spin_lock_irqsave(&idle->pi_lock, flags);
5412        raw_spin_lock(&rq->lock);
5413
5414        __sched_fork(0, idle);
5415        idle->state = TASK_RUNNING;
5416        idle->se.exec_start = sched_clock();
5417        idle->flags |= PF_IDLE;
5418
5419        kasan_unpoison_task_stack(idle);
5420
5421#ifdef CONFIG_SMP
5422        /*
5423         * It's possible that init_idle() gets called multiple times on a task,
5424         * in that case do_set_cpus_allowed() will not do the right thing.
5425         *
5426         * And since this is boot we can forgo the serialization.
5427         */
5428        set_cpus_allowed_common(idle, cpumask_of(cpu));
5429#endif
5430        /*
5431         * We're having a chicken-and-egg problem: even though we are
5432         * holding rq->lock, the CPU isn't yet set to this CPU so the
5433         * lockdep check in task_group() will fail.
5434         *
5435         * Similar to the sched_fork() case. Alternatively we could
5436         * use task_rq_lock() here and obtain the other rq->lock.
5437         *
5438         * Silence PROVE_RCU.
5439         */
5440        rcu_read_lock();
5441        __set_task_cpu(idle, cpu);
5442        rcu_read_unlock();
5443
5444        rq->curr = rq->idle = idle;
5445        idle->on_rq = TASK_ON_RQ_QUEUED;
5446#ifdef CONFIG_SMP
5447        idle->on_cpu = 1;
5448#endif
5449        raw_spin_unlock(&rq->lock);
5450        raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
5451
5452        /* Set the preempt count _outside_ the spinlocks! */
5453        init_idle_preempt_count(idle, cpu);
5454
5455        /*
5456         * The idle tasks have their own, simple scheduling class:
5457         */
5458        idle->sched_class = &idle_sched_class;
5459        ftrace_graph_init_idle_task(idle, cpu);
5460        vtime_init_idle(idle, cpu);
5461#ifdef CONFIG_SMP
5462        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
5463#endif
5464}
5465
5466int cpuset_cpumask_can_shrink(const struct cpumask *cur,
5467                              const struct cpumask *trial)
5468{
5469        int ret = 1, trial_cpus;
5470        struct dl_bw *cur_dl_b;
5471        unsigned long flags;
5472
5473        if (!cpumask_weight(cur))
5474                return ret;
5475
5476        rcu_read_lock_sched();
5477        cur_dl_b = dl_bw_of(cpumask_any(cur));
5478        trial_cpus = cpumask_weight(trial);
5479
5480        raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
5481        if (cur_dl_b->bw != -1 &&
5482            cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
5483                ret = 0;
5484        raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
5485        rcu_read_unlock_sched();
5486
5487        return ret;
5488}
5489
5490int task_can_attach(struct task_struct *p,
5491                    const struct cpumask *cs_cpus_allowed)
5492{
5493        int ret = 0;
5494
5495        /*
5496         * Kthreads which disallow setaffinity shouldn't be moved
5497         * to a new cpuset; we don't want to change their CPU
5498         * affinity and isolating such threads by their set of
5499         * allowed nodes is unnecessary.  Thus, cpusets are not
5500         * applicable for such threads.  This prevents checking for
5501         * success of set_cpus_allowed_ptr() on all attached tasks
5502         * before cpus_allowed may be changed.
5503         */
5504        if (p->flags & PF_NO_SETAFFINITY) {
5505                ret = -EINVAL;
5506                goto out;
5507        }
5508
5509#ifdef CONFIG_SMP
5510        if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
5511                                              cs_cpus_allowed)) {
5512                unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
5513                                                        cs_cpus_allowed);
5514                struct dl_bw *dl_b;
5515                bool overflow;
5516                int cpus;
5517                unsigned long flags;
5518
5519                rcu_read_lock_sched();
5520                dl_b = dl_bw_of(dest_cpu);
5521                raw_spin_lock_irqsave(&dl_b->lock, flags);
5522                cpus = dl_bw_cpus(dest_cpu);
5523                overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
5524                if (overflow)
5525                        ret = -EBUSY;
5526                else {
5527                        /*
5528                         * We reserve space for this task in the destination
5529                         * root_domain, as we can't fail after this point.
5530                         * We will free resources in the source root_domain
5531                         * later on (see set_cpus_allowed_dl()).
5532                         */
5533                        __dl_add(dl_b, p->dl.dl_bw);
5534                }
5535                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
5536                rcu_read_unlock_sched();
5537
5538        }
5539#endif
5540out:
5541        return ret;
5542}
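
/*
 * Editor's note, a loose paraphrase of the admission test used above
 * (not the exact fixed-point expression): __dl_overflow() rejects the
 * attach when
 *
 *	total_bw + p->dl.dl_bw  >  dl_b->bw * nr_cpus_in_root_domain
 *
 * where dl_b->bw is the per-CPU limit derived from
 * sysctl_sched_rt_runtime / sysctl_sched_rt_period (0.95 with the
 * defaults near the top of this file). With those defaults and a
 * 4-CPU root domain, at most roughly 3.8 CPUs' worth of -deadline
 * utilization is admitted.
 */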
5543
5544#ifdef CONFIG_SMP
5545
5546bool sched_smp_initialized __read_mostly;
5547
5548#ifdef CONFIG_NUMA_BALANCING
5549/* Migrate current task p to target_cpu */
5550int migrate_task_to(struct task_struct *p, int target_cpu)
5551{
5552        struct migration_arg arg = { p, target_cpu };
5553        int curr_cpu = task_cpu(p);
5554
5555        if (curr_cpu == target_cpu)
5556                return 0;
5557
5558        if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
5559                return -EINVAL;
5560
5561        /* TODO: This is not properly updating schedstats */
5562
5563        trace_sched_move_numa(p, curr_cpu, target_cpu);
5564        return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
5565}
5566
5567/*
5568 * Requeue a task on a given node and accurately track the number of NUMA
5569 * tasks on the runqueues
5570 */
5571void sched_setnuma(struct task_struct *p, int nid)
5572{
5573        bool queued, running;
5574        struct rq_flags rf;
5575        struct rq *rq;
5576
5577        rq = task_rq_lock(p, &rf);
5578        queued = task_on_rq_queued(p);
5579        running = task_current(rq, p);
5580
5581        if (queued)
5582                dequeue_task(rq, p, DEQUEUE_SAVE);
5583        if (running)
5584                put_prev_task(rq, p);
5585
5586        p->numa_preferred_nid = nid;
5587
5588        if (queued)
5589                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
5590        if (running)
5591                set_curr_task(rq, p);
5592        task_rq_unlock(rq, p, &rf);
5593}
5594#endif /* CONFIG_NUMA_BALANCING */
5595
5596#ifdef CONFIG_HOTPLUG_CPU
5597/*
5598 * Ensure that the idle task is using init_mm right before its CPU goes
5599 * offline.
5600 */
5601void idle_task_exit(void)
5602{
5603        struct mm_struct *mm = current->active_mm;
5604
5605        BUG_ON(cpu_online(smp_processor_id()));
5606
5607        if (mm != &init_mm) {
5608                switch_mm(mm, &init_mm, current);
5609                finish_arch_post_lock_switch();
5610        }
5611        mmdrop(mm);
5612}
5613
5614/*
5615 * Since this CPU is going 'away' for a while, fold any nr_active delta
5616 * we might have. Assumes we're called after migrate_tasks() so that the
5617 * nr_active count is stable. We need to take the teardown thread which
5618 * is calling this into account, so we hand in adjust = 1 to the load
5619 * calculation.
5620 *
5621 * Also see the comment "Global load-average calculations".
5622 */
5623static void calc_load_migrate(struct rq *rq)
5624{
5625        long delta = calc_load_fold_active(rq, 1);
5626        if (delta)
5627                atomic_long_add(delta, &calc_load_tasks);
5628}
5629
5630static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
5631{
5632}
5633
5634static const struct sched_class fake_sched_class = {
5635        .put_prev_task = put_prev_task_fake,
5636};
5637
5638static struct task_struct fake_task = {
5639        /*
5640         * Avoid pull_{rt,dl}_task()
5641         */
5642        .prio = MAX_PRIO + 1,
5643        .sched_class = &fake_sched_class,
5644};
5645
5646/*
5647 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5648 * try_to_wake_up()->select_task_rq().
5649 *
5650 * Called with rq->lock held even though we're in stop_machine() and
5651 * there's no concurrency possible, we hold the required locks anyway
5652 * because of lock validation efforts.
5653 */
5654static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
5655{
5656        struct rq *rq = dead_rq;
5657        struct task_struct *next, *stop = rq->stop;
5658        struct rq_flags orf = *rf;
5659        int dest_cpu;
5660
5661        /*
5662         * Fudge the rq selection such that the below task selection loop
5663         * doesn't get stuck on the currently eligible stop task.
5664         *
5665         * We're currently inside stop_machine() and the rq is either stuck
5666         * in the stop_machine_cpu_stop() loop, or we're executing this code;
5667         * either way we should never end up calling schedule() until we're
5668         * done here.
5669         */
5670        rq->stop = NULL;
5671
5672        /*
5673                 * The put_prev_task() and pick_next_task() sched
5674                 * class methods both need an up-to-date
5675                 * value of rq->clock[_task].
5676         */
5677        update_rq_clock(rq);
5678
5679        for (;;) {
5680                /*
5681                 * There's this thread running, bail when that's the only
5682                 * remaining thread:
5683                 */
5684                if (rq->nr_running == 1)
5685                        break;
5686
5687                /*
5688                 * pick_next_task() assumes pinned rq->lock:
5689                 */
5690                next = pick_next_task(rq, &fake_task, rf);
5691                BUG_ON(!next);
5692                next->sched_class->put_prev_task(rq, next);
5693
5694                /*
5695                 * Rules for changing task_struct::cpus_allowed are holding
5696                 * both pi_lock and rq->lock, such that holding either
5697                 * stabilizes the mask.
5698                 *
5699                 * Dropping rq->lock is not quite as disastrous as it usually is
5700                 * because the CPU is !cpu_active at this point, which means
5701                 * load-balance will not interfere. Also, we're inside stop-machine.
5702                 */
5703                rq_unlock(rq, rf);
5704                raw_spin_lock(&next->pi_lock);
5705                rq_relock(rq, rf);
5706
5707                /*
5708                 * Since we're inside stop-machine, _nothing_ should have
5709                 * changed the task; WARN if weird stuff happened, because in
5710                 * that case the above rq->lock drop is a fail too.
5711                 */
5712                if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
5713                        raw_spin_unlock(&next->pi_lock);
5714                        continue;
5715                }
5716
5717                /* Find suitable destination for @next, with force if needed. */
5718                dest_cpu = select_fallback_rq(dead_rq->cpu, next);
5719                rq = __migrate_task(rq, rf, next, dest_cpu);
5720                if (rq != dead_rq) {
5721                        rq_unlock(rq, rf);
5722                        rq = dead_rq;
5723                        *rf = orf;
5724                        rq_relock(rq, rf);
5725                }
5726                raw_spin_unlock(&next->pi_lock);
5727        }
5728
5729        rq->stop = stop;
5730}
5731#endif /* CONFIG_HOTPLUG_CPU */
5732
5733void set_rq_online(struct rq *rq)
5734{
5735        if (!rq->online) {
5736                const struct sched_class *class;
5737
5738                cpumask_set_cpu(rq->cpu, rq->rd->online);
5739                rq->online = 1;
5740
5741                for_each_class(class) {
5742                        if (class->rq_online)
5743                                class->rq_online(rq);
5744                }
5745        }
5746}
5747
5748void set_rq_offline(struct rq *rq)
5749{
5750        if (rq->online) {
5751                const struct sched_class *class;
5752
5753                for_each_class(class) {
5754                        if (class->rq_offline)
5755                                class->rq_offline(rq);
5756                }
5757
5758                cpumask_clear_cpu(rq->cpu, rq->rd->online);
5759                rq->online = 0;
5760        }
5761}
5762
5763static void set_cpu_rq_start_time(unsigned int cpu)
5764{
5765        struct rq *rq = cpu_rq(cpu);
5766
5767        rq->age_stamp = sched_clock_cpu(cpu);
5768}
5769
5770/*
5771 * Used to mark the begin/end of suspend/resume:
5772 */
5773static int num_cpus_frozen;
5774
5775/*
5776 * Update cpusets according to cpu_active mask.  If cpusets are
5777 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
5778 * around partition_sched_domains().
5779 *
5780 * If we come here as part of a suspend/resume, don't touch cpusets because we
5781 * want to restore them to their original state upon resume anyway.
5782 */
5783static void cpuset_cpu_active(void)
5784{
5785        if (cpuhp_tasks_frozen) {
5786                /*
5787                 * num_cpus_frozen tracks how many CPUs are involved in the
5788                 * suspend/resume sequence. As long as this is not the last online
5789                 * operation in the resume sequence, just build a single sched
5790                 * domain, ignoring cpusets.
5791                 */
5792                num_cpus_frozen--;
5793                if (likely(num_cpus_frozen)) {
5794                        partition_sched_domains(1, NULL, NULL);
5795                        return;
5796                }
5797                /*
5798                 * This is the last CPU online operation. So fall through and
5799                 * restore the original sched domains by considering the
5800                 * cpuset configurations.
5801                 */
5802        }
5803        cpuset_update_active_cpus();
5804}
5805
5806static int cpuset_cpu_inactive(unsigned int cpu)
5807{
5808        unsigned long flags;
5809        struct dl_bw *dl_b;
5810        bool overflow;
5811        int cpus;
5812
5813        if (!cpuhp_tasks_frozen) {
5814                rcu_read_lock_sched();
5815                dl_b = dl_bw_of(cpu);
5816
5817                raw_spin_lock_irqsave(&dl_b->lock, flags);
5818                cpus = dl_bw_cpus(cpu);
5819                overflow = __dl_overflow(dl_b, cpus, 0, 0);
5820                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
5821
5822                rcu_read_unlock_sched();
5823
5824                if (overflow)
5825                        return -EBUSY;
5826                cpuset_update_active_cpus();
5827        } else {
5828                num_cpus_frozen++;
5829                partition_sched_domains(1, NULL, NULL);
5830        }
5831        return 0;
5832}
5833
5834int sched_cpu_activate(unsigned int cpu)
5835{
5836        struct rq *rq = cpu_rq(cpu);
5837        struct rq_flags rf;
5838
5839        set_cpu_active(cpu, true);
5840
5841        if (sched_smp_initialized) {
5842                sched_domains_numa_masks_set(cpu);
5843                cpuset_cpu_active();
5844        }
5845
5846        /*
5847         * Put the rq online, if not already. This happens:
5848         *
5849         * 1) In the early boot process, because we build the real domains
5850         *    after all CPUs have been brought up.
5851         *
5852         * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
5853         *    domains.
5854         */
5855        rq_lock_irqsave(rq, &rf);
5856        if (rq->rd) {
5857                BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5858                set_rq_online(rq);
5859        }
5860        rq_unlock_irqrestore(rq, &rf);
5861
5862        update_max_interval();
5863
5864        return 0;
5865}
5866
5867int sched_cpu_deactivate(unsigned int cpu)
5868{
5869        int ret;
5870
5871        set_cpu_active(cpu, false);
5872        /*
5873         * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
5874         * users of this state to go away such that all new such users will
5875         * observe it.
5876         *
5877         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
5878         * not imply sync_sched(), so wait for both.
5879         *
5880         * Do the sync before parking smpboot threads to take care of the RCU boost case.
5881         */
5882        if (IS_ENABLED(CONFIG_PREEMPT))
5883                synchronize_rcu_mult(call_rcu, call_rcu_sched);
5884        else
5885                synchronize_rcu();
5886
5887        if (!sched_smp_initialized)
5888                return 0;
5889
5890        ret = cpuset_cpu_inactive(cpu);
5891        if (ret) {
5892                set_cpu_active(cpu, true);
5893                return ret;
5894        }
5895        sched_domains_numa_masks_clear(cpu);
5896        return 0;
5897}
5898
5899static void sched_rq_cpu_starting(unsigned int cpu)
5900{
5901        struct rq *rq = cpu_rq(cpu);
5902
5903        rq->calc_load_update = calc_load_update;
5904        update_max_interval();
5905}
5906
5907int sched_cpu_starting(unsigned int cpu)
5908{
5909        set_cpu_rq_start_time(cpu);
5910        sched_rq_cpu_starting(cpu);
5911        return 0;
5912}
5913
5914#ifdef CONFIG_HOTPLUG_CPU
5915int sched_cpu_dying(unsigned int cpu)
5916{
5917        struct rq *rq = cpu_rq(cpu);
5918        struct rq_flags rf;
5919
5920        /* Handle pending wakeups and then migrate everything off */
5921        sched_ttwu_pending();
5922
5923        rq_lock_irqsave(rq, &rf);
5924        if (rq->rd) {
5925                BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5926                set_rq_offline(rq);
5927        }
5928        migrate_tasks(rq, &rf);
5929        BUG_ON(rq->nr_running != 1);
5930        rq_unlock_irqrestore(rq, &rf);
5931
5932        calc_load_migrate(rq);
5933        update_max_interval();
5934        nohz_balance_exit_idle(cpu);
5935        hrtick_clear(rq);
5936        return 0;
5937}
5938#endif
5939
5940#ifdef CONFIG_SCHED_SMT
5941DEFINE_STATIC_KEY_FALSE(sched_smt_present);
5942
5943static void sched_init_smt(void)
5944{
5945        /*
5946         * We've enumerated all CPUs and will assume that if any CPU
5947         * has SMT siblings, CPU0 will too.
5948         */
5949        if (cpumask_weight(cpu_smt_mask(0)) > 1)
5950                static_branch_enable(&sched_smt_present);
5951}
5952#else
5953static inline void sched_init_smt(void) { }
5954#endif
5955
5956void __init sched_init_smp(void)
5957{
5958        cpumask_var_t non_isolated_cpus;
5959
5960        alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
5961        alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5962
5963        sched_init_numa();
5964
5965        /*
5966         * There's no userspace yet to cause hotplug operations; hence all the
5967         * CPU masks are stable and all blatant races in the below code cannot
5968         * happen.
5969         */
5970        mutex_lock(&sched_domains_mutex);
5971        init_sched_domains(cpu_active_mask);
5972        cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
5973        if (cpumask_empty(non_isolated_cpus))
5974                cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
5975        mutex_unlock(&sched_domains_mutex);
5976
5977        /* Move init over to a non-isolated CPU */
5978        if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5979                BUG();
5980        sched_init_granularity();
5981        free_cpumask_var(non_isolated_cpus);
5982
5983        init_sched_rt_class();
5984        init_sched_dl_class();
5985
5986        sched_init_smt();
5987        sched_clock_init_late();
5988
5989        sched_smp_initialized = true;
5990}
5991
5992static int __init migration_init(void)
5993{
5994        sched_rq_cpu_starting(smp_processor_id());
5995        return 0;
5996}
5997early_initcall(migration_init);
5998
5999#else
6000void __init sched_init_smp(void)
6001{
6002        sched_init_granularity();
6003        sched_clock_init_late();
6004}
6005#endif /* CONFIG_SMP */
6006
6007int in_sched_functions(unsigned long addr)
6008{
6009        return in_lock_functions(addr) ||
6010                (addr >= (unsigned long)__sched_text_start
6011                && addr < (unsigned long)__sched_text_end);
6012}
6013
6014#ifdef CONFIG_CGROUP_SCHED
6015/*
6016 * Default task group.
6017 * Every task in the system belongs to this group at bootup.
6018 */
6019struct task_group root_task_group;
6020LIST_HEAD(task_groups);
6021
6022/* Cacheline aligned slab cache for task_group */
6023static struct kmem_cache *task_group_cache __read_mostly;
6024#endif
6025
6026DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
6027DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
6028
6029#define WAIT_TABLE_BITS 8
6030#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
6031static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
6032
6033wait_queue_head_t *bit_waitqueue(void *word, int bit)
6034{
6035        const int shift = BITS_PER_LONG == 32 ? 5 : 6;
6036        unsigned long val = (unsigned long)word << shift | bit;
6037
6038        return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
6039}
6040EXPORT_SYMBOL(bit_waitqueue);
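
/*
 * Editor's sketch (not part of the original file): callers normally
 * reach this hashed table through the bit-wait helpers rather than
 * calling bit_waitqueue() directly. flags and MY_FLAG_BIT below are
 * illustrative only:
 *
 *	Waiter:
 *		wait_on_bit(&flags, MY_FLAG_BIT, TASK_UNINTERRUPTIBLE);
 *
 *	Waker:
 *		clear_bit(MY_FLAG_BIT, &flags);
 *		smp_mb__after_atomic();
 *		wake_up_bit(&flags, MY_FLAG_BIT);
 */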
6041
6042void __init sched_init(void)
6043{
6044        int i, j;
6045        unsigned long alloc_size = 0, ptr;
6046
6047        sched_clock_init();
6048
6049        for (i = 0; i < WAIT_TABLE_SIZE; i++)
6050                init_waitqueue_head(bit_wait_table + i);
6051
6052#ifdef CONFIG_FAIR_GROUP_SCHED
6053        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6054#endif
6055#ifdef CONFIG_RT_GROUP_SCHED
6056        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6057#endif
6058        if (alloc_size) {
6059                ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
6060
6061#ifdef CONFIG_FAIR_GROUP_SCHED
6062                root_task_group.se = (struct sched_entity **)ptr;
6063                ptr += nr_cpu_ids * sizeof(void **);
6064
6065                root_task_group.cfs_rq = (struct cfs_rq **)ptr;
6066                ptr += nr_cpu_ids * sizeof(void **);
6067
6068#endif /* CONFIG_FAIR_GROUP_SCHED */
6069#ifdef CONFIG_RT_GROUP_SCHED
6070                root_task_group.rt_se = (struct sched_rt_entity **)ptr;
6071                ptr += nr_cpu_ids * sizeof(void **);
6072
6073                root_task_group.rt_rq = (struct rt_rq **)ptr;
6074                ptr += nr_cpu_ids * sizeof(void **);
6075
6076#endif /* CONFIG_RT_GROUP_SCHED */
6077        }
6078#ifdef CONFIG_CPUMASK_OFFSTACK
6079        for_each_possible_cpu(i) {
6080                per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
6081                        cpumask_size(), GFP_KERNEL, cpu_to_node(i));
6082                per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
6083                        cpumask_size(), GFP_KERNEL, cpu_to_node(i));
6084        }
6085#endif /* CONFIG_CPUMASK_OFFSTACK */
6086
6087        init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
6088        init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());
6089
6090#ifdef CONFIG_SMP
6091        init_defrootdomain();
6092#endif
6093
6094#ifdef CONFIG_RT_GROUP_SCHED
6095        init_rt_bandwidth(&root_task_group.rt_bandwidth,
6096                        global_rt_period(), global_rt_runtime());
6097#endif /* CONFIG_RT_GROUP_SCHED */
6098
6099#ifdef CONFIG_CGROUP_SCHED
6100        task_group_cache = KMEM_CACHE(task_group, 0);
6101
6102        list_add(&root_task_group.list, &task_groups);
6103        INIT_LIST_HEAD(&root_task_group.children);
6104        INIT_LIST_HEAD(&root_task_group.siblings);
6105        autogroup_init(&init_task);
6106#endif /* CONFIG_CGROUP_SCHED */
6107
6108        for_each_possible_cpu(i) {
6109                struct rq *rq;
6110
6111                rq = cpu_rq(i);
6112                raw_spin_lock_init(&rq->lock);
6113                rq->nr_running = 0;
6114                rq->calc_load_active = 0;
6115                rq->calc_load_update = jiffies + LOAD_FREQ;
6116                init_cfs_rq(&rq->cfs);
6117                init_rt_rq(&rq->rt);
6118                init_dl_rq(&rq->dl);
6119#ifdef CONFIG_FAIR_GROUP_SCHED
6120                root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6121                INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
6122                rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
6123                /*
6124                 * How much CPU bandwidth does root_task_group get?
6125                 *
6126                 * In the case of task-groups formed through the cgroup filesystem, it
6127                 * gets 100% of the CPU resources in the system. This overall
6128                 * system CPU resource is divided among the tasks of
6129                 * root_task_group and its child task-groups in a fair manner,
6130                 * based on each entity's (task or task-group's) weight
6131                 * (se->load.weight).
6132                 *
6133                 * In other words, if root_task_group has 10 tasks of weight
6134                 * 1024 and two child groups A0 and A1 (of weight 1024 each),
6135                 * then A0's share of the CPU resource is:
6136                 *
6137                 *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
6138                 *
6139                 * We achieve this by letting root_task_group's tasks sit
6140                 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
6141                 */
6142                init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
6143                init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
6144#endif /* CONFIG_FAIR_GROUP_SCHED */
6145
6146                rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
6147#ifdef CONFIG_RT_GROUP_SCHED
6148                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
6149#endif
6150
6151                for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
6152                        rq->cpu_load[j] = 0;
6153
6154#ifdef CONFIG_SMP
6155                rq->sd = NULL;
6156                rq->rd = NULL;
6157                rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
6158                rq->balance_callback = NULL;
6159                rq->active_balance = 0;
6160                rq->next_balance = jiffies;
6161                rq->push_cpu = 0;
6162                rq->cpu = i;
6163                rq->online = 0;
6164                rq->idle_stamp = 0;
6165                rq->avg_idle = 2*sysctl_sched_migration_cost;
6166                rq->max_idle_balance_cost = sysctl_sched_migration_cost;
6167
6168                INIT_LIST_HEAD(&rq->cfs_tasks);
6169
6170                rq_attach_root(rq, &def_root_domain);
6171#ifdef CONFIG_NO_HZ_COMMON
6172                rq->last_load_update_tick = jiffies;
6173                rq->nohz_flags = 0;
6174#endif
6175#ifdef CONFIG_NO_HZ_FULL
6176                rq->last_sched_tick = 0;
6177#endif
6178#endif /* CONFIG_SMP */
6179                init_rq_hrtick(rq);
6180                atomic_set(&rq->nr_iowait, 0);
6181        }
6182
6183        set_load_weight(&init_task);
6184
6185        /*
6186         * The boot idle thread does lazy MMU switching as well:
6187         */
6188        mmgrab(&init_mm);
6189        enter_lazy_tlb(&init_mm, current);
6190
6191        /*
6192         * Make us the idle thread. Technically, schedule() should not be
6193         * called from this thread; however, somewhere below it might be.
6194         * Because we are the idle thread, we just pick up running again
6195         * when this runqueue becomes "idle".
6196         */
6197        init_idle(current, smp_processor_id());
6198
6199        calc_load_update = jiffies + LOAD_FREQ;
6200
6201#ifdef CONFIG_SMP
6202        zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
6203        /* May be allocated at isolcpus cmdline parse time */
6204        if (cpu_isolated_map == NULL)
6205                zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
6206        idle_thread_set_boot_cpu();
6207        set_cpu_rq_start_time(smp_processor_id());
6208#endif
6209        init_sched_fair_class();
6210
6211        init_schedstats();
6212
6213        scheduler_running = 1;
6214}
6215
6216#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
6217static inline int preempt_count_equals(int preempt_offset)
6218{
6219        int nested = preempt_count() + rcu_preempt_depth();
6220
6221        return (nested == preempt_offset);
6222}
6223
6224void __might_sleep(const char *file, int line, int preempt_offset)
6225{
6226        /*
6227         * Blocking primitives will set (and therefore destroy) current->state.
6228         * Since we will exit with TASK_RUNNING, make sure we enter with it;
6229         * otherwise we will destroy state.
6230         */
6231        WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
6232                        "do not call blocking ops when !TASK_RUNNING; "
6233                        "state=%lx set at [<%p>] %pS\n",
6234                        current->state,
6235                        (void *)current->task_state_change,
6236                        (void *)current->task_state_change);
6237
6238        ___might_sleep(file, line, preempt_offset);
6239}
6240EXPORT_SYMBOL(__might_sleep);
6241
6242void ___might_sleep(const char *file, int line, int preempt_offset)
6243{
6244        /* Ratelimiting timestamp: */
6245        static unsigned long prev_jiffy;
6246
6247        unsigned long preempt_disable_ip;
6248
6249        /* WARN_ON_ONCE() by default, no rate limit required: */
6250        rcu_sleep_check();
6251
6252        if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
6253             !is_idle_task(current)) ||
6254            system_state != SYSTEM_RUNNING || oops_in_progress)
6255                return;
6256        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6257                return;
6258        prev_jiffy = jiffies;
6259
6260        /* Save this before calling printk(), since that will clobber it: */
6261        preempt_disable_ip = get_preempt_disable_ip(current);
6262
6263        printk(KERN_ERR
6264                "BUG: sleeping function called from invalid context at %s:%d\n",
6265                        file, line);
6266        printk(KERN_ERR
6267                "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
6268                        in_atomic(), irqs_disabled(),
6269                        current->pid, current->comm);
6270
6271        if (task_stack_end_corrupted(current))
6272                printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
6273
6274        debug_show_held_locks(current);
6275        if (irqs_disabled())
6276                print_irqtrace_events(current);
6277        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
6278            && !preempt_count_equals(preempt_offset)) {
6279                pr_err("Preemption disabled at:");
6280                print_ip_sym(preempt_disable_ip);
6281                pr_cont("\n");
6282        }
6283        dump_stack();
6284        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
6285}
6286EXPORT_SYMBOL(___might_sleep);
6287#endif
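
/*
 * Editor's illustration (not part of the original file) of what the
 * checks above catch, sleeping in atomic context. With
 * CONFIG_DEBUG_ATOMIC_SLEEP, a sequence like the following
 * (illustrative names) triggers the "BUG: sleeping function called
 * from invalid context" report, because mutex_lock() may sleep and is
 * annotated with might_sleep():
 *
 *	spin_lock(&my_spinlock);
 *	mutex_lock(&my_mutex);
 *	...
 *	mutex_unlock(&my_mutex);
 *	spin_unlock(&my_spinlock);
 */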
6288
6289#ifdef CONFIG_MAGIC_SYSRQ
6290void normalize_rt_tasks(void)
6291{
6292        struct task_struct *g, *p;
6293        struct sched_attr attr = {
6294                .sched_policy = SCHED_NORMAL,
6295        };
6296
6297        read_lock(&tasklist_lock);
6298        for_each_process_thread(g, p) {
6299                /*
6300                 * Only normalize user tasks:
6301                 */
6302                if (p->flags & PF_KTHREAD)
6303                        continue;
6304
6305                p->se.exec_start = 0;
6306                schedstat_set(p->se.statistics.wait_start,  0);
6307                schedstat_set(p->se.statistics.sleep_start, 0);
6308                schedstat_set(p->se.statistics.block_start, 0);
6309
6310                if (!dl_task(p) && !rt_task(p)) {
6311                        /*
6312                         * Renice negative nice level userspace
6313                         * tasks back to 0:
6314                         */
6315                        if (task_nice(p) < 0)
6316                                set_user_nice(p, 0);
6317                        continue;
6318                }
6319
6320                __sched_setscheduler(p, &attr, false, false);
6321        }
6322        read_unlock(&tasklist_lock);
6323}
6324
6325#endif /* CONFIG_MAGIC_SYSRQ */
6326
6327#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
6328/*
6329 * These functions are only useful for the IA64 MCA handling, or kdb.
6330 *
6331 * They can only be called when the whole system has been
6332 * stopped - every CPU needs to be quiescent, and no scheduling
6333 * activity can take place. Using them for anything else would
6334 * be a serious bug, and as a result, they aren't even visible
6335 * under any other configuration.
6336 */
6337
6338/**
6339 * curr_task - return the current task for a given CPU.
6340 * @cpu: the processor in question.
6341 *
6342 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6343 *
6344 * Return: The current task for @cpu.
6345 */
6346struct task_struct *curr_task(int cpu)
6347{
6348        return cpu_curr(cpu);
6349}
6350
6351#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
6352
6353#ifdef CONFIG_IA64
6354/**
6355 * set_curr_task - set the current task for a given CPU.
6356 * @cpu: the processor in question.
6357 * @p: the task pointer to set.
6358 *
6359 * Description: This function must only be used when non-maskable interrupts
6360 * are serviced on a separate stack. It allows the architecture to switch the
6361 * notion of the current task on a CPU in a non-blocking manner. This function
6362 * must be called with all CPUs synchronized and interrupts disabled, and the
6363 * caller must save the original value of the current task (see
6364 * curr_task() above) and restore that value before re-enabling interrupts and
6365 * restarting the system.
6366 *
6367 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6368 */
6369void ia64_set_curr_task(int cpu, struct task_struct *p)
6370{
6371        cpu_curr(cpu) = p;
6372}
6373
6374#endif
6375
6376#ifdef CONFIG_CGROUP_SCHED
6377/* task_group_lock serializes the addition/removal of task groups */
6378static DEFINE_SPINLOCK(task_group_lock);
6379
6380static void sched_free_group(struct task_group *tg)
6381{
6382        free_fair_sched_group(tg);
6383        free_rt_sched_group(tg);
6384        autogroup_free(tg);
6385        kmem_cache_free(task_group_cache, tg);
6386}
6387
6388/* allocate runqueue etc for a new task group */
6389struct task_group *sched_create_group(struct task_group *parent)
6390{
6391        struct task_group *tg;
6392
6393        tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
6394        if (!tg)
6395                return ERR_PTR(-ENOMEM);
6396
6397        if (!alloc_fair_sched_group(tg, parent))
6398                goto err;
6399
6400        if (!alloc_rt_sched_group(tg, parent))
6401                goto err;
6402
6403        return tg;
6404
6405err:
6406        sched_free_group(tg);
6407        return ERR_PTR(-ENOMEM);
6408}
6409
6410void sched_online_group(struct task_group *tg, struct task_group *parent)
6411{
6412        unsigned long flags;
6413
6414        spin_lock_irqsave(&task_group_lock, flags);
6415        list_add_rcu(&tg->list, &task_groups);
6416
6417        /* Root should already exist: */
6418        WARN_ON(!parent);
6419
6420        tg->parent = parent;
6421        INIT_LIST_HEAD(&tg->children);
6422        list_add_rcu(&tg->siblings, &parent->children);
6423        spin_unlock_irqrestore(&task_group_lock, flags);
6424
6425        online_fair_sched_group(tg);
6426}
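
/*
 * Editor's sketch (not part of the original file) of what drives this
 * path in practice: creating a group through the v1 cpu cgroup
 * filesystem. The paths below assume the controller is mounted at
 * /sys/fs/cgroup/cpu; the mkdir reaches sched_create_group() /
 * sched_online_group() via the cgroup core, and writing cpu.shares
 * adjusts the group's weight:
 *
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	mkdir("/sys/fs/cgroup/cpu/mygroup", 0755);
 *
 *	int fd = open("/sys/fs/cgroup/cpu/mygroup/cpu.shares", O_WRONLY);
 *	write(fd, "512", 3);
 *	close(fd);
 */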
6427
6428/* rcu callback to free various structures associated with a task group */
6429static void sched_free_group_rcu(struct rcu_head *rhp)
6430{
6431        /* Now it should be safe to free those cfs_rqs: */
6432        sched_free_group(container_of(rhp, struct task_group, rcu));
6433}
6434
6435void sched_destroy_group(struct task_group *tg)
6436{
6437        /* Wait for possible concurrent references to cfs_rqs to complete: */
6438        call_rcu(&tg->rcu, sched_free_group_rcu);
6439}
6440
6441void sched_offline_group(struct task_group *tg)
6442{
6443        unsigned long flags;
6444
6445        /* End participation in shares distribution: */
6446        unregister_fair_sched_group(tg);
6447
6448        spin_lock_irqsave(&task_group_lock, flags);
6449        list_del_rcu(&tg->list);
6450        list_del_rcu(&tg->siblings);
6451        spin_unlock_irqrestore(&task_group_lock, flags);
6452}
6453
6454static void sched_change_group(struct task_struct *tsk, int type)
6455{
6456        struct task_group *tg;
6457
6458        /*
6459         * All callers are synchronized by task_rq_lock(); we do not use RCU
6460         * which is pointless here. Thus, we pass "true" to task_css_check()
6461         * to prevent lockdep warnings.
6462         */
6463        tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
6464                          struct task_group, css);
6465        tg = autogroup_task_group(tsk, tg);
6466        tsk->sched_task_group = tg;
6467
6468#ifdef CONFIG_FAIR_GROUP_SCHED
6469        if (tsk->sched_class->task_change_group)
6470                tsk->sched_class->task_change_group(tsk, type);
6471        else
6472#endif
6473                set_task_rq(tsk, task_cpu(tsk));
6474}
6475
6476/*
6477 * Change task's runqueue when it moves between groups.
6478 *
6479 * The caller of this function should have put the task in its new group by
6480 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
6481 * its new group.
6482 */
6483void sched_move_task(struct task_struct *tsk)
6484{
6485        int queued, running, queue_flags =
6486                DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
6487        struct rq_flags rf;
6488        struct rq *rq;
6489
6490        rq = task_rq_lock(tsk, &rf);
6491        update_rq_clock(rq);
6492
6493        running = task_current(rq, tsk);
6494        queued = task_on_rq_queued(tsk);
6495
6496        if (queued)
6497                dequeue_task(rq, tsk, queue_flags);
6498        if (running)
6499                put_prev_task(rq, tsk);
6500
6501        sched_change_group(tsk, TASK_MOVE_GROUP);
6502
6503        if (queued)
6504                enqueue_task(rq, tsk, queue_flags);
6505        if (running)
6506                set_curr_task(rq, tsk);
6507
6508        task_rq_unlock(rq, tsk, &rf);
6509}
6510#endif /* CONFIG_CGROUP_SCHED */
6511
6512#ifdef CONFIG_RT_GROUP_SCHED
6513/*
6514 * Ensure that the real time constraints are schedulable.
6515 */
6516static DEFINE_MUTEX(rt_constraints_mutex);
6517
6518/* Must be called with tasklist_lock held */
6519static inline int tg_has_rt_tasks(struct task_group *tg)
6520{
6521        struct task_struct *g, *p;
6522
6523        /*
6524         * Autogroups do not have RT tasks; see autogroup_create().
6525         */
6526        if (task_group_is_autogroup(tg))
6527                return 0;
6528
6529        for_each_process_thread(g, p) {
6530                if (rt_task(p) && task_group(p) == tg)
6531                        return 1;
6532        }
6533
6534        return 0;
6535}
6536
6537struct rt_schedulable_data {
6538        struct task_group *tg;
6539        u64 rt_period;
6540        u64 rt_runtime;
6541};
6542
6543static int tg_rt_schedulable(struct task_group *tg, void *data)
6544{
6545        struct rt_schedulable_data *d = data;
6546        struct task_group *child;
6547        unsigned long total, sum = 0;
6548        u64 period, runtime;
6549
6550        period = ktime_to_ns(tg->rt_bandwidth.rt_period);
6551        runtime = tg->rt_bandwidth.rt_runtime;
6552
6553        if (tg == d->tg) {
6554                period = d->rt_period;
6555                runtime = d->rt_runtime;
6556        }
6557
6558        /*
6559         * Cannot have more runtime than the period.
6560         */
6561        if (runtime > period && runtime != RUNTIME_INF)
6562                return -EINVAL;
6563
6564        /*
6565         * Ensure we don't starve existing RT tasks.
6566         */
6567        if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
6568                return -EBUSY;
6569
6570        total = to_ratio(period, runtime);
6571
6572        /*
6573         * Nobody can have more than the global setting allows.
6574         */
6575        if (total > to_ratio(global_rt_period(), global_rt_runtime()))
6576                return -EINVAL;
6577
6578        /*
6579         * The sum of our children's runtime should not exceed our own.
6580         */
6581        list_for_each_entry_rcu(child, &tg->children, siblings) {
6582                period = ktime_to_ns(child->rt_bandwidth.rt_period);
6583                runtime = child->rt_bandwidth.rt_runtime;
6584
6585                if (child == d->tg) {
6586                        period = d->rt_period;
6587                        runtime = d->rt_runtime;
6588                }
6589
6590                sum += to_ratio(period, runtime);
6591        }
6592
6593        if (sum > total)
6594                return -EINVAL;
6595
6596        return 0;
6597}
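
/*
 * A rough worked example of the check above (assuming to_ratio() returns
 * the 20-bit fixed-point ratio runtime << 20 / period, as defined elsewhere
 * in this file): with the default global period of 1s and runtime of 0.95s,
 * total = to_ratio(1e9, 0.95e9) ~= 996147, i.e. 95% of 2^20.  A child group
 * granted 0.4s of RT runtime per 1s period contributes ~419430; two such
 * children (~838860) still fit, while a third would push the sum to
 * ~1258290 > 996147 and the write would fail with -EINVAL.
 */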
6598
6599static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
6600{
6601        int ret;
6602
6603        struct rt_schedulable_data data = {
6604                .tg = tg,
6605                .rt_period = period,
6606                .rt_runtime = runtime,
6607        };
6608
6609        rcu_read_lock();
6610        ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
6611        rcu_read_unlock();
6612
6613        return ret;
6614}
6615
6616static int tg_set_rt_bandwidth(struct task_group *tg,
6617                u64 rt_period, u64 rt_runtime)
6618{
6619        int i, err = 0;
6620
6621        /*
6622         * Disallowing the root group RT runtime is BAD; it would prevent the
6623         * kernel from creating (and/or operating) RT threads.
6624         */
6625        if (tg == &root_task_group && rt_runtime == 0)
6626                return -EINVAL;
6627
6628        /* A zero period does not make any sense. */
6629        if (rt_period == 0)
6630                return -EINVAL;
6631
6632        mutex_lock(&rt_constraints_mutex);
6633        read_lock(&tasklist_lock);
6634        err = __rt_schedulable(tg, rt_period, rt_runtime);
6635        if (err)
6636                goto unlock;
6637
6638        raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
6639        tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
6640        tg->rt_bandwidth.rt_runtime = rt_runtime;
6641
6642        for_each_possible_cpu(i) {
6643                struct rt_rq *rt_rq = tg->rt_rq[i];
6644
6645                raw_spin_lock(&rt_rq->rt_runtime_lock);
6646                rt_rq->rt_runtime = rt_runtime;
6647                raw_spin_unlock(&rt_rq->rt_runtime_lock);
6648        }
6649        raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
6650unlock:
6651        read_unlock(&tasklist_lock);
6652        mutex_unlock(&rt_constraints_mutex);
6653
6654        return err;
6655}
6656
6657static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
6658{
6659        u64 rt_runtime, rt_period;
6660
6661        rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
6662        rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
6663        if (rt_runtime_us < 0)
6664                rt_runtime = RUNTIME_INF;
6665
6666        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
6667}
6668
6669static long sched_group_rt_runtime(struct task_group *tg)
6670{
6671        u64 rt_runtime_us;
6672
6673        if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
6674                return -1;
6675
6676        rt_runtime_us = tg->rt_bandwidth.rt_runtime;
6677        do_div(rt_runtime_us, NSEC_PER_USEC);
6678        return rt_runtime_us;
6679}
6680
6681static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
6682{
6683        u64 rt_runtime, rt_period;
6684
6685        rt_period = rt_period_us * NSEC_PER_USEC;
6686        rt_runtime = tg->rt_bandwidth.rt_runtime;
6687
6688        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
6689}
6690
6691static long sched_group_rt_period(struct task_group *tg)
6692{
6693        u64 rt_period_us;
6694
6695        rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
6696        do_div(rt_period_us, NSEC_PER_USEC);
6697        return rt_period_us;
6698}
6699#endif /* CONFIG_RT_GROUP_SCHED */
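
/*
 * The setters/getters above back the per-group cpu.rt_runtime_us and
 * cpu.rt_period_us cgroup files (see cpu_files[] below).  A minimal
 * stand-alone userspace sketch, assuming a cgroup-v1 cpu controller
 * mounted at /sys/fs/cgroup/cpu and an existing group "rtgrp" (both the
 * mount point and the group name are illustrative):
 */
#include <stdio.h>

/* Write one decimal value into a cgroup control file, 0 on success. */
static int write_cpu_file(const char *group, const char *file, long long val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/fs/cgroup/cpu/%s/%s", group, file);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%lld\n", val);
	return fclose(f);
}

int main(void)
{
	/* Allow tasks in "rtgrp" up to 0.3s of RT time per 1s period. */
	write_cpu_file("rtgrp", "cpu.rt_period_us", 1000000);
	write_cpu_file("rtgrp", "cpu.rt_runtime_us", 300000);
	/* Writing -1 to cpu.rt_runtime_us maps to RUNTIME_INF (no limit). */
	return 0;
}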
6700
6701#ifdef CONFIG_RT_GROUP_SCHED
6702static int sched_rt_global_constraints(void)
6703{
6704        int ret = 0;
6705
6706        mutex_lock(&rt_constraints_mutex);
6707        read_lock(&tasklist_lock);
6708        ret = __rt_schedulable(NULL, 0, 0);
6709        read_unlock(&tasklist_lock);
6710        mutex_unlock(&rt_constraints_mutex);
6711
6712        return ret;
6713}
6714
6715static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
6716{
6717        /* Don't accept realtime tasks when there is no way for them to run */
6718        if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
6719                return 0;
6720
6721        return 1;
6722}
6723
6724#else /* !CONFIG_RT_GROUP_SCHED */
6725static int sched_rt_global_constraints(void)
6726{
6727        unsigned long flags;
6728        int i;
6729
6730        raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
6731        for_each_possible_cpu(i) {
6732                struct rt_rq *rt_rq = &cpu_rq(i)->rt;
6733
6734                raw_spin_lock(&rt_rq->rt_runtime_lock);
6735                rt_rq->rt_runtime = global_rt_runtime();
6736                raw_spin_unlock(&rt_rq->rt_runtime_lock);
6737        }
6738        raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
6739
6740        return 0;
6741}
6742#endif /* CONFIG_RT_GROUP_SCHED */
6743
6744static int sched_dl_global_validate(void)
6745{
6746        u64 runtime = global_rt_runtime();
6747        u64 period = global_rt_period();
6748        u64 new_bw = to_ratio(period, runtime);
6749        struct dl_bw *dl_b;
6750        int cpu, ret = 0;
6751        unsigned long flags;
6752
6753        /*
6754         * Here we want to check that the bandwidth is not being set to a
6755         * value smaller than the bandwidth currently allocated in any of
6756         * the root_domains.
6757         *
6758         * FIXME: Cycling over all the CPUs is overkill, but simpler than
6759         * cycling over root_domains... Discussion on different/better
6760         * solutions is welcome!
6761         */
6762        for_each_possible_cpu(cpu) {
6763                rcu_read_lock_sched();
6764                dl_b = dl_bw_of(cpu);
6765
6766                raw_spin_lock_irqsave(&dl_b->lock, flags);
6767                if (new_bw < dl_b->total_bw)
6768                        ret = -EBUSY;
6769                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
6770
6771                rcu_read_unlock_sched();
6772
6773                if (ret)
6774                        break;
6775        }
6776
6777        return ret;
6778}
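
/*
 * With the default 1s period and 0.95s runtime, new_bw above is ~996147,
 * i.e. 95% of the 20-bit fixed-point unit.  The function refuses (-EBUSY)
 * to shrink that below the bandwidth already reserved by admitted
 * SCHED_DEADLINE tasks in any root_domain; e.g. a task admitted with a
 * 250ms runtime per 1s period holds ~262144 (25%) of that budget.
 */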
6779
6780static void sched_dl_do_global(void)
6781{
6782        u64 new_bw = -1;
6783        struct dl_bw *dl_b;
6784        int cpu;
6785        unsigned long flags;
6786
6787        def_dl_bandwidth.dl_period = global_rt_period();
6788        def_dl_bandwidth.dl_runtime = global_rt_runtime();
6789
6790        if (global_rt_runtime() != RUNTIME_INF)
6791                new_bw = to_ratio(global_rt_period(), global_rt_runtime());
6792
6793        /*
6794         * FIXME: As above...
6795         */
6796        for_each_possible_cpu(cpu) {
6797                rcu_read_lock_sched();
6798                dl_b = dl_bw_of(cpu);
6799
6800                raw_spin_lock_irqsave(&dl_b->lock, flags);
6801                dl_b->bw = new_bw;
6802                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
6803
6804                rcu_read_unlock_sched();
6805        }
6806}
6807
6808static int sched_rt_global_validate(void)
6809{
6810        if (sysctl_sched_rt_period <= 0)
6811                return -EINVAL;
6812
6813        if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
6814                (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
6815                return -EINVAL;
6816
6817        return 0;
6818}
6819
6820static void sched_rt_do_global(void)
6821{
6822        def_rt_bandwidth.rt_runtime = global_rt_runtime();
6823        def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
6824}
6825
6826int sched_rt_handler(struct ctl_table *table, int write,
6827                void __user *buffer, size_t *lenp,
6828                loff_t *ppos)
6829{
6830        int old_period, old_runtime;
6831        static DEFINE_MUTEX(mutex);
6832        int ret;
6833
6834        mutex_lock(&mutex);
6835        old_period = sysctl_sched_rt_period;
6836        old_runtime = sysctl_sched_rt_runtime;
6837
6838        ret = proc_dointvec(table, write, buffer, lenp, ppos);
6839
6840        if (!ret && write) {
6841                ret = sched_rt_global_validate();
6842                if (ret)
6843                        goto undo;
6844
6845                ret = sched_dl_global_validate();
6846                if (ret)
6847                        goto undo;
6848
6849                ret = sched_rt_global_constraints();
6850                if (ret)
6851                        goto undo;
6852
6853                sched_rt_do_global();
6854                sched_dl_do_global();
6855        }
6856        if (0) {
6857undo:
6858                sysctl_sched_rt_period = old_period;
6859                sysctl_sched_rt_runtime = old_runtime;
6860        }
6861        mutex_unlock(&mutex);
6862
6863        return ret;
6864}
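
/*
 * This handler backs /proc/sys/kernel/sched_rt_period_us and
 * /proc/sys/kernel/sched_rt_runtime_us.  With the usual defaults (1000000
 * and 950000) RT tasks may consume at most 95% of every second; writing -1
 * to sched_rt_runtime_us maps to RUNTIME_INF and disables the throttling,
 * while an inconsistent pair (e.g. runtime > period) is rejected above and
 * both values are rolled back.
 */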
6865
6866int sched_rr_handler(struct ctl_table *table, int write,
6867                void __user *buffer, size_t *lenp,
6868                loff_t *ppos)
6869{
6870        int ret;
6871        static DEFINE_MUTEX(mutex);
6872
6873        mutex_lock(&mutex);
6874        ret = proc_dointvec(table, write, buffer, lenp, ppos);
6875        /*
6876         * The sysctl value is in milliseconds; internally we keep jiffies.
6877         * Also, writing zero resets the timeslice to the default:
6878         */
6879        if (!ret && write) {
6880                sched_rr_timeslice =
6881                        sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
6882                        msecs_to_jiffies(sysctl_sched_rr_timeslice);
6883        }
6884        mutex_unlock(&mutex);
6885        return ret;
6886}
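
/*
 * This handler backs /proc/sys/kernel/sched_rr_timeslice_ms.  For example,
 * writing 30 gives SCHED_RR tasks a 30ms timeslice (stored internally as
 * msecs_to_jiffies(30)), while writing 0 or a negative value restores the
 * RR_TIMESLICE default of 100ms.
 */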
6887
6888#ifdef CONFIG_CGROUP_SCHED
6889
6890static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
6891{
6892        return css ? container_of(css, struct task_group, css) : NULL;
6893}
6894
6895static struct cgroup_subsys_state *
6896cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6897{
6898        struct task_group *parent = css_tg(parent_css);
6899        struct task_group *tg;
6900
6901        if (!parent) {
6902                /* This is early initialization for the top cgroup */
6903                return &root_task_group.css;
6904        }
6905
6906        tg = sched_create_group(parent);
6907        if (IS_ERR(tg))
6908                return ERR_PTR(-ENOMEM);
6909
6910        return &tg->css;
6911}
6912
6913/* Expose task group only after completing cgroup initialization */
6914static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
6915{
6916        struct task_group *tg = css_tg(css);
6917        struct task_group *parent = css_tg(css->parent);
6918
6919        if (parent)
6920                sched_online_group(tg, parent);
6921        return 0;
6922}
6923
6924static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
6925{
6926        struct task_group *tg = css_tg(css);
6927
6928        sched_offline_group(tg);
6929}
6930
6931static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
6932{
6933        struct task_group *tg = css_tg(css);
6934
6935        /*
6936         * Relies on the RCU grace period between css_released() and this.
6937         */
6938        sched_free_group(tg);
6939}
6940
6941/*
6942 * This is called before wake_up_new_task(), so we really only have to
6943 * set the task's group bits; all the other stuff does not apply.
6944 */
6945static void cpu_cgroup_fork(struct task_struct *task)
6946{
6947        struct rq_flags rf;
6948        struct rq *rq;
6949
6950        rq = task_rq_lock(task, &rf);
6951
6952        update_rq_clock(rq);
6953        sched_change_group(task, TASK_SET_GROUP);
6954
6955        task_rq_unlock(rq, task, &rf);
6956}
6957
6958static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
6959{
6960        struct task_struct *task;
6961        struct cgroup_subsys_state *css;
6962        int ret = 0;
6963
6964        cgroup_taskset_for_each(task, css, tset) {
6965#ifdef CONFIG_RT_GROUP_SCHED
6966                if (!sched_rt_can_attach(css_tg(css), task))
6967                        return -EINVAL;
6968#else
6969                /* We don't support RT-tasks being in separate groups */
6970                if (task->sched_class != &fair_sched_class)
6971                        return -EINVAL;
6972#endif
6973                /*
6974                 * Serialize against wake_up_new_task() such that if it's
6975                 * running, we're sure to observe its full state.
6976                 */
6977                raw_spin_lock_irq(&task->pi_lock);
6978                /*
6979                 * Avoid calling sched_move_task() before wake_up_new_task()
6980                 * has happened. That would lead to problems with PELT, because
6981                 * the move wants to detach+attach while we're not attached yet.
6982                 */
6983                if (task->state == TASK_NEW)
6984                        ret = -EINVAL;
6985                raw_spin_unlock_irq(&task->pi_lock);
6986
6987                if (ret)
6988                        break;
6989        }
6990        return ret;
6991}
6992
6993static void cpu_cgroup_attach(struct cgroup_taskset *tset)
6994{
6995        struct task_struct *task;
6996        struct cgroup_subsys_state *css;
6997
6998        cgroup_taskset_for_each(task, css, tset)
6999                sched_move_task(task);
7000}
7001
7002#ifdef CONFIG_FAIR_GROUP_SCHED
7003static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
7004                                struct cftype *cftype, u64 shareval)
7005{
7006        return sched_group_set_shares(css_tg(css), scale_load(shareval));
7007}
7008
7009static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
7010                               struct cftype *cft)
7011{
7012        struct task_group *tg = css_tg(css);
7013
7014        return (u64) scale_load_down(tg->shares);
7015}
7016
7017#ifdef CONFIG_CFS_BANDWIDTH
7018static DEFINE_MUTEX(cfs_constraints_mutex);
7019
7020const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
7021const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
7022
7023static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7024
7025static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7026{
7027        int i, ret = 0, runtime_enabled, runtime_was_enabled;
7028        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7029
7030        if (tg == &root_task_group)
7031                return -EINVAL;
7032
7033        /*
7034         * Ensure we have at least some amount of bandwidth every period.
7035         * This is to prevent reaching a state of large arrears when throttled
7036         * via entity_tick(), resulting in prolonged exit starvation.
7037         */
7038        if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
7039                return -EINVAL;
7040
7041        /*
7042         * Likewise, bound things on the other side by preventing insane quota
7043         * periods.  This also allows us to normalize in computing quota
7044         * feasibility.
7045         */
7046        if (period > max_cfs_quota_period)
7047                return -EINVAL;
7048
7049        /*
7050         * Prevent race between setting of cfs_rq->runtime_enabled and
7051         * unthrottle_offline_cfs_rqs().
7052         */
7053        get_online_cpus();
7054        mutex_lock(&cfs_constraints_mutex);
7055        ret = __cfs_schedulable(tg, period, quota);
7056        if (ret)
7057                goto out_unlock;
7058
7059        runtime_enabled = quota != RUNTIME_INF;
7060        runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
7061        /*
7062         * If we need to toggle cfs_bandwidth_used, off->on must occur
7063         * before making related changes, and on->off must occur afterwards
7064         */
7065        if (runtime_enabled && !runtime_was_enabled)
7066                cfs_bandwidth_usage_inc();
7067        raw_spin_lock_irq(&cfs_b->lock);
7068        cfs_b->period = ns_to_ktime(period);
7069        cfs_b->quota = quota;
7070
7071        __refill_cfs_bandwidth_runtime(cfs_b);
7072
7073        /* Restart the period timer (if active) to handle new period expiry: */
7074        if (runtime_enabled)
7075                start_cfs_bandwidth(cfs_b);
7076
7077        raw_spin_unlock_irq(&cfs_b->lock);
7078
7079        for_each_online_cpu(i) {
7080                struct cfs_rq *cfs_rq = tg->cfs_rq[i];
7081                struct rq *rq = cfs_rq->rq;
7082                struct rq_flags rf;
7083
7084                rq_lock_irq(rq, &rf);
7085                cfs_rq->runtime_enabled = runtime_enabled;
7086                cfs_rq->runtime_remaining = 0;
7087
7088                if (cfs_rq->throttled)
7089                        unthrottle_cfs_rq(cfs_rq);
7090                rq_unlock_irq(rq, &rf);
7091        }
7092        if (runtime_was_enabled && !runtime_enabled)
7093                cfs_bandwidth_usage_dec();
7094out_unlock:
7095        mutex_unlock(&cfs_constraints_mutex);
7096        put_online_cpus();
7097
7098        return ret;
7099}
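
/*
 * Concretely, with min_cfs_quota_period = 1ms and max_cfs_quota_period = 1s:
 * quota = 50ms with period = 100ms is accepted (half a CPU per period),
 * whereas quota = 500us, period = 500us or period = 2s are all rejected
 * with -EINVAL, as is any attempt to set a quota on the root_task_group.
 */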
7100
7101int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
7102{
7103        u64 quota, period;
7104
7105        period = ktime_to_ns(tg->cfs_bandwidth.period);
7106        if (cfs_quota_us < 0)
7107                quota = RUNTIME_INF;
7108        else
7109                quota = (u64)cfs_quota_us * NSEC_PER_USEC;
7110
7111        return tg_set_cfs_bandwidth(tg, period, quota);
7112}
7113
7114long tg_get_cfs_quota(struct task_group *tg)
7115{
7116        u64 quota_us;
7117
7118        if (tg->cfs_bandwidth.quota == RUNTIME_INF)
7119                return -1;
7120
7121        quota_us = tg->cfs_bandwidth.quota;
7122        do_div(quota_us, NSEC_PER_USEC);
7123
7124        return quota_us;
7125}
7126
7127int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
7128{
7129        u64 quota, period;
7130
7131        period = (u64)cfs_period_us * NSEC_PER_USEC;
7132        quota = tg->cfs_bandwidth.quota;
7133
7134        return tg_set_cfs_bandwidth(tg, period, quota);
7135}
7136
7137long tg_get_cfs_period(struct task_group *tg)
7138{
7139        u64 cfs_period_us;
7140
7141        cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
7142        do_div(cfs_period_us, NSEC_PER_USEC);
7143
7144        return cfs_period_us;
7145}
7146
7147static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
7148                                  struct cftype *cft)
7149{
7150        return tg_get_cfs_quota(css_tg(css));
7151}
7152
7153static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
7154                                   struct cftype *cftype, s64 cfs_quota_us)
7155{
7156        return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
7157}
7158
7159static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
7160                                   struct cftype *cft)
7161{
7162        return tg_get_cfs_period(css_tg(css));
7163}
7164
7165static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
7166                                    struct cftype *cftype, u64 cfs_period_us)
7167{
7168        return tg_set_cfs_period(css_tg(css), cfs_period_us);
7169}
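
/*
 * The pair of files above expresses a bandwidth as quota/period.  A
 * stand-alone sketch of that arithmetic (the helper and values below are
 * purely illustrative; the kernel only stores and enforces the two numbers):
 */
#include <stdio.h>

/* How many CPUs' worth of runtime a quota/period pair grants. */
static double cfs_cpus(long quota_us, long period_us)
{
	if (quota_us < 0)	/* cpu.cfs_quota_us == -1: unlimited */
		return -1.0;
	return (double)quota_us / (double)period_us;
}

int main(void)
{
	printf("%.2f\n", cfs_cpus(50000, 100000));	/* 0.50: half a CPU */
	printf("%.2f\n", cfs_cpus(200000, 100000));	/* 2.00: two CPUs   */
	printf("%.2f\n", cfs_cpus(-1, 100000));		/* -1.00: unlimited */
	return 0;
}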
7170
7171struct cfs_schedulable_data {
7172        struct task_group *tg;
7173        u64 period, quota;
7174};
7175
7176/*
7177 * Normalize group quota/period to be quota/max_period.
7178 * Note: units are usecs.
7179 */
7180static u64 normalize_cfs_quota(struct task_group *tg,
7181                               struct cfs_schedulable_data *d)
7182{
7183        u64 quota, period;
7184
7185        if (tg == d->tg) {
7186                period = d->period;
7187                quota = d->quota;
7188        } else {
7189                period = tg_get_cfs_period(tg);
7190                quota = tg_get_cfs_quota(tg);
7191        }
7192
7193        /* note: these should typically be equivalent */
7194        if (quota == RUNTIME_INF || quota == -1)
7195                return RUNTIME_INF;
7196
7197        return to_ratio(period, quota);
7198}
7199
7200static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
7201{
7202        struct cfs_schedulable_data *d = data;
7203        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7204        s64 quota = 0, parent_quota = -1;
7205
7206        if (!tg->parent) {
7207                quota = RUNTIME_INF;
7208        } else {
7209                struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
7210
7211                quota = normalize_cfs_quota(tg, d);
7212                parent_quota = parent_b->hierarchical_quota;
7213
7214                /*
7215                 * Ensure max(child_quota) <= parent_quota, inherit when no
7216                 * limit is set:
7217                 */
7218                if (quota == RUNTIME_INF)
7219                        quota = parent_quota;
7220                else if (parent_quota != RUNTIME_INF && quota > parent_quota)
7221                        return -EINVAL;
7222        }
7223        cfs_b->hierarchical_quota = quota;
7224
7225        return 0;
7226}
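
/*
 * Worked example: a parent with quota 100ms per 100ms period normalizes to
 * one full CPU of bandwidth.  A child asking for 50ms/100ms normalizes to
 * half of that and is accepted; a child asking for 200ms/100ms exceeds the
 * parent's hierarchical_quota and gets -EINVAL; a child with no limit
 * (RUNTIME_INF) simply inherits the parent's quota.
 */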
7227
7228static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
7229{
7230        int ret;
7231        struct cfs_schedulable_data data = {
7232                .tg = tg,
7233                .period = period,
7234                .quota = quota,
7235        };
7236
7237        if (quota != RUNTIME_INF) {
7238                do_div(data.period, NSEC_PER_USEC);
7239                do_div(data.quota, NSEC_PER_USEC);
7240        }
7241
7242        rcu_read_lock();
7243        ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
7244        rcu_read_unlock();
7245
7246        return ret;
7247}
7248
7249static int cpu_stats_show(struct seq_file *sf, void *v)
7250{
7251        struct task_group *tg = css_tg(seq_css(sf));
7252        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7253
7254        seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
7255        seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
7256        seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
7257
7258        return 0;
7259}
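
/*
 * The resulting cpu.stat file reads, for example:
 *
 *	nr_periods 1000
 *	nr_throttled 30
 *	throttled_time 45000000
 *
 * meaning the group hit its quota in 30 of 1000 enforcement periods and
 * spent a total of 45ms (the value is in nanoseconds) throttled.
 */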
7260#endif /* CONFIG_CFS_BANDWIDTH */
7261#endif /* CONFIG_FAIR_GROUP_SCHED */
7262
7263#ifdef CONFIG_RT_GROUP_SCHED
7264static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
7265                                struct cftype *cft, s64 val)
7266{
7267        return sched_group_set_rt_runtime(css_tg(css), val);
7268}
7269
7270static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
7271                               struct cftype *cft)
7272{
7273        return sched_group_rt_runtime(css_tg(css));
7274}
7275
7276static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
7277                                    struct cftype *cftype, u64 rt_period_us)
7278{
7279        return sched_group_set_rt_period(css_tg(css), rt_period_us);
7280}
7281
7282static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
7283                                   struct cftype *cft)
7284{
7285        return sched_group_rt_period(css_tg(css));
7286}
7287#endif /* CONFIG_RT_GROUP_SCHED */
7288
7289static struct cftype cpu_files[] = {
7290#ifdef CONFIG_FAIR_GROUP_SCHED
7291        {
7292                .name = "shares",
7293                .read_u64 = cpu_shares_read_u64,
7294                .write_u64 = cpu_shares_write_u64,
7295        },
7296#endif
7297#ifdef CONFIG_CFS_BANDWIDTH
7298        {
7299                .name = "cfs_quota_us",
7300                .read_s64 = cpu_cfs_quota_read_s64,
7301                .write_s64 = cpu_cfs_quota_write_s64,
7302        },
7303        {
7304                .name = "cfs_period_us",
7305                .read_u64 = cpu_cfs_period_read_u64,
7306                .write_u64 = cpu_cfs_period_write_u64,
7307        },
7308        {
7309                .name = "stat",
7310                .seq_show = cpu_stats_show,
7311        },
7312#endif
7313#ifdef CONFIG_RT_GROUP_SCHED
7314        {
7315                .name = "rt_runtime_us",
7316                .read_s64 = cpu_rt_runtime_read,
7317                .write_s64 = cpu_rt_runtime_write,
7318        },
7319        {
7320                .name = "rt_period_us",
7321                .read_u64 = cpu_rt_period_read_uint,
7322                .write_u64 = cpu_rt_period_write_uint,
7323        },
7324#endif
7325        { }     /* Terminate */
7326};
7327
7328struct cgroup_subsys cpu_cgrp_subsys = {
7329        .css_alloc      = cpu_cgroup_css_alloc,
7330        .css_online     = cpu_cgroup_css_online,
7331        .css_released   = cpu_cgroup_css_released,
7332        .css_free       = cpu_cgroup_css_free,
7333        .fork           = cpu_cgroup_fork,
7334        .can_attach     = cpu_cgroup_can_attach,
7335        .attach         = cpu_cgroup_attach,
7336        .legacy_cftypes = cpu_files,
7337        .early_init     = true,
7338};
7339
7340#endif  /* CONFIG_CGROUP_SCHED */
7341
7342void dump_cpu_task(int cpu)
7343{
7344        pr_info("Task dump for CPU %d:\n", cpu);
7345        sched_show_task(cpu_curr(cpu));
7346}
7347
7348/*
7349 * Nice levels are multiplicative, with a gentle 10% change for every
7350 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
7351 * nice 1, it will get ~10% less CPU time than another CPU-bound task
7352 * that remained on nice 0.
7353 *
7354 * The "10% effect" is relative and cumulative: from _any_ nice level,
7355 * if you go up 1 level it's -10% CPU usage; if you go down 1 level
7356 * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25:
7357 * if a task goes up by ~10% and another task goes down by ~10%, then
7358 * the relative distance between them is ~25%.)
7359 */
7360const int sched_prio_to_weight[40] = {
7361 /* -20 */     88761,     71755,     56483,     46273,     36291,
7362 /* -15 */     29154,     23254,     18705,     14949,     11916,
7363 /* -10 */      9548,      7620,      6100,      4904,      3906,
7364 /*  -5 */      3121,      2501,      1991,      1586,      1277,
7365 /*   0 */      1024,       820,       655,       526,       423,
7366 /*   5 */       335,       272,       215,       172,       137,
7367 /*  10 */       110,        87,        70,        56,        45,
7368 /*  15 */        36,        29,        23,        18,        15,
7369};
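
/*
 * Worked example: two CPU-bound tasks at nice 0 and nice 1 have weights
 * 1024 and 820, so they receive 1024/1844 ~= 55.5% and 820/1844 ~= 44.5%
 * of the CPU -- a ~25% relative gap (1024/820 ~= 1.25).  Five nice levels
 * apart (1024 vs. 335) the split becomes roughly 75% / 25%.
 */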
7370
7371/*
7372 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
7373 *
7374 * In cases where the weight does not change often, we can use the
7375 * precalculated inverse to speed up arithmetic by turning divisions
7376 * into multiplications:
7377 */
7378const u32 sched_prio_to_wmult[40] = {
7379 /* -20 */     48388,     59856,     76040,     92818,    118348,
7380 /* -15 */    147320,    184698,    229616,    287308,    360437,
7381 /* -10 */    449829,    563644,    704093,    875809,   1099582,
7382 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
7383 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
7384 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
7385 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
7386 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
7387};
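
/*
 * These inverses let the scheduler turn "divide by weight" into a multiply
 * and a 32-bit shift.  A minimal stand-alone sketch of that identity, using
 * the nice-0 entries above (the delta value is arbitrary):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t delta = 3000000;	/* e.g. 3ms of runtime, in ns */
	uint64_t inv_weight = 4194304;	/* sched_prio_to_wmult[20] = 2^32/1024 */

	/* Both lines print 2929: dividing by 1024 == multiply + shift. */
	printf("%llu\n", (unsigned long long)(delta / 1024));
	printf("%llu\n", (unsigned long long)((delta * inv_weight) >> 32));
	return 0;
}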
7388