linux/kernel/sched/core.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  kernel/sched/core.c
   4 *
   5 *  Core kernel scheduler code and related syscalls
   6 *
   7 *  Copyright (C) 1991-2002  Linus Torvalds
   8 */
   9#define CREATE_TRACE_POINTS
  10#include <trace/events/sched.h>
  11#undef CREATE_TRACE_POINTS
  12
  13#include "sched.h"
  14
  15#include <linux/nospec.h>
  16
  17#include <linux/kcov.h>
  18#include <linux/scs.h>
  19
  20#include <asm/switch_to.h>
  21#include <asm/tlb.h>
  22
  23#include "../workqueue_internal.h"
  24#include "../../fs/io-wq.h"
  25#include "../smpboot.h"
  26
  27#include "pelt.h"
  28#include "smp.h"
  29
  30/*
  31 * Export tracepoints that act as a bare tracehook (ie: have no trace event
  32 * associated with them) to allow external modules to probe them.
  33 */
  34EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
  35EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
  36EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
  37EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
  38EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
  39EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
  40EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
  41EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
  42EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
  43EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
  44
  45DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
  46
  47#ifdef CONFIG_SCHED_DEBUG
  48/*
  49 * Debugging: various feature bits
  50 *
  51 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
  52 * sysctl_sched_features, defined in sched.h, to allow constant propagation
  53 * at compile time and compiler optimization based on the feature defaults.
  54 */
  55#define SCHED_FEAT(name, enabled)       \
  56        (1UL << __SCHED_FEAT_##name) * enabled |
  57const_debug unsigned int sysctl_sched_features =
  58#include "features.h"
  59        0;
  60#undef SCHED_FEAT
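
    /*
     * For illustration: with the SCHED_FEAT() definition above, a features.h
     * entry such as SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) expands to
     *
     *     (1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
     *
     * so including features.h builds a single OR-expression over all feature
     * bits, terminated by the trailing 0 above, which yields the default
     * value of sysctl_sched_features.
     */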
  61
  62/*
  63 * Print a warning if need_resched is set for the given duration (if
  64 * LATENCY_WARN is enabled).
  65 *
  66 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
  67 * per boot.
  68 */
  69__read_mostly int sysctl_resched_latency_warn_ms = 100;
  70__read_mostly int sysctl_resched_latency_warn_once = 1;
  71#endif /* CONFIG_SCHED_DEBUG */
  72
  73/*
  74 * Number of tasks to iterate in a single balance run.
  75 * Limited because this is done with IRQs disabled.
  76 */
  77const_debug unsigned int sysctl_sched_nr_migrate = 32;
  78
  79/*
  80 * period over which we measure -rt task CPU usage in us.
  81 * default: 1s
  82 */
  83unsigned int sysctl_sched_rt_period = 1000000;
  84
  85__read_mostly int scheduler_running;
  86
  87/*
  88 * part of the period that we allow rt tasks to run in us.
  89 * default: 0.95s
  90 */
  91int sysctl_sched_rt_runtime = 950000;
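
    /*
     * These two defaults are exposed as /proc/sys/kernel/sched_rt_period_us
     * and /proc/sys/kernel/sched_rt_runtime_us: with 1000000/950000, realtime
     * tasks may consume at most 0.95s of CPU time in every 1s period. For
     * example:
     *
     *     # echo 980000 > /proc/sys/kernel/sched_rt_runtime_us
     *
     * raises the budget to 0.98s, and writing -1 disables RT throttling
     * entirely.
     */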
  92
  93
  94/*
  95 * Serialization rules:
  96 *
  97 * Lock order:
  98 *
  99 *   p->pi_lock
 100 *     rq->lock
 101 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 102 *
 103 *  rq1->lock
 104 *    rq2->lock  where: rq1 < rq2
 105 *
 106 * Regular state:
 107 *
 108 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 109 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 110 * always looks at the local rq data structures to find the most eligible task
 111 * to run next.
 112 *
 113 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 114 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 115 * the local CPU to avoid bouncing the runqueue state around [ see
 116 * ttwu_queue_wakelist() ]
 117 *
 118 * Task wakeup, specifically wakeups that involve migration, are horribly
 119 * complicated to avoid having to take two rq->locks.
 120 *
 121 * Special state:
 122 *
 123 * System-calls and anything external will use task_rq_lock() which acquires
 124 * both p->pi_lock and rq->lock. As a consequence the state they change is
 125 * stable while holding either lock:
 126 *
 127 *  - sched_setaffinity()/
 128 *    set_cpus_allowed_ptr():   p->cpus_ptr, p->nr_cpus_allowed
 129 *  - set_user_nice():          p->se.load, p->*prio
 130 *  - __sched_setscheduler():   p->sched_class, p->policy, p->*prio,
 131 *                              p->se.load, p->rt_priority,
 132 *                              p->dl.dl_{runtime, deadline, period, flags, bw, density}
 133 *  - sched_setnuma():          p->numa_preferred_nid
 134 *  - sched_move_task()/
 135 *    cpu_cgroup_fork():        p->sched_task_group
 136 *  - uclamp_update_active()    p->uclamp*
 137 *
 138 * p->state <- TASK_*:
 139 *
 140 *   is changed locklessly using set_current_state(), __set_current_state() or
 141 *   set_special_state(), see their respective comments, or by
 142 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 143 *   concurrent self.
 144 *
 145 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 146 *
 147 *   is set by activate_task() and cleared by deactivate_task(), under
 148 *   rq->lock. Non-zero indicates the task is runnable, the special
 149 *   ON_RQ_MIGRATING state is used for migration without holding both
 150 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 151 *
 152 * p->on_cpu <- { 0, 1 }:
 153 *
 154 *   is set by prepare_task() and cleared by finish_task() such that it will be
 155 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 156 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 157 *
 158 *   [ The astute reader will observe that it is possible for two tasks on one
 159 *     CPU to have ->on_cpu = 1 at the same time. ]
 160 *
 161 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 162 *
 163 *  - Don't call set_task_cpu() on a blocked task:
 164 *
 165 *    We don't care what CPU we're not running on, this simplifies hotplug,
 166 *    the CPU assignment of blocked tasks isn't required to be valid.
 167 *
 168 *  - for try_to_wake_up(), called under p->pi_lock:
 169 *
 170 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 171 *
 172 *  - for migration called under rq->lock:
 173 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 174 *
 175 *    o move_queued_task()
 176 *    o detach_task()
 177 *
 178 *  - for migration called under double_rq_lock():
 179 *
 180 *    o __migrate_swap_task()
 181 *    o push_rt_task() / pull_rt_task()
 182 *    o push_dl_task() / pull_dl_task()
 183 *    o dl_task_offline_migration()
 184 *
 185 */
 186
 187/*
 188 * __task_rq_lock - lock the rq @p resides on.
 189 */
 190struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 191        __acquires(rq->lock)
 192{
 193        struct rq *rq;
 194
 195        lockdep_assert_held(&p->pi_lock);
 196
 197        for (;;) {
 198                rq = task_rq(p);
 199                raw_spin_lock(&rq->lock);
 200                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
 201                        rq_pin_lock(rq, rf);
 202                        return rq;
 203                }
 204                raw_spin_unlock(&rq->lock);
 205
 206                while (unlikely(task_on_rq_migrating(p)))
 207                        cpu_relax();
 208        }
 209}
 210
 211/*
 212 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 213 */
 214struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 215        __acquires(p->pi_lock)
 216        __acquires(rq->lock)
 217{
 218        struct rq *rq;
 219
 220        for (;;) {
 221                raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
 222                rq = task_rq(p);
 223                raw_spin_lock(&rq->lock);
 224                /*
 225                 *      move_queued_task()              task_rq_lock()
 226                 *
 227                 *      ACQUIRE (rq->lock)
 228                 *      [S] ->on_rq = MIGRATING         [L] rq = task_rq()
 229                 *      WMB (__set_task_cpu())          ACQUIRE (rq->lock);
 230                 *      [S] ->cpu = new_cpu             [L] task_rq()
 231                 *                                      [L] ->on_rq
 232                 *      RELEASE (rq->lock)
 233                 *
 234                 * If we observe the old CPU in task_rq_lock(), the acquire of
 235                 * the old rq->lock will fully serialize against the stores.
 236                 *
 237                 * If we observe the new CPU in task_rq_lock(), the address
 238                 * dependency headed by '[L] rq = task_rq()' and the acquire
 239                 * will pair with the WMB to ensure we then also see migrating.
 240                 */
 241                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
 242                        rq_pin_lock(rq, rf);
 243                        return rq;
 244                }
 245                raw_spin_unlock(&rq->lock);
 246                raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 247
 248                while (unlikely(task_on_rq_migrating(p)))
 249                        cpu_relax();
 250        }
 251}
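
    /*
     * Illustrative use of the helpers above (a sketch, not code from this
     * file): external code that needs @p's scheduling state to stay stable
     * typically does
     *
     *     struct rq_flags rf;
     *     struct rq *rq = task_rq_lock(p, &rf);
     *
     *     ... read/modify p->policy, p->prio, p->cpus_ptr, ... ...
     *
     *     task_rq_unlock(rq, p, &rf);
     *
     * which holds both p->pi_lock and the owning rq->lock for the duration,
     * per the serialization rules documented above.
     */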
 252
 253/*
 254 * RQ-clock updating methods:
 255 */
 256
 257static void update_rq_clock_task(struct rq *rq, s64 delta)
 258{
 259/*
 260 * In theory, the compiler should just see 0 here, and optimize out the call
 261 * to sched_rt_avg_update. But I don't trust it...
 262 */
 263        s64 __maybe_unused steal = 0, irq_delta = 0;
 264
 265#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 266        irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
 267
 268        /*
 269         * Since irq_time is only updated on {soft,}irq_exit, we might run into
 270         * this case when a previous update_rq_clock() happened inside a
 271         * {soft,}irq region.
 272         *
 273         * When this happens, we stop ->clock_task and only update the
 274         * prev_irq_time stamp to account for the part that fit, so that a next
 275         * update will consume the rest. This ensures ->clock_task is
 276         * monotonic.
 277         *
 278         * It does however cause some slight misattribution of {soft,}irq
 279         * time; a more accurate solution would be to update the irq_time using
 280         * the current rq->clock timestamp, except that would require using
 281         * atomic ops.
 282         */
 283        if (irq_delta > delta)
 284                irq_delta = delta;
 285
 286        rq->prev_irq_time += irq_delta;
 287        delta -= irq_delta;
 288#endif
 289#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
 290        if (static_key_false((&paravirt_steal_rq_enabled))) {
 291                steal = paravirt_steal_clock(cpu_of(rq));
 292                steal -= rq->prev_steal_time_rq;
 293
 294                if (unlikely(steal > delta))
 295                        steal = delta;
 296
 297                rq->prev_steal_time_rq += steal;
 298                delta -= steal;
 299        }
 300#endif
 301
 302        rq->clock_task += delta;
 303
 304#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 305        if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
 306                update_irq_load_avg(rq, irq_delta + steal);
 307#endif
 308        update_rq_clock_pelt(rq, delta);
 309}
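
    /*
     * Worked example for the clamping above: if 5ms of wall clock passed
     * (delta = 5000000 ns) but irq_time advanced by 6ms since the last
     * update, irq_delta is clamped to 5ms, clock_task does not advance this
     * round, and the remaining 1ms of IRQ time is charged against a later
     * delta. This keeps clock_task monotonic at the cost of the slight
     * misattribution mentioned above.
     */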
 310
 311void update_rq_clock(struct rq *rq)
 312{
 313        s64 delta;
 314
 315        lockdep_assert_held(&rq->lock);
 316
 317        if (rq->clock_update_flags & RQCF_ACT_SKIP)
 318                return;
 319
 320#ifdef CONFIG_SCHED_DEBUG
 321        if (sched_feat(WARN_DOUBLE_CLOCK))
 322                SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
 323        rq->clock_update_flags |= RQCF_UPDATED;
 324#endif
 325
 326        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
 327        if (delta < 0)
 328                return;
 329        rq->clock += delta;
 330        update_rq_clock_task(rq, delta);
 331}
 332
 333#ifdef CONFIG_SCHED_HRTICK
 334/*
 335 * Use HR-timers to deliver accurate preemption points.
 336 */
 337
 338static void hrtick_clear(struct rq *rq)
 339{
 340        if (hrtimer_active(&rq->hrtick_timer))
 341                hrtimer_cancel(&rq->hrtick_timer);
 342}
 343
 344/*
 345 * High-resolution timer tick.
 346 * Runs from hardirq context with interrupts disabled.
 347 */
 348static enum hrtimer_restart hrtick(struct hrtimer *timer)
 349{
 350        struct rq *rq = container_of(timer, struct rq, hrtick_timer);
 351        struct rq_flags rf;
 352
 353        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 354
 355        rq_lock(rq, &rf);
 356        update_rq_clock(rq);
 357        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
 358        rq_unlock(rq, &rf);
 359
 360        return HRTIMER_NORESTART;
 361}
 362
 363#ifdef CONFIG_SMP
 364
 365static void __hrtick_restart(struct rq *rq)
 366{
 367        struct hrtimer *timer = &rq->hrtick_timer;
 368        ktime_t time = rq->hrtick_time;
 369
 370        hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
 371}
 372
 373/*
 374 * called from hardirq (IPI) context
 375 */
 376static void __hrtick_start(void *arg)
 377{
 378        struct rq *rq = arg;
 379        struct rq_flags rf;
 380
 381        rq_lock(rq, &rf);
 382        __hrtick_restart(rq);
 383        rq_unlock(rq, &rf);
 384}
 385
 386/*
 387 * Called to set the hrtick timer state.
 388 *
 389 * called with rq->lock held and irqs disabled
 390 */
 391void hrtick_start(struct rq *rq, u64 delay)
 392{
 393        struct hrtimer *timer = &rq->hrtick_timer;
 394        s64 delta;
 395
 396        /*
 397         * Don't schedule slices shorter than 10000ns, that just
 398         * doesn't make sense and can cause timer DoS.
 399         */
 400        delta = max_t(s64, delay, 10000LL);
 401        rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
 402
 403        if (rq == this_rq())
 404                __hrtick_restart(rq);
 405        else
 406                smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
 407}
 408
 409#else
 410/*
 411 * Called to set the hrtick timer state.
 412 *
 413 * called with rq->lock held and irqs disabled
 414 */
 415void hrtick_start(struct rq *rq, u64 delay)
 416{
 417        /*
 418         * Don't schedule slices shorter than 10000ns, that just
 419         * doesn't make sense. Rely on vruntime for fairness.
 420         */
 421        delay = max_t(u64, delay, 10000LL);
 422        hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
 423                      HRTIMER_MODE_REL_PINNED_HARD);
 424}
 425
 426#endif /* CONFIG_SMP */
 427
 428static void hrtick_rq_init(struct rq *rq)
 429{
 430#ifdef CONFIG_SMP
 431        INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
 432#endif
 433        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
 434        rq->hrtick_timer.function = hrtick;
 435}
 436#else   /* CONFIG_SCHED_HRTICK */
 437static inline void hrtick_clear(struct rq *rq)
 438{
 439}
 440
 441static inline void hrtick_rq_init(struct rq *rq)
 442{
 443}
 444#endif  /* CONFIG_SCHED_HRTICK */
 445
 446/*
 447 * cmpxchg based fetch_or, macro so it works for different integer types
 448 */
 449#define fetch_or(ptr, mask)                                             \
 450        ({                                                              \
 451                typeof(ptr) _ptr = (ptr);                               \
 452                typeof(mask) _mask = (mask);                            \
 453                typeof(*_ptr) _old, _val = *_ptr;                       \
 454                                                                        \
 455                for (;;) {                                              \
 456                        _old = cmpxchg(_ptr, _val, _val | _mask);       \
 457                        if (_old == _val)                               \
 458                                break;                                  \
 459                        _val = _old;                                    \
 460                }                                                       \
 461        _old;                                                           \
 462})
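
    /*
     * fetch_or() returns the value *before* the OR, so a caller can set a bit
     * and learn the prior state in a single atomic step. For example,
     * set_nr_and_not_polling() below relies on:
     *
     *     old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
     *     // NEED_RESCHED is now set; (old & _TIF_POLLING_NRFLAG) tells us
     *     // whether the remote CPU was polling and will notice it by itself.
     */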
 463
 464#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 465/*
 466 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 467 * this avoids any races wrt polling state changes and thereby avoids
 468 * spurious IPIs.
 469 */
 470static bool set_nr_and_not_polling(struct task_struct *p)
 471{
 472        struct thread_info *ti = task_thread_info(p);
 473        return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
 474}
 475
 476/*
 477 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 478 *
 479 * If this returns true, then the idle task promises to call
 480 * sched_ttwu_pending() and reschedule soon.
 481 */
 482static bool set_nr_if_polling(struct task_struct *p)
 483{
 484        struct thread_info *ti = task_thread_info(p);
 485        typeof(ti->flags) old, val = READ_ONCE(ti->flags);
 486
 487        for (;;) {
 488                if (!(val & _TIF_POLLING_NRFLAG))
 489                        return false;
 490                if (val & _TIF_NEED_RESCHED)
 491                        return true;
 492                old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
 493                if (old == val)
 494                        break;
 495                val = old;
 496        }
 497        return true;
 498}
 499
 500#else
 501static bool set_nr_and_not_polling(struct task_struct *p)
 502{
 503        set_tsk_need_resched(p);
 504        return true;
 505}
 506
 507#ifdef CONFIG_SMP
 508static bool set_nr_if_polling(struct task_struct *p)
 509{
 510        return false;
 511}
 512#endif
 513#endif
 514
 515static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
 516{
 517        struct wake_q_node *node = &task->wake_q;
 518
 519        /*
 520         * Atomically grab the task, if ->wake_q is !nil already it means
 521         * it's already queued (either by us or someone else) and will get the
 522         * wakeup due to that.
 523         *
 524         * In order to ensure that a pending wakeup will observe our pending
 525         * state, even in the failed case, an explicit smp_mb() must be used.
 526         */
 527        smp_mb__before_atomic();
 528        if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
 529                return false;
 530
 531        /*
 532         * The head is context local, there can be no concurrency.
 533         */
 534        *head->lastp = node;
 535        head->lastp = &node->next;
 536        return true;
 537}
 538
 539/**
 540 * wake_q_add() - queue a wakeup for 'later' waking.
 541 * @head: the wake_q_head to add @task to
 542 * @task: the task to queue for 'later' wakeup
 543 *
 544 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 545 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 546 * instantly.
 547 *
 548 * This function must be used as-if it were wake_up_process(); IOW the task
 549 * must be ready to be woken at this location.
 550 */
 551void wake_q_add(struct wake_q_head *head, struct task_struct *task)
 552{
 553        if (__wake_q_add(head, task))
 554                get_task_struct(task);
 555}
 556
 557/**
 558 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 559 * @head: the wake_q_head to add @task to
 560 * @task: the task to queue for 'later' wakeup
 561 *
 562 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 563 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 564 * instantly.
 565 *
 566 * This function must be used as-if it were wake_up_process(); IOW the task
 567 * must be ready to be woken at this location.
 568 *
 569 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 570 * that already hold a reference to @task can call the 'safe' version and trust
 571 * wake_q to do the right thing depending on whether or not the @task is already
 572 * queued for wakeup.
 573 */
 574void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
 575{
 576        if (!__wake_q_add(head, task))
 577                put_task_struct(task);
 578}
 579
 580void wake_up_q(struct wake_q_head *head)
 581{
 582        struct wake_q_node *node = head->first;
 583
 584        while (node != WAKE_Q_TAIL) {
 585                struct task_struct *task;
 586
 587                task = container_of(node, struct task_struct, wake_q);
 588                BUG_ON(!task);
 589                /* Task can safely be re-inserted now: */
 590                node = node->next;
 591                task->wake_q.next = NULL;
 592
 593                /*
 594                 * wake_up_process() executes a full barrier, which pairs with
 595                 * the queueing in wake_q_add() so as not to miss wakeups.
 596                 */
 597                wake_up_process(task);
 598                put_task_struct(task);
 599        }
 600}
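
    /*
     * Typical wake_q usage, as a sketch (see callers such as futex and
     * rtmutex; 'some_lock' is a stand-in):
     *
     *     DEFINE_WAKE_Q(wq);
     *
     *     raw_spin_lock(&some_lock);
     *     wake_q_add(&wq, p);             // p must be ready to be woken
     *     raw_spin_unlock(&some_lock);
     *
     *     wake_up_q(&wq);                 // issue the deferred wakeups
     *
     * Deferring wake_up_process() until the lock is dropped avoids waking a
     * task only to have it immediately block on the same lock.
     */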
 601
 602/*
 603 * resched_curr - mark rq's current task 'to be rescheduled now'.
 604 *
 605 * On UP this means the setting of the need_resched flag, on SMP it
 606 * might also involve a cross-CPU call to trigger the scheduler on
 607 * the target CPU.
 608 */
 609void resched_curr(struct rq *rq)
 610{
 611        struct task_struct *curr = rq->curr;
 612        int cpu;
 613
 614        lockdep_assert_held(&rq->lock);
 615
 616        if (test_tsk_need_resched(curr))
 617                return;
 618
 619        cpu = cpu_of(rq);
 620
 621        if (cpu == smp_processor_id()) {
 622                set_tsk_need_resched(curr);
 623                set_preempt_need_resched();
 624                return;
 625        }
 626
 627        if (set_nr_and_not_polling(curr))
 628                smp_send_reschedule(cpu);
 629        else
 630                trace_sched_wake_idle_without_ipi(cpu);
 631}
 632
 633void resched_cpu(int cpu)
 634{
 635        struct rq *rq = cpu_rq(cpu);
 636        unsigned long flags;
 637
 638        raw_spin_lock_irqsave(&rq->lock, flags);
 639        if (cpu_online(cpu) || cpu == smp_processor_id())
 640                resched_curr(rq);
 641        raw_spin_unlock_irqrestore(&rq->lock, flags);
 642}
 643
 644#ifdef CONFIG_SMP
 645#ifdef CONFIG_NO_HZ_COMMON
 646/*
 647 * In the semi idle case, use the nearest busy CPU for migrating timers
 648 * from an idle CPU.  This is good for power-savings.
 649 *
 650 * We don't do a similar optimization for a completely idle system, as
 651 * selecting an idle CPU will add more delays to the timers than intended
 652 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 653 */
 654int get_nohz_timer_target(void)
 655{
 656        int i, cpu = smp_processor_id(), default_cpu = -1;
 657        struct sched_domain *sd;
 658
 659        if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
 660                if (!idle_cpu(cpu))
 661                        return cpu;
 662                default_cpu = cpu;
 663        }
 664
 665        rcu_read_lock();
 666        for_each_domain(cpu, sd) {
 667                for_each_cpu_and(i, sched_domain_span(sd),
 668                        housekeeping_cpumask(HK_FLAG_TIMER)) {
 669                        if (cpu == i)
 670                                continue;
 671
 672                        if (!idle_cpu(i)) {
 673                                cpu = i;
 674                                goto unlock;
 675                        }
 676                }
 677        }
 678
 679        if (default_cpu == -1)
 680                default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
 681        cpu = default_cpu;
 682unlock:
 683        rcu_read_unlock();
 684        return cpu;
 685}
 686
 687/*
 688 * When add_timer_on() enqueues a timer into the timer wheel of an
 689 * idle CPU then this timer might expire before the next timer event
 690 * which is scheduled to wake up that CPU. In case of a completely
 691 * idle system the next event might even be infinite time into the
 692 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 693 * leaves the inner idle loop so the newly added timer is taken into
 694 * account when the CPU goes back to idle and evaluates the timer
 695 * wheel for the next timer event.
 696 */
 697static void wake_up_idle_cpu(int cpu)
 698{
 699        struct rq *rq = cpu_rq(cpu);
 700
 701        if (cpu == smp_processor_id())
 702                return;
 703
 704        if (set_nr_and_not_polling(rq->idle))
 705                smp_send_reschedule(cpu);
 706        else
 707                trace_sched_wake_idle_without_ipi(cpu);
 708}
 709
 710static bool wake_up_full_nohz_cpu(int cpu)
 711{
 712        /*
 713         * We just need the target to call irq_exit() and re-evaluate
 714         * the next tick. The nohz full kick at least implies that.
 715         * If needed we can still optimize that later with an
 716         * empty IRQ.
 717         */
 718        if (cpu_is_offline(cpu))
 719                return true;  /* Don't try to wake offline CPUs. */
 720        if (tick_nohz_full_cpu(cpu)) {
 721                if (cpu != smp_processor_id() ||
 722                    tick_nohz_tick_stopped())
 723                        tick_nohz_full_kick_cpu(cpu);
 724                return true;
 725        }
 726
 727        return false;
 728}
 729
 730/*
 731 * Wake up the specified CPU.  If the CPU is going offline, it is the
 732 * caller's responsibility to deal with the lost wakeup, for example,
 733 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 734 */
 735void wake_up_nohz_cpu(int cpu)
 736{
 737        if (!wake_up_full_nohz_cpu(cpu))
 738                wake_up_idle_cpu(cpu);
 739}
 740
 741static void nohz_csd_func(void *info)
 742{
 743        struct rq *rq = info;
 744        int cpu = cpu_of(rq);
 745        unsigned int flags;
 746
 747        /*
 748         * Release the rq::nohz_csd.
 749         */
 750        flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
 751        WARN_ON(!(flags & NOHZ_KICK_MASK));
 752
 753        rq->idle_balance = idle_cpu(cpu);
 754        if (rq->idle_balance && !need_resched()) {
 755                rq->nohz_idle_balance = flags;
 756                raise_softirq_irqoff(SCHED_SOFTIRQ);
 757        }
 758}
 759
 760#endif /* CONFIG_NO_HZ_COMMON */
 761
 762#ifdef CONFIG_NO_HZ_FULL
 763bool sched_can_stop_tick(struct rq *rq)
 764{
 765        int fifo_nr_running;
 766
 767        /* Deadline tasks, even if single, need the tick */
 768        if (rq->dl.dl_nr_running)
 769                return false;
 770
 771        /*
 772         * If there is more than one RR task, we need the tick to affect the
 773         * actual RR behaviour.
 774         */
 775        if (rq->rt.rr_nr_running) {
 776                if (rq->rt.rr_nr_running == 1)
 777                        return true;
 778                else
 779                        return false;
 780        }
 781
 782        /*
 783         * If there are no RR tasks but there are FIFO tasks, we can skip the
 784         * tick: there is no forced preemption between FIFO tasks.
 785         */
 786        fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
 787        if (fifo_nr_running)
 788                return true;
 789
 790        /*
 791         * If there are no DL, RR or FIFO tasks, there must only be CFS tasks left;
 792         * if there's more than one we need the tick for involuntary
 793         * preemption.
 794         */
 795        if (rq->nr_running > 1)
 796                return false;
 797
 798        return true;
 799}
 800#endif /* CONFIG_NO_HZ_FULL */
 801#endif /* CONFIG_SMP */
 802
 803#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
 804                        (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
 805/*
 806 * Iterate task_group tree rooted at *from, calling @down when first entering a
 807 * node and @up when leaving it for the final time.
 808 *
 809 * Caller must hold rcu_lock or sufficient equivalent.
 810 */
 811int walk_tg_tree_from(struct task_group *from,
 812                             tg_visitor down, tg_visitor up, void *data)
 813{
 814        struct task_group *parent, *child;
 815        int ret;
 816
 817        parent = from;
 818
 819down:
 820        ret = (*down)(parent, data);
 821        if (ret)
 822                goto out;
 823        list_for_each_entry_rcu(child, &parent->children, siblings) {
 824                parent = child;
 825                goto down;
 826
 827up:
 828                continue;
 829        }
 830        ret = (*up)(parent, data);
 831        if (ret || parent == from)
 832                goto out;
 833
 834        child = parent;
 835        parent = parent->parent;
 836        if (parent)
 837                goto up;
 838out:
 839        return ret;
 840}
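
    /*
     * A minimal usage sketch (my_visit() is hypothetical): to apply a
     * callback to every task_group below the root in pre-order, a caller
     * can do
     *
     *     rcu_read_lock();
     *     walk_tg_tree_from(&root_task_group, my_visit, tg_nop, data);
     *     rcu_read_unlock();
     *
     * with tg_nop() below serving as the no-op visitor for the unused
     * direction.
     */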
 841
 842int tg_nop(struct task_group *tg, void *data)
 843{
 844        return 0;
 845}
 846#endif
 847
 848static void set_load_weight(struct task_struct *p, bool update_load)
 849{
 850        int prio = p->static_prio - MAX_RT_PRIO;
 851        struct load_weight *load = &p->se.load;
 852
 853        /*
 854         * SCHED_IDLE tasks get minimal weight:
 855         */
 856        if (task_has_idle_policy(p)) {
 857                load->weight = scale_load(WEIGHT_IDLEPRIO);
 858                load->inv_weight = WMULT_IDLEPRIO;
 859                return;
 860        }
 861
 862        /*
 863         * SCHED_OTHER tasks have to update their load when changing their
 864         * weight
 865         */
 866        if (update_load && p->sched_class == &fair_sched_class) {
 867                reweight_task(p, prio);
 868        } else {
 869                load->weight = scale_load(sched_prio_to_weight[prio]);
 870                load->inv_weight = sched_prio_to_wmult[prio];
 871        }
 872}
 873
 874#ifdef CONFIG_UCLAMP_TASK
 875/*
 876 * Serializes updates of utilization clamp values
 877 *
 878 * User-space (the slow path) triggers utilization clamp value updates which
 879 * can require updates to the scheduler's (fast path) data structures used to
 880 * support enqueue/dequeue operations.
 881 * While the per-CPU rq lock protects fast-path update operations, user-space
 882 * requests are serialized using a mutex to reduce the risk of conflicting
 883 * updates or API abuses.
 884 */
 885static DEFINE_MUTEX(uclamp_mutex);
 886
 887/* Max allowed minimum utilization */
 888unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
 889
 890/* Max allowed maximum utilization */
 891unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
 892
 893/*
 894 * By default RT tasks run at the maximum performance point/capacity of the
 895 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 896 * SCHED_CAPACITY_SCALE.
 897 *
 898 * This knob allows admins to change the default behavior when uclamp is being
 899 * used. In battery powered devices, particularly, running at the maximum
 900 * capacity and frequency will increase energy consumption and shorten the
 901 * battery life.
 902 *
 903 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 904 *
 905 * This knob will not override the system default sched_util_clamp_min defined
 906 * above.
 907 */
 908unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
 909
 910/* All clamps are required to be less or equal than these values */
 911static struct uclamp_se uclamp_default[UCLAMP_CNT];
 912
 913/*
 914 * This static key is used to reduce the uclamp overhead in the fast path. It
 915 * primarily disables the call to uclamp_rq_{inc, dec}() in
 916 * enqueue/dequeue_task().
 917 *
 918 * This allows users to continue to enable uclamp in their kernel config with
 919 * minimum uclamp overhead in the fast path.
 920 *
 921 * As soon as userspace modifies any of the uclamp knobs, the static key is
 922 * enabled, since we have actual users that make use of uclamp
 923 * functionality.
 924 *
 925 * The knobs that would enable this static key are:
 926 *
 927 *   * A task modifying its uclamp value with sched_setattr().
 928 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 929 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 930 */
 931DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
 932
 933/* Integer rounded range for each bucket */
 934#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
 935
 936#define for_each_clamp_id(clamp_id) \
 937        for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
 938
 939static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
 940{
 941        return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
 942}
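
    /*
     * Worked example, assuming the default CONFIG_UCLAMP_BUCKETS_COUNT of 5:
     * UCLAMP_BUCKET_DELTA = DIV_ROUND_CLOSEST(1024, 5) = 205, so a requested
     * clamp value of 512 maps to bucket 512 / 205 = 2, and every value of
     * 820 or above collapses into the last bucket (UCLAMP_BUCKETS - 1 = 4).
     */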
 943
 944static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
 945{
 946        if (clamp_id == UCLAMP_MIN)
 947                return 0;
 948        return SCHED_CAPACITY_SCALE;
 949}
 950
 951static inline void uclamp_se_set(struct uclamp_se *uc_se,
 952                                 unsigned int value, bool user_defined)
 953{
 954        uc_se->value = value;
 955        uc_se->bucket_id = uclamp_bucket_id(value);
 956        uc_se->user_defined = user_defined;
 957}
 958
 959static inline unsigned int
 960uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
 961                  unsigned int clamp_value)
 962{
 963        /*
 964         * Avoid blocked utilization pushing up the frequency when we go
 965         * idle (which drops the max-clamp) by retaining the last known
 966         * max-clamp.
 967         */
 968        if (clamp_id == UCLAMP_MAX) {
 969                rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
 970                return clamp_value;
 971        }
 972
 973        return uclamp_none(UCLAMP_MIN);
 974}
 975
 976static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
 977                                     unsigned int clamp_value)
 978{
 979        /* Reset max-clamp retention only on idle exit */
 980        if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
 981                return;
 982
 983        WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
 984}
 985
 986static inline
 987unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
 988                                   unsigned int clamp_value)
 989{
 990        struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
 991        int bucket_id = UCLAMP_BUCKETS - 1;
 992
 993        /*
 994         * Since both min and max clamps are max aggregated, find the
 995         * topmost bucket with tasks in it.
 996         */
 997        for ( ; bucket_id >= 0; bucket_id--) {
 998                if (!bucket[bucket_id].tasks)
 999                        continue;
1000                return bucket[bucket_id].value;
1001        }
1002
1003        /* No tasks -- default clamp values */
1004        return uclamp_idle_value(rq, clamp_id, clamp_value);
1005}
1006
1007static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1008{
1009        unsigned int default_util_min;
1010        struct uclamp_se *uc_se;
1011
1012        lockdep_assert_held(&p->pi_lock);
1013
1014        uc_se = &p->uclamp_req[UCLAMP_MIN];
1015
1016        /* Only sync if user didn't override the default */
1017        if (uc_se->user_defined)
1018                return;
1019
1020        default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1021        uclamp_se_set(uc_se, default_util_min, false);
1022}
1023
1024static void uclamp_update_util_min_rt_default(struct task_struct *p)
1025{
1026        struct rq_flags rf;
1027        struct rq *rq;
1028
1029        if (!rt_task(p))
1030                return;
1031
1032        /* Protect updates to p->uclamp_* */
1033        rq = task_rq_lock(p, &rf);
1034        __uclamp_update_util_min_rt_default(p);
1035        task_rq_unlock(rq, p, &rf);
1036}
1037
1038static void uclamp_sync_util_min_rt_default(void)
1039{
1040        struct task_struct *g, *p;
1041
1042        /*
1043         * copy_process()                       sysctl_uclamp
1044         *                                        uclamp_min_rt = X;
1045         *   write_lock(&tasklist_lock)           read_lock(&tasklist_lock)
1046         *   // link thread                       smp_mb__after_spinlock()
1047         *   write_unlock(&tasklist_lock)         read_unlock(&tasklist_lock);
1048         *   sched_post_fork()                    for_each_process_thread()
1049         *     __uclamp_sync_rt()                   __uclamp_sync_rt()
1050         *
1051         * Ensures that either sched_post_fork() will observe the new
1052         * uclamp_min_rt or for_each_process_thread() will observe the new
1053         * task.
1054         */
1055        read_lock(&tasklist_lock);
1056        smp_mb__after_spinlock();
1057        read_unlock(&tasklist_lock);
1058
1059        rcu_read_lock();
1060        for_each_process_thread(g, p)
1061                uclamp_update_util_min_rt_default(p);
1062        rcu_read_unlock();
1063}
1064
1065static inline struct uclamp_se
1066uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1067{
1068        struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1069#ifdef CONFIG_UCLAMP_TASK_GROUP
1070        struct uclamp_se uc_max;
1071
1072        /*
1073         * Tasks in autogroups or root task group will be
1074         * restricted by system defaults.
1075         */
1076        if (task_group_is_autogroup(task_group(p)))
1077                return uc_req;
1078        if (task_group(p) == &root_task_group)
1079                return uc_req;
1080
1081        uc_max = task_group(p)->uclamp[clamp_id];
1082        if (uc_req.value > uc_max.value || !uc_req.user_defined)
1083                return uc_max;
1084#endif
1085
1086        return uc_req;
1087}
1088
1089/*
1090 * The effective clamp bucket index of a task depends on, by increasing
1091 * priority:
1092 * - the task specific clamp value, when explicitly requested from userspace
1093 * - the task group effective clamp value, for tasks neither in the root
1094 *   group nor in an autogroup
1095 * - the system default clamp value, defined by the sysadmin
1096 */
1097static inline struct uclamp_se
1098uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1099{
1100        struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1101        struct uclamp_se uc_max = uclamp_default[clamp_id];
1102
1103        /* System default restrictions always apply */
1104        if (unlikely(uc_req.value > uc_max.value))
1105                return uc_max;
1106
1107        return uc_req;
1108}
1109
1110unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1111{
1112        struct uclamp_se uc_eff;
1113
1114        /* Task currently refcounted: use back-annotated (effective) value */
1115        if (p->uclamp[clamp_id].active)
1116                return (unsigned long)p->uclamp[clamp_id].value;
1117
1118        uc_eff = uclamp_eff_get(p, clamp_id);
1119
1120        return (unsigned long)uc_eff.value;
1121}
1122
1123/*
1124 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1125 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1126 * updates the rq's clamp value if required.
1127 *
1128 * Tasks can have a task-specific value requested from user-space; each
1129 * bucket tracks the maximum value of the tasks refcounted in it.
1130 * This "local max aggregation" allows tracking the exact "requested" value
1131 * for each bucket when all its RUNNABLE tasks require the same clamp.
1132 */
1133static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1134                                    enum uclamp_id clamp_id)
1135{
1136        struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1137        struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1138        struct uclamp_bucket *bucket;
1139
1140        lockdep_assert_held(&rq->lock);
1141
1142        /* Update task effective clamp */
1143        p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1144
1145        bucket = &uc_rq->bucket[uc_se->bucket_id];
1146        bucket->tasks++;
1147        uc_se->active = true;
1148
1149        uclamp_idle_reset(rq, clamp_id, uc_se->value);
1150
1151        /*
1152         * Local max aggregation: rq buckets always track the max
1153         * "requested" clamp value of its RUNNABLE tasks.
1154         */
1155        if (bucket->tasks == 1 || uc_se->value > bucket->value)
1156                bucket->value = uc_se->value;
1157
1158        if (uc_se->value > READ_ONCE(uc_rq->value))
1159                WRITE_ONCE(uc_rq->value, uc_se->value);
1160}
1161
1162/*
1163 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1164 * is released. If this is the last task reference counting the rq's max
1165 * active clamp value, then the rq's clamp value is updated.
1166 *
1167 * Both refcounted tasks and rq's cached clamp values are expected to be
1168 * always valid. If it's detected they are not, as defensive programming,
1169 * enforce the expected state and warn.
1170 */
1171static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1172                                    enum uclamp_id clamp_id)
1173{
1174        struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1175        struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1176        struct uclamp_bucket *bucket;
1177        unsigned int bkt_clamp;
1178        unsigned int rq_clamp;
1179
1180        lockdep_assert_held(&rq->lock);
1181
1182        /*
1183         * If sched_uclamp_used was enabled after task @p was enqueued,
1184         * we could end up with an unbalanced call to uclamp_rq_dec_id().
1185         *
1186         * In this case the uc_se->active flag should be false since no uclamp
1187         * accounting was performed at enqueue time and we can just return
1188         * here.
1189         *
1190         * Need to be careful of the following enqueue/dequeue ordering
1191         * problem too
1192         *
1193         *      enqueue(taskA)
1194         *      // sched_uclamp_used gets enabled
1195         *      enqueue(taskB)
1196         *      dequeue(taskA)
1197         *      // Must not decrement bucket->tasks here
1198         *      dequeue(taskB)
1199         *
1200         * where we could end up with stale data in uc_se and
1201         * bucket[uc_se->bucket_id].
1202         *
1203         * The check below eliminates the possibility of such a race.
1204         */
1205        if (unlikely(!uc_se->active))
1206                return;
1207
1208        bucket = &uc_rq->bucket[uc_se->bucket_id];
1209
1210        SCHED_WARN_ON(!bucket->tasks);
1211        if (likely(bucket->tasks))
1212                bucket->tasks--;
1213
1214        uc_se->active = false;
1215
1216        /*
1217         * Keep "local max aggregation" simple and accept to (possibly)
1218         * overboost some RUNNABLE tasks in the same bucket.
1219         * The rq clamp bucket value is reset to its base value whenever
1220         * there are no more RUNNABLE tasks refcounting it.
1221         */
1222        if (likely(bucket->tasks))
1223                return;
1224
1225        rq_clamp = READ_ONCE(uc_rq->value);
1226        /*
1227         * Defensive programming: this should never happen. If it happens,
1228         * e.g. due to future modification, warn and fixup the expected value.
1229         */
1230        SCHED_WARN_ON(bucket->value > rq_clamp);
1231        if (bucket->value >= rq_clamp) {
1232                bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1233                WRITE_ONCE(uc_rq->value, bkt_clamp);
1234        }
1235}
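
    /*
     * Local max aggregation by example: two RUNNABLE tasks whose requests
     * land in the same bucket, one with value 300 and one with 350, leave
     * bucket->value = 350. If the 350 task is dequeued first, the bucket
     * keeps 350 (the remaining task is briefly "overboosted"); only when
     * bucket->tasks drops to zero is the rq clamp value recomputed via
     * uclamp_rq_max_value().
     */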
1236
1237static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1238{
1239        enum uclamp_id clamp_id;
1240
1241        /*
1242         * Avoid any overhead until uclamp is actually used by userspace.
1243         *
1244         * The condition is constructed such that a NOP is generated when
1245         * sched_uclamp_used is disabled.
1246         */
1247        if (!static_branch_unlikely(&sched_uclamp_used))
1248                return;
1249
1250        if (unlikely(!p->sched_class->uclamp_enabled))
1251                return;
1252
1253        for_each_clamp_id(clamp_id)
1254                uclamp_rq_inc_id(rq, p, clamp_id);
1255
1256        /* Reset clamp idle holding when there is one RUNNABLE task */
1257        if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1258                rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1259}
1260
1261static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1262{
1263        enum uclamp_id clamp_id;
1264
1265        /*
1266         * Avoid any overhead until uclamp is actually used by userspace.
1267         *
1268         * The condition is constructed such that a NOP is generated when
1269         * sched_uclamp_used is disabled.
1270         */
1271        if (!static_branch_unlikely(&sched_uclamp_used))
1272                return;
1273
1274        if (unlikely(!p->sched_class->uclamp_enabled))
1275                return;
1276
1277        for_each_clamp_id(clamp_id)
1278                uclamp_rq_dec_id(rq, p, clamp_id);
1279}
1280
1281static inline void
1282uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
1283{
1284        struct rq_flags rf;
1285        struct rq *rq;
1286
1287        /*
1288         * Lock the task and the rq where the task is (or was) queued.
1289         *
1290         * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1291         * price to pay to safely serialize util_{min,max} updates with
1292         * enqueues, dequeues and migration operations.
1293         * This is the same locking schema used by __set_cpus_allowed_ptr().
1294         */
1295        rq = task_rq_lock(p, &rf);
1296
1297        /*
1298         * Setting the clamp bucket is serialized by task_rq_lock().
1299         * If the task is not yet RUNNABLE and its task_struct is not
1300         * affecting a valid clamp bucket, the next time it's enqueued,
1301         * it will already see the updated clamp bucket value.
1302         */
1303        if (p->uclamp[clamp_id].active) {
1304                uclamp_rq_dec_id(rq, p, clamp_id);
1305                uclamp_rq_inc_id(rq, p, clamp_id);
1306        }
1307
1308        task_rq_unlock(rq, p, &rf);
1309}
1310
1311#ifdef CONFIG_UCLAMP_TASK_GROUP
1312static inline void
1313uclamp_update_active_tasks(struct cgroup_subsys_state *css,
1314                           unsigned int clamps)
1315{
1316        enum uclamp_id clamp_id;
1317        struct css_task_iter it;
1318        struct task_struct *p;
1319
1320        css_task_iter_start(css, 0, &it);
1321        while ((p = css_task_iter_next(&it))) {
1322                for_each_clamp_id(clamp_id) {
1323                        if ((0x1 << clamp_id) & clamps)
1324                                uclamp_update_active(p, clamp_id);
1325                }
1326        }
1327        css_task_iter_end(&it);
1328}
1329
1330static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1331static void uclamp_update_root_tg(void)
1332{
1333        struct task_group *tg = &root_task_group;
1334
1335        uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1336                      sysctl_sched_uclamp_util_min, false);
1337        uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1338                      sysctl_sched_uclamp_util_max, false);
1339
1340        rcu_read_lock();
1341        cpu_util_update_eff(&root_task_group.css);
1342        rcu_read_unlock();
1343}
1344#else
1345static void uclamp_update_root_tg(void) { }
1346#endif
1347
1348int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
1349                                void *buffer, size_t *lenp, loff_t *ppos)
1350{
1351        bool update_root_tg = false;
1352        int old_min, old_max, old_min_rt;
1353        int result;
1354
1355        mutex_lock(&uclamp_mutex);
1356        old_min = sysctl_sched_uclamp_util_min;
1357        old_max = sysctl_sched_uclamp_util_max;
1358        old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1359
1360        result = proc_dointvec(table, write, buffer, lenp, ppos);
1361        if (result)
1362                goto undo;
1363        if (!write)
1364                goto done;
1365
1366        if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1367            sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1368            sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1369
1370                result = -EINVAL;
1371                goto undo;
1372        }
1373
1374        if (old_min != sysctl_sched_uclamp_util_min) {
1375                uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1376                              sysctl_sched_uclamp_util_min, false);
1377                update_root_tg = true;
1378        }
1379        if (old_max != sysctl_sched_uclamp_util_max) {
1380                uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1381                              sysctl_sched_uclamp_util_max, false);
1382                update_root_tg = true;
1383        }
1384
1385        if (update_root_tg) {
1386                static_branch_enable(&sched_uclamp_used);
1387                uclamp_update_root_tg();
1388        }
1389
1390        if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1391                static_branch_enable(&sched_uclamp_used);
1392                uclamp_sync_util_min_rt_default();
1393        }
1394
1395        /*
1396         * We update all RUNNABLE tasks only when task groups are in use.
1397         * Otherwise, keep it simple and do just a lazy update at each next
1398         * task enqueue time.
1399         */
1400
1401        goto done;
1402
1403undo:
1404        sysctl_sched_uclamp_util_min = old_min;
1405        sysctl_sched_uclamp_util_max = old_max;
1406        sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1407done:
1408        mutex_unlock(&uclamp_mutex);
1409
1410        return result;
1411}
1412
1413static int uclamp_validate(struct task_struct *p,
1414                           const struct sched_attr *attr)
1415{
1416        int util_min = p->uclamp_req[UCLAMP_MIN].value;
1417        int util_max = p->uclamp_req[UCLAMP_MAX].value;
1418
1419        if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
1420                util_min = attr->sched_util_min;
1421
1422                if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
1423                        return -EINVAL;
1424        }
1425
1426        if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
1427                util_max = attr->sched_util_max;
1428
1429                if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
1430                        return -EINVAL;
1431        }
1432
1433        if (util_min != -1 && util_max != -1 && util_min > util_max)
1434                return -EINVAL;
1435
1436        /*
1437         * We have valid uclamp attributes; make sure uclamp is enabled.
1438         *
1439         * We need to do that here, because enabling static branches is a
1440         * blocking operation which obviously cannot be done while holding
1441         * scheduler locks.
1442         */
1443        static_branch_enable(&sched_uclamp_used);
1444
1445        return 0;
1446}
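
    /*
     * From user-space, the values validated above arrive via sched_setattr(2).
     * A rough sketch (error handling omitted):
     *
     *     struct sched_attr attr = {
     *             .size           = sizeof(attr),
     *             .sched_policy   = SCHED_NORMAL,
     *             .sched_flags    = SCHED_FLAG_UTIL_CLAMP_MIN |
     *                               SCHED_FLAG_UTIL_CLAMP_MAX,
     *             .sched_util_min = 128,
     *             .sched_util_max = 512,
     *     };
     *     syscall(SYS_sched_setattr, 0, &attr, 0);
     *
     * A value of -1 in either field requests a reset to the default, as
     * handled by uclamp_reset() below.
     */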
1447
1448static bool uclamp_reset(const struct sched_attr *attr,
1449                         enum uclamp_id clamp_id,
1450                         struct uclamp_se *uc_se)
1451{
1452        /* Reset on sched class change for a non user-defined clamp value. */
1453        if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
1454            !uc_se->user_defined)
1455                return true;
1456
1457        /* Reset on sched_util_{min,max} == -1. */
1458        if (clamp_id == UCLAMP_MIN &&
1459            attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1460            attr->sched_util_min == -1) {
1461                return true;
1462        }
1463
1464        if (clamp_id == UCLAMP_MAX &&
1465            attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1466            attr->sched_util_max == -1) {
1467                return true;
1468        }
1469
1470        return false;
1471}
1472
1473static void __setscheduler_uclamp(struct task_struct *p,
1474                                  const struct sched_attr *attr)
1475{
1476        enum uclamp_id clamp_id;
1477
1478        for_each_clamp_id(clamp_id) {
1479                struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
1480                unsigned int value;
1481
1482                if (!uclamp_reset(attr, clamp_id, uc_se))
1483                        continue;
1484
1485                /*
1486                 * RT tasks by default have a 100% boost value that can be
1487                 * modified at runtime.
1488                 */
1489                if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
1490                        value = sysctl_sched_uclamp_util_min_rt_default;
1491                else
1492                        value = uclamp_none(clamp_id);
1493
1494                uclamp_se_set(uc_se, value, false);
1495
1496        }
1497
1498        if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
1499                return;
1500
1501        if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1502            attr->sched_util_min != -1) {
1503                uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
1504                              attr->sched_util_min, true);
1505        }
1506
1507        if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1508            attr->sched_util_max != -1) {
1509                uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
1510                              attr->sched_util_max, true);
1511        }
1512}
1513
1514static void uclamp_fork(struct task_struct *p)
1515{
1516        enum uclamp_id clamp_id;
1517
1518        /*
1519         * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1520         * as the task is still at its early fork stages.
1521         */
1522        for_each_clamp_id(clamp_id)
1523                p->uclamp[clamp_id].active = false;
1524
1525        if (likely(!p->sched_reset_on_fork))
1526                return;
1527
1528        for_each_clamp_id(clamp_id) {
1529                uclamp_se_set(&p->uclamp_req[clamp_id],
1530                              uclamp_none(clamp_id), false);
1531        }
1532}
1533
1534static void uclamp_post_fork(struct task_struct *p)
1535{
1536        uclamp_update_util_min_rt_default(p);
1537}
1538
1539static void __init init_uclamp_rq(struct rq *rq)
1540{
1541        enum uclamp_id clamp_id;
1542        struct uclamp_rq *uc_rq = rq->uclamp;
1543
1544        for_each_clamp_id(clamp_id) {
1545                uc_rq[clamp_id] = (struct uclamp_rq) {
1546                        .value = uclamp_none(clamp_id)
1547                };
1548        }
1549
1550        rq->uclamp_flags = 0;
1551}
1552
1553static void __init init_uclamp(void)
1554{
1555        struct uclamp_se uc_max = {};
1556        enum uclamp_id clamp_id;
1557        int cpu;
1558
1559        for_each_possible_cpu(cpu)
1560                init_uclamp_rq(cpu_rq(cpu));
1561
1562        for_each_clamp_id(clamp_id) {
1563                uclamp_se_set(&init_task.uclamp_req[clamp_id],
1564                              uclamp_none(clamp_id), false);
1565        }
1566
1567        /* System defaults allow max clamp values for both indexes */
1568        uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
1569        for_each_clamp_id(clamp_id) {
1570                uclamp_default[clamp_id] = uc_max;
1571#ifdef CONFIG_UCLAMP_TASK_GROUP
1572                root_task_group.uclamp_req[clamp_id] = uc_max;
1573                root_task_group.uclamp[clamp_id] = uc_max;
1574#endif
1575        }
1576}
1577
1578#else /* CONFIG_UCLAMP_TASK */
1579static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
1580static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
1581static inline int uclamp_validate(struct task_struct *p,
1582                                  const struct sched_attr *attr)
1583{
1584        return -EOPNOTSUPP;
1585}
1586static void __setscheduler_uclamp(struct task_struct *p,
1587                                  const struct sched_attr *attr) { }
1588static inline void uclamp_fork(struct task_struct *p) { }
1589static inline void uclamp_post_fork(struct task_struct *p) { }
1590static inline void init_uclamp(void) { }
1591#endif /* CONFIG_UCLAMP_TASK */
1592
1593static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
1594{
1595        if (!(flags & ENQUEUE_NOCLOCK))
1596                update_rq_clock(rq);
1597
1598        if (!(flags & ENQUEUE_RESTORE)) {
1599                sched_info_queued(rq, p);
1600                psi_enqueue(p, flags & ENQUEUE_WAKEUP);
1601        }
1602
1603        uclamp_rq_inc(rq, p);
1604        p->sched_class->enqueue_task(rq, p, flags);
1605}
1606
1607static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
1608{
1609        if (!(flags & DEQUEUE_NOCLOCK))
1610                update_rq_clock(rq);
1611
1612        if (!(flags & DEQUEUE_SAVE)) {
1613                sched_info_dequeued(rq, p);
1614                psi_dequeue(p, flags & DEQUEUE_SLEEP);
1615        }
1616
1617        uclamp_rq_dec(rq, p);
1618        p->sched_class->dequeue_task(rq, p, flags);
1619}
1620
1621void activate_task(struct rq *rq, struct task_struct *p, int flags)
1622{
1623        enqueue_task(rq, p, flags);
1624
1625        p->on_rq = TASK_ON_RQ_QUEUED;
1626}
1627
1628void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1629{
1630        p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
1631
1632        dequeue_task(rq, p, flags);
1633}
1634
1635/*
1636 * __normal_prio - return the priority that is based on the static prio
1637 */
1638static inline int __normal_prio(struct task_struct *p)
1639{
1640        return p->static_prio;
1641}
1642
1643/*
1644 * Calculate the expected normal priority: i.e. priority
1645 * without taking RT-inheritance into account. Might be
1646 * boosted by interactivity modifiers. Changes upon fork,
1647 * setprio syscalls, and whenever the interactivity
1648 * estimator recalculates.
1649 */
1650static inline int normal_prio(struct task_struct *p)
1651{
1652        int prio;
1653
1654        if (task_has_dl_policy(p))
1655                prio = MAX_DL_PRIO-1;
1656        else if (task_has_rt_policy(p))
1657                prio = MAX_RT_PRIO-1 - p->rt_priority;
1658        else
1659                prio = __normal_prio(p);
1660        return prio;
1661}
1662
1663/*
1664 * Calculate the current priority, i.e. the priority
1665 * taken into account by the scheduler. This value might
1666 * be boosted by RT tasks, or might be boosted by
1667 * interactivity modifiers. Will be RT if the task got
1668 * RT-boosted. If not then it returns p->normal_prio.
1669 */
1670static int effective_prio(struct task_struct *p)
1671{
1672        p->normal_prio = normal_prio(p);
1673        /*
1674         * If we are RT tasks or we were boosted to RT priority,
1675         * keep the priority unchanged. Otherwise, update priority
1676         * to the normal priority:
1677         */
1678        if (!rt_prio(p->prio))
1679                return p->normal_prio;
1680        return p->prio;
1681}
1682
1683/**
1684 * task_curr - is this task currently executing on a CPU?
1685 * @p: the task in question.
1686 *
1687 * Return: 1 if the task is currently executing. 0 otherwise.
1688 */
1689inline int task_curr(const struct task_struct *p)
1690{
1691        return cpu_curr(task_cpu(p)) == p;
1692}
1693
1694/*
1695 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
1696 * use the balance_callback list if you want balancing.
1697 *
1698 * This means any call to check_class_changed() must be followed by a call to
1699 * balance_callback().
1700 */
1701static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1702                                       const struct sched_class *prev_class,
1703                                       int oldprio)
1704{
1705        if (prev_class != p->sched_class) {
1706                if (prev_class->switched_from)
1707                        prev_class->switched_from(rq, p);
1708
1709                p->sched_class->switched_to(rq, p);
1710        } else if (oldprio != p->prio || dl_task(p))
1711                p->sched_class->prio_changed(rq, p, oldprio);
1712}
1713
1714void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1715{
1716        if (p->sched_class == rq->curr->sched_class)
1717                rq->curr->sched_class->check_preempt_curr(rq, p, flags);
1718        else if (p->sched_class > rq->curr->sched_class)
1719                resched_curr(rq);
1720
1721        /*
1722         * A queue event has occurred, and we're going to schedule.  In
1723         * this case, we can save a useless back-to-back clock update.
1724         */
1725        if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
1726                rq_clock_skip_update(rq);
1727}
1728
1729#ifdef CONFIG_SMP
1730
1731static void
1732__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
1733
1734static int __set_cpus_allowed_ptr(struct task_struct *p,
1735                                  const struct cpumask *new_mask,
1736                                  u32 flags);
1737
1738static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
1739{
1740        if (likely(!p->migration_disabled))
1741                return;
1742
1743        if (p->cpus_ptr != &p->cpus_mask)
1744                return;
1745
1746        /*
1747         * Violates locking rules! see comment in __do_set_cpus_allowed().
1748         */
1749        __do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE);
1750}
1751
1752void migrate_disable(void)
1753{
1754        struct task_struct *p = current;
1755
1756        if (p->migration_disabled) {
1757                p->migration_disabled++;
1758                return;
1759        }
1760
1761        preempt_disable();
1762        this_rq()->nr_pinned++;
1763        p->migration_disabled = 1;
1764        preempt_enable();
1765}
1766EXPORT_SYMBOL_GPL(migrate_disable);
1767
1768void migrate_enable(void)
1769{
1770        struct task_struct *p = current;
1771
1772        if (p->migration_disabled > 1) {
1773                p->migration_disabled--;
1774                return;
1775        }
1776
1777        /*
1778         * Ensure stop_task runs either before or after this, and that
1779         * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
1780         */
1781        preempt_disable();
1782        if (p->cpus_ptr != &p->cpus_mask)
1783                __set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
1784        /*
1785         * Mustn't clear migration_disabled() until cpus_ptr points back at the
1786         * regular cpus_mask, otherwise things that race (e.g.
1787         * select_fallback_rq()) get confused.
1788         */
1789        barrier();
1790        p->migration_disabled = 0;
1791        this_rq()->nr_pinned--;
1792        preempt_enable();
1793}
1794EXPORT_SYMBOL_GPL(migrate_enable);
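
/*
 * Illustrative sketch of a migrate_disable() section; my_pcpu_data is a
 * made-up per-CPU variable used purely for the example:
 *
 *   struct my_data *d;
 *
 *   migrate_disable();
 *   d = this_cpu_ptr(&my_pcpu_data);
 *   d->count++;                  /* pointer stays valid: no migration */
 *   migrate_enable();
 *
 * Unlike preempt_disable(), the section remains preemptible; the task is
 * only guaranteed not to be migrated to another CPU, which is what keeps
 * the this_cpu_ptr() reference stable.
 */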
1795
1796static inline bool rq_has_pinned_tasks(struct rq *rq)
1797{
1798        return rq->nr_pinned;
1799}
1800
1801/*
1802 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
1803 * __set_cpus_allowed_ptr() and select_fallback_rq().
1804 */
1805static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
1806{
1807        /* When not in the task's cpumask, no point in looking further. */
1808        if (!cpumask_test_cpu(cpu, p->cpus_ptr))
1809                return false;
1810
1811        /* migrate_disabled() must be allowed to finish. */
1812        if (is_migration_disabled(p))
1813                return cpu_online(cpu);
1814
1815        /* Non-kernel threads are not allowed during either CPU online or offline. */
1816        if (!(p->flags & PF_KTHREAD))
1817                return cpu_active(cpu);
1818
1819        /* KTHREAD_IS_PER_CPU is always allowed. */
1820        if (kthread_is_per_cpu(p))
1821                return cpu_online(cpu);
1822
1823        /* Regular kernel threads don't get to stay during offline. */
1824        if (cpu_dying(cpu))
1825                return false;
1826
1827        /* But are allowed during online. */
1828        return cpu_online(cpu);
1829}
1830
1831/*
1832 * This is how migration works:
1833 *
1834 * 1) we invoke migration_cpu_stop() on the target CPU using
1835 *    stop_one_cpu().
1836 * 2) stopper starts to run (implicitly forcing the migrated thread
1837 *    off the CPU)
1838 * 3) it checks whether the migrated task is still in the wrong runqueue.
1839 * 4) if it's in the wrong runqueue then the migration thread removes
1840 *    it and puts it into the right queue.
1841 * 5) stopper completes and stop_one_cpu() returns and the migration
1842 *    is done.
1843 */
1844
1845/*
1846 * move_queued_task - move a queued task to new rq.
1847 *
1848 * Returns (locked) new rq. Old rq's lock is released.
1849 */
1850static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
1851                                   struct task_struct *p, int new_cpu)
1852{
1853        lockdep_assert_held(&rq->lock);
1854
1855        deactivate_task(rq, p, DEQUEUE_NOCLOCK);
1856        set_task_cpu(p, new_cpu);
1857        rq_unlock(rq, rf);
1858
1859        rq = cpu_rq(new_cpu);
1860
1861        rq_lock(rq, rf);
1862        BUG_ON(task_cpu(p) != new_cpu);
1863        activate_task(rq, p, 0);
1864        check_preempt_curr(rq, p, 0);
1865
1866        return rq;
1867}
1868
1869struct migration_arg {
1870        struct task_struct              *task;
1871        int                             dest_cpu;
1872        struct set_affinity_pending     *pending;
1873};
1874
1875/*
1876 * @refs: number of outstanding wait_for_completion() waiters
1877 * @stop_pending: whether @stop_work is in use
1878 */
1879struct set_affinity_pending {
1880        refcount_t              refs;
1881        unsigned int            stop_pending;
1882        struct completion       done;
1883        struct cpu_stop_work    stop_work;
1884        struct migration_arg    arg;
1885};
1886
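/*
 * Illustrative sketch of the synchronous side of the sequence above
 * (simplified; sched_exec() is a real caller of this pattern, while
 * set_cpus_allowed_ptr() goes through affine_move_task() below):
 *
 *   struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };
 *
 *   stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 *
 * stop_one_cpu() only returns once migration_cpu_stop() has run on the
 * task's CPU and completed steps 2-5 of the migration sequence described
 * above.
 */
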
1887/*
1888 * Move a (non-current) task off this CPU, onto the destination CPU. We're
1889 * doing this because either it can't run here any more (set_cpus_allowed()
1890 * moved it away from this CPU, or the CPU is going down), or because we're
1891 * attempting to rebalance this task on exec (sched_exec).
1892 *
1893 * So we race with normal scheduler movements, but that's OK, as long
1894 * as the task is no longer on this CPU.
1895 */
1896static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
1897                                 struct task_struct *p, int dest_cpu)
1898{
1899        /* Affinity changed (again). */
1900        if (!is_cpu_allowed(p, dest_cpu))
1901                return rq;
1902
1903        update_rq_clock(rq);
1904        rq = move_queued_task(rq, rf, p, dest_cpu);
1905
1906        return rq;
1907}
1908
1909/*
1910 * migration_cpu_stop - this will be executed by a highprio stopper thread
1911 * and performs thread migration by bumping thread off CPU then
1912 * 'pushing' onto another runqueue.
1913 */
1914static int migration_cpu_stop(void *data)
1915{
1916        struct migration_arg *arg = data;
1917        struct set_affinity_pending *pending = arg->pending;
1918        struct task_struct *p = arg->task;
1919        int dest_cpu = arg->dest_cpu;
1920        struct rq *rq = this_rq();
1921        bool complete = false;
1922        struct rq_flags rf;
1923
1924        /*
1925         * The original target CPU might have gone down and we might
1926         * be on another CPU but it doesn't matter.
1927         */
1928        local_irq_save(rf.flags);
1929        /*
1930         * We need to explicitly wake pending tasks before running
1931         * __migrate_task() such that we will not miss enforcing cpus_ptr
1932         * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
1933         */
1934        flush_smp_call_function_from_idle();
1935
1936        raw_spin_lock(&p->pi_lock);
1937        rq_lock(rq, &rf);
1938
1939        /*
1940         * If we were passed a pending, then ->stop_pending was set, thus
1941         * p->migration_pending must have remained stable.
1942         */
1943        WARN_ON_ONCE(pending && pending != p->migration_pending);
1944
1945        /*
1946         * If task_rq(p) != rq, it cannot be migrated here, because we're
1947         * holding rq->lock; and if p->on_rq == 0 it cannot get enqueued because
1948         * we're holding p->pi_lock.
1949         */
1950        if (task_rq(p) == rq) {
1951                if (is_migration_disabled(p))
1952                        goto out;
1953
1954                if (pending) {
1955                        p->migration_pending = NULL;
1956                        complete = true;
1957                }
1958
1959                if (dest_cpu < 0) {
1960                        if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
1961                                goto out;
1962
1963                        dest_cpu = cpumask_any_distribute(&p->cpus_mask);
1964                }
1965
1966                if (task_on_rq_queued(p))
1967                        rq = __migrate_task(rq, &rf, p, dest_cpu);
1968                else
1969                        p->wake_cpu = dest_cpu;
1970
1971                /*
1972                 * XXX __migrate_task() can fail, at which point we might end
1973                 * up running on a dodgy CPU, AFAICT this can only happen
1974                 * during CPU hotplug, at which point we'll get pushed out
1975                 * anyway, so it's probably not a big deal.
1976                 */
1977
1978        } else if (pending) {
1979                /*
1980                 * This happens when we get migrated between migrate_enable()'s
1981                 * preempt_enable() and scheduling the stopper task. At that
1982                 * point we're a regular task again and not current anymore.
1983                 *
1984                 * A !PREEMPT kernel has a giant hole here, which makes it far
1985                 * more likely.
1986                 */
1987
1988                /*
1989                 * The task moved before the stopper got to run. We're holding
1990                 * ->pi_lock, so the allowed mask is stable - if it got
1991                 * somewhere allowed, we're done.
1992                 */
1993                if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
1994                        p->migration_pending = NULL;
1995                        complete = true;
1996                        goto out;
1997                }
1998
1999                /*
2000                 * When migrate_enable() hits an rq mismatch we can't reliably
2001                 * determine is_migration_disabled() and so have to chase after
2002                 * it.
2003                 */
2004                WARN_ON_ONCE(!pending->stop_pending);
2005                task_rq_unlock(rq, p, &rf);
2006                stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2007                                    &pending->arg, &pending->stop_work);
2008                return 0;
2009        }
2010out:
2011        if (pending)
2012                pending->stop_pending = false;
2013        task_rq_unlock(rq, p, &rf);
2014
2015        if (complete)
2016                complete_all(&pending->done);
2017
2018        return 0;
2019}
2020
2021int push_cpu_stop(void *arg)
2022{
2023        struct rq *lowest_rq = NULL, *rq = this_rq();
2024        struct task_struct *p = arg;
2025
2026        raw_spin_lock_irq(&p->pi_lock);
2027        raw_spin_lock(&rq->lock);
2028
2029        if (task_rq(p) != rq)
2030                goto out_unlock;
2031
2032        if (is_migration_disabled(p)) {
2033                p->migration_flags |= MDF_PUSH;
2034                goto out_unlock;
2035        }
2036
2037        p->migration_flags &= ~MDF_PUSH;
2038
2039        if (p->sched_class->find_lock_rq)
2040                lowest_rq = p->sched_class->find_lock_rq(p, rq);
2041
2042        if (!lowest_rq)
2043                goto out_unlock;
2044
2045        // XXX validate p is still the highest prio task
2046        if (task_rq(p) == rq) {
2047                deactivate_task(rq, p, 0);
2048                set_task_cpu(p, lowest_rq->cpu);
2049                activate_task(lowest_rq, p, 0);
2050                resched_curr(lowest_rq);
2051        }
2052
2053        double_unlock_balance(rq, lowest_rq);
2054
2055out_unlock:
2056        rq->push_busy = false;
2057        raw_spin_unlock(&rq->lock);
2058        raw_spin_unlock_irq(&p->pi_lock);
2059
2060        put_task_struct(p);
2061        return 0;
2062}
2063
2064/*
2065 * sched_class::set_cpus_allowed must do the below, but is not required to
2066 * actually call this function.
2067 */
2068void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
2069{
2070        if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2071                p->cpus_ptr = new_mask;
2072                return;
2073        }
2074
2075        cpumask_copy(&p->cpus_mask, new_mask);
2076        p->nr_cpus_allowed = cpumask_weight(new_mask);
2077}
2078
2079static void
2080__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
2081{
2082        struct rq *rq = task_rq(p);
2083        bool queued, running;
2084
2085        /*
2086         * This here violates the locking rules for affinity, since we're only
2087         * supposed to change these variables while holding both rq->lock and
2088         * p->pi_lock.
2089         *
2090         * HOWEVER, it magically works, because ttwu() is the only code that
2091         * accesses these variables under p->pi_lock and only does so after
2092         * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2093         * before finish_task().
2094         *
2095         * XXX do further audits, this smells like something putrid.
2096         */
2097        if (flags & SCA_MIGRATE_DISABLE)
2098                SCHED_WARN_ON(!p->on_cpu);
2099        else
2100                lockdep_assert_held(&p->pi_lock);
2101
2102        queued = task_on_rq_queued(p);
2103        running = task_current(rq, p);
2104
2105        if (queued) {
2106                /*
2107                 * Because __kthread_bind() calls this on blocked tasks without
2108                 * holding rq->lock.
2109                 */
2110                lockdep_assert_held(&rq->lock);
2111                dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2112        }
2113        if (running)
2114                put_prev_task(rq, p);
2115
2116        p->sched_class->set_cpus_allowed(p, new_mask, flags);
2117
2118        if (queued)
2119                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2120        if (running)
2121                set_next_task(rq, p);
2122}
2123
2124void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2125{
2126        __do_set_cpus_allowed(p, new_mask, 0);
2127}
2128
2129/*
2130 * This function is wildly self-concurrent; here be dragons.
2131 *
2132 *
2133 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2134 * designated task is enqueued on an allowed CPU. If that task is currently
2135 * running, we have to kick it out using the CPU stopper.
2136 *
2137 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2138 * Consider:
2139 *
2140 *     Initial conditions: P0->cpus_mask = [0, 1]
2141 *
2142 *     P0@CPU0                  P1
2143 *
2144 *     migrate_disable();
2145 *     <preempted>
2146 *                              set_cpus_allowed_ptr(P0, [1]);
2147 *
2148 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2149 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2150 * This means we need the following scheme:
2151 *
2152 *     P0@CPU0                  P1
2153 *
2154 *     migrate_disable();
2155 *     <preempted>
2156 *                              set_cpus_allowed_ptr(P0, [1]);
2157 *                                <blocks>
2158 *     <resumes>
2159 *     migrate_enable();
2160 *       __set_cpus_allowed_ptr();
2161 *       <wakes local stopper>
2162 *                         `--> <woken on migration completion>
2163 *
2164 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2165 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2166 * task p are serialized by p->pi_lock, which we can leverage: the one that
2167 * should come into effect at the end of the Migrate-Disable region is the last
2168 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2169 * but we still need to properly signal those waiting tasks at the appropriate
2170 * moment.
2171 *
2172 * This is implemented using struct set_affinity_pending. The first
2173 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2174 * setup an instance of that struct and install it on the targeted task_struct.
2175 * Any and all further callers will reuse that instance. Those then wait for
2176 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2177 * at the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2178 *
2179 *
2180 * (1) In the cases covered above. There is one more where the completion is
2181 * signaled within affine_move_task() itself: when a subsequent affinity request
2182 * occurs after the stopper bailed out due to the targeted task still being
2183 * Migrate-Disabled. Consider:
2184 *
2185 *     Initial conditions: P0->cpus_mask = [0, 1]
2186 *
2187 *     CPU0               P1                            P2
2188 *     <P0>
2189 *       migrate_disable();
2190 *       <preempted>
2191 *                        set_cpus_allowed_ptr(P0, [1]);
2192 *                          <blocks>
2193 *     <migration/0>
2194 *       migration_cpu_stop()
2195 *         is_migration_disabled()
2196 *           <bails>
2197 *                                                       set_cpus_allowed_ptr(P0, [0, 1]);
2198 *                                                         <signal completion>
2199 *                          <awakes>
2200 *
2201 * Note that the above is safe vs a concurrent migrate_enable(), as any
2202 * pending affinity completion is preceded by an uninstallation of
2203 * p->migration_pending done with p->pi_lock held.
2204 */
2205static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2206                            int dest_cpu, unsigned int flags)
2207{
2208        struct set_affinity_pending my_pending = { }, *pending = NULL;
2209        bool stop_pending, complete = false;
2210
2211        /* Can the task run on the task's current CPU? If so, we're done */
2212        if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2213                struct task_struct *push_task = NULL;
2214
2215                if ((flags & SCA_MIGRATE_ENABLE) &&
2216                    (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2217                        rq->push_busy = true;
2218                        push_task = get_task_struct(p);
2219                }
2220
2221                /*
2222                 * If there are pending waiters, but no pending stop_work,
2223                 * then complete now.
2224                 */
2225                pending = p->migration_pending;
2226                if (pending && !pending->stop_pending) {
2227                        p->migration_pending = NULL;
2228                        complete = true;
2229                }
2230
2231                task_rq_unlock(rq, p, rf);
2232
2233                if (push_task) {
2234                        stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2235                                            p, &rq->push_work);
2236                }
2237
2238                if (complete)
2239                        complete_all(&pending->done);
2240
2241                return 0;
2242        }
2243
2244        if (!(flags & SCA_MIGRATE_ENABLE)) {
2245                /* serialized by p->pi_lock */
2246                if (!p->migration_pending) {
2247                        /* Install the request */
2248                        refcount_set(&my_pending.refs, 1);
2249                        init_completion(&my_pending.done);
2250                        my_pending.arg = (struct migration_arg) {
2251                                .task = p,
2252                                .dest_cpu = -1,         /* any */
2253                                .pending = &my_pending,
2254                        };
2255
2256                        p->migration_pending = &my_pending;
2257                } else {
2258                        pending = p->migration_pending;
2259                        refcount_inc(&pending->refs);
2260                }
2261        }
2262        pending = p->migration_pending;
2263        /*
2264         * - !MIGRATE_ENABLE:
2265         *   we'll have installed a pending if there wasn't one already.
2266         *
2267         * - MIGRATE_ENABLE:
2268         *   we're here because the current CPU isn't matching anymore,
2269         *   the only way that can happen is because of a concurrent
2270         *   set_cpus_allowed_ptr() call, which should then still be
2271         *   pending completion.
2272         *
2273         * Either way, we really should have a @pending here.
2274         */
2275        if (WARN_ON_ONCE(!pending)) {
2276                task_rq_unlock(rq, p, rf);
2277                return -EINVAL;
2278        }
2279
2280        if (task_running(rq, p) || p->state == TASK_WAKING) {
2281                /*
2282                 * MIGRATE_ENABLE gets here because 'p == current', but for
2283                 * anything else we cannot reliably evaluate is_migration_disabled();
2284                 * punt and have the stopper function handle it all race-free.
2285                 */
2286                stop_pending = pending->stop_pending;
2287                if (!stop_pending)
2288                        pending->stop_pending = true;
2289
2290                if (flags & SCA_MIGRATE_ENABLE)
2291                        p->migration_flags &= ~MDF_PUSH;
2292
2293                task_rq_unlock(rq, p, rf);
2294
2295                if (!stop_pending) {
2296                        stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
2297                                            &pending->arg, &pending->stop_work);
2298                }
2299
2300                if (flags & SCA_MIGRATE_ENABLE)
2301                        return 0;
2302        } else {
2303
2304                if (!is_migration_disabled(p)) {
2305                        if (task_on_rq_queued(p))
2306                                rq = move_queued_task(rq, rf, p, dest_cpu);
2307
2308                        if (!pending->stop_pending) {
2309                                p->migration_pending = NULL;
2310                                complete = true;
2311                        }
2312                }
2313                task_rq_unlock(rq, p, rf);
2314
2315                if (complete)
2316                        complete_all(&pending->done);
2317        }
2318
2319        wait_for_completion(&pending->done);
2320
2321        if (refcount_dec_and_test(&pending->refs))
2322                wake_up_var(&pending->refs); /* No UaF, just an address */
2323
2324        /*
2325         * Block the original owner of &pending until all subsequent callers
2326         * have seen the completion and decremented the refcount
2327         */
2328        wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
2329
2330        /* ARGH */
2331        WARN_ON_ONCE(my_pending.stop_pending);
2332
2333        return 0;
2334}
2335
2336/*
2337 * Change a given task's CPU affinity. Migrate the thread to a
2338 * proper CPU and schedule it away if the CPU it's executing on
2339 * is removed from the allowed bitmask.
2340 *
2341 * NOTE: the caller must have a valid reference to the task, the
2342 * task must not exit() & deallocate itself prematurely. The
2343 * call is not atomic; no spinlocks may be held.
2344 */
2345static int __set_cpus_allowed_ptr(struct task_struct *p,
2346                                  const struct cpumask *new_mask,
2347                                  u32 flags)
2348{
2349        const struct cpumask *cpu_valid_mask = cpu_active_mask;
2350        unsigned int dest_cpu;
2351        struct rq_flags rf;
2352        struct rq *rq;
2353        int ret = 0;
2354
2355        rq = task_rq_lock(p, &rf);
2356        update_rq_clock(rq);
2357
2358        if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
2359                /*
2360                 * Kernel threads are allowed on online && !active CPUs,
2361                 * however, during cpu-hot-unplug, even these might get pushed
2362                 * away if not KTHREAD_IS_PER_CPU.
2363                 *
2364                 * Specifically, migration_disabled() tasks must not fail the
2365                 * cpumask_any_and_distribute() pick below, esp. so on
2366                 * SCA_MIGRATE_ENABLE, otherwise we'll not call
2367                 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
2368                 */
2369                cpu_valid_mask = cpu_online_mask;
2370        }
2371
2372        /*
2373         * Must re-check here, to close a race against __kthread_bind(),
2374         * sched_setaffinity() is not guaranteed to observe the flag.
2375         */
2376        if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
2377                ret = -EINVAL;
2378                goto out;
2379        }
2380
2381        if (!(flags & SCA_MIGRATE_ENABLE)) {
2382                if (cpumask_equal(&p->cpus_mask, new_mask))
2383                        goto out;
2384
2385                if (WARN_ON_ONCE(p == current &&
2386                                 is_migration_disabled(p) &&
2387                                 !cpumask_test_cpu(task_cpu(p), new_mask))) {
2388                        ret = -EBUSY;
2389                        goto out;
2390                }
2391        }
2392
2393        /*
2394         * Picking a ~random CPU helps in cases where we are changing affinity
2395         * for groups of tasks (i.e. cpuset), so that load balancing is not
2396         * immediately required to distribute the tasks within their new mask.
2397         */
2398        dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
2399        if (dest_cpu >= nr_cpu_ids) {
2400                ret = -EINVAL;
2401                goto out;
2402        }
2403
2404        __do_set_cpus_allowed(p, new_mask, flags);
2405
2406        return affine_move_task(rq, p, &rf, dest_cpu, flags);
2407
2408out:
2409        task_rq_unlock(rq, p, &rf);
2410
2411        return ret;
2412}
2413
2414int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
2415{
2416        return __set_cpus_allowed_ptr(p, new_mask, 0);
2417}
2418EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
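
/*
 * Illustrative sketch (hypothetical caller): pinning a freshly created
 * kthread to one CPU before first waking it:
 *
 *   struct task_struct *tsk = kthread_create(my_thread_fn, NULL, "my_helper");
 *
 *   if (!IS_ERR(tsk)) {
 *           set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
 *           wake_up_process(tsk);
 *   }
 *
 * kthread_bind() is the conventional helper for per-CPU kthreads; the point
 * here is only the set_cpus_allowed_ptr() calling pattern.
 */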
2419
2420void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2421{
2422#ifdef CONFIG_SCHED_DEBUG
2423        /*
2424         * We should never call set_task_cpu() on a blocked task,
2425         * ttwu() will sort out the placement.
2426         */
2427        WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2428                        !p->on_rq);
2429
2430        /*
2431         * A migrating fair-class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
2432         * because schedstat_wait_{start,end} rebase migrating task's wait_start
2433         * time relying on p->on_rq.
2434         */
2435        WARN_ON_ONCE(p->state == TASK_RUNNING &&
2436                     p->sched_class == &fair_sched_class &&
2437                     (p->on_rq && !task_on_rq_migrating(p)));
2438
2439#ifdef CONFIG_LOCKDEP
2440        /*
2441         * The caller should hold either p->pi_lock or rq->lock, when changing
2442         * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
2443         *
2444         * sched_move_task() holds both and thus holding either pins the cgroup,
2445         * see task_group().
2446         *
2447         * Furthermore, all task_rq users should acquire both locks, see
2448         * task_rq_lock().
2449         */
2450        WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2451                                      lockdep_is_held(&task_rq(p)->lock)));
2452#endif
2453        /*
2454         * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
2455         */
2456        WARN_ON_ONCE(!cpu_online(new_cpu));
2457
2458        WARN_ON_ONCE(is_migration_disabled(p));
2459#endif
2460
2461        trace_sched_migrate_task(p, new_cpu);
2462
2463        if (task_cpu(p) != new_cpu) {
2464                if (p->sched_class->migrate_task_rq)
2465                        p->sched_class->migrate_task_rq(p, new_cpu);
2466                p->se.nr_migrations++;
2467                rseq_migrate(p);
2468                perf_event_task_migrate(p);
2469        }
2470
2471        __set_task_cpu(p, new_cpu);
2472}
2473
2474#ifdef CONFIG_NUMA_BALANCING
2475static void __migrate_swap_task(struct task_struct *p, int cpu)
2476{
2477        if (task_on_rq_queued(p)) {
2478                struct rq *src_rq, *dst_rq;
2479                struct rq_flags srf, drf;
2480
2481                src_rq = task_rq(p);
2482                dst_rq = cpu_rq(cpu);
2483
2484                rq_pin_lock(src_rq, &srf);
2485                rq_pin_lock(dst_rq, &drf);
2486
2487                deactivate_task(src_rq, p, 0);
2488                set_task_cpu(p, cpu);
2489                activate_task(dst_rq, p, 0);
2490                check_preempt_curr(dst_rq, p, 0);
2491
2492                rq_unpin_lock(dst_rq, &drf);
2493                rq_unpin_lock(src_rq, &srf);
2494
2495        } else {
2496                /*
2497                 * Task isn't running anymore; make it appear like we migrated
2498                 * it before it went to sleep. This means on wakeup we make the
2499                 * previous CPU our target instead of where it really is.
2500                 */
2501                p->wake_cpu = cpu;
2502        }
2503}
2504
2505struct migration_swap_arg {
2506        struct task_struct *src_task, *dst_task;
2507        int src_cpu, dst_cpu;
2508};
2509
2510static int migrate_swap_stop(void *data)
2511{
2512        struct migration_swap_arg *arg = data;
2513        struct rq *src_rq, *dst_rq;
2514        int ret = -EAGAIN;
2515
2516        if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
2517                return -EAGAIN;
2518
2519        src_rq = cpu_rq(arg->src_cpu);
2520        dst_rq = cpu_rq(arg->dst_cpu);
2521
2522        double_raw_lock(&arg->src_task->pi_lock,
2523                        &arg->dst_task->pi_lock);
2524        double_rq_lock(src_rq, dst_rq);
2525
2526        if (task_cpu(arg->dst_task) != arg->dst_cpu)
2527                goto unlock;
2528
2529        if (task_cpu(arg->src_task) != arg->src_cpu)
2530                goto unlock;
2531
2532        if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
2533                goto unlock;
2534
2535        if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
2536                goto unlock;
2537
2538        __migrate_swap_task(arg->src_task, arg->dst_cpu);
2539        __migrate_swap_task(arg->dst_task, arg->src_cpu);
2540
2541        ret = 0;
2542
2543unlock:
2544        double_rq_unlock(src_rq, dst_rq);
2545        raw_spin_unlock(&arg->dst_task->pi_lock);
2546        raw_spin_unlock(&arg->src_task->pi_lock);
2547
2548        return ret;
2549}
2550
2551/*
2552 * Cross-migrate two tasks.
2553 */
2554int migrate_swap(struct task_struct *cur, struct task_struct *p,
2555                int target_cpu, int curr_cpu)
2556{
2557        struct migration_swap_arg arg;
2558        int ret = -EINVAL;
2559
2560        arg = (struct migration_swap_arg){
2561                .src_task = cur,
2562                .src_cpu = curr_cpu,
2563                .dst_task = p,
2564                .dst_cpu = target_cpu,
2565        };
2566
2567        if (arg.src_cpu == arg.dst_cpu)
2568                goto out;
2569
2570        /*
2571         * These three tests are all lockless; this is OK since all of them
2572         * will be re-checked with proper locks held further down the line.
2573         */
2574        if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
2575                goto out;
2576
2577        if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
2578                goto out;
2579
2580        if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
2581                goto out;
2582
2583        trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
2584        ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
2585
2586out:
2587        return ret;
2588}
2589#endif /* CONFIG_NUMA_BALANCING */
2590
2591/*
2592 * wait_task_inactive - wait for a thread to unschedule.
2593 *
2594 * If @match_state is nonzero, it's the @p->state value just checked and
2595 * not expected to change.  If it changes, i.e. @p might have woken up,
2596 * then return zero.  When we succeed in waiting for @p to be off its CPU,
2597 * we return a positive number (its total switch count).  If a second call
2598 * a short while later returns the same number, the caller can be sure that
2599 * @p has remained unscheduled the whole time.
2600 *
2601 * The caller must ensure that the task *will* unschedule sometime soon,
2602 * else this function might spin for a *long* time. This function can't
2603 * be called with interrupts off, or it may introduce deadlock with
2604 * smp_call_function() if an IPI is sent by the same process we are
2605 * waiting to become inactive.
2606 */
2607unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2608{
2609        int running, queued;
2610        struct rq_flags rf;
2611        unsigned long ncsw;
2612        struct rq *rq;
2613
2614        for (;;) {
2615                /*
2616                 * We do the initial early heuristics without holding
2617                 * any task-queue locks at all. We'll only try to get
2618                 * the runqueue lock when things look like they will
2619                 * work out!
2620                 */
2621                rq = task_rq(p);
2622
2623                /*
2624                 * If the task is actively running on another CPU
2625                 * still, just relax and busy-wait without holding
2626                 * any locks.
2627                 *
2628                 * NOTE! Since we don't hold any locks, we can't even
2629                 * be sure that "rq" stays as the right runqueue!
2630                 * But we don't care, since "task_running()" will
2631                 * return false if the runqueue has changed and p
2632                 * is actually now running somewhere else!
2633                 */
2634                while (task_running(rq, p)) {
2635                        if (match_state && unlikely(p->state != match_state))
2636                                return 0;
2637                        cpu_relax();
2638                }
2639
2640                /*
2641                 * Ok, time to look more closely! We need the rq
2642                 * lock now, to be *sure*. If we're wrong, we'll
2643                 * just go back and repeat.
2644                 */
2645                rq = task_rq_lock(p, &rf);
2646                trace_sched_wait_task(p);
2647                running = task_running(rq, p);
2648                queued = task_on_rq_queued(p);
2649                ncsw = 0;
2650                if (!match_state || p->state == match_state)
2651                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2652                task_rq_unlock(rq, p, &rf);
2653
2654                /*
2655                 * If it changed from the expected state, bail out now.
2656                 */
2657                if (unlikely(!ncsw))
2658                        break;
2659
2660                /*
2661                 * Was it really running after all now that we
2662                 * checked with the proper locks actually held?
2663                 *
2664                 * Oops. Go back and try again..
2665                 */
2666                if (unlikely(running)) {
2667                        cpu_relax();
2668                        continue;
2669                }
2670
2671                /*
2672                 * It's not enough that it's not actively running,
2673                 * it must be off the runqueue _entirely_, and not
2674                 * preempted!
2675                 *
2676                 * So if it was still runnable (but just not actively
2677                 * running right now), it's preempted, and we should
2678                 * yield - it could be a while.
2679                 */
2680                if (unlikely(queued)) {
2681                        ktime_t to = NSEC_PER_SEC / HZ;
2682
2683                        set_current_state(TASK_UNINTERRUPTIBLE);
2684                        schedule_hrtimeout(&to, HRTIMER_MODE_REL);
2685                        continue;
2686                }
2687
2688                /*
2689                 * Ahh, all good. It wasn't running, and it wasn't
2690                 * runnable, which means that it will never become
2691                 * running in the future either. We're all done!
2692                 */
2693                break;
2694        }
2695
2696        return ncsw;
2697}
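
/*
 * Illustrative sketch of the double-call pattern described above
 * (hypothetical caller, expecting @p to stop in TASK_TRACED):
 *
 *   unsigned long ncsw = wait_task_inactive(p, TASK_TRACED);
 *
 *   ...                   // do other work, then re-check later:
 *
 *   if (ncsw && wait_task_inactive(p, TASK_TRACED) == ncsw) {
 *           // @p has remained unscheduled between the two calls
 *   }
 */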
2698
2699/**
2700 * kick_process - kick a running thread to enter/exit the kernel
2701 * @p: the to-be-kicked thread
2702 *
2703 * Cause a process which is running on another CPU to enter
2704 * kernel mode, without any delay (e.g. to get signals handled).
2705 *
2706 * NOTE: this function doesn't have to take the runqueue lock,
2707 * because all it wants to ensure is that the remote task enters
2708 * the kernel. If the IPI races and the task has been migrated
2709 * to another CPU then no harm is done and the purpose has been
2710 * achieved as well.
2711 */
2712void kick_process(struct task_struct *p)
2713{
2714        int cpu;
2715
2716        preempt_disable();
2717        cpu = task_cpu(p);
2718        if ((cpu != smp_processor_id()) && task_curr(p))
2719                smp_send_reschedule(cpu);
2720        preempt_enable();
2721}
2722EXPORT_SYMBOL_GPL(kick_process);
2723
2724/*
2725 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
2726 *
2727 * A few notes on cpu_active vs cpu_online:
2728 *
2729 *  - cpu_active must be a subset of cpu_online
2730 *
2731 *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
2732 *    see __set_cpus_allowed_ptr(). At this point the newly online
2733 *    CPU isn't yet part of the sched domains, and balancing will not
2734 *    see it.
2735 *
2736 *  - on CPU-down we clear cpu_active() to mask the sched domains and
2737 *    prevent the load balancer from placing new tasks on the to-be-removed
2738 *    CPU. Existing tasks will remain running there and will be taken
2739 *    off.
2740 *
2741 * This means that fallback selection must not select !active CPUs,
2742 * and can assume that any active CPU must be online. Conversely,
2743 * select_task_rq() below may allow selection of !active CPUs in order
2744 * to satisfy the above rules.
2745 */
2746static int select_fallback_rq(int cpu, struct task_struct *p)
2747{
2748        int nid = cpu_to_node(cpu);
2749        const struct cpumask *nodemask = NULL;
2750        enum { cpuset, possible, fail } state = cpuset;
2751        int dest_cpu;
2752
2753        /*
2754         * If the node that the CPU is on has been offlined, cpu_to_node()
2755         * will return -1. There is no CPU on the node, so we should
2756         * select a CPU on another node.
2757         */
2758        if (nid != -1) {
2759                nodemask = cpumask_of_node(nid);
2760
2761                /* Look for allowed, online CPU in same node. */
2762                for_each_cpu(dest_cpu, nodemask) {
2763                        if (!cpu_active(dest_cpu))
2764                                continue;
2765                        if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
2766                                return dest_cpu;
2767                }
2768        }
2769
2770        for (;;) {
2771                /* Any allowed, online CPU? */
2772                for_each_cpu(dest_cpu, p->cpus_ptr) {
2773                        if (!is_cpu_allowed(p, dest_cpu))
2774                                continue;
2775
2776                        goto out;
2777                }
2778
2779                /* No more Mr. Nice Guy. */
2780                switch (state) {
2781                case cpuset:
2782                        if (IS_ENABLED(CONFIG_CPUSETS)) {
2783                                cpuset_cpus_allowed_fallback(p);
2784                                state = possible;
2785                                break;
2786                        }
2787                        fallthrough;
2788                case possible:
2789                        /*
2790                         * XXX When called from select_task_rq() we only
2791                         * hold p->pi_lock and again violate locking order.
2792                         *
2793                         * More yuck to audit.
2794                         */
2795                        do_set_cpus_allowed(p, cpu_possible_mask);
2796                        state = fail;
2797                        break;
2798
2799                case fail:
2800                        BUG();
2801                        break;
2802                }
2803        }
2804
2805out:
2806        if (state != cpuset) {
2807                /*
2808                 * Don't tell them about moving exiting tasks or
2809                 * kernel threads (both mm NULL), since they never
2810                 * leave the kernel.
2811                 */
2812                if (p->mm && printk_ratelimit()) {
2813                        printk_deferred("process %d (%s) no longer affine to cpu%d\n",
2814                                        task_pid_nr(p), p->comm, cpu);
2815                }
2816        }
2817
2818        return dest_cpu;
2819}
2820
2821/*
2822 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
2823 */
2824static inline
2825int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
2826{
2827        lockdep_assert_held(&p->pi_lock);
2828
2829        if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
2830                cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
2831        else
2832                cpu = cpumask_any(p->cpus_ptr);
2833
2834        /*
2835         * In order not to call set_task_cpu() on a blocking task we need
2836         * to rely on ttwu() to place the task on a valid ->cpus_ptr
2837         * CPU.
2838         *
2839         * Since this is common to all placement strategies, this lives here.
2840         *
2841         * [ this allows ->select_task_rq() to simply return task_cpu(p) and
2842         *   not worry about this generic constraint ]
2843         */
2844        if (unlikely(!is_cpu_allowed(p, cpu)))
2845                cpu = select_fallback_rq(task_cpu(p), p);
2846
2847        return cpu;
2848}
2849
2850void sched_set_stop_task(int cpu, struct task_struct *stop)
2851{
2852        static struct lock_class_key stop_pi_lock;
2853        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2854        struct task_struct *old_stop = cpu_rq(cpu)->stop;
2855
2856        if (stop) {
2857                /*
2858                 * Make it appear like a SCHED_FIFO task, it's something
2859                 * userspace knows about and won't get confused about.
2860                 *
2861                 * Also, it will make PI more or less work without too
2862                 * much confusion -- but then, stop work should not
2863                 * rely on PI working anyway.
2864                 */
2865                sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2866
2867                stop->sched_class = &stop_sched_class;
2868
2869                /*
2870                 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
2871                 * adjust the effective priority of a task. As a result,
2872                 * rt_mutex_setprio() can trigger (RT) balancing operations,
2873                 * which can then trigger wakeups of the stop thread to push
2874                 * around the current task.
2875                 *
2876                 * The stop task itself will never be part of the PI-chain, it
2877                 * never blocks, therefore that ->pi_lock recursion is safe.
2878                 * Tell lockdep about this by placing the stop->pi_lock in its
2879                 * own class.
2880                 */
2881                lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
2882        }
2883
2884        cpu_rq(cpu)->stop = stop;
2885
2886        if (old_stop) {
2887                /*
2888                 * Reset it back to a normal scheduling class so that
2889                 * it can die in pieces.
2890                 */
2891                old_stop->sched_class = &rt_sched_class;
2892        }
2893}
2894
2895#else /* CONFIG_SMP */
2896
2897static inline int __set_cpus_allowed_ptr(struct task_struct *p,
2898                                         const struct cpumask *new_mask,
2899                                         u32 flags)
2900{
2901        return set_cpus_allowed_ptr(p, new_mask);
2902}
2903
2904static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
2905
2906static inline bool rq_has_pinned_tasks(struct rq *rq)
2907{
2908        return false;
2909}
2910
2911#endif /* !CONFIG_SMP */
2912
2913static void
2914ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
2915{
2916        struct rq *rq;
2917
2918        if (!schedstat_enabled())
2919                return;
2920
2921        rq = this_rq();
2922
2923#ifdef CONFIG_SMP
2924        if (cpu == rq->cpu) {
2925                __schedstat_inc(rq->ttwu_local);
2926                __schedstat_inc(p->se.statistics.nr_wakeups_local);
2927        } else {
2928                struct sched_domain *sd;
2929
2930                __schedstat_inc(p->se.statistics.nr_wakeups_remote);
2931                rcu_read_lock();
2932                for_each_domain(rq->cpu, sd) {
2933                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2934                                __schedstat_inc(sd->ttwu_wake_remote);
2935                                break;
2936                        }
2937                }
2938                rcu_read_unlock();
2939        }
2940
2941        if (wake_flags & WF_MIGRATED)
2942                __schedstat_inc(p->se.statistics.nr_wakeups_migrate);
2943#endif /* CONFIG_SMP */
2944
2945        __schedstat_inc(rq->ttwu_count);
2946        __schedstat_inc(p->se.statistics.nr_wakeups);
2947
2948        if (wake_flags & WF_SYNC)
2949                __schedstat_inc(p->se.statistics.nr_wakeups_sync);
2950}
2951
2952/*
2953 * Mark the task runnable and perform wakeup-preemption.
2954 */
2955static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
2956                           struct rq_flags *rf)
2957{
2958        check_preempt_curr(rq, p, wake_flags);
2959        p->state = TASK_RUNNING;
2960        trace_sched_wakeup(p);
2961
2962#ifdef CONFIG_SMP
2963        if (p->sched_class->task_woken) {
2964                /*
2965                 * Our task @p is fully woken up and running, so it's safe to
2966                 * drop the rq->lock; hereafter rq is only used for statistics.
2967                 */
2968                rq_unpin_lock(rq, rf);
2969                p->sched_class->task_woken(rq, p);
2970                rq_repin_lock(rq, rf);
2971        }
2972
2973        if (rq->idle_stamp) {
2974                u64 delta = rq_clock(rq) - rq->idle_stamp;
2975                u64 max = 2*rq->max_idle_balance_cost;
2976
2977                update_avg(&rq->avg_idle, delta);
2978
2979                if (rq->avg_idle > max)
2980                        rq->avg_idle = max;
2981
2982                rq->idle_stamp = 0;
2983        }
2984#endif
2985}
2986
2987static void
2988ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
2989                 struct rq_flags *rf)
2990{
2991        int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
2992
2993        lockdep_assert_held(&rq->lock);
2994
2995        if (p->sched_contributes_to_load)
2996                rq->nr_uninterruptible--;
2997
2998#ifdef CONFIG_SMP
2999        if (wake_flags & WF_MIGRATED)
3000                en_flags |= ENQUEUE_MIGRATED;
3001        else
3002#endif
3003        if (p->in_iowait) {
3004                delayacct_blkio_end(p);
3005                atomic_dec(&task_rq(p)->nr_iowait);
3006        }
3007
3008        activate_task(rq, p, en_flags);
3009        ttwu_do_wakeup(rq, p, wake_flags, rf);
3010}
3011
3012/*
3013 * Consider @p being inside a wait loop:
3014 *
3015 *   for (;;) {
3016 *      set_current_state(TASK_UNINTERRUPTIBLE);
3017 *
3018 *      if (CONDITION)
3019 *         break;
3020 *
3021 *      schedule();
3022 *   }
3023 *   __set_current_state(TASK_RUNNING);
3024 *
3025 * between set_current_state() and schedule(). In this case @p is still
3026 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3027 * an atomic manner.
3028 *
3029 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3030 * then schedule() must still happen and p->state can be changed to
3031 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3032 * need to do a full wakeup with enqueue.
3033 *
3034 * Returns: %true when the wakeup is done,
3035 *          %false otherwise.
3036 */
3037static int ttwu_runnable(struct task_struct *p, int wake_flags)
3038{
3039        struct rq_flags rf;
3040        struct rq *rq;
3041        int ret = 0;
3042
3043        rq = __task_rq_lock(p, &rf);
3044        if (task_on_rq_queued(p)) {
3045                /* check_preempt_curr() may use rq clock */
3046                update_rq_clock(rq);
3047                ttwu_do_wakeup(rq, p, wake_flags, &rf);
3048                ret = 1;
3049        }
3050        __task_rq_unlock(rq, &rf);
3051
3052        return ret;
3053}
3054
3055#ifdef CONFIG_SMP
3056void sched_ttwu_pending(void *arg)
3057{
3058        struct llist_node *llist = arg;
3059        struct rq *rq = this_rq();
3060        struct task_struct *p, *t;
3061        struct rq_flags rf;
3062
3063        if (!llist)
3064                return;
3065
3066        /*
3067         * rq::ttwu_pending is a racy indication of outstanding wakeups.
3068         * It can race such that false-negatives are possible, since they
3069         * are shorter-lived than false-positives would be.
3070         */
3071        WRITE_ONCE(rq->ttwu_pending, 0);
3072
3073        rq_lock_irqsave(rq, &rf);
3074        update_rq_clock(rq);
3075
3076        llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3077                if (WARN_ON_ONCE(p->on_cpu))
3078                        smp_cond_load_acquire(&p->on_cpu, !VAL);
3079
3080                if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3081                        set_task_cpu(p, cpu_of(rq));
3082
3083                ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3084        }
3085
3086        rq_unlock_irqrestore(rq, &rf);
3087}
3088
3089void send_call_function_single_ipi(int cpu)
3090{
3091        struct rq *rq = cpu_rq(cpu);
3092
3093        if (!set_nr_if_polling(rq->idle))
3094                arch_send_call_function_single_ipi(cpu);
3095        else
3096                trace_sched_wake_idle_without_ipi(cpu);
3097}
3098
3099/*
3100 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3101 * necessary. The wakee CPU, on receipt of the IPI, will queue the task
3102 * via sched_ttwu_pending() for activation so the wakee incurs the cost
3103 * of the wakeup instead of the waker.
3104 */
3105static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3106{
3107        struct rq *rq = cpu_rq(cpu);
3108
3109        p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3110
3111        WRITE_ONCE(rq->ttwu_pending, 1);
3112        __smp_call_single_queue(cpu, &p->wake_entry.llist);
3113}
3114
3115void wake_up_if_idle(int cpu)
3116{
3117        struct rq *rq = cpu_rq(cpu);
3118        struct rq_flags rf;
3119
3120        rcu_read_lock();
3121
3122        if (!is_idle_task(rcu_dereference(rq->curr)))
3123                goto out;
3124
3125        if (set_nr_if_polling(rq->idle)) {
3126                trace_sched_wake_idle_without_ipi(cpu);
3127        } else {
3128                rq_lock_irqsave(rq, &rf);
3129                if (is_idle_task(rq->curr))
3130                        smp_send_reschedule(cpu);
3131                /* Else CPU is not idle, do nothing here: */
3132                rq_unlock_irqrestore(rq, &rf);
3133        }
3134
3135out:
3136        rcu_read_unlock();
3137}
3138
3139bool cpus_share_cache(int this_cpu, int that_cpu)
3140{
3141        return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3142}
3143
3144static inline bool ttwu_queue_cond(int cpu, int wake_flags)
3145{
3146        /*
3147         * Do not complicate things with the async wake_list while the CPU is
3148         * in hotplug state.
3149         */
3150        if (!cpu_active(cpu))
3151                return false;
3152
3153        /*
3154         * If the CPU does not share cache, then queue the task on the
3155         * remote rq's wakelist to avoid accessing remote data.
3156         */
3157        if (!cpus_share_cache(smp_processor_id(), cpu))
3158                return true;
3159
3160        /*
3161         * If the task is descheduling and is the only running task on the
3162         * CPU, then use the wakelist to offload the task activation to
3163         * the soon-to-be-idle CPU as the current CPU is likely busy.
3164         * nr_running is checked to avoid unnecessary task stacking.
3165         */
3166        if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
3167                return true;
3168
3169        return false;
3170}
3171
3172static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3173{
3174        if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
3175                if (WARN_ON_ONCE(cpu == smp_processor_id()))
3176                        return false;
3177
3178                sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3179                __ttwu_queue_wakelist(p, cpu, wake_flags);
3180                return true;
3181        }
3182
3183        return false;
3184}
3185
3186#else /* !CONFIG_SMP */
3187
3188static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3189{
3190        return false;
3191}
3192
3193#endif /* CONFIG_SMP */
3194
3195static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3196{
3197        struct rq *rq = cpu_rq(cpu);
3198        struct rq_flags rf;
3199
3200        if (ttwu_queue_wakelist(p, cpu, wake_flags))
3201                return;
3202
3203        rq_lock(rq, &rf);
3204        update_rq_clock(rq);
3205        ttwu_do_activate(rq, p, wake_flags, &rf);
3206        rq_unlock(rq, &rf);
3207}
3208
3209/*
3210 * Notes on Program-Order guarantees on SMP systems.
3211 *
3212 *  MIGRATION
3213 *
3214 * The basic program-order guarantee on SMP systems is that when a task [t]
3215 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
3216 * execution on its new CPU [c1].
3217 *
3218 * For migration (of runnable tasks) this is provided by the following means:
3219 *
3220 *  A) UNLOCK of the rq(c0)->lock scheduling out task t
3221 *  B) migration for t is required to synchronize *both* rq(c0)->lock and
3222 *     rq(c1)->lock (if not at the same time, then in that order).
3223 *  C) LOCK of the rq(c1)->lock scheduling in task
3224 *
3225 * Release/acquire chaining guarantees that B happens after A and C after B.
3226 * Note: the CPU doing B need not be c0 or c1
3227 *
3228 * Example:
3229 *
3230 *   CPU0            CPU1            CPU2
3231 *
3232 *   LOCK rq(0)->lock
3233 *   sched-out X
3234 *   sched-in Y
3235 *   UNLOCK rq(0)->lock
3236 *
3237 *                                   LOCK rq(0)->lock // orders against CPU0
3238 *                                   dequeue X
3239 *                                   UNLOCK rq(0)->lock
3240 *
3241 *                                   LOCK rq(1)->lock
3242 *                                   enqueue X
3243 *                                   UNLOCK rq(1)->lock
3244 *
3245 *                   LOCK rq(1)->lock // orders against CPU2
3246 *                   sched-out Z
3247 *                   sched-in X
3248 *                   UNLOCK rq(1)->lock
3249 *
3250 *
3251 *  BLOCKING -- aka. SLEEP + WAKEUP
3252 *
3253 * For blocking we (obviously) need to provide the same guarantee as for
3254 * migration. However the means are completely different as there is no lock
3255 * chain to provide order. Instead we do:
3256 *
3257 *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
3258 *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
3259 *
3260 * Example:
3261 *
3262 *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
3263 *
3264 *   LOCK rq(0)->lock LOCK X->pi_lock
3265 *   dequeue X
3266 *   sched-out X
3267 *   smp_store_release(X->on_cpu, 0);
3268 *
3269 *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
3270 *                    X->state = WAKING
3271 *                    set_task_cpu(X,2)
3272 *
3273 *                    LOCK rq(2)->lock
3274 *                    enqueue X
3275 *                    X->state = RUNNING
3276 *                    UNLOCK rq(2)->lock
3277 *
3278 *                                          LOCK rq(2)->lock // orders against CPU1
3279 *                                          sched-out Z
3280 *                                          sched-in X
3281 *                                          UNLOCK rq(2)->lock
3282 *
3283 *                    UNLOCK X->pi_lock
3284 *   UNLOCK rq(0)->lock
3285 *
3286 *
3287 * However, for wakeups there is a second guarantee we must provide, namely we
3288 * must ensure that CONDITION=1 done by the caller can not be reordered with
3289 * accesses to the task state; see try_to_wake_up() and set_current_state().
3290 */
3291
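/*
 * Illustrative sketch, not kernel code: the canonical sleeper/waker pairing
 * that the ordering rules above protect. The 'my_cond'/'my_wq' symbols are
 * hypothetical; wait_event() expands to the set_current_state()/schedule()
 * loop shown earlier, and wake_up() ends up in try_to_wake_up() below.
 */
static bool my_cond;
static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static void my_sleeper(void)
{
	/* Sleeps until my_cond becomes true. */
	wait_event(my_wq, READ_ONCE(my_cond));
}

static void my_waker(void)
{
	WRITE_ONCE(my_cond, true);	/* CONDITION = 1 ... */
	wake_up(&my_wq);		/* ... must not be reordered past the ->state check */
}
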
3292/**
3293 * try_to_wake_up - wake up a thread
3294 * @p: the thread to be awakened
3295 * @state: the mask of task states that can be woken
3296 * @wake_flags: wake modifier flags (WF_*)
3297 *
3298 * Conceptually does:
3299 *
3300 *   If (@state & @p->state) @p->state = TASK_RUNNING.
3301 *
3302 * If the task was not queued/runnable, also place it back on a runqueue.
3303 *
3304 * This function is atomic against schedule() which would dequeue the task.
3305 *
3306 * It issues a full memory barrier before accessing @p->state, see the comment
3307 * with set_current_state().
3308 *
3309 * Uses p->pi_lock to serialize against concurrent wake-ups.
3310 *
3311 * Relies on p->pi_lock stabilizing:
3312 *  - p->sched_class
3313 *  - p->cpus_ptr
3314 *  - p->sched_task_group
3315 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
3316 *
3317 * Tries really hard to only take one task_rq(p)->lock for performance.
3318 * Takes rq->lock in:
3319 *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
3320 *  - ttwu_queue()       -- new rq, for enqueue of the task;
3321 *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
3322 *
3323 * As a consequence we race really badly with just about everything. See the
3324 * many memory barriers and their comments for details.
3325 *
3326 * Return: %true if @p->state changes (an actual wakeup was done),
3327 *         %false otherwise.
3328 */
3329static int
3330try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
3331{
3332        unsigned long flags;
3333        int cpu, success = 0;
3334
3335        preempt_disable();
3336        if (p == current) {
3337                /*
3338                 * We're waking current, which means 'p->on_rq' and 'task_cpu(p)
3339                 * == smp_processor_id()'. Together this means we can special
3340                 * case the whole 'p->on_rq && ttwu_runnable()' case below
3341                 * without taking any locks.
3342                 *
3343                 * In particular:
3344                 *  - we rely on Program-Order guarantees for all the ordering,
3345                 *  - we're serialized against set_special_state() by virtue of
3346                 *    it disabling IRQs (this allows not taking ->pi_lock).
3347                 */
3348                if (!(p->state & state))
3349                        goto out;
3350
3351                success = 1;
3352                trace_sched_waking(p);
3353                p->state = TASK_RUNNING;
3354                trace_sched_wakeup(p);
3355                goto out;
3356        }
3357
3358        /*
3359         * If we are going to wake up a thread waiting for CONDITION we
3360         * need to ensure that CONDITION=1 done by the caller can not be
3361         * reordered with p->state check below. This pairs with smp_store_mb()
3362         * in set_current_state() that the waiting thread does.
3363         */
3364        raw_spin_lock_irqsave(&p->pi_lock, flags);
3365        smp_mb__after_spinlock();
3366        if (!(p->state & state))
3367                goto unlock;
3368
3369        trace_sched_waking(p);
3370
3371        /* We're going to change ->state: */
3372        success = 1;
3373
3374        /*
3375         * Ensure we load p->on_rq _after_ p->state, otherwise it would
3376         * be possible to, falsely, observe p->on_rq == 0 and get stuck
3377         * in smp_cond_load_acquire() below.
3378         *
3379         * sched_ttwu_pending()                 try_to_wake_up()
3380         *   STORE p->on_rq = 1                   LOAD p->state
3381         *   UNLOCK rq->lock
3382         *
3383         * __schedule() (switch to task 'p')
3384         *   LOCK rq->lock                        smp_rmb();
3385         *   smp_mb__after_spinlock();
3386         *   UNLOCK rq->lock
3387         *
3388         * [task p]
3389         *   STORE p->state = UNINTERRUPTIBLE     LOAD p->on_rq
3390         *
3391         * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
3392         * __schedule().  See the comment for smp_mb__after_spinlock().
3393         *
3394         * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
3395         */
3396        smp_rmb();
3397        if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
3398                goto unlock;
3399
3400#ifdef CONFIG_SMP
3401        /*
3402         * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
3403         * possible to, falsely, observe p->on_cpu == 0.
3404         *
3405         * One must be running (->on_cpu == 1) in order to remove oneself
3406         * from the runqueue.
3407         *
3408         * __schedule() (switch to task 'p')    try_to_wake_up()
3409         *   STORE p->on_cpu = 1                  LOAD p->on_rq
3410         *   UNLOCK rq->lock
3411         *
3412         * __schedule() (put 'p' to sleep)
3413         *   LOCK rq->lock                        smp_rmb();
3414         *   smp_mb__after_spinlock();
3415         *   STORE p->on_rq = 0                   LOAD p->on_cpu
3416         *
3417         * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
3418         * __schedule().  See the comment for smp_mb__after_spinlock().
3419         *
3420         * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
3421         * schedule()'s deactivate_task() has 'happened' and p will no longer
3422         * care about its own p->state. See the comment in __schedule().
3423         */
3424        smp_acquire__after_ctrl_dep();
3425
3426        /*
3427         * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
3428         * == 0), which means we need to do an enqueue, change p->state to
3429         * TASK_WAKING such that we can unlock p->pi_lock before doing the
3430         * enqueue, such as ttwu_queue_wakelist().
3431         */
3432        p->state = TASK_WAKING;
3433
3434        /*
3435         * If the owning (remote) CPU is still in the middle of schedule() with
3436         * this task as prev, consider queueing p on the remote CPU's wake_list,
3437         * which potentially sends an IPI instead of spinning on p->on_cpu to
3438         * let the waker make forward progress. This is safe because IRQs are
3439         * disabled and the IPI will deliver after on_cpu is cleared.
3440         *
3441         * Ensure we load task_cpu(p) after p->on_cpu:
3442         *
3443         * set_task_cpu(p, cpu);
3444         *   STORE p->cpu = @cpu
3445         * __schedule() (switch to task 'p')
3446         *   LOCK rq->lock
3447         *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
3448         *   STORE p->on_cpu = 1                LOAD p->cpu
3449         *
3450         * to ensure we observe the correct CPU on which the task is currently
3451         * scheduling.
3452         */
3453        if (smp_load_acquire(&p->on_cpu) &&
3454            ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
3455                goto unlock;
3456
3457        /*
3458         * If the owning (remote) CPU is still in the middle of schedule() with
3459         * this task as prev, wait until it's done referencing the task.
3460         *
3461         * Pairs with the smp_store_release() in finish_task().
3462         *
3463         * This ensures that tasks getting woken will be fully ordered against
3464         * their previous state and preserve Program Order.
3465         */
3466        smp_cond_load_acquire(&p->on_cpu, !VAL);
3467
3468        cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
3469        if (task_cpu(p) != cpu) {
3470                if (p->in_iowait) {
3471                        delayacct_blkio_end(p);
3472                        atomic_dec(&task_rq(p)->nr_iowait);
3473                }
3474
3475                wake_flags |= WF_MIGRATED;
3476                psi_ttwu_dequeue(p);
3477                set_task_cpu(p, cpu);
3478        }
3479#else
3480        cpu = task_cpu(p);
3481#endif /* CONFIG_SMP */
3482
3483        ttwu_queue(p, cpu, wake_flags);
3484unlock:
3485        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3486out:
3487        if (success)
3488                ttwu_stat(p, task_cpu(p), wake_flags);
3489        preempt_enable();
3490
3491        return success;
3492}
3493
3494/**
3495 * try_invoke_on_locked_down_task - Invoke a function on task in fixed state
3496 * @p: Process for which the function is to be invoked, can be @current.
3497 * @func: Function to invoke.
3498 * @arg: Argument to function.
3499 *
3500 * If the specified task can be quickly locked into a definite state
3501 * (either sleeping or on a given runqueue), arrange to keep it in that
3502 * state while invoking @func(@arg).  This function can use ->on_rq and
3503 * task_curr() to work out what the state is, if required.  Given that
3504 * @func can be invoked with a runqueue lock held, it had better be quite
3505 * lightweight.
3506 *
3507 * Returns:
3508 *      %false if the task slipped out from under the locks.
3509 *      %true if the task was locked onto a runqueue or is sleeping.
3510 *              However, @func can override this by returning %false.
3511 */
3512bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
3513{
3514        struct rq_flags rf;
3515        bool ret = false;
3516        struct rq *rq;
3517
3518        raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
3519        if (p->on_rq) {
3520                rq = __task_rq_lock(p, &rf);
3521                if (task_rq(p) == rq)
3522                        ret = func(p, arg);
3523                rq_unlock(rq, &rf);
3524        } else {
3525                switch (p->state) {
3526                case TASK_RUNNING:
3527                case TASK_WAKING:
3528                        break;
3529                default:
3530                        smp_rmb(); // See smp_rmb() comment in try_to_wake_up().
3531                        if (!p->on_rq)
3532                                ret = func(p, arg);
3533                }
3534        }
3535        raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
3536        return ret;
3537}
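
/*
 * Illustrative sketch, not kernel code: a hypothetical caller using
 * try_invoke_on_locked_down_task() to sample whether @p is queued while its
 * state is held stable. The 'my_*' symbols are not part of the kernel.
 */
static bool my_probe_queued(struct task_struct *t, void *arg)
{
	*(bool *)arg = task_on_rq_queued(t);
	return true;	/* keep the 'locked down' return value */
}

static bool my_task_queued_stable(struct task_struct *p)
{
	bool queued = false;

	try_invoke_on_locked_down_task(p, my_probe_queued, &queued);
	return queued;
}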
3538
3539/**
3540 * wake_up_process - Wake up a specific process
3541 * @p: The process to be woken up.
3542 *
3543 * Attempt to wake up the nominated process and move it to the set of runnable
3544 * processes.
3545 *
3546 * Return: 1 if the process was woken up, 0 if it was already running.
3547 *
3548 * This function executes a full memory barrier before accessing the task state.
3549 */
3550int wake_up_process(struct task_struct *p)
3551{
3552        return try_to_wake_up(p, TASK_NORMAL, 0);
3553}
3554EXPORT_SYMBOL(wake_up_process);
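
/*
 * Illustrative sketch, not kernel code: a typical wake_up_process() pairing,
 * a kthread that parks itself and is kicked from another context. The
 * 'my_*' symbols are hypothetical.
 */
static struct task_struct *my_worker;

static int my_worker_fn(void *unused)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();			/* sleep until kicked */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void my_kick_worker(void)
{
	wake_up_process(my_worker);	/* 1 if it changed ->state, 0 otherwise */
}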
3555
3556int wake_up_state(struct task_struct *p, unsigned int state)
3557{
3558        return try_to_wake_up(p, state, 0);
3559}
3560
3561/*
3562 * Perform scheduler related setup for a newly forked process p.
3563 * p is forked by current.
3564 *
3565 * __sched_fork() is basic setup used by init_idle() too:
3566 */
3567static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
3568{
3569        p->on_rq                        = 0;
3570
3571        p->se.on_rq                     = 0;
3572        p->se.exec_start                = 0;
3573        p->se.sum_exec_runtime          = 0;
3574        p->se.prev_sum_exec_runtime     = 0;
3575        p->se.nr_migrations             = 0;
3576        p->se.vruntime                  = 0;
3577        INIT_LIST_HEAD(&p->se.group_node);
3578
3579#ifdef CONFIG_FAIR_GROUP_SCHED
3580        p->se.cfs_rq                    = NULL;
3581#endif
3582
3583#ifdef CONFIG_SCHEDSTATS
3584        /* Even if schedstat is disabled, there should not be garbage */
3585        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
3586#endif
3587
3588        RB_CLEAR_NODE(&p->dl.rb_node);
3589        init_dl_task_timer(&p->dl);
3590        init_dl_inactive_task_timer(&p->dl);
3591        __dl_clear_params(p);
3592
3593        INIT_LIST_HEAD(&p->rt.run_list);
3594        p->rt.timeout           = 0;
3595        p->rt.time_slice        = sched_rr_timeslice;
3596        p->rt.on_rq             = 0;
3597        p->rt.on_list           = 0;
3598
3599#ifdef CONFIG_PREEMPT_NOTIFIERS
3600        INIT_HLIST_HEAD(&p->preempt_notifiers);
3601#endif
3602
3603#ifdef CONFIG_COMPACTION
3604        p->capture_control = NULL;
3605#endif
3606        init_numa_balancing(clone_flags, p);
3607#ifdef CONFIG_SMP
3608        p->wake_entry.u_flags = CSD_TYPE_TTWU;
3609        p->migration_pending = NULL;
3610#endif
3611}
3612
3613DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
3614
3615#ifdef CONFIG_NUMA_BALANCING
3616
3617void set_numabalancing_state(bool enabled)
3618{
3619        if (enabled)
3620                static_branch_enable(&sched_numa_balancing);
3621        else
3622                static_branch_disable(&sched_numa_balancing);
3623}
3624
3625#ifdef CONFIG_PROC_SYSCTL
3626int sysctl_numa_balancing(struct ctl_table *table, int write,
3627                          void *buffer, size_t *lenp, loff_t *ppos)
3628{
3629        struct ctl_table t;
3630        int err;
3631        int state = static_branch_likely(&sched_numa_balancing);
3632
3633        if (write && !capable(CAP_SYS_ADMIN))
3634                return -EPERM;
3635
3636        t = *table;
3637        t.data = &state;
3638        err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3639        if (err < 0)
3640                return err;
3641        if (write)
3642                set_numabalancing_state(state);
3643        return err;
3644}
3645#endif
3646#endif
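
/*
 * Illustrative usage note: with CONFIG_NUMA_BALANCING and CONFIG_PROC_SYSCTL,
 * the static key above is toggled from userspace via the sysctl, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/numa_balancing	(disable)
 *	echo 1 > /proc/sys/kernel/numa_balancing	(enable)
 */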
3647
3648#ifdef CONFIG_SCHEDSTATS
3649
3650DEFINE_STATIC_KEY_FALSE(sched_schedstats);
3651static bool __initdata __sched_schedstats = false;
3652
3653static void set_schedstats(bool enabled)
3654{
3655        if (enabled)
3656                static_branch_enable(&sched_schedstats);
3657        else
3658                static_branch_disable(&sched_schedstats);
3659}
3660
3661void force_schedstat_enabled(void)
3662{
3663        if (!schedstat_enabled()) {
3664                pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
3665                static_branch_enable(&sched_schedstats);
3666        }
3667}
3668
3669static int __init setup_schedstats(char *str)
3670{
3671        int ret = 0;
3672        if (!str)
3673                goto out;
3674
3675        /*
3676         * This code is called before jump labels have been set up, so we can't
3677         * change the static branch directly just yet.  Instead set a temporary
3678         * variable so init_schedstats() can do it later.
3679         */
3680        if (!strcmp(str, "enable")) {
3681                __sched_schedstats = true;
3682                ret = 1;
3683        } else if (!strcmp(str, "disable")) {
3684                __sched_schedstats = false;
3685                ret = 1;
3686        }
3687out:
3688        if (!ret)
3689                pr_warn("Unable to parse schedstats=\n");
3690
3691        return ret;
3692}
3693__setup("schedstats=", setup_schedstats);
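
/*
 * Illustrative usage note: schedstats can be enabled at boot with
 * "schedstats=enable" on the kernel command line (parsed above), or at run
 * time via the sysctl handler below, e.g.:
 *
 *	echo 1 > /proc/sys/kernel/sched_schedstats
 */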
3694
3695static void __init init_schedstats(void)
3696{
3697        set_schedstats(__sched_schedstats);
3698}
3699
3700#ifdef CONFIG_PROC_SYSCTL
3701int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
3702                size_t *lenp, loff_t *ppos)
3703{
3704        struct ctl_table t;
3705        int err;
3706        int state = static_branch_likely(&sched_schedstats);
3707
3708        if (write && !capable(CAP_SYS_ADMIN))
3709                return -EPERM;
3710
3711        t = *table;
3712        t.data = &state;
3713        err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3714        if (err < 0)
3715                return err;
3716        if (write)
3717                set_schedstats(state);
3718        return err;
3719}
3720#endif /* CONFIG_PROC_SYSCTL */
3721#else  /* !CONFIG_SCHEDSTATS */
3722static inline void init_schedstats(void) {}
3723#endif /* CONFIG_SCHEDSTATS */
3724
3725/*
3726 * fork()/clone()-time setup:
3727 */
3728int sched_fork(unsigned long clone_flags, struct task_struct *p)
3729{
3730        unsigned long flags;
3731
3732        __sched_fork(clone_flags, p);
3733        /*
3734         * We mark the process as NEW here. This guarantees that
3735         * nobody will actually run it, and a signal or other external
3736         * event cannot wake it up and insert it on the runqueue either.
3737         */
3738        p->state = TASK_NEW;
3739
3740        /*
3741         * Make sure we do not leak PI boosting priority to the child.
3742         */
3743        p->prio = current->normal_prio;
3744
3745        uclamp_fork(p);
3746
3747        /*
3748         * Revert to default priority/policy on fork if requested.
3749         */
3750        if (unlikely(p->sched_reset_on_fork)) {
3751                if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
3752                        p->policy = SCHED_NORMAL;
3753                        p->static_prio = NICE_TO_PRIO(0);
3754                        p->rt_priority = 0;
3755                } else if (PRIO_TO_NICE(p->static_prio) < 0)
3756                        p->static_prio = NICE_TO_PRIO(0);
3757
3758                p->prio = p->normal_prio = __normal_prio(p);
3759                set_load_weight(p, false);
3760
3761                /*
3762                 * We don't need the reset flag anymore after the fork. It has
3763                 * fulfilled its duty:
3764                 */
3765                p->sched_reset_on_fork = 0;
3766        }
3767
3768        if (dl_prio(p->prio))
3769                return -EAGAIN;
3770        else if (rt_prio(p->prio))
3771                p->sched_class = &rt_sched_class;
3772        else
3773                p->sched_class = &fair_sched_class;
3774
3775        init_entity_runnable_average(&p->se);
3776
3777        /*
3778         * The child is not yet in the pid-hash so no cgroup attach races,
3779         * and the cgroup is pinned to this child because cgroup_fork()
3780         * is run before sched_fork().
3781         *
3782         * Silence PROVE_RCU.
3783         */
3784        raw_spin_lock_irqsave(&p->pi_lock, flags);
3785        rseq_migrate(p);
3786        /*
3787         * We're setting the CPU for the first time, we don't migrate,
3788         * so use __set_task_cpu().
3789         */
3790        __set_task_cpu(p, smp_processor_id());
3791        if (p->sched_class->task_fork)
3792                p->sched_class->task_fork(p);
3793        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3794
3795#ifdef CONFIG_SCHED_INFO
3796        if (likely(sched_info_on()))
3797                memset(&p->sched_info, 0, sizeof(p->sched_info));
3798#endif
3799#if defined(CONFIG_SMP)
3800        p->on_cpu = 0;
3801#endif
3802        init_task_preempt_count(p);
3803#ifdef CONFIG_SMP
3804        plist_node_init(&p->pushable_tasks, MAX_PRIO);
3805        RB_CLEAR_NODE(&p->pushable_dl_tasks);
3806#endif
3807        return 0;
3808}
3809
3810void sched_post_fork(struct task_struct *p)
3811{
3812        uclamp_post_fork(p);
3813}
3814
3815unsigned long to_ratio(u64 period, u64 runtime)
3816{
3817        if (runtime == RUNTIME_INF)
3818                return BW_UNIT;
3819
3820        /*
3821         * Doing this here saves a lot of checks in all
3822         * the calling paths, and returning zero seems
3823         * safe for them anyway.
3824         */
3825        if (period == 0)
3826                return 0;
3827
3828        return div64_u64(runtime << BW_SHIFT, period);
3829}
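
/*
 * Worked example (assuming BW_SHIFT == 20, i.e. BW_UNIT == 1 << 20): the
 * default RT bandwidth of runtime = 950000us in a period = 1000000us maps to
 * to_ratio(1000000, 950000) == (950000 << 20) / 1000000 == 996147, i.e.
 * roughly 0.95 * BW_UNIT.
 */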
3830
3831/*
3832 * wake_up_new_task - wake up a newly created task for the first time.
3833 *
3834 * This function will do some initial scheduler statistics housekeeping
3835 * that must be done for every newly created context, then puts the task
3836 * on the runqueue and wakes it.
3837 */
3838void wake_up_new_task(struct task_struct *p)
3839{
3840        struct rq_flags rf;
3841        struct rq *rq;
3842
3843        raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
3844        p->state = TASK_RUNNING;
3845#ifdef CONFIG_SMP
3846        /*
3847         * Fork balancing, do it here and not earlier because:
3848         *  - cpus_ptr can change in the fork path
3849         *  - any previously selected CPU might disappear through hotplug
3850         *
3851         * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
3852         * as we're not fully set-up yet.
3853         */
3854        p->recent_used_cpu = task_cpu(p);
3855        rseq_migrate(p);
3856        __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
3857#endif
3858        rq = __task_rq_lock(p, &rf);
3859        update_rq_clock(rq);
3860        post_init_entity_util_avg(p);
3861
3862        activate_task(rq, p, ENQUEUE_NOCLOCK);
3863        trace_sched_wakeup_new(p);
3864        check_preempt_curr(rq, p, WF_FORK);
3865#ifdef CONFIG_SMP
3866        if (p->sched_class->task_woken) {
3867                /*
3868                 * Nothing relies on rq->lock after this, so it's fine to
3869                 * drop it.
3870                 */
3871                rq_unpin_lock(rq, &rf);
3872                p->sched_class->task_woken(rq, p);
3873                rq_repin_lock(rq, &rf);
3874        }
3875#endif
3876        task_rq_unlock(rq, p, &rf);
3877}
3878
3879#ifdef CONFIG_PREEMPT_NOTIFIERS
3880
3881static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
3882
3883void preempt_notifier_inc(void)
3884{
3885        static_branch_inc(&preempt_notifier_key);
3886}
3887EXPORT_SYMBOL_GPL(preempt_notifier_inc);
3888
3889void preempt_notifier_dec(void)
3890{
3891        static_branch_dec(&preempt_notifier_key);
3892}
3893EXPORT_SYMBOL_GPL(preempt_notifier_dec);
3894
3895/**
3896 * preempt_notifier_register - tell me when current is being preempted & rescheduled
3897 * @notifier: notifier struct to register
3898 */
3899void preempt_notifier_register(struct preempt_notifier *notifier)
3900{
3901        if (!static_branch_unlikely(&preempt_notifier_key))
3902                WARN(1, "registering preempt_notifier while notifiers disabled\n");
3903
3904        hlist_add_head(&notifier->link, &current->preempt_notifiers);
3905}
3906EXPORT_SYMBOL_GPL(preempt_notifier_register);
3907
3908/**
3909 * preempt_notifier_unregister - no longer interested in preemption notifications
3910 * @notifier: notifier struct to unregister
3911 *
3912 * This is *not* safe to call from within a preemption notifier.
3913 */
3914void preempt_notifier_unregister(struct preempt_notifier *notifier)
3915{
3916        hlist_del(&notifier->link);
3917}
3918EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
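
/*
 * Illustrative sketch, not kernel code: how a consumer (KVM is the main
 * in-tree user) might hook the current task. The 'my_*' symbols are
 * hypothetical; preempt_notifier_init() lives in <linux/preempt.h>.
 */
static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current is about to run (again) on @cpu */
}

static void my_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
	/* current is being scheduled out in favour of @next */
}

static struct preempt_notifier_ops my_pn_ops = {
	.sched_in	= my_sched_in,
	.sched_out	= my_sched_out,
};

static struct preempt_notifier my_pn;

static void my_hook_current(void)
{
	preempt_notifier_inc();			/* enable the static key */
	preempt_notifier_init(&my_pn, &my_pn_ops);
	preempt_notifier_register(&my_pn);	/* attaches to current */
}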
3919
3920static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
3921{
3922        struct preempt_notifier *notifier;
3923
3924        hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
3925                notifier->ops->sched_in(notifier, raw_smp_processor_id());
3926}
3927
3928static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
3929{
3930        if (static_branch_unlikely(&preempt_notifier_key))
3931                __fire_sched_in_preempt_notifiers(curr);
3932}
3933
3934static void
3935__fire_sched_out_preempt_notifiers(struct task_struct *curr,
3936                                   struct task_struct *next)
3937{
3938        struct preempt_notifier *notifier;
3939
3940        hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
3941                notifier->ops->sched_out(notifier, next);
3942}
3943
3944static __always_inline void
3945fire_sched_out_preempt_notifiers(struct task_struct *curr,
3946                                 struct task_struct *next)
3947{
3948        if (static_branch_unlikely(&preempt_notifier_key))
3949                __fire_sched_out_preempt_notifiers(curr, next);
3950}
3951
3952#else /* !CONFIG_PREEMPT_NOTIFIERS */
3953
3954static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
3955{
3956}
3957
3958static inline void
3959fire_sched_out_preempt_notifiers(struct task_struct *curr,
3960                                 struct task_struct *next)
3961{
3962}
3963
3964#endif /* CONFIG_PREEMPT_NOTIFIERS */
3965
3966static inline void prepare_task(struct task_struct *next)
3967{
3968#ifdef CONFIG_SMP
3969        /*
3970         * Claim the task as running, we do this before switching to it
3971         * such that any running task will have this set.
3972         *
3973         * See the ttwu() WF_ON_CPU case and its ordering comment.
3974         */
3975        WRITE_ONCE(next->on_cpu, 1);
3976#endif
3977}
3978
3979static inline void finish_task(struct task_struct *prev)
3980{
3981#ifdef CONFIG_SMP
3982        /*
3983         * This must be the very last reference to @prev from this CPU. After
3984         * p->on_cpu is cleared, the task can be moved to a different CPU. We
3985         * must ensure this doesn't happen until the switch is completely
3986         * finished.
3987         *
3988         * In particular, the load of prev->state in finish_task_switch() must
3989         * happen before this.
3990         *
3991         * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
3992         */
3993        smp_store_release(&prev->on_cpu, 0);
3994#endif
3995}
3996
3997#ifdef CONFIG_SMP
3998
3999static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
4000{
4001        void (*func)(struct rq *rq);
4002        struct callback_head *next;
4003
4004        lockdep_assert_held(&rq->lock);
4005
4006        while (head) {
4007                func = (void (*)(struct rq *))head->func;
4008                next = head->next;
4009                head->next = NULL;
4010                head = next;
4011
4012                func(rq);
4013        }
4014}
4015
4016static void balance_push(struct rq *rq);
4017
4018struct callback_head balance_push_callback = {
4019        .next = NULL,
4020        .func = (void (*)(struct callback_head *))balance_push,
4021};
4022
4023static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
4024{
4025        struct callback_head *head = rq->balance_callback;
4026
4027        lockdep_assert_held(&rq->lock);
4028        if (head)
4029                rq->balance_callback = NULL;
4030
4031        return head;
4032}
4033
4034static void __balance_callbacks(struct rq *rq)
4035{
4036        do_balance_callbacks(rq, splice_balance_callbacks(rq));
4037}
4038
4039static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
4040{
4041        unsigned long flags;
4042
4043        if (unlikely(head)) {
4044                raw_spin_lock_irqsave(&rq->lock, flags);
4045                do_balance_callbacks(rq, head);
4046                raw_spin_unlock_irqrestore(&rq->lock, flags);
4047        }
4048}
4049
4050#else
4051
4052static inline void __balance_callbacks(struct rq *rq)
4053{
4054}
4055
4056static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
4057{
4058        return NULL;
4059}
4060
4061static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
4062{
4063}
4064
4065#endif
4066
4067static inline void
4068prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
4069{
4070        /*
4071         * The runqueue lock will be released by the next
4072         * task (which is an invalid locking op but in the case
4073         * of the scheduler it's an obvious special-case), so we
4074         * do an early lockdep release here:
4075         */
4076        rq_unpin_lock(rq, rf);
4077        spin_release(&rq->lock.dep_map, _THIS_IP_);
4078#ifdef CONFIG_DEBUG_SPINLOCK
4079        /* this is a valid case when another task releases the spinlock */
4080        rq->lock.owner = next;
4081#endif
4082}
4083
4084static inline void finish_lock_switch(struct rq *rq)
4085{
4086        /*
4087         * If we are tracking spinlock dependencies then we have to
4088         * fix up the runqueue lock - which gets 'carried over' from
4089         * prev into current:
4090         */
4091        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
4092        __balance_callbacks(rq);
4093        raw_spin_unlock_irq(&rq->lock);
4094}
4095
4096/*
4097 * NOP if the arch has not defined these:
4098 */
4099
4100#ifndef prepare_arch_switch
4101# define prepare_arch_switch(next)      do { } while (0)
4102#endif
4103
4104#ifndef finish_arch_post_lock_switch
4105# define finish_arch_post_lock_switch() do { } while (0)
4106#endif
4107
4108static inline void kmap_local_sched_out(void)
4109{
4110#ifdef CONFIG_KMAP_LOCAL
4111        if (unlikely(current->kmap_ctrl.idx))
4112                __kmap_local_sched_out();
4113#endif
4114}
4115
4116static inline void kmap_local_sched_in(void)
4117{
4118#ifdef CONFIG_KMAP_LOCAL
4119        if (unlikely(current->kmap_ctrl.idx))
4120                __kmap_local_sched_in();
4121#endif
4122}
4123
4124/**
4125 * prepare_task_switch - prepare to switch tasks
4126 * @rq: the runqueue preparing to switch
4127 * @prev: the current task that is being switched out
4128 * @next: the task we are going to switch to.
4129 *
4130 * This is called with the rq lock held and interrupts off. It must
4131 * be paired with a subsequent finish_task_switch after the context
4132 * switch.
4133 *
4134 * prepare_task_switch sets up locking and calls architecture specific
4135 * hooks.
4136 */
4137static inline void
4138prepare_task_switch(struct rq *rq, struct task_struct *prev,
4139                    struct task_struct *next)
4140{
4141        kcov_prepare_switch(prev);
4142        sched_info_switch(rq, prev, next);
4143        perf_event_task_sched_out(prev, next);
4144        rseq_preempt(prev);
4145        fire_sched_out_preempt_notifiers(prev, next);
4146        kmap_local_sched_out();
4147        prepare_task(next);
4148        prepare_arch_switch(next);
4149}
4150
4151/**
4152 * finish_task_switch - clean up after a task-switch
4153 * @prev: the thread we just switched away from.
4154 *
4155 * finish_task_switch must be called after the context switch, paired
4156 * with a prepare_task_switch call before the context switch.
4157 * finish_task_switch will reconcile locking set up by prepare_task_switch,
4158 * and do any other architecture-specific cleanup actions.
4159 *
4160 * Note that we may have delayed dropping an mm in context_switch(). If
4161 * so, we finish that here outside of the runqueue lock. (Doing it
4162 * with the lock held can cause deadlocks; see schedule() for
4163 * details.)
4164 *
4165 * The context switch has flipped the stack from under us and restored the
4166 * local variables which were saved when this task called schedule() in the
4167 * past. prev == current is still correct but we need to recalculate this_rq
4168 * because prev may have moved to another CPU.
4169 */
4170static struct rq *finish_task_switch(struct task_struct *prev)
4171        __releases(rq->lock)
4172{
4173        struct rq *rq = this_rq();
4174        struct mm_struct *mm = rq->prev_mm;
4175        long prev_state;
4176
4177        /*
4178         * The previous task will have left us with a preempt_count of 2
4179         * because it left us after:
4180         *
4181         *      schedule()
4182         *        preempt_disable();                    // 1
4183         *        __schedule()
4184         *          raw_spin_lock_irq(&rq->lock)        // 2
4185         *
4186         * Also, see FORK_PREEMPT_COUNT.
4187         */
4188        if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
4189                      "corrupted preempt_count: %s/%d/0x%x\n",
4190                      current->comm, current->pid, preempt_count()))
4191                preempt_count_set(FORK_PREEMPT_COUNT);
4192
4193        rq->prev_mm = NULL;
4194
4195        /*
4196         * A task struct has one reference for the use as "current".
4197         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
4198         * schedule one last time. The schedule call will never return, and
4199         * the scheduled task must drop that reference.
4200         *
4201         * We must observe prev->state before clearing prev->on_cpu (in
4202         * finish_task), otherwise a concurrent wakeup can get prev
4203         * running on another CPU and we could race with its RUNNING -> DEAD
4204         * transition, resulting in a double drop.
4205         */
4206        prev_state = prev->state;
4207        vtime_task_switch(prev);
4208        perf_event_task_sched_in(prev, current);
4209        finish_task(prev);
4210        finish_lock_switch(rq);
4211        finish_arch_post_lock_switch();
4212        kcov_finish_switch(current);
4213        /*
4214         * kmap_local_sched_out() is invoked with rq::lock held and
4215         * interrupts disabled. There is no requirement for that, but the
4216         * sched out code does not have an interrupt enabled section.
4217         * Restoring the maps on sched in does not require interrupts being
4218         * disabled either.
4219         */
4220        kmap_local_sched_in();
4221
4222        fire_sched_in_preempt_notifiers(current);
4223        /*
4224         * When switching through a kernel thread, the loop in
4225         * membarrier_{private,global}_expedited() may have observed that
4226         * kernel thread and not issued an IPI. It is therefore possible to
4227         * schedule between user->kernel->user threads without passing through
4228         * switch_mm(). Membarrier requires a barrier after storing to
4229         * rq->curr, before returning to userspace, so provide them here:
4230         *
4231         * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
4232         *   provided by mmdrop(),
4233         * - a sync_core for SYNC_CORE.
4234         */
4235        if (mm) {
4236                membarrier_mm_sync_core_before_usermode(mm);
4237                mmdrop(mm);
4238        }
4239        if (unlikely(prev_state == TASK_DEAD)) {
4240                if (prev->sched_class->task_dead)
4241                        prev->sched_class->task_dead(prev);
4242
4243                /*
4244                 * Remove function-return probe instances associated with this
4245                 * task and put them back on the free list.
4246                 */
4247                kprobe_flush_task(prev);
4248
4249                /* Task is done with its stack. */
4250                put_task_stack(prev);
4251
4252                put_task_struct_rcu_user(prev);
4253        }
4254
4255        tick_nohz_task_switch();
4256        return rq;
4257}
4258
4259/**
4260 * schedule_tail - first thing a freshly forked thread must call.
4261 * @prev: the thread we just switched away from.
4262 */
4263asmlinkage __visible void schedule_tail(struct task_struct *prev)
4264        __releases(rq->lock)
4265{
4266        /*
4267         * New tasks start with FORK_PREEMPT_COUNT, see there and
4268         * finish_task_switch() for details.
4269         *
4270         * finish_task_switch() will drop rq->lock() and lower preempt_count
4271         * and the preempt_enable() will end up enabling preemption (on
4272         * PREEMPT_COUNT kernels).
4273         */
4274
4275        finish_task_switch(prev);
4276        preempt_enable();
4277
4278        if (current->set_child_tid)
4279                put_user(task_pid_vnr(current), current->set_child_tid);
4280
4281        calculate_sigpending();
4282}
4283
4284/*
4285 * context_switch - switch to the new MM and the new thread's register state.
4286 */
4287static __always_inline struct rq *
4288context_switch(struct rq *rq, struct task_struct *prev,
4289               struct task_struct *next, struct rq_flags *rf)
4290{
4291        prepare_task_switch(rq, prev, next);
4292
4293        /*
4294         * For paravirt, this is coupled with an exit in switch_to to
4295         * combine the page table reload and the switch backend into
4296         * one hypercall.
4297         */
4298        arch_start_context_switch(prev);
4299
4300        /*
4301         * kernel -> kernel   lazy + transfer active
4302         *   user -> kernel   lazy + mmgrab() active
4303         *
4304         * kernel ->   user   switch + mmdrop() active
4305         *   user ->   user   switch
4306         */
4307        if (!next->mm) {                                // to kernel
4308                enter_lazy_tlb(prev->active_mm, next);
4309
4310                next->active_mm = prev->active_mm;
4311                if (prev->mm)                           // from user
4312                        mmgrab(prev->active_mm);
4313                else
4314                        prev->active_mm = NULL;
4315        } else {                                        // to user
4316                membarrier_switch_mm(rq, prev->active_mm, next->mm);
4317                /*
4318                 * sys_membarrier() requires an smp_mb() between setting
4319                 * rq->curr / membarrier_switch_mm() and returning to userspace.
4320                 *
4321                 * The below provides this either through switch_mm(), or in
4322                 * case 'prev->active_mm == next->mm' through
4323                 * finish_task_switch()'s mmdrop().
4324                 */
4325                switch_mm_irqs_off(prev->active_mm, next->mm, next);
4326
4327                if (!prev->mm) {                        // from kernel
4328                        /* will mmdrop() in finish_task_switch(). */
4329                        rq->prev_mm = prev->active_mm;
4330                        prev->active_mm = NULL;
4331                }
4332        }
4333
4334        rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
4335
4336        prepare_lock_switch(rq, next, rf);
4337
4338        /* Here we just switch the register state and the stack. */
4339        switch_to(prev, next, prev);
4340        barrier();
4341
4342        return finish_task_switch(prev);
4343}
4344
4345/*
4346 * nr_running and nr_context_switches:
4347 *
4348 * externally visible scheduler statistics: current number of runnable
4349 * threads, total number of context switches performed since bootup.
4350 */
4351unsigned long nr_running(void)
4352{
4353        unsigned long i, sum = 0;
4354
4355        for_each_online_cpu(i)
4356                sum += cpu_rq(i)->nr_running;
4357
4358        return sum;
4359}
4360
4361/*
4362 * Check if only the current task is running on the CPU.
4363 *
4364 * Caution: this function does not check that the caller has disabled
4365 * preemption, thus the result might have a time-of-check-to-time-of-use
4366 * race.  The caller is responsible to use it correctly, for example:
4367 *
4368 * - from a non-preemptible section (of course)
4369 *
4370 * - from a thread that is bound to a single CPU
4371 *
4372 * - in a loop with very short iterations (e.g. a polling loop)
4373 */
4374bool single_task_running(void)
4375{
4376        return raw_rq()->nr_running == 1;
4377}
4378EXPORT_SYMBOL(single_task_running);
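
/*
 * Illustrative sketch, not kernel code: the intended style of use is a short
 * polling loop that backs off once the CPU has other runnable work. The
 * 'my_poll_until()' helper and its @done callback are hypothetical.
 */
static void my_poll_until(bool (*done)(void))
{
	preempt_disable();
	while (!done() && single_task_running())
		cpu_relax();
	preempt_enable();
}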
4379
4380unsigned long long nr_context_switches(void)
4381{
4382        int i;
4383        unsigned long long sum = 0;
4384
4385        for_each_possible_cpu(i)
4386                sum += cpu_rq(i)->nr_switches;
4387
4388        return sum;
4389}
4390
4391/*
4392 * Consumers of these two interfaces, such as the cpuidle menu
4393 * governor, are using nonsensical data: they prefer a shallow idle state for
4394 * a CPU that has IO-wait pending, even though the waiting task might not even
4395 * end up running on that CPU when it does become runnable.
4396 */
4397
4398unsigned long nr_iowait_cpu(int cpu)
4399{
4400        return atomic_read(&cpu_rq(cpu)->nr_iowait);
4401}
4402
4403/*
4404 * IO-wait accounting, and how it's mostly bollocks (on SMP).
4405 *
4406 * The idea behind IO-wait accounting is to account the idle time that we could
4407 * have spent running if it were not for IO. That is, if we were to improve the
4408 * storage performance, we'd have a proportional reduction in IO-wait time.
4409 *
4410 * This all works nicely on UP, where, when a task blocks on IO, we account
4411 * idle time as IO-wait, because if the storage were faster, it could've been
4412 * running and we'd not be idle.
4413 *
4414 * This has been extended to SMP, by doing the same for each CPU. This however
4415 * is broken.
4416 *
4417 * Imagine for instance the case where two tasks block on one CPU, only the one
4418 * CPU will have IO-wait accounted, while the other has regular idle. Even
4419 * though, if the storage were faster, both could've run at the same time,
4420 * utilising both CPUs.
4421 *
4422 * This means that, when looking globally, the current IO-wait accounting on
4423 * SMP is a lower bound, by reason of under-accounting.
4424 *
4425 * Worse, since the numbers are provided per CPU, they are sometimes
4426 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
4427 * associated with any one particular CPU; it can wake up on a different CPU
4428 * than the one it blocked on. This means the per-CPU IO-wait number is meaningless.
4429 *
4430 * Task CPU affinities can make all that even more 'interesting'.
4431 */
4432
4433unsigned long nr_iowait(void)
4434{
4435        unsigned long i, sum = 0;
4436
4437        for_each_possible_cpu(i)
4438                sum += nr_iowait_cpu(i);
4439
4440        return sum;
4441}
4442
4443#ifdef CONFIG_SMP
4444
4445/*
4446 * sched_exec - execve() is a valuable balancing opportunity, because at
4447 * this point the task has the smallest effective memory and cache footprint.
4448 */
4449void sched_exec(void)
4450{
4451        struct task_struct *p = current;
4452        unsigned long flags;
4453        int dest_cpu;
4454
4455        raw_spin_lock_irqsave(&p->pi_lock, flags);
4456        dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
4457        if (dest_cpu == smp_processor_id())
4458                goto unlock;
4459
4460        if (likely(cpu_active(dest_cpu))) {
4461                struct migration_arg arg = { p, dest_cpu };
4462
4463                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4464                stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
4465                return;
4466        }
4467unlock:
4468        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4469}
4470
4471#endif
4472
4473DEFINE_PER_CPU(struct kernel_stat, kstat);
4474DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
4475
4476EXPORT_PER_CPU_SYMBOL(kstat);
4477EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
4478
4479/*
4480 * The function fair_sched_class.update_curr accesses the struct curr
4481 * and its field curr->exec_start; when called from task_sched_runtime(),
4482 * we observe a high rate of cache misses in practice.
4483 * Prefetching this data results in improved performance.
4484 */
4485static inline void prefetch_curr_exec_start(struct task_struct *p)
4486{
4487#ifdef CONFIG_FAIR_GROUP_SCHED
4488        struct sched_entity *curr = (&p->se)->cfs_rq->curr;
4489#else
4490        struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
4491#endif
4492        prefetch(curr);
4493        prefetch(&curr->exec_start);
4494}
4495
4496/*
4497 * Return accounted runtime for the task.
4498 * In case the task is currently running, return the runtime plus current's
4499 * pending runtime that have not been accounted yet.
4500 */
4501unsigned long long task_sched_runtime(struct task_struct *p)
4502{
4503        struct rq_flags rf;
4504        struct rq *rq;
4505        u64 ns;
4506
4507#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
4508        /*
4509         * 64-bit doesn't need locks to atomically read a 64-bit value.
4510         * So we have an optimization chance when the task's delta_exec is 0.
4511         * Reading ->on_cpu is racy, but this is ok.
4512         *
4513         * If we race with it leaving CPU, we'll take a lock. So we're correct.
4514         * If we race with it entering CPU, unaccounted time is 0. This is
4515         * indistinguishable from the read occurring a few cycles earlier.
4516         * If we see ->on_cpu without ->on_rq, the task is leaving, and has
4517         * been accounted, so we're correct here as well.
4518         */
4519        if (!p->on_cpu || !task_on_rq_queued(p))
4520                return p->se.sum_exec_runtime;
4521#endif
4522
4523        rq = task_rq_lock(p, &rf);
4524        /*
4525         * Must be ->curr _and_ ->on_rq.  If dequeued, we would
4526         * project cycles that may never be accounted to this
4527         * thread, breaking clock_gettime().
4528         */
4529        if (task_current(rq, p) && task_on_rq_queued(p)) {
4530                prefetch_curr_exec_start(p);
4531                update_rq_clock(rq);
4532                p->sched_class->update_curr(rq);
4533        }
4534        ns = p->se.sum_exec_runtime;
4535        task_rq_unlock(rq, p, &rf);
4536
4537        return ns;
4538}
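
/*
 * Illustrative usage note: this value ultimately backs the per-thread
 * CPU-time clock sampled by posix-cpu-timers, e.g. from userspace
 * (assuming a POSIX environment):
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * hence the care above not to project cycles that may never be accounted.
 */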
4539
4540#ifdef CONFIG_SCHED_DEBUG
4541static u64 cpu_resched_latency(struct rq *rq)
4542{
4543        int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
4544        u64 resched_latency, now = rq_clock(rq);
4545        static bool warned_once;
4546
4547        if (sysctl_resched_latency_warn_once && warned_once)
4548                return 0;
4549
4550        if (!need_resched() || !latency_warn_ms)
4551                return 0;
4552
4553        if (system_state == SYSTEM_BOOTING)
4554                return 0;
4555
4556        if (!rq->last_seen_need_resched_ns) {
4557                rq->last_seen_need_resched_ns = now;
4558                rq->ticks_without_resched = 0;
4559                return 0;
4560        }
4561
4562        rq->ticks_without_resched++;
4563        resched_latency = now - rq->last_seen_need_resched_ns;
4564        if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
4565                return 0;
4566
4567        warned_once = true;
4568
4569        return resched_latency;
4570}
4571
4572static int __init setup_resched_latency_warn_ms(char *str)
4573{
4574        long val;
4575
4576        if ((kstrtol(str, 0, &val))) {
4577                pr_warn("Unable to set resched_latency_warn_ms\n");
4578                return 1;
4579        }
4580
4581        sysctl_resched_latency_warn_ms = val;
4582        return 1;
4583}
4584__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
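
/*
 * Illustrative usage note: e.g. booting with "resched_latency_warn_ms=500"
 * raises the warning threshold to 500ms; the LATENCY_WARN scheduler feature
 * must also be enabled for cpu_resched_latency() to be consulted, see
 * scheduler_tick().
 */
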
4585#else
4586static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
4587#endif /* CONFIG_SCHED_DEBUG */
4588
4589/*
4590 * This function gets called by the timer code, with HZ frequency.
4591 * We call it with interrupts disabled.
4592 */
4593void scheduler_tick(void)
4594{
4595        int cpu = smp_processor_id();
4596        struct rq *rq = cpu_rq(cpu);
4597        struct task_struct *curr = rq->curr;
4598        struct rq_flags rf;
4599        unsigned long thermal_pressure;
4600        u64 resched_latency;
4601
4602        arch_scale_freq_tick();
4603        sched_clock_tick();
4604
4605        rq_lock(rq, &rf);
4606
4607        update_rq_clock(rq);
4608        thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
4609        update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
4610        curr->sched_class->task_tick(rq, curr, 0);
4611        if (sched_feat(LATENCY_WARN))
4612                resched_latency = cpu_resched_latency(rq);
4613        calc_global_load_tick(rq);
4614
4615        rq_unlock(rq, &rf);
4616
4617        if (sched_feat(LATENCY_WARN) && resched_latency)
4618                resched_latency_warn(cpu, resched_latency);
4619
4620        perf_event_task_tick();
4621
4622#ifdef CONFIG_SMP
4623        rq->idle_balance = idle_cpu(cpu);
4624        trigger_load_balance(rq);
4625#endif
4626}
4627
4628#ifdef CONFIG_NO_HZ_FULL
4629
4630struct tick_work {
4631        int                     cpu;
4632        atomic_t                state;
4633        struct delayed_work     work;
4634};
4635/* Values for ->state, see diagram below. */
4636#define TICK_SCHED_REMOTE_OFFLINE       0
4637#define TICK_SCHED_REMOTE_OFFLINING     1
4638#define TICK_SCHED_REMOTE_RUNNING       2
4639
4640/*
4641 * State diagram for ->state:
4642 *
4643 *
4644 *          TICK_SCHED_REMOTE_OFFLINE
4645 *                    |   ^
4646 *                    |   |
4647 *                    |   | sched_tick_remote()
4648 *                    |   |
4649 *                    |   |
4650 *                    +--TICK_SCHED_REMOTE_OFFLINING
4651 *                    |   ^
4652 *                    |   |
4653 * sched_tick_start() |   | sched_tick_stop()
4654 *                    |   |
4655 *                    V   |
4656 *          TICK_SCHED_REMOTE_RUNNING
4657 *
4658 *
4659 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
4660 * and sched_tick_start() are happy to leave the state in RUNNING.
4661 */
4662
4663static struct tick_work __percpu *tick_work_cpu;
4664
4665static void sched_tick_remote(struct work_struct *work)
4666{
4667        struct delayed_work *dwork = to_delayed_work(work);
4668        struct tick_work *twork = container_of(dwork, struct tick_work, work);
4669        int cpu = twork->cpu;
4670        struct rq *rq = cpu_rq(cpu);
4671        struct task_struct *curr;
4672        struct rq_flags rf;
4673        u64 delta;
4674        int os;
4675
4676        /*
4677         * Handle the tick only if it appears the remote CPU is running in full
4678         * dynticks mode. The check is racy by nature, but missing a tick or
4679         * having one too many is no big deal because the scheduler tick updates
4680         * statistics and checks timeslices in a time-independent way, regardless
4681         * of when exactly it is running.
4682         */
4683        if (!tick_nohz_tick_stopped_cpu(cpu))
4684                goto out_requeue;
4685
4686        rq_lock_irq(rq, &rf);
4687        curr = rq->curr;
4688        if (cpu_is_offline(cpu))
4689                goto out_unlock;
4690
4691        update_rq_clock(rq);
4692
4693        if (!is_idle_task(curr)) {
4694                /*
4695                 * Make sure the next tick runs within a reasonable
4696                 * amount of time.
4697                 */
4698                delta = rq_clock_task(rq) - curr->se.exec_start;
4699                WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
4700        }
4701        curr->sched_class->task_tick(rq, curr, 0);
4702
4703        calc_load_nohz_remote(rq);
4704out_unlock:
4705        rq_unlock_irq(rq, &rf);
4706out_requeue:
4707
4708        /*
4709         * Run the remote tick once per second (1Hz). This arbitrary
4710         * period is long enough to avoid overload but short enough
4711         * to keep scheduler internal stats reasonably up to date.  But
4712         * first update state to reflect hotplug activity if required.
4713         */
4714        os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
4715        WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
4716        if (os == TICK_SCHED_REMOTE_RUNNING)
4717                queue_delayed_work(system_unbound_wq, dwork, HZ);
4718}
4719
4720static void sched_tick_start(int cpu)
4721{
4722        int os;
4723        struct tick_work *twork;
4724
4725        if (housekeeping_cpu(cpu, HK_FLAG_TICK))
4726                return;
4727
4728        WARN_ON_ONCE(!tick_work_cpu);
4729
4730        twork = per_cpu_ptr(tick_work_cpu, cpu);
4731        os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
4732        WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
4733        if (os == TICK_SCHED_REMOTE_OFFLINE) {
4734                twork->cpu = cpu;
4735                INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
4736                queue_delayed_work(system_unbound_wq, &twork->work, HZ);
4737        }
4738}
4739
4740#ifdef CONFIG_HOTPLUG_CPU
4741static void sched_tick_stop(int cpu)
4742{
4743        struct tick_work *twork;
4744        int os;
4745
4746        if (housekeeping_cpu(cpu, HK_FLAG_TICK))
4747                return;
4748
4749        WARN_ON_ONCE(!tick_work_cpu);
4750
4751        twork = per_cpu_ptr(tick_work_cpu, cpu);
4752        /* There cannot be competing actions, but don't rely on stop-machine. */
4753        os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
4754        WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
4755        /* Don't cancel, as this would mess up the state machine. */
4756}
4757#endif /* CONFIG_HOTPLUG_CPU */
4758
4759int __init sched_tick_offload_init(void)
4760{
4761        tick_work_cpu = alloc_percpu(struct tick_work);
4762        BUG_ON(!tick_work_cpu);
4763        return 0;
4764}
4765
4766#else /* !CONFIG_NO_HZ_FULL */
4767static inline void sched_tick_start(int cpu) { }
4768static inline void sched_tick_stop(int cpu) { }
4769#endif
4770
4771#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
4772                                defined(CONFIG_TRACE_PREEMPT_TOGGLE))
4773/*
4774 * If the value passed in is equal to the current preempt count
4775 * then we just disabled preemption. Start timing the latency.
4776 */
4777static inline void preempt_latency_start(int val)
4778{
4779        if (preempt_count() == val) {
4780                unsigned long ip = get_lock_parent_ip();
4781#ifdef CONFIG_DEBUG_PREEMPT
4782                current->preempt_disable_ip = ip;
4783#endif
4784                trace_preempt_off(CALLER_ADDR0, ip);
4785        }
4786}
4787
4788void preempt_count_add(int val)
4789{
4790#ifdef CONFIG_DEBUG_PREEMPT
4791        /*
4792         * Underflow?
4793         */
4794        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
4795                return;
4796#endif
4797        __preempt_count_add(val);
4798#ifdef CONFIG_DEBUG_PREEMPT
4799        /*
4800         * Spinlock count overflowing soon?
4801         */
4802        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
4803                                PREEMPT_MASK - 10);
4804#endif
4805        preempt_latency_start(val);
4806}
4807EXPORT_SYMBOL(preempt_count_add);
4808NOKPROBE_SYMBOL(preempt_count_add);
4809
4810/*
4811 * If the value passed in is equal to the current preempt count
4812 * then we just enabled preemption. Stop timing the latency.
4813 */
4814static inline void preempt_latency_stop(int val)
4815{
4816        if (preempt_count() == val)
4817                trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
4818}
4819
4820void preempt_count_sub(int val)
4821{
4822#ifdef CONFIG_DEBUG_PREEMPT
4823        /*
4824         * Underflow?
4825         */
4826        if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
4827                return;
4828        /*
4829         * Is the spinlock portion underflowing?
4830         */
4831        if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4832                        !(preempt_count() & PREEMPT_MASK)))
4833                return;
4834#endif
4835
4836        preempt_latency_stop(val);
4837        __preempt_count_sub(val);
4838}
4839EXPORT_SYMBOL(preempt_count_sub);
4840NOKPROBE_SYMBOL(preempt_count_sub);
4841
4842#else
4843static inline void preempt_latency_start(int val) { }
4844static inline void preempt_latency_stop(int val) { }
4845#endif
4846
4847static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
4848{
4849#ifdef CONFIG_DEBUG_PREEMPT
4850        return p->preempt_disable_ip;
4851#else
4852        return 0;
4853#endif
4854}
4855
4856/*
4857 * Print scheduling while atomic bug:
4858 */
4859static noinline void __schedule_bug(struct task_struct *prev)
4860{
4861        /* Save this before calling printk(), since that will clobber it */
4862        unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
4863
4864        if (oops_in_progress)
4865                return;
4866
4867        printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4868                prev->comm, prev->pid, preempt_count());
4869
4870        debug_show_held_locks(prev);
4871        print_modules();
4872        if (irqs_disabled())
4873                print_irqtrace_events(prev);
4874        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
4875            && in_atomic_preempt_off()) {
4876                pr_err("Preemption disabled at:");
4877                print_ip_sym(KERN_ERR, preempt_disable_ip);
4878        }
4879        if (panic_on_warn)
4880                panic("scheduling while atomic\n");
4881
4882        dump_stack();
4883        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
4884}
4885
4886/*
4887 * Various schedule()-time debugging checks and statistics:
4888 */
4889static inline void schedule_debug(struct task_struct *prev, bool preempt)
4890{
4891#ifdef CONFIG_SCHED_STACK_END_CHECK
4892        if (task_stack_end_corrupted(prev))
4893                panic("corrupted stack end detected inside scheduler\n");
4894
4895        if (task_scs_end_corrupted(prev))
4896                panic("corrupted shadow stack detected inside scheduler\n");
4897#endif
4898
4899#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
4900        if (!preempt && prev->state && prev->non_block_count) {
4901                printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
4902                        prev->comm, prev->pid, prev->non_block_count);
4903                dump_stack();
4904                add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
4905        }
4906#endif
4907
4908        if (unlikely(in_atomic_preempt_off())) {
4909                __schedule_bug(prev);
4910                preempt_count_set(PREEMPT_DISABLED);
4911        }
4912        rcu_sleep_check();
4913        SCHED_WARN_ON(ct_state() == CONTEXT_USER);
4914
4915        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4916
4917        schedstat_inc(this_rq()->sched_count);
4918}
4919
4920static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
4921                                  struct rq_flags *rf)
4922{
4923#ifdef CONFIG_SMP
4924        const struct sched_class *class;
4925        /*
4926         * We must do the balancing pass before put_prev_task(), such
4927         * that when we release the rq->lock the task is in the same
4928         * state as before we took rq->lock.
4929         *
4930         * We can terminate the balance pass as soon as we know there is
4931         * a runnable task of @class priority or higher.
4932         */
4933        for_class_range(class, prev->sched_class, &idle_sched_class) {
4934                if (class->balance(rq, prev, rf))
4935                        break;
4936        }
4937#endif
4938
4939        put_prev_task(rq, prev);
4940}
4941
4942/*
4943 * Pick up the highest-prio task:
4944 */
4945static inline struct task_struct *
4946pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
4947{
4948        const struct sched_class *class;
4949        struct task_struct *p;
4950
4951        /*
4952         * Optimization: we know that if all tasks are in the fair class we can
4953         * call that function directly, but only if the @prev task wasn't of a
4954         * higher scheduling class, because otherwise those lose the
4955         * opportunity to pull in more work from other CPUs.
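             *
             * (The address comparison below works because the sched classes
             * are laid out in priority order by the linker, so '<=' against
             * &fair_sched_class means @prev is in the fair or idle class; see
             * for_each_class().)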
4956         */
4957        if (likely(prev->sched_class <= &fair_sched_class &&
4958                   rq->nr_running == rq->cfs.h_nr_running)) {
4959
4960                p = pick_next_task_fair(rq, prev, rf);
4961                if (unlikely(p == RETRY_TASK))
4962                        goto restart;
4963
4964                /* Assumes fair_sched_class->next == idle_sched_class */
4965                if (!p) {
4966                        put_prev_task(rq, prev);
4967                        p = pick_next_task_idle(rq);
4968                }
4969
4970                return p;
4971        }
4972
4973restart:
4974        put_prev_task_balance(rq, prev, rf);
4975
4976        for_each_class(class) {
4977                p = class->pick_next_task(rq);
4978                if (p)
4979                        return p;
4980        }
4981
4982        /* The idle class should always have a runnable task: */
4983        BUG();
4984}
4985
4986/*
4987 * __schedule() is the main scheduler function.
4988 *
4989 * The main means of driving the scheduler and thus entering this function are:
4990 *
4991 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
4992 *
4993 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
4994 *      paths. For example, see arch/x86/entry_64.S.
4995 *
4996 *      To drive preemption between tasks, the scheduler sets the flag in timer
4997 *      interrupt handler scheduler_tick().
4998 *
4999 *   3. Wakeups don't really cause entry into schedule(). They add a
5000 *      task to the run-queue and that's it.
5001 *
5002 *      Now, if the new task added to the run-queue preempts the current
5003 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
5004 *      called on the nearest possible occasion:
5005 *
5006 *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
5007 *
5008 *         - in syscall or exception context, at the next outermost
5009 *           preempt_enable(). (this might be as soon as the wake_up()'s
5010 *           spin_unlock()!)
5011 *
5012 *         - in IRQ context, return from interrupt-handler to
5013 *           preemptible context
5014 *
5015 *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
5016 *         then at the next:
5017 *
5018 *          - cond_resched() call
5019 *          - explicit schedule() call
5020 *          - return from syscall or exception to user-space
5021 *          - return from interrupt-handler to user-space
5022 *
5023 * WARNING: must be called with preemption disabled!
5024 */
5025static void __sched notrace __schedule(bool preempt)
5026{
5027        struct task_struct *prev, *next;
5028        unsigned long *switch_count;
5029        unsigned long prev_state;
5030        struct rq_flags rf;
5031        struct rq *rq;
5032        int cpu;
5033
5034        cpu = smp_processor_id();
5035        rq = cpu_rq(cpu);
5036        prev = rq->curr;
5037
5038        schedule_debug(prev, preempt);
5039
5040        if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
5041                hrtick_clear(rq);
5042
5043        local_irq_disable();
5044        rcu_note_context_switch(preempt);
5045
5046        /*
5047         * Make sure that signal_pending_state()->signal_pending() below
5048         * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
5049         * done by the caller to avoid the race with signal_wake_up():
5050         *
5051         * __set_current_state(@state)          signal_wake_up()
5052         * schedule()                             set_tsk_thread_flag(p, TIF_SIGPENDING)
5053         *                                        wake_up_state(p, state)
5054         *   LOCK rq->lock                          LOCK p->pi_state
5055         *   smp_mb__after_spinlock()               smp_mb__after_spinlock()
5056         *     if (signal_pending_state())          if (p->state & @state)
5057         *
5058         * Also, the membarrier system call requires a full memory barrier
5059         * after coming from user-space, before storing to rq->curr.
5060         */
5061        rq_lock(rq, &rf);
5062        smp_mb__after_spinlock();
5063
5064        /* Promote REQ to ACT */
5065        rq->clock_update_flags <<= 1;
5066        update_rq_clock(rq);
5067
5068        switch_count = &prev->nivcsw;
5069
5070        /*
5071         * We must load prev->state once (task_struct::state is volatile), such
5072         * that:
5073         *
5074         *  - we form a control dependency vs deactivate_task() below.
5075         *  - ptrace_{,un}freeze_traced() can change ->state underneath us.
5076         */
5077        prev_state = prev->state;
5078        if (!preempt && prev_state) {
5079                if (signal_pending_state(prev_state, prev)) {
5080                        prev->state = TASK_RUNNING;
5081                } else {
5082                        prev->sched_contributes_to_load =
5083                                (prev_state & TASK_UNINTERRUPTIBLE) &&
5084                                !(prev_state & TASK_NOLOAD) &&
5085                                !(prev->flags & PF_FROZEN);
5086
5087                        if (prev->sched_contributes_to_load)
5088                                rq->nr_uninterruptible++;
5089
5090                        /*
5091                         * __schedule()                 ttwu()
5092                         *   prev_state = prev->state;    if (p->on_rq && ...)
5093                         *   if (prev_state)                goto out;
5094                         *     p->on_rq = 0;              smp_acquire__after_ctrl_dep();
5095                         *                                p->state = TASK_WAKING
5096                         *
5097                         * Where __schedule() and ttwu() have matching control dependencies.
5098                         *
5099                         * After this, schedule() must not care about p->state any more.
5100                         */
5101                        deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
5102
5103                        if (prev->in_iowait) {
5104                                atomic_inc(&rq->nr_iowait);
5105                                delayacct_blkio_start();
5106                        }
5107                }
5108                switch_count = &prev->nvcsw;
5109        }
5110
5111        next = pick_next_task(rq, prev, &rf);
5112        clear_tsk_need_resched(prev);
5113        clear_preempt_need_resched();
5114#ifdef CONFIG_SCHED_DEBUG
5115        rq->last_seen_need_resched_ns = 0;
5116#endif
5117
5118        if (likely(prev != next)) {
5119                rq->nr_switches++;
5120                /*
5121                 * RCU users of rcu_dereference(rq->curr) may not see
5122                 * changes to task_struct made by pick_next_task().
5123                 */
5124                RCU_INIT_POINTER(rq->curr, next);
5125                /*
5126                 * The membarrier system call requires each architecture
5127                 * to have a full memory barrier after updating
5128                 * rq->curr, before returning to user-space.
5129                 *
5130                 * Here are the schemes providing that barrier on the
5131                 * various architectures:
5132                 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
5133                 *   switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
5134                 * - finish_lock_switch() for weakly-ordered
5135                 *   architectures where spin_unlock is a full barrier,
5136                 * - switch_to() for arm64 (weakly-ordered, spin_unlock
5137                 *   is a RELEASE barrier),
5138                 */
5139                ++*switch_count;
5140
5141                migrate_disable_switch(rq, prev);
5142                psi_sched_switch(prev, next, !task_on_rq_queued(prev));
5143
5144                trace_sched_switch(preempt, prev, next);
5145
5146                /* Also unlocks the rq: */
5147                rq = context_switch(rq, prev, next, &rf);
5148        } else {
5149                rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
5150
5151                rq_unpin_lock(rq, &rf);
5152                __balance_callbacks(rq);
5153                raw_spin_unlock_irq(&rq->lock);
5154        }
5155}
5156
5157void __noreturn do_task_dead(void)
5158{
5159        /* Causes final put_task_struct in finish_task_switch(): */
5160        set_special_state(TASK_DEAD);
5161
5162        /* Tell freezer to ignore us: */
5163        current->flags |= PF_NOFREEZE;
5164
5165        __schedule(false);
5166        BUG();
5167
5168        /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
5169        for (;;)
5170                cpu_relax();
5171}
5172
5173static inline void sched_submit_work(struct task_struct *tsk)
5174{
5175        unsigned int task_flags;
5176
5177        if (!tsk->state)
5178                return;
5179
5180        task_flags = tsk->flags;
5181        /*
5182         * If a worker went to sleep, notify and ask workqueue whether
5183         * it wants to wake up a task to maintain concurrency.
5184         * As this function is called inside the schedule() context, we
5185         * disable preemption so that a possible kworker wakeup does not
5186         * call schedule() again, and because wq_worker_sleeping()
5187         * requires it.
5188         */
5189        if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
5190                preempt_disable();
5191                if (task_flags & PF_WQ_WORKER)
5192                        wq_worker_sleeping(tsk);
5193                else
5194                        io_wq_worker_sleeping(tsk);
5195                preempt_enable_no_resched();
5196        }
5197
5198        if (tsk_is_pi_blocked(tsk))
5199                return;
5200
5201        /*
5202         * If we are going to sleep and we have plugged IO queued,
5203         * make sure to submit it to avoid deadlocks.
5204         */
5205        if (blk_needs_flush_plug(tsk))
5206                blk_schedule_flush_plug(tsk);
5207}
5208
5209static void sched_update_worker(struct task_struct *tsk)
5210{
5211        if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
5212                if (tsk->flags & PF_WQ_WORKER)
5213                        wq_worker_running(tsk);
5214                else
5215                        io_wq_worker_running(tsk);
5216        }
5217}
5218
5219asmlinkage __visible void __sched schedule(void)
5220{
5221        struct task_struct *tsk = current;
5222
5223        sched_submit_work(tsk);
5224        do {
5225                preempt_disable();
5226                __schedule(false);
5227                sched_preempt_enable_no_resched();
5228        } while (need_resched());
5229        sched_update_worker(tsk);
5230}
5231EXPORT_SYMBOL(schedule);
5232
5233/*
5234 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
5235 * state (having scheduled out non-voluntarily) by making sure that all
5236 * tasks have either left the run queue or have gone into user space.
5237 * As idle tasks do not do either, they must not ever be preempted
5238 * (schedule out non-voluntarily).
5239 *
5240 * schedule_idle() is similar to schedule_preempt_disabled() except that it
5241 * never enables preemption because it does not call sched_submit_work().
5242 */
5243void __sched schedule_idle(void)
5244{
5245        /*
5246         * As this skips calling sched_submit_work(), which the idle task does
5247         * regardless because that function is a nop when the task is in a
5248         * TASK_RUNNING state, make sure this isn't used someplace where the
5249         * current task can be in any other state. Note, idle is always in the
5250         * TASK_RUNNING state.
5251         */
5252        WARN_ON_ONCE(current->state);
5253        do {
5254                __schedule(false);
5255        } while (need_resched());
5256}
5257
5258#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
5259asmlinkage __visible void __sched schedule_user(void)
5260{
5261        /*
5262         * If we come here after a random call to set_need_resched(),
5263         * or we have been woken up remotely but the IPI has not yet arrived,
5264         * we haven't yet exited the RCU idle mode. Do it here manually until
5265         * we find a better solution.
5266         *
5267         * NB: There are buggy callers of this function.  Ideally we
5268         * should warn if prev_state != CONTEXT_USER, but that will trigger
5269         * too frequently to make sense yet.
5270         */
5271        enum ctx_state prev_state = exception_enter();
5272        schedule();
5273        exception_exit(prev_state);
5274}
5275#endif
5276
5277/**
5278 * schedule_preempt_disabled - called with preemption disabled
5279 *
5280 * Returns with preemption disabled. Note: preempt_count must be 1
5281 */
5282void __sched schedule_preempt_disabled(void)
5283{
5284        sched_preempt_enable_no_resched();
5285        schedule();
5286        preempt_disable();
5287}
5288
5289static void __sched notrace preempt_schedule_common(void)
5290{
5291        do {
5292                /*
5293                 * Because the function tracer can trace preempt_count_sub()
5294                 * and it also uses preempt_enable/disable_notrace(), if
5295                 * NEED_RESCHED is set, the preempt_enable_notrace() called
5296                 * by the function tracer will call this function again and
5297                 * cause infinite recursion.
5298                 *
5299                 * Preemption must be disabled here before the function
5300                 * tracer can trace. Break up preempt_disable() into two
5301                 * calls. One to disable preemption without fear of being
5302                 * traced. The other to still record the preemption latency,
5303                 * which can also be traced by the function tracer.
5304                 */
5305                preempt_disable_notrace();
5306                preempt_latency_start(1);
5307                __schedule(true);
5308                preempt_latency_stop(1);
5309                preempt_enable_no_resched_notrace();
5310
5311                /*
5312                 * Check again in case we missed a preemption opportunity
5313                 * between schedule and now.
5314                 */
5315        } while (need_resched());
5316}
5317
5318#ifdef CONFIG_PREEMPTION
5319/*
5320 * This is the entry point to schedule() from in-kernel preemption
5321 * off of preempt_enable.
5322 */
5323asmlinkage __visible void __sched notrace preempt_schedule(void)
5324{
5325        /*
5326         * If there is a non-zero preempt_count or interrupts are disabled,
5327         * we do not want to preempt the current task. Just return.
5328         */
5329        if (likely(!preemptible()))
5330                return;
5331
5332        preempt_schedule_common();
5333}
5334NOKPROBE_SYMBOL(preempt_schedule);
5335EXPORT_SYMBOL(preempt_schedule);
5336
5337#ifdef CONFIG_PREEMPT_DYNAMIC
5338DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
5339EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
5340#endif
5341
5342
5343/**
5344 * preempt_schedule_notrace - preempt_schedule called by tracing
5345 *
5346 * The tracing infrastructure uses preempt_enable_notrace to prevent
5347 * recursion and tracing preempt enabling caused by the tracing
5348 * infrastructure itself. But as tracing can happen in areas coming
5349 * from userspace or just about to enter userspace, a preempt enable
5350 * can occur before user_exit() is called. This will cause the scheduler
5351 * to be called when the system is still in usermode.
5352 *
5353 * To prevent this, the preempt_enable_notrace will use this function
5354 * instead of preempt_schedule() to exit user context if needed before
5355 * calling the scheduler.
5356 */
5357asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
5358{
5359        enum ctx_state prev_ctx;
5360
5361        if (likely(!preemptible()))
5362                return;
5363
5364        do {
5365                /*
5366                 * Because the function tracer can trace preempt_count_sub()
5367                 * and it also uses preempt_enable/disable_notrace(), if
5368                 * NEED_RESCHED is set, the preempt_enable_notrace() called
5369                 * by the function tracer will call this function again and
5370                 * cause infinite recursion.
5371                 *
5372                 * Preemption must be disabled here before the function
5373                 * tracer can trace. Break up preempt_disable() into two
5374                 * calls. One to disable preemption without fear of being
5375                 * traced. The other to still record the preemption latency,
5376                 * which can also be traced by the function tracer.
5377                 */
5378                preempt_disable_notrace();
5379                preempt_latency_start(1);
5380                /*
5381                 * Needs preempt disabled in case user_exit() is traced
5382                 * and the tracer calls preempt_enable_notrace() causing
5383                 * an infinite recursion.
5384                 */
5385                prev_ctx = exception_enter();
5386                __schedule(true);
5387                exception_exit(prev_ctx);
5388
5389                preempt_latency_stop(1);
5390                preempt_enable_no_resched_notrace();
5391        } while (need_resched());
5392}
5393EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
5394
5395#ifdef CONFIG_PREEMPT_DYNAMIC
5396DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
5397EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
5398#endif
5399
5400#endif /* CONFIG_PREEMPTION */
5401
5402#ifdef CONFIG_PREEMPT_DYNAMIC
5403
5404#include <linux/entry-common.h>
5405
5406/*
5407 * SC:cond_resched
5408 * SC:might_resched
5409 * SC:preempt_schedule
5410 * SC:preempt_schedule_notrace
5411 * SC:irqentry_exit_cond_resched
5412 *
5413 *
5414 * NONE:
5415 *   cond_resched               <- __cond_resched
5416 *   might_resched              <- RET0
5417 *   preempt_schedule           <- NOP
5418 *   preempt_schedule_notrace   <- NOP
5419 *   irqentry_exit_cond_resched <- NOP
5420 *
5421 * VOLUNTARY:
5422 *   cond_resched               <- __cond_resched
5423 *   might_resched              <- __cond_resched
5424 *   preempt_schedule           <- NOP
5425 *   preempt_schedule_notrace   <- NOP
5426 *   irqentry_exit_cond_resched <- NOP
5427 *
5428 * FULL:
5429 *   cond_resched               <- RET0
5430 *   might_resched              <- RET0
5431 *   preempt_schedule           <- preempt_schedule
5432 *   preempt_schedule_notrace   <- preempt_schedule_notrace
5433 *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
5434 */
5435
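    /*
     * The preemption model is selected at boot time with the 'preempt='
     * command-line parameter (see setup_preempt_mode() below), which maps
     * the string to one of these modes and applies it through
     * sched_dynamic_update().
     */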
5436enum {
5437        preempt_dynamic_none = 0,
5438        preempt_dynamic_voluntary,
5439        preempt_dynamic_full,
5440};
5441
5442int preempt_dynamic_mode = preempt_dynamic_full;
5443
5444int sched_dynamic_mode(const char *str)
5445{
5446        if (!strcmp(str, "none"))
5447                return preempt_dynamic_none;
5448
5449        if (!strcmp(str, "voluntary"))
5450                return preempt_dynamic_voluntary;
5451
5452        if (!strcmp(str, "full"))
5453                return preempt_dynamic_full;
5454
5455        return -EINVAL;
5456}
5457
5458void sched_dynamic_update(int mode)
5459{
5460        /*
5461         * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
5462         * the ZERO state, which is invalid.
5463         */
5464        static_call_update(cond_resched, __cond_resched);
5465        static_call_update(might_resched, __cond_resched);
5466        static_call_update(preempt_schedule, __preempt_schedule_func);
5467        static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
5468        static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
5469
5470        switch (mode) {
5471        case preempt_dynamic_none:
5472                static_call_update(cond_resched, __cond_resched);
5473                static_call_update(might_resched, (void *)&__static_call_return0);
5474                static_call_update(preempt_schedule, NULL);
5475                static_call_update(preempt_schedule_notrace, NULL);
5476                static_call_update(irqentry_exit_cond_resched, NULL);
5477                pr_info("Dynamic Preempt: none\n");
5478                break;
5479
5480        case preempt_dynamic_voluntary:
5481                static_call_update(cond_resched, __cond_resched);
5482                static_call_update(might_resched, __cond_resched);
5483                static_call_update(preempt_schedule, NULL);
5484                static_call_update(preempt_schedule_notrace, NULL);
5485                static_call_update(irqentry_exit_cond_resched, NULL);
5486                pr_info("Dynamic Preempt: voluntary\n");
5487                break;
5488
5489        case preempt_dynamic_full:
5490                static_call_update(cond_resched, (void *)&__static_call_return0);
5491                static_call_update(might_resched, (void *)&__static_call_return0);
5492                static_call_update(preempt_schedule, __preempt_schedule_func);
5493                static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
5494                static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
5495                pr_info("Dynamic Preempt: full\n");
5496                break;
5497        }
5498
5499        preempt_dynamic_mode = mode;
5500}
5501
5502static int __init setup_preempt_mode(char *str)
5503{
5504        int mode = sched_dynamic_mode(str);
5505        if (mode < 0) {
5506                pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
5507                return 1;
5508        }
5509
5510        sched_dynamic_update(mode);
5511        return 0;
5512}
5513__setup("preempt=", setup_preempt_mode);
5514
5515#endif /* CONFIG_PREEMPT_DYNAMIC */
5516
5517/*
5518 * This is the entry point to schedule() from kernel preemption
5519 * off of irq context.
5520 * Note that this is called and returns with irqs disabled. This will
5521 * protect us against recursive calling from irq.
5522 */
5523asmlinkage __visible void __sched preempt_schedule_irq(void)
5524{
5525        enum ctx_state prev_state;
5526
5527        /* Catch callers which need to be fixed */
5528        BUG_ON(preempt_count() || !irqs_disabled());
5529
5530        prev_state = exception_enter();
5531
5532        do {
5533                preempt_disable();
5534                local_irq_enable();
5535                __schedule(true);
5536                local_irq_disable();
5537                sched_preempt_enable_no_resched();
5538        } while (need_resched());
5539
5540        exception_exit(prev_state);
5541}
5542
5543int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
5544                          void *key)
5545{
5546        WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
5547        return try_to_wake_up(curr->private, mode, wake_flags);
5548}
5549EXPORT_SYMBOL(default_wake_function);
5550
5551#ifdef CONFIG_RT_MUTEXES
5552
5553static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
5554{
5555        if (pi_task)
5556                prio = min(prio, pi_task->prio);
5557
5558        return prio;
5559}
5560
5561static inline int rt_effective_prio(struct task_struct *p, int prio)
5562{
5563        struct task_struct *pi_task = rt_mutex_get_top_task(p);
5564
5565        return __rt_effective_prio(pi_task, prio);
5566}
5567
5568/*
5569 * rt_mutex_setprio - set the current priority of a task
5570 * @p: task to boost
5571 * @pi_task: donor task
5572 *
5573 * This function changes the 'effective' priority of a task. It does
5574 * not touch ->normal_prio like __setscheduler().
5575 *
5576 * Used by the rt_mutex code to implement priority inheritance
5577 * logic. Call site only calls if the priority of the task changed.
5578 */
5579void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
5580{
5581        int prio, oldprio, queued, running, queue_flag =
5582                DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
5583        const struct sched_class *prev_class;
5584        struct rq_flags rf;
5585        struct rq *rq;
5586
5587        /* XXX used to be waiter->prio, not waiter->task->prio */
5588        prio = __rt_effective_prio(pi_task, p->normal_prio);
5589
5590        /*
5591         * If nothing changed, bail early.
5592         */
5593        if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
5594                return;
5595
5596        rq = __task_rq_lock(p, &rf);
5597        update_rq_clock(rq);
5598        /*
5599         * Set under pi_lock && rq->lock, such that the value can be used under
5600         * either lock.
5601         *
5602         * Note that it takes loads of trickiness to make this pointer cache work
5603         * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
5604         * ensure a task is de-boosted (pi_task is set to NULL) before the
5605         * task is allowed to run again (and can exit). This ensures the pointer
5606         * points to a blocked task -- which guarantees the task is present.
5607         */
5608        p->pi_top_task = pi_task;
5609
5610        /*
5611         * For FIFO/RR we only need to set prio; if that matches we're done.
5612         */
5613        if (prio == p->prio && !dl_prio(prio))
5614                goto out_unlock;
5615
5616        /*
5617         * Idle task boosting is a no-no in general. There is one
5618         * exception, when PREEMPT_RT and NOHZ are active:
5619         *
5620         * The idle task calls get_next_timer_interrupt() and holds
5621         * the timer wheel base->lock on the CPU and another CPU wants
5622         * to access the timer (probably to cancel it). We can safely
5623         * ignore the boosting request, as the idle CPU runs this code
5624         * with interrupts disabled and will complete the lock
5625         * protected section without being interrupted. So there is no
5626         * real need to boost.
5627         */
5628        if (unlikely(p == rq->idle)) {
5629                WARN_ON(p != rq->curr);
5630                WARN_ON(p->pi_blocked_on);
5631                goto out_unlock;
5632        }
5633
5634        trace_sched_pi_setprio(p, pi_task);
5635        oldprio = p->prio;
5636
5637        if (oldprio == prio)
5638                queue_flag &= ~DEQUEUE_MOVE;
5639
5640        prev_class = p->sched_class;
5641        queued = task_on_rq_queued(p);
5642        running = task_current(rq, p);
5643        if (queued)
5644                dequeue_task(rq, p, queue_flag);
5645        if (running)
5646                put_prev_task(rq, p);
5647
5648        /*
5649         * Boosting conditions are:
5650         * 1. -rt task is running and holds mutex A
5651         *      --> -dl task blocks on mutex A
5652         *
5653         * 2. -dl task is running and holds mutex A
5654         *      --> -dl task blocks on mutex A and could preempt the
5655         *          running task
5656         */
5657        if (dl_prio(prio)) {
5658                if (!dl_prio(p->normal_prio) ||
5659                    (pi_task && dl_prio(pi_task->prio) &&
5660                     dl_entity_preempt(&pi_task->dl, &p->dl))) {
5661                        p->dl.pi_se = pi_task->dl.pi_se;
5662                        queue_flag |= ENQUEUE_REPLENISH;
5663                } else {
5664                        p->dl.pi_se = &p->dl;
5665                }
5666                p->sched_class = &dl_sched_class;
5667        } else if (rt_prio(prio)) {
5668                if (dl_prio(oldprio))
5669                        p->dl.pi_se = &p->dl;
5670                if (oldprio < prio)
5671                        queue_flag |= ENQUEUE_HEAD;
5672                p->sched_class = &rt_sched_class;
5673        } else {
5674                if (dl_prio(oldprio))
5675                        p->dl.pi_se = &p->dl;
5676                if (rt_prio(oldprio))
5677                        p->rt.timeout = 0;
5678                p->sched_class = &fair_sched_class;
5679        }
5680
5681        p->prio = prio;
5682
5683        if (queued)
5684                enqueue_task(rq, p, queue_flag);
5685        if (running)
5686                set_next_task(rq, p);
5687
5688        check_class_changed(rq, p, prev_class, oldprio);
5689out_unlock:
5690        /* Avoid rq from going away on us: */
5691        preempt_disable();
5692
5693        rq_unpin_lock(rq, &rf);
5694        __balance_callbacks(rq);
5695        raw_spin_unlock(&rq->lock);
5696
5697        preempt_enable();
5698}
5699#else
5700static inline int rt_effective_prio(struct task_struct *p, int prio)
5701{
5702        return prio;
5703}
5704#endif
5705
5706void set_user_nice(struct task_struct *p, long nice)
5707{
5708        bool queued, running;
5709        int old_prio;
5710        struct rq_flags rf;
5711        struct rq *rq;
5712
5713        if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
5714                return;
5715        /*
5716         * We have to be careful: if called from sys_setpriority(),
5717         * the task might be in the middle of scheduling on another CPU.
5718         */
5719        rq = task_rq_lock(p, &rf);
5720        update_rq_clock(rq);
5721
5722        /*
5723         * The RT priorities are set via sched_setscheduler(), but we still
5724         * allow the 'normal' nice value to be set - but as expected
5725         * it won't have any effect on scheduling while the task is
5726         * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
5727         */
5728        if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
5729                p->static_prio = NICE_TO_PRIO(nice);
5730                goto out_unlock;
5731        }
5732        queued = task_on_rq_queued(p);
5733        running = task_current(rq, p);
5734        if (queued)
5735                dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
5736        if (running)
5737                put_prev_task(rq, p);
5738
5739        p->static_prio = NICE_TO_PRIO(nice);
5740        set_load_weight(p, true);
5741        old_prio = p->prio;
5742        p->prio = effective_prio(p);
5743
5744        if (queued)
5745                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
5746        if (running)
5747                set_next_task(rq, p);
5748
5749        /*
5750         * If the task increased its priority or is running and
5751         * lowered its priority, then reschedule its CPU:
5752         */
5753        p->sched_class->prio_changed(rq, p, old_prio);
5754
5755out_unlock:
5756        task_rq_unlock(rq, p, &rf);
5757}
5758EXPORT_SYMBOL(set_user_nice);
5759
5760/*
5761 * can_nice - check if a task can reduce its nice value
5762 * @p: task
5763 * @nice: nice value
5764 */
5765int can_nice(const struct task_struct *p, const int nice)
5766{
5767        /* Convert nice value [19,-20] to rlimit style value [1,40]: */
5768        int nice_rlim = nice_to_rlimit(nice);
5769
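            /*
             * e.g. a request for nice 0 maps to a rlimit-style value of 20,
             * so it is allowed only if RLIMIT_NICE is at least 20 or the
             * caller has CAP_SYS_NICE.
             */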
5770        return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
5771                capable(CAP_SYS_NICE));
5772}
5773
5774#ifdef __ARCH_WANT_SYS_NICE
5775
5776/*
5777 * sys_nice - change the priority of the current process.
5778 * @increment: priority increment
5779 *
5780 * sys_setpriority is a more generic, but much slower function that
5781 * does similar things.
5782 */
5783SYSCALL_DEFINE1(nice, int, increment)
5784{
5785        long nice, retval;
5786
5787        /*
5788         * Setpriority might change our priority at the same moment.
5789         * We don't have to worry. Conceptually one call occurs first
5790         * and we have a single winner.
5791         */
5792        increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
5793        nice = task_nice(current) + increment;
5794
5795        nice = clamp_val(nice, MIN_NICE, MAX_NICE);
5796        if (increment < 0 && !can_nice(current, nice))
5797                return -EPERM;
5798
5799        retval = security_task_setnice(current, nice);
5800        if (retval)
5801                return retval;
5802
5803        set_user_nice(current, nice);
5804        return 0;
5805}
5806
5807#endif
5808
5809/**
5810 * task_prio - return the priority value of a given task.
5811 * @p: the task in question.
5812 *
5813 * Return: The priority value as seen by users in /proc.
5814 *
5815 * sched policy         return value   kernel prio    user prio/nice
5816 *
5817 * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
5818 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
5819 * deadline                     -101             -1           0
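     *
     * For example, a SCHED_NORMAL task at nice 0 has a kernel prio of 120
     * and is reported here (and in /proc) as 20.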
5820 */
5821int task_prio(const struct task_struct *p)
5822{
5823        return p->prio - MAX_RT_PRIO;
5824}
5825
5826/**
5827 * idle_cpu - is a given CPU idle currently?
5828 * @cpu: the processor in question.
5829 *
5830 * Return: 1 if the CPU is currently idle. 0 otherwise.
5831 */
5832int idle_cpu(int cpu)
5833{
5834        struct rq *rq = cpu_rq(cpu);
5835
5836        if (rq->curr != rq->idle)
5837                return 0;
5838
5839        if (rq->nr_running)
5840                return 0;
5841
5842#ifdef CONFIG_SMP
5843        if (rq->ttwu_pending)
5844                return 0;
5845#endif
5846
5847        return 1;
5848}
5849
5850/**
5851 * available_idle_cpu - is a given CPU idle for enqueuing work.
5852 * @cpu: the CPU in question.
5853 *
5854 * Return: 1 if the CPU is currently idle. 0 otherwise.
5855 */
5856int available_idle_cpu(int cpu)
5857{
5858        if (!idle_cpu(cpu))
5859                return 0;
5860
5861        if (vcpu_is_preempted(cpu))
5862                return 0;
5863
5864        return 1;
5865}
5866
5867/**
5868 * idle_task - return the idle task for a given CPU.
5869 * @cpu: the processor in question.
5870 *
5871 * Return: The idle task for the CPU @cpu.
5872 */
5873struct task_struct *idle_task(int cpu)
5874{
5875        return cpu_rq(cpu)->idle;
5876}
5877
5878#ifdef CONFIG_SMP
5879/*
5880 * This function computes an effective utilization for the given CPU, to be
5881 * used for frequency selection given the linear relation: f = u * f_max.
5882 *
5883 * The scheduler tracks the following metrics:
5884 *
5885 *   cpu_util_{cfs,rt,dl,irq}()
5886 *   cpu_bw_dl()
5887 *
5888 * Where the cfs,rt and dl util numbers are tracked with the same metric and
5889 * synchronized windows and are thus directly comparable.
5890 *
5891 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
5892 * which excludes things like IRQ and steal-time. These latter are then accrued
5893 * in the irq utilization.
5894 *
5895 * The DL bandwidth number otoh is not a measured metric but a value computed
5896 * based on the task model parameters and gives the minimal utilization
5897 * required to meet deadlines.
5898 */
5899unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
5900                                 unsigned long max, enum cpu_util_type type,
5901                                 struct task_struct *p)
5902{
5903        unsigned long dl_util, util, irq;
5904        struct rq *rq = cpu_rq(cpu);
5905
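            /*
             * Without uclamp, a runnable RT task is assumed to require the
             * maximum frequency, so frequency requests simply return max.
             */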
5906        if (!uclamp_is_used() &&
5907            type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
5908                return max;
5909        }
5910
5911        /*
5912         * Early check to see if IRQ/steal time saturates the CPU; this can
5913         * happen because of inaccuracies in how we track these -- see
5914         * update_irq_load_avg().
5915         */
5916        irq = cpu_util_irq(rq);
5917        if (unlikely(irq >= max))
5918                return max;
5919
5920        /*
5921         * Because the time spent on RT/DL tasks is visible as 'lost' time to
5922         * CFS tasks and we use the same metric to track the effective
5923         * utilization (PELT windows are synchronized) we can directly add them
5924         * to obtain the CPU's actual utilization.
5925         *
5926         * CFS and RT utilization can be boosted or capped, depending on
5927         * utilization clamp constraints requested by currently RUNNABLE
5928         * tasks.
5929         * When there are no CFS RUNNABLE tasks, clamps are released and
5930         * frequency will be gracefully reduced with the utilization decay.
5931         */
5932        util = util_cfs + cpu_util_rt(rq);
5933        if (type == FREQUENCY_UTIL)
5934                util = uclamp_rq_util_with(rq, util, p);
5935
5936        dl_util = cpu_util_dl(rq);
5937
5938        /*
5939         * For frequency selection we do not make cpu_util_dl() a permanent part
5940         * of this sum because we want to use cpu_bw_dl() later on, but we need
5941         * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
5942         * that we select f_max when there is no idle time.
5943         *
5944         * NOTE: numerical errors or stop class might cause us to not quite hit
5945         * saturation when we should -- something for later.
5946         */
5947        if (util + dl_util >= max)
5948                return max;
5949
5950        /*
5951         * OTOH, for energy computation we need the estimated running time, so
5952         * include util_dl and ignore dl_bw.
5953         */
5954        if (type == ENERGY_UTIL)
5955                util += dl_util;
5956
5957        /*
5958         * There is still idle time; further improve the number by using the
5959         * irq metric. Because IRQ/steal time is hidden from the task clock we
5960         * need to scale the task numbers:
5961         *
5962         *              max - irq
5963         *   U' = irq + --------- * U
5964         *                 max
5965         */
5966        util = scale_irq_capacity(util, irq, max);
5967        util += irq;
5968
5969        /*
5970         * Bandwidth required by DEADLINE must always be granted while, for
5971         * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
5972         * to gracefully reduce the frequency when no tasks show up for longer
5973         * periods of time.
5974         *
5975         * Ideally we would like to set bw_dl as min/guaranteed freq and util +
5976         * bw_dl as requested freq. However, cpufreq is not yet ready for such
5977         * an interface. So, we only do the latter for now.
5978         */
5979        if (type == FREQUENCY_UTIL)
5980                util += cpu_bw_dl(rq);
5981
5982        return min(max, util);
5983}
5984
5985unsigned long sched_cpu_util(int cpu, unsigned long max)
5986{
5987        return effective_cpu_util(cpu, cpu_util_cfs(cpu_rq(cpu)), max,
5988                                  ENERGY_UTIL, NULL);
5989}
5990#endif /* CONFIG_SMP */
5991
5992/**
5993 * find_process_by_pid - find a process with a matching PID value.
5994 * @pid: the pid in question.
5995 *
5996 * The task of @pid, if found. %NULL otherwise.
5997 */
5998static struct task_struct *find_process_by_pid(pid_t pid)
5999{
6000        return pid ? find_task_by_vpid(pid) : current;
6001}
6002
6003/*
6004 * sched_setparam() passes in -1 for its policy, to let the functions
6005 * it calls know not to change it.
6006 */
6007#define SETPARAM_POLICY -1
6008
6009static void __setscheduler_params(struct task_struct *p,
6010                const struct sched_attr *attr)
6011{
6012        int policy = attr->sched_policy;
6013
6014        if (policy == SETPARAM_POLICY)
6015                policy = p->policy;
6016
6017        p->policy = policy;
6018
6019        if (dl_policy(policy))
6020                __setparam_dl(p, attr);
6021        else if (fair_policy(policy))
6022                p->static_prio = NICE_TO_PRIO(attr->sched_nice);
6023
6024        /*
6025         * __sched_setscheduler() ensures attr->sched_priority == 0 when
6026         * !rt_policy. Always setting this ensures that things like
6027         * getparam()/getattr() don't report silly values for !rt tasks.
6028         */
6029        p->rt_priority = attr->sched_priority;
6030        p->normal_prio = normal_prio(p);
6031        set_load_weight(p, true);
6032}
6033
6034/* Actually do priority change: must hold pi & rq lock. */
6035static void __setscheduler(struct rq *rq, struct task_struct *p,
6036                           const struct sched_attr *attr, bool keep_boost)
6037{
6038        /*
6039         * If params can't change, scheduling class changes aren't allowed
6040         * either.
6041         */
6042        if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
6043                return;
6044
6045        __setscheduler_params(p, attr);
6046
6047        /*
6048         * Keep a potential priority boosting if called from
6049         * sched_setscheduler().
6050         */
6051        p->prio = normal_prio(p);
6052        if (keep_boost)
6053                p->prio = rt_effective_prio(p, p->prio);
6054
6055        if (dl_prio(p->prio))
6056                p->sched_class = &dl_sched_class;
6057        else if (rt_prio(p->prio))
6058                p->sched_class = &rt_sched_class;
6059        else
6060                p->sched_class = &fair_sched_class;
6061}
6062
6063/*
6064 * Check the target process has a UID that matches the current process's:
6065 */
6066static bool check_same_owner(struct task_struct *p)
6067{
6068        const struct cred *cred = current_cred(), *pcred;
6069        bool match;
6070
6071        rcu_read_lock();
6072        pcred = __task_cred(p);
6073        match = (uid_eq(cred->euid, pcred->euid) ||
6074                 uid_eq(cred->euid, pcred->uid));
6075        rcu_read_unlock();
6076        return match;
6077}
6078
6079static int __sched_setscheduler(struct task_struct *p,
6080                                const struct sched_attr *attr,
6081                                bool user, bool pi)
6082{
6083        int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
6084                      MAX_RT_PRIO - 1 - attr->sched_priority;
6085        int retval, oldprio, oldpolicy = -1, queued, running;
6086        int new_effective_prio, policy = attr->sched_policy;
6087        const struct sched_class *prev_class;
6088        struct callback_head *head;
6089        struct rq_flags rf;
6090        int reset_on_fork;
6091        int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
6092        struct rq *rq;
6093
6094        /* The pi code expects interrupts enabled */
6095        BUG_ON(pi && in_interrupt());
6096recheck:
6097        /* Double check policy once rq lock held: */
6098        if (policy < 0) {
6099                reset_on_fork = p->sched_reset_on_fork;
6100                policy = oldpolicy = p->policy;
6101        } else {
6102                reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
6103
6104                if (!valid_policy(policy))
6105                        return -EINVAL;
6106        }
6107
6108        if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
6109                return -EINVAL;
6110
6111        /*
6112         * Valid priorities for SCHED_FIFO and SCHED_RR are
6113         * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
6114         * SCHED_BATCH and SCHED_IDLE is 0.
6115         */
6116        if (attr->sched_priority > MAX_RT_PRIO-1)
6117                return -EINVAL;
6118        if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
6119            (rt_policy(policy) != (attr->sched_priority != 0)))
6120                return -EINVAL;
6121
6122        /*
6123         * Allow unprivileged RT tasks to decrease priority:
6124         */
6125        if (user && !capable(CAP_SYS_NICE)) {
6126                if (fair_policy(policy)) {
6127                        if (attr->sched_nice < task_nice(p) &&
6128                            !can_nice(p, attr->sched_nice))
6129                                return -EPERM;
6130                }
6131
6132                if (rt_policy(policy)) {
6133                        unsigned long rlim_rtprio =
6134                                        task_rlimit(p, RLIMIT_RTPRIO);
6135
6136                        /* Can't set/change the rt policy: */
6137                        if (policy != p->policy && !rlim_rtprio)
6138                                return -EPERM;
6139
6140                        /* Can't increase priority: */
6141                        if (attr->sched_priority > p->rt_priority &&
6142                            attr->sched_priority > rlim_rtprio)
6143                                return -EPERM;
6144                }
6145
6146                 /*
6147                  * Can't set/change SCHED_DEADLINE policy at all for now
6148                  * (safest behavior); in the future we would like to allow
6149                  * unprivileged DL tasks to increase their relative deadline
6150                  * or reduce their runtime (both ways reducing utilization)
6151                  */
6152                if (dl_policy(policy))
6153                        return -EPERM;
6154
6155                /*
6156                 * Treat SCHED_IDLE as nice 20. Only allow a switch to
6157                 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
6158                 */
6159                if (task_has_idle_policy(p) && !idle_policy(policy)) {
6160                        if (!can_nice(p, task_nice(p)))
6161                                return -EPERM;
6162                }
6163
6164                /* Can't change other user's priorities: */
6165                if (!check_same_owner(p))
6166                        return -EPERM;
6167
6168                /* Normal users shall not reset the sched_reset_on_fork flag: */
6169                if (p->sched_reset_on_fork && !reset_on_fork)
6170                        return -EPERM;
6171        }
6172
6173        if (user) {
6174                if (attr->sched_flags & SCHED_FLAG_SUGOV)
6175                        return -EINVAL;
6176
6177                retval = security_task_setscheduler(p);
6178                if (retval)
6179                        return retval;
6180        }
6181
6182        /* Update task specific "requested" clamps */
6183        if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
6184                retval = uclamp_validate(p, attr);
6185                if (retval)
6186                        return retval;
6187        }
6188
6189        if (pi)
6190                cpuset_read_lock();
6191
6192        /*
6193         * Make sure no PI-waiters arrive (or leave) while we are
6194         * changing the priority of the task:
6195         *
6196         * To be able to change p->policy safely, the appropriate
6197         * runqueue lock must be held.
6198         */
6199        rq = task_rq_lock(p, &rf);
6200        update_rq_clock(rq);
6201
6202        /*
6203         * Changing the policy of the stop threads is a very bad idea:
6204         */
6205        if (p == rq->stop) {
6206                retval = -EINVAL;
6207                goto unlock;
6208        }
6209
6210        /*
6211         * If not changing anything there's no need to proceed further,
6212         * but store a possible modification of reset_on_fork.
6213         */
6214        if (unlikely(policy == p->policy)) {
6215                if (fair_policy(policy) && attr->sched_nice != task_nice(p))
6216                        goto change;
6217                if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
6218                        goto change;
6219                if (dl_policy(policy) && dl_param_changed(p, attr))
6220                        goto change;
6221                if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
6222                        goto change;
6223
6224                p->sched_reset_on_fork = reset_on_fork;
6225                retval = 0;
6226                goto unlock;
6227        }
6228change:
6229
6230        if (user) {
6231#ifdef CONFIG_RT_GROUP_SCHED
6232                /*
6233                 * Do not allow realtime tasks into groups that have no runtime
6234                 * assigned.
6235                 */
6236                if (rt_bandwidth_enabled() && rt_policy(policy) &&
6237                                task_group(p)->rt_bandwidth.rt_runtime == 0 &&
6238                                !task_group_is_autogroup(task_group(p))) {
6239                        retval = -EPERM;
6240                        goto unlock;
6241                }
6242#endif
6243#ifdef CONFIG_SMP
6244                if (dl_bandwidth_enabled() && dl_policy(policy) &&
6245                                !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
6246                        cpumask_t *span = rq->rd->span;
6247
6248                        /*
6249                         * Don't allow tasks with an affinity mask smaller than
6250                         * the entire root_domain to become SCHED_DEADLINE. We
6251                         * will also fail if there's no bandwidth available.
6252                         */
6253                        if (!cpumask_subset(span, p->cpus_ptr) ||
6254                            rq->rd->dl_bw.bw == 0) {
6255                                retval = -EPERM;
6256                                goto unlock;
6257                        }
6258                }
6259#endif
6260        }
6261
6262        /* Re-check policy now with rq lock held: */
6263        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
6264                policy = oldpolicy = -1;
6265                task_rq_unlock(rq, p, &rf);
6266                if (pi)
6267                        cpuset_read_unlock();
6268                goto recheck;
6269        }
6270
6271        /*
6272         * If setscheduling to SCHED_DEADLINE (or changing the parameters
6273         * of a SCHED_DEADLINE task) we need to check if enough bandwidth
6274         * is available.
6275         */
6276        if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
6277                retval = -EBUSY;
6278                goto unlock;
6279        }
6280
6281        p->sched_reset_on_fork = reset_on_fork;
6282        oldprio = p->prio;
6283
6284        if (pi) {
6285                /*
6286                 * Take priority boosted tasks into account. If the new
6287                 * effective priority is unchanged, we just store the new
6288                 * normal parameters and do not touch the scheduler class and
6289                 * the runqueue. This will be done when the task deboosts
6290                 * itself.
6291                 */
6292                new_effective_prio = rt_effective_prio(p, newprio);
6293                if (new_effective_prio == oldprio)
6294                        queue_flags &= ~DEQUEUE_MOVE;
6295        }
6296
6297        queued = task_on_rq_queued(p);
6298        running = task_current(rq, p);
6299        if (queued)
6300                dequeue_task(rq, p, queue_flags);
6301        if (running)
6302                put_prev_task(rq, p);
6303
6304        prev_class = p->sched_class;
6305
6306        __setscheduler(rq, p, attr, pi);
6307        __setscheduler_uclamp(p, attr);
6308
6309        if (queued) {
6310                /*
6311                 * We enqueue to the tail when the priority of a task is
6312                 * increased (user-space view).
6313                 */
6314                if (oldprio < p->prio)
6315                        queue_flags |= ENQUEUE_HEAD;
6316
6317                enqueue_task(rq, p, queue_flags);
6318        }
6319        if (running)
6320                set_next_task(rq, p);
6321
6322        check_class_changed(rq, p, prev_class, oldprio);
6323
6324        /* Prevent rq from going away on us: */
6325        preempt_disable();
6326        head = splice_balance_callbacks(rq);
6327        task_rq_unlock(rq, p, &rf);
6328
6329        if (pi) {
6330                cpuset_read_unlock();
6331                rt_mutex_adjust_pi(p);
6332        }
6333
6334        /* Run balance callbacks after we've adjusted the PI chain: */
6335        balance_callbacks(rq, head);
6336        preempt_enable();
6337
6338        return 0;
6339
6340unlock:
6341        task_rq_unlock(rq, p, &rf);
6342        if (pi)
6343                cpuset_read_unlock();
6344        return retval;
6345}
6346
6347static int _sched_setscheduler(struct task_struct *p, int policy,
6348                               const struct sched_param *param, bool check)
6349{
6350        struct sched_attr attr = {
6351                .sched_policy   = policy,
6352                .sched_priority = param->sched_priority,
6353                .sched_nice     = PRIO_TO_NICE(p->static_prio),
6354        };
6355
6356        /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
6357        if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
6358                attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
6359                policy &= ~SCHED_RESET_ON_FORK;
6360                attr.sched_policy = policy;
6361        }
6362
6363        return __sched_setscheduler(p, &attr, check, true);
6364}
6365/**
6366 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
6367 * @p: the task in question.
6368 * @policy: new policy.
6369 * @param: structure containing the new RT priority.
6370 *
6371 * Use sched_set_fifo(), read its comment.
6372 *
6373 * Return: 0 on success. An error code otherwise.
6374 *
6375 * NOTE that the task may already be dead.
6376 */
6377int sched_setscheduler(struct task_struct *p, int policy,
6378                       const struct sched_param *param)
6379{
6380        return _sched_setscheduler(p, policy, param, true);
6381}
6382
6383int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
6384{
6385        return __sched_setscheduler(p, attr, true, true);
6386}
6387
6388int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
6389{
6390        return __sched_setscheduler(p, attr, false, true);
6391}
6392
6393/**
6394 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
6395 * @p: the task in question.
6396 * @policy: new policy.
6397 * @param: structure containing the new RT priority.
6398 *
6399 * Just like sched_setscheduler, only don't bother checking if the
6400 * current context has permission.  For example, this is needed in
6401 * stop_machine(): we create temporary high priority worker threads,
6402 * but our caller might not have that capability.
6403 *
6404 * Return: 0 on success. An error code otherwise.
6405 */
6406int sched_setscheduler_nocheck(struct task_struct *p, int policy,
6407                               const struct sched_param *param)
6408{
6409        return _sched_setscheduler(p, policy, param, false);
6410}
6411
6412/*
6413 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
6414 * incapable of resource management, which is the one thing an OS really should
6415 * be doing.
6416 *
6417 * This is of course the reason it is limited to privileged users only.
6418 *
6419 * Worse still, it is fundamentally impossible to compose static priority
6420 * workloads. You cannot take two correctly working static prio workloads
6421 * and smash them together and still expect them to work.
6422 *
6423 * For this reason 'all' FIFO tasks the kernel creates are basically at:
6424 *
6425 *   MAX_RT_PRIO / 2
6426 *
6427 * The administrator _MUST_ configure the system; the kernel simply doesn't
6428 * have enough information to make a sensible choice.
6429 */
6430void sched_set_fifo(struct task_struct *p)
6431{
6432        struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
6433        WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
6434}
6435EXPORT_SYMBOL_GPL(sched_set_fifo);
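/*
 * A minimal, illustrative sketch (the kthread and its names are made up, not
 * taken from this file): a driver whose worker must preempt SCHED_NORMAL
 * tasks would create the thread and hand priority selection to these helpers
 * rather than picking a number itself:
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_run(my_worker_fn, NULL, "my_worker");
 *	if (!IS_ERR(tsk))
 *		sched_set_fifo(tsk);
 *
 * sched_set_fifo_low() is the variant for "above SCHED_NORMAL but nothing
 * special", and sched_set_normal() drops a task back to SCHED_NORMAL at a
 * given nice value.
 */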
6436
6437/*
6438 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
6439 */
6440void sched_set_fifo_low(struct task_struct *p)
6441{
6442        struct sched_param sp = { .sched_priority = 1 };
6443        WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
6444}
6445EXPORT_SYMBOL_GPL(sched_set_fifo_low);
6446
6447void sched_set_normal(struct task_struct *p, int nice)
6448{
6449        struct sched_attr attr = {
6450                .sched_policy = SCHED_NORMAL,
6451                .sched_nice = nice,
6452        };
6453        WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
6454}
6455EXPORT_SYMBOL_GPL(sched_set_normal);
6456
6457static int
6458do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
6459{
6460        struct sched_param lparam;
6461        struct task_struct *p;
6462        int retval;
6463
6464        if (!param || pid < 0)
6465                return -EINVAL;
6466        if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
6467                return -EFAULT;
6468
6469        rcu_read_lock();
6470        retval = -ESRCH;
6471        p = find_process_by_pid(pid);
6472        if (likely(p))
6473                get_task_struct(p);
6474        rcu_read_unlock();
6475
6476        if (likely(p)) {
6477                retval = sched_setscheduler(p, policy, &lparam);
6478                put_task_struct(p);
6479        }
6480
6481        return retval;
6482}
6483
6484/*
6485 * Mimics kernel/events/core.c perf_copy_attr().
6486 */
6487static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
6488{
6489        u32 size;
6490        int ret;
6491
6492        /* Zero the full structure, so that a short copy will be nice: */
6493        memset(attr, 0, sizeof(*attr));
6494
6495        ret = get_user(size, &uattr->size);
6496        if (ret)
6497                return ret;
6498
6499        /* ABI compatibility quirk: */
6500        if (!size)
6501                size = SCHED_ATTR_SIZE_VER0;
6502        if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
6503                goto err_size;
6504
6505        ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
6506        if (ret) {
6507                if (ret == -E2BIG)
6508                        goto err_size;
6509                return ret;
6510        }
6511
6512        if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
6513            size < SCHED_ATTR_SIZE_VER1)
6514                return -EINVAL;
6515
6516        /*
6517         * XXX: Do we want to be lenient like existing syscalls; or do we want
6518         * to be strict and return an error on out-of-bounds values?
6519         */
6520        attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
6521
6522        return 0;
6523
6524err_size:
6525        put_user(sizeof(*attr), &uattr->size);
6526        return -E2BIG;
6527}
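/*
 * Worked example (illustrative): a binary built against the original VER0
 * ABI passes a sched_attr that ends at sched_period. copy_struct_from_user()
 * copies only those bytes and, together with the memset() above, leaves the
 * newer util-clamp fields zeroed, so old userspace keeps working unchanged.
 * Conversely, a newer binary that sets fields this kernel does not know
 * about (non-zero bytes beyond sizeof(*attr)) gets -E2BIG, with the kernel's
 * sizeof(*attr) written back through uattr->size so tooling can adapt.
 */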
6528
6529/**
6530 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
6531 * @pid: the pid in question.
6532 * @policy: new policy.
6533 * @param: structure containing the new RT priority.
6534 *
6535 * Return: 0 on success. An error code otherwise.
6536 */
6537SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
6538{
6539        if (policy < 0)
6540                return -EINVAL;
6541
6542        return do_sched_setscheduler(pid, policy, param);
6543}
6544
6545/**
6546 * sys_sched_setparam - set/change the RT priority of a thread
6547 * @pid: the pid in question.
6548 * @param: structure containing the new RT priority.
6549 *
6550 * Return: 0 on success. An error code otherwise.
6551 */
6552SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
6553{
6554        return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
6555}
6556
6557/**
6558 * sys_sched_setattr - same as above, but with extended sched_attr
6559 * @pid: the pid in question.
6560 * @uattr: structure containing the extended parameters.
6561 * @flags: for future extension.
6562 */
6563SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
6564                               unsigned int, flags)
6565{
6566        struct sched_attr attr;
6567        struct task_struct *p;
6568        int retval;
6569
6570        if (!uattr || pid < 0 || flags)
6571                return -EINVAL;
6572
6573        retval = sched_copy_attr(uattr, &attr);
6574        if (retval)
6575                return retval;
6576
6577        if ((int)attr.sched_policy < 0)
6578                return -EINVAL;
6579        if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
6580                attr.sched_policy = SETPARAM_POLICY;
6581
6582        rcu_read_lock();
6583        retval = -ESRCH;
6584        p = find_process_by_pid(pid);
6585        if (likely(p))
6586                get_task_struct(p);
6587        rcu_read_unlock();
6588
6589        if (likely(p)) {
6590                retval = sched_setattr(p, &attr);
6591                put_task_struct(p);
6592        }
6593
6594        return retval;
6595}
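/*
 * Illustrative user-space sketch (not kernel code; the values are examples
 * only and struct sched_attr comes from the uapi headers or is declared by
 * the caller): sched_setattr() traditionally has no glibc wrapper, so callers
 * invoke the raw syscall, e.g. to make the current thread SCHED_DEADLINE with
 * runtime 10ms, deadline 30ms and period 100ms (all in nanoseconds):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,
 *		.sched_deadline	= 30 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 *
 * pid 0 means the calling thread, and flags must currently be 0.
 */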
6596
6597/**
6598 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
6599 * @pid: the pid in question.
6600 *
6601 * Return: On success, the policy of the thread. Otherwise, a negative error
6602 * code.
6603 */
6604SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
6605{
6606        struct task_struct *p;
6607        int retval;
6608
6609        if (pid < 0)
6610                return -EINVAL;
6611
6612        retval = -ESRCH;
6613        rcu_read_lock();
6614        p = find_process_by_pid(pid);
6615        if (p) {
6616                retval = security_task_getscheduler(p);
6617                if (!retval)
6618                        retval = p->policy
6619                                | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
6620        }
6621        rcu_read_unlock();
6622        return retval;
6623}
6624
6625/**
6626 * sys_sched_getparam - get the RT priority of a thread
6627 * @pid: the pid in question.
6628 * @param: structure containing the RT priority.
6629 *
6630 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
6631 * code.
6632 */
6633SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6634{
6635        struct sched_param lp = { .sched_priority = 0 };
6636        struct task_struct *p;
6637        int retval;
6638
6639        if (!param || pid < 0)
6640                return -EINVAL;
6641
6642        rcu_read_lock();
6643        p = find_process_by_pid(pid);
6644        retval = -ESRCH;
6645        if (!p)
6646                goto out_unlock;
6647
6648        retval = security_task_getscheduler(p);
6649        if (retval)
6650                goto out_unlock;
6651
6652        if (task_has_rt_policy(p))
6653                lp.sched_priority = p->rt_priority;
6654        rcu_read_unlock();
6655
6656        /*
6657         * This one might sleep; we cannot do it with a spinlock held ...
6658         */
6659        retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
6660
6661        return retval;
6662
6663out_unlock:
6664        rcu_read_unlock();
6665        return retval;
6666}
6667
6668/*
6669 * Copy the kernel size attribute structure (which might be larger
6670 * than what user-space knows about) to user-space.
6671 *
6672 * Note that all cases are valid: the user-space buffer can be larger or
6673 * smaller than the kernel-space buffer. The usual case is that both
6674 * have the same size.
6675 */
6676static int
6677sched_attr_copy_to_user(struct sched_attr __user *uattr,
6678                        struct sched_attr *kattr,
6679                        unsigned int usize)
6680{
6681        unsigned int ksize = sizeof(*kattr);
6682
6683        if (!access_ok(uattr, usize))
6684                return -EFAULT;
6685
6686        /*
6687         * sched_getattr() ABI forwards and backwards compatibility:
6688         *
6689         * If usize == ksize then we just copy everything to user-space and all is good.
6690         *
6691         * If usize < ksize then we only copy as much as user-space has space for,
6692         * this keeps ABI compatibility as well. We skip the rest.
6693         *
6694         * If usize > ksize then user-space is using a newer version of the ABI,
6695         * parts of which the kernel doesn't know about. Just ignore them - tooling can
6696         * detect the kernel's knowledge of attributes from the attr->size value
6697         * which is set to ksize in this case.
6698         */
6699        kattr->size = min(usize, ksize);
6700
6701        if (copy_to_user(uattr, kattr, kattr->size))
6702                return -EFAULT;
6703
6704        return 0;
6705}
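/*
 * Concrete example (illustrative): a VER0 binary passes a smaller usize than
 * sizeof(struct sched_attr); kattr->size is clamped to that usize and only
 * the fields the caller knows about are copied out. A newer binary on this
 * kernel passes a larger usize and simply finds kattr->size set to the
 * kernel's smaller size, with its extra trailing bytes left untouched.
 */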
6706
6707/**
6708 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
6709 * @pid: the pid in question.
6710 * @uattr: structure containing the extended parameters.
6711 * @usize: sizeof(attr) for fwd/bwd comp.
6712 * @flags: for future extension.
6713 */
6714SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
6715                unsigned int, usize, unsigned int, flags)
6716{
6717        struct sched_attr kattr = { };
6718        struct task_struct *p;
6719        int retval;
6720
6721        if (!uattr || pid < 0 || usize > PAGE_SIZE ||
6722            usize < SCHED_ATTR_SIZE_VER0 || flags)
6723                return -EINVAL;
6724
6725        rcu_read_lock();
6726        p = find_process_by_pid(pid);
6727        retval = -ESRCH;
6728        if (!p)
6729                goto out_unlock;
6730
6731        retval = security_task_getscheduler(p);
6732        if (retval)
6733                goto out_unlock;
6734
6735        kattr.sched_policy = p->policy;
6736        if (p->sched_reset_on_fork)
6737                kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
6738        if (task_has_dl_policy(p))
6739                __getparam_dl(p, &kattr);
6740        else if (task_has_rt_policy(p))
6741                kattr.sched_priority = p->rt_priority;
6742        else
6743                kattr.sched_nice = task_nice(p);
6744
6745#ifdef CONFIG_UCLAMP_TASK
6746        /*
6747         * This could race with another potential updater, but this is fine
6748         * because it'll correctly read the old or the new value. We don't need
6749         * to guarantee who wins the race as long as it doesn't return garbage.
6750         */
6751        kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
6752        kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
6753#endif
6754
6755        rcu_read_unlock();
6756
6757        return sched_attr_copy_to_user(uattr, &kattr, usize);
6758
6759out_unlock:
6760        rcu_read_unlock();
6761        return retval;
6762}
6763
6764long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
6765{
6766        cpumask_var_t cpus_allowed, new_mask;
6767        struct task_struct *p;
6768        int retval;
6769
6770        rcu_read_lock();
6771
6772        p = find_process_by_pid(pid);
6773        if (!p) {
6774                rcu_read_unlock();
6775                return -ESRCH;
6776        }
6777
6778        /* Prevent p going away */
6779        get_task_struct(p);
6780        rcu_read_unlock();
6781
6782        if (p->flags & PF_NO_SETAFFINITY) {
6783                retval = -EINVAL;
6784                goto out_put_task;
6785        }
6786        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
6787                retval = -ENOMEM;
6788                goto out_put_task;
6789        }
6790        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
6791                retval = -ENOMEM;
6792                goto out_free_cpus_allowed;
6793        }
6794        retval = -EPERM;
6795        if (!check_same_owner(p)) {
6796                rcu_read_lock();
6797                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
6798                        rcu_read_unlock();
6799                        goto out_free_new_mask;
6800                }
6801                rcu_read_unlock();
6802        }
6803
6804        retval = security_task_setscheduler(p);
6805        if (retval)
6806                goto out_free_new_mask;
6807
6808
6809        cpuset_cpus_allowed(p, cpus_allowed);
6810        cpumask_and(new_mask, in_mask, cpus_allowed);
6811
6812        /*
6813         * Since bandwidth control happens on a per-root_domain basis,
6814         * if the admission test is enabled we only admit -deadline
6815         * tasks that are allowed to run on all the CPUs in the task's
6816         * root_domain.
6817         */
6818#ifdef CONFIG_SMP
6819        if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
6820                rcu_read_lock();
6821                if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
6822                        retval = -EBUSY;
6823                        rcu_read_unlock();
6824                        goto out_free_new_mask;
6825                }
6826                rcu_read_unlock();
6827        }
6828#endif
6829again:
6830        retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
6831
6832        if (!retval) {
6833                cpuset_cpus_allowed(p, cpus_allowed);
6834                if (!cpumask_subset(new_mask, cpus_allowed)) {
6835                        /*
6836                         * We must have raced with a concurrent cpuset
6837                         * update. Just reset the cpus_allowed to the
6838                         * cpuset's cpus_allowed
6839                         */
6840                        cpumask_copy(new_mask, cpus_allowed);
6841                        goto again;
6842                }
6843        }
6844out_free_new_mask:
6845        free_cpumask_var(new_mask);
6846out_free_cpus_allowed:
6847        free_cpumask_var(cpus_allowed);
6848out_put_task:
6849        put_task_struct(p);
6850        return retval;
6851}
6852
6853static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
6854                             struct cpumask *new_mask)
6855{
6856        if (len < cpumask_size())
6857                cpumask_clear(new_mask);
6858        else if (len > cpumask_size())
6859                len = cpumask_size();
6860
6861        return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
6862}
6863
6864/**
6865 * sys_sched_setaffinity - set the CPU affinity of a process
6866 * @pid: pid of the process
6867 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
6868 * @user_mask_ptr: user-space pointer to the new CPU mask
6869 *
6870 * Return: 0 on success. An error code otherwise.
6871 */
6872SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
6873                unsigned long __user *, user_mask_ptr)
6874{
6875        cpumask_var_t new_mask;
6876        int retval;
6877
6878        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
6879                return -ENOMEM;
6880
6881        retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
6882        if (retval == 0)
6883                retval = sched_setaffinity(pid, new_mask);
6884        free_cpumask_var(new_mask);
6885        return retval;
6886}
6887
6888long sched_getaffinity(pid_t pid, struct cpumask *mask)
6889{
6890        struct task_struct *p;
6891        unsigned long flags;
6892        int retval;
6893
6894        rcu_read_lock();
6895
6896        retval = -ESRCH;
6897        p = find_process_by_pid(pid);
6898        if (!p)
6899                goto out_unlock;
6900
6901        retval = security_task_getscheduler(p);
6902        if (retval)
6903                goto out_unlock;
6904
6905        raw_spin_lock_irqsave(&p->pi_lock, flags);
6906        cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
6907        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6908
6909out_unlock:
6910        rcu_read_unlock();
6911
6912        return retval;
6913}
6914
6915/**
6916 * sys_sched_getaffinity - get the CPU affinity of a process
6917 * @pid: pid of the process
6918 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
6919 * @user_mask_ptr: user-space pointer to hold the current CPU mask
6920 *
6921 * Return: size of CPU mask copied to user_mask_ptr on success. An
6922 * error code otherwise.
6923 */
6924SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
6925                unsigned long __user *, user_mask_ptr)
6926{
6927        int ret;
6928        cpumask_var_t mask;
6929
6930        if ((len * BITS_PER_BYTE) < nr_cpu_ids)
6931                return -EINVAL;
6932        if (len & (sizeof(unsigned long)-1))
6933                return -EINVAL;
6934
6935        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
6936                return -ENOMEM;
6937
6938        ret = sched_getaffinity(pid, mask);
6939        if (ret == 0) {
6940                unsigned int retlen = min(len, cpumask_size());
6941
6942                if (copy_to_user(user_mask_ptr, mask, retlen))
6943                        ret = -EFAULT;
6944                else
6945                        ret = retlen;
6946        }
6947        free_cpumask_var(mask);
6948
6949        return ret;
6950}
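/*
 * Illustrative user-space sketch (not kernel code): unlike the glibc wrapper,
 * which returns 0 on success, the raw syscall returns the number of mask
 * bytes written (retlen above):
 *
 *	cpu_set_t set;
 *	long ret = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);
 *
 * ret > 0 means that many bytes of 'set' are valid; the call fails with
 * EINVAL when the buffer is too small to hold nr_cpu_ids bits or its size
 * is not a multiple of sizeof(unsigned long).
 */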
6951
6952static void do_sched_yield(void)
6953{
6954        struct rq_flags rf;
6955        struct rq *rq;
6956
6957        rq = this_rq_lock_irq(&rf);
6958
6959        schedstat_inc(rq->yld_count);
6960        current->sched_class->yield_task(rq);
6961
6962        preempt_disable();
6963        rq_unlock_irq(rq, &rf);
6964        sched_preempt_enable_no_resched();
6965
6966        schedule();
6967}
6968
6969/**
6970 * sys_sched_yield - yield the current processor to other threads.
6971 *
6972 * This function yields the current CPU to other tasks. If there are no
6973 * other threads running on this CPU then this function will return.
6974 *
6975 * Return: 0.
6976 */
6977SYSCALL_DEFINE0(sched_yield)
6978{
6979        do_sched_yield();
6980        return 0;
6981}
6982
6983#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
6984int __sched __cond_resched(void)
6985{
6986        if (should_resched(0)) {
6987                preempt_schedule_common();
6988                return 1;
6989        }
6990#ifndef CONFIG_PREEMPT_RCU
6991        rcu_all_qs();
6992#endif
6993        return 0;
6994}
6995EXPORT_SYMBOL(__cond_resched);
6996#endif
6997
6998#ifdef CONFIG_PREEMPT_DYNAMIC
6999DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7000EXPORT_STATIC_CALL_TRAMP(cond_resched);
7001
7002DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7003EXPORT_STATIC_CALL_TRAMP(might_resched);
7004#endif
7005
7006/*
7007 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7008 * call schedule, and on return reacquire the lock.
7009 *
7010 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7011 * operations here to prevent schedule() from being called twice (once via
7012 * spin_unlock(), once by hand).
7013 */
7014int __cond_resched_lock(spinlock_t *lock)
7015{
7016        int resched = should_resched(PREEMPT_LOCK_OFFSET);
7017        int ret = 0;
7018
7019        lockdep_assert_held(lock);
7020
7021        if (spin_needbreak(lock) || resched) {
7022                spin_unlock(lock);
7023                if (resched)
7024                        preempt_schedule_common();
7025                else
7026                        cpu_relax();
7027                ret = 1;
7028                spin_lock(lock);
7029        }
7030        return ret;
7031}
7032EXPORT_SYMBOL(__cond_resched_lock);
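/*
 * Callers normally use the cond_resched_lock() wrapper. A minimal sketch
 * (the 'foo' structure and helpers are made up) of chunking a long job done
 * under a spinlock:
 *
 *	spin_lock(&foo->lock);
 *	while (more_work(foo)) {
 *		do_one_chunk(foo);
 *		cond_resched_lock(&foo->lock);
 *	}
 *	spin_unlock(&foo->lock);
 *
 * If cond_resched_lock() returned 1 the lock was dropped and re-acquired,
 * so any state derived while it was held must be revalidated.
 */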
7033
7034int __cond_resched_rwlock_read(rwlock_t *lock)
7035{
7036        int resched = should_resched(PREEMPT_LOCK_OFFSET);
7037        int ret = 0;
7038
7039        lockdep_assert_held_read(lock);
7040
7041        if (rwlock_needbreak(lock) || resched) {
7042                read_unlock(lock);
7043                if (resched)
7044                        preempt_schedule_common();
7045                else
7046                        cpu_relax();
7047                ret = 1;
7048                read_lock(lock);
7049        }
7050        return ret;
7051}
7052EXPORT_SYMBOL(__cond_resched_rwlock_read);
7053
7054int __cond_resched_rwlock_write(rwlock_t *lock)
7055{
7056        int resched = should_resched(PREEMPT_LOCK_OFFSET);
7057        int ret = 0;
7058
7059        lockdep_assert_held_write(lock);
7060
7061        if (rwlock_needbreak(lock) || resched) {
7062                write_unlock(lock);
7063                if (resched)
7064                        preempt_schedule_common();
7065                else
7066                        cpu_relax();
7067                ret = 1;
7068                write_lock(lock);
7069        }
7070        return ret;
7071}
7072EXPORT_SYMBOL(__cond_resched_rwlock_write);
7073
7074/**
7075 * yield - yield the current processor to other threads.
7076 *
7077 * Do not ever use this function, there's a 99% chance you're doing it wrong.
7078 *
7079 * The scheduler is at all times free to pick the calling task as the most
7080 * eligible task to run; if removing the yield() call from your code breaks
7081 * it, it's already broken.
7082 *
7083 * Typical broken usage is:
7084 *
7085 * while (!event)
7086 *      yield();
7087 *
7088 * where one assumes that yield() will let 'the other' process run that will
7089 * make event true. If the current task is a SCHED_FIFO task that will never
7090 * happen. Never use yield() as a progress guarantee!!
7091 *
7092 * If you want to use yield() to wait for something, use wait_event().
7093 * If you want to use yield() to be 'nice' for others, use cond_resched().
7094 * If you still want to use yield(), do not!
7095 */
7096void __sched yield(void)
7097{
7098        set_current_state(TASK_RUNNING);
7099        do_sched_yield();
7100}
7101EXPORT_SYMBOL(yield);
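/*
 * The wait_event() based replacement for the broken pattern above, as a
 * minimal sketch ('wq' and 'event' are made-up names):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(wq);
 *	static bool event;
 *
 *	waiting side:	wait_event(wq, event);
 *
 *	waking side:	event = true;
 *			wake_up(&wq);
 */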
7102
7103/**
7104 * yield_to - yield the current processor to another thread in
7105 * your thread group, or accelerate that thread toward the
7106 * processor it's on.
7107 * @p: target task
7108 * @preempt: whether task preemption is allowed or not
7109 *
7110 * It's the caller's job to ensure that the target task struct
7111 * can't go away on us before we can do any checks.
7112 *
7113 * Return:
7114 *      true (>0) if we indeed boosted the target task.
7115 *      false (0) if we failed to boost the target.
7116 *      -ESRCH if there's no task to yield to.
7117 */
7118int __sched yield_to(struct task_struct *p, bool preempt)
7119{
7120        struct task_struct *curr = current;
7121        struct rq *rq, *p_rq;
7122        unsigned long flags;
7123        int yielded = 0;
7124
7125        local_irq_save(flags);
7126        rq = this_rq();
7127
7128again:
7129        p_rq = task_rq(p);
7130        /*
7131         * If we're the only runnable task on the rq and target rq also
7132         * has only one task, there's absolutely no point in yielding.
7133         */
7134        if (rq->nr_running == 1 && p_rq->nr_running == 1) {
7135                yielded = -ESRCH;
7136                goto out_irq;
7137        }
7138
7139        double_rq_lock(rq, p_rq);
7140        if (task_rq(p) != p_rq) {
7141                double_rq_unlock(rq, p_rq);
7142                goto again;
7143        }
7144
7145        if (!curr->sched_class->yield_to_task)
7146                goto out_unlock;
7147
7148        if (curr->sched_class != p->sched_class)
7149                goto out_unlock;
7150
7151        if (task_running(p_rq, p) || p->state)
7152                goto out_unlock;
7153
7154        yielded = curr->sched_class->yield_to_task(rq, p);
7155        if (yielded) {
7156                schedstat_inc(rq->yld_count);
7157                /*
7158                 * Make p's CPU reschedule; pick_next_entity takes care of
7159                 * fairness.
7160                 */
7161                if (preempt && rq != p_rq)
7162                        resched_curr(p_rq);
7163        }
7164
7165out_unlock:
7166        double_rq_unlock(rq, p_rq);
7167out_irq:
7168        local_irq_restore(flags);
7169
7170        if (yielded > 0)
7171                schedule();
7172
7173        return yielded;
7174}
7175EXPORT_SYMBOL_GPL(yield_to);
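/*
 * The canonical user is KVM's directed yield: when a vCPU is detected
 * spinning on a lock whose holder was preempted, KVM looks up the task
 * backing a candidate vCPU and calls yield_to() on it so the lock can be
 * released sooner (see kvm_vcpu_on_spin()).
 */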
7176
7177int io_schedule_prepare(void)
7178{
7179        int old_iowait = current->in_iowait;
7180
7181        current->in_iowait = 1;
7182        blk_schedule_flush_plug(current);
7183
7184        return old_iowait;
7185}
7186
7187void io_schedule_finish(int token)
7188{
7189        current->in_iowait = token;
7190}
7191
7192/*
7193 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7194 * that process accounting knows that this is a task in IO wait state.
7195 */
7196long __sched io_schedule_timeout(long timeout)
7197{
7198        int token;
7199        long ret;
7200
7201        token = io_schedule_prepare();
7202        ret = schedule_timeout(timeout);
7203        io_schedule_finish(token);
7204
7205        return ret;
7206}
7207EXPORT_SYMBOL(io_schedule_timeout);
7208
7209void __sched io_schedule(void)
7210{
7211        int token;
7212
7213        token = io_schedule_prepare();
7214        schedule();
7215        io_schedule_finish(token);
7216}
7217EXPORT_SYMBOL(io_schedule);
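/*
 * Primitives that block on I/O but cannot call io_schedule() themselves can
 * bracket their own blocking operation with the prepare/finish helpers so
 * the sleep is still accounted as iowait (mutex_lock_io() is one such user):
 *
 *	int token = io_schedule_prepare();
 *
 *	... block, e.g. via schedule() or a lock acquisition ...
 *
 *	io_schedule_finish(token);
 */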
7218
7219/**
7220 * sys_sched_get_priority_max - return maximum RT priority.
7221 * @policy: scheduling class.
7222 *
7223 * Return: On success, this syscall returns the maximum
7224 * rt_priority that can be used by a given scheduling class.
7225 * On failure, a negative error code is returned.
7226 */
7227SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
7228{
7229        int ret = -EINVAL;
7230
7231        switch (policy) {
7232        case SCHED_FIFO:
7233        case SCHED_RR:
7234                ret = MAX_RT_PRIO-1;
7235                break;
7236        case SCHED_DEADLINE:
7237        case SCHED_NORMAL:
7238        case SCHED_BATCH:
7239        case SCHED_IDLE:
7240                ret = 0;
7241                break;
7242        }
7243        return ret;
7244}
7245
7246/**
7247 * sys_sched_get_priority_min - return minimum RT priority.
7248 * @policy: scheduling class.
7249 *
7250 * Return: On success, this syscall returns the minimum
7251 * rt_priority that can be used by a given scheduling class.
7252 * On failure, a negative error code is returned.
7253 */
7254SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
7255{
7256        int ret = -EINVAL;
7257
7258        switch (policy) {
7259        case SCHED_FIFO:
7260        case SCHED_RR:
7261                ret = 1;
7262                break;
7263        case SCHED_DEADLINE:
7264        case SCHED_NORMAL:
7265        case SCHED_BATCH:
7266        case SCHED_IDLE:
7267                ret = 0;
7268        }
7269        return ret;
7270}
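/*
 * Illustrative user-space sketch: portable code should query the range
 * instead of hard-coding RT priority values:
 *
 *	int min = sched_get_priority_min(SCHED_FIFO);
 *	int max = sched_get_priority_max(SCHED_FIFO);
 *	struct sched_param sp = { .sched_priority = min + (max - min) / 2 };
 *
 *	sched_setscheduler(0, SCHED_FIFO, &sp);
 */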
7271
7272static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
7273{
7274        struct task_struct *p;
7275        unsigned int time_slice;
7276        struct rq_flags rf;
7277        struct rq *rq;
7278        int retval;
7279
7280        if (pid < 0)
7281                return -EINVAL;
7282
7283        retval = -ESRCH;
7284        rcu_read_lock();
7285        p = find_process_by_pid(pid);
7286        if (!p)
7287                goto out_unlock;
7288
7289        retval = security_task_getscheduler(p);
7290        if (retval)
7291                goto out_unlock;
7292
7293        rq = task_rq_lock(p, &rf);
7294        time_slice = 0;
7295        if (p->sched_class->get_rr_interval)
7296                time_slice = p->sched_class->get_rr_interval(rq, p);
7297        task_rq_unlock(rq, p, &rf);
7298
7299        rcu_read_unlock();
7300        jiffies_to_timespec64(time_slice, t);
7301        return 0;
7302
7303out_unlock:
7304        rcu_read_unlock();
7305        return retval;
7306}
7307
7308/**
7309 * sys_sched_rr_get_interval - return the default timeslice of a process.
7310 * @pid: pid of the process.
7311 * @interval: userspace pointer to the timeslice value.
7312 *
7313 * This syscall writes the default timeslice value of a given process
7314 * into the user-space timespec buffer. A value of '0' means infinity.
7315 *
7316 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
7317 * an error code.
7318 */
7319SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
7320                struct __kernel_timespec __user *, interval)
7321{
7322        struct timespec64 t;
7323        int retval = sched_rr_get_interval(pid, &t);
7324
7325        if (retval == 0)
7326                retval = put_timespec64(&t, interval);
7327
7328        return retval;
7329}
7330
7331#ifdef CONFIG_COMPAT_32BIT_TIME
7332SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
7333                struct old_timespec32 __user *, interval)
7334{
7335        struct timespec64 t;
7336        int retval = sched_rr_get_interval(pid, &t);
7337
7338        if (retval == 0)
7339                retval = put_old_timespec32(&t, interval);
7340        return retval;
7341}
7342#endif
7343
7344void sched_show_task(struct task_struct *p)
7345{
7346        unsigned long free = 0;
7347        int ppid;
7348
7349        if (!try_get_task_stack(p))
7350                return;
7351
7352        pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7353
7354        if (p->state == TASK_RUNNING)
7355                pr_cont("  running task    ");
7356#ifdef CONFIG_DEBUG_STACK_USAGE
7357        free = stack_not_used(p);
7358#endif
7359        ppid = 0;
7360        rcu_read_lock();
7361        if (pid_alive(p))
7362                ppid = task_pid_nr(rcu_dereference(p->real_parent));
7363        rcu_read_unlock();
7364        pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
7365                free, task_pid_nr(p), ppid,
7366                (unsigned long)task_thread_info(p)->flags);
7367
7368        print_worker_info(KERN_INFO, p);
7369        print_stop_info(KERN_INFO, p);
7370        show_stack(p, NULL, KERN_INFO);
7371        put_task_stack(p);
7372}
7373EXPORT_SYMBOL_GPL(sched_show_task);
7374
7375static inline bool
7376state_filter_match(unsigned long state_filter, struct task_struct *p)
7377{
7378        /* no filter, everything matches */
7379        if (!state_filter)
7380                return true;
7381
7382        /* filter, but doesn't match */
7383        if (!(p->state & state_filter))
7384                return false;
7385
7386        /*
7387         * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7388         * TASK_KILLABLE).
7389         */
7390        if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
7391                return false;
7392
7393        return true;
7394}
7395
7396
7397void show_state_filter(unsigned long state_filter)
7398{
7399        struct task_struct *g, *p;
7400
7401        rcu_read_lock();
7402        for_each_process_thread(g, p) {
7403                /*
7404                 * Reset the NMI watchdog timeout; listing all tasks on a slow
7405                 * console might take a lot of time.
7406                 * Also, reset softlockup watchdogs on all CPUs, because
7407                 * another CPU might be blocked waiting for us to process
7408                 * an IPI.
7409                 */
7410                touch_nmi_watchdog();
7411                touch_all_softlockup_watchdogs();
7412                if (state_filter_match(state_filter, p))
7413                        sched_show_task(p);
7414        }
7415
7416#ifdef CONFIG_SCHED_DEBUG
7417        if (!state_filter)
7418                sysrq_sched_debug_show();
7419#endif
7420        rcu_read_unlock();
7421        /*
7422         * Only show locks if all tasks are dumped:
7423         */
7424        if (!state_filter)
7425                debug_show_all_locks();
7426}
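/*
 * For example, the SysRq 't' handler dumps every task via show_state()
 * (i.e. show_state_filter(0)), while SysRq 'w' passes TASK_UNINTERRUPTIBLE
 * to list only blocked tasks.
 */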
7427
7428/**
7429 * init_idle - set up an idle thread for a given CPU
7430 * @idle: task in question
7431 * @cpu: CPU the idle task belongs to
7432 *
7433 * NOTE: this function does not set the idle thread's NEED_RESCHED
7434 * flag, to make booting more robust.
7435 */
7436void init_idle(struct task_struct *idle, int cpu)
7437{
7438        struct rq *rq = cpu_rq(cpu);
7439        unsigned long flags;
7440
7441        __sched_fork(0, idle);
7442
7443        raw_spin_lock_irqsave(&idle->pi_lock, flags);
7444        raw_spin_lock(&rq->lock);
7445
7446        idle->state = TASK_RUNNING;
7447        idle->se.exec_start = sched_clock();
7448        idle->flags |= PF_IDLE;
7449
7450        scs_task_reset(idle);
7451        kasan_unpoison_task_stack(idle);
7452
7453#ifdef CONFIG_SMP
7454        /*
7455         * It's possible that init_idle() gets called multiple times on a task;
7456         * in that case do_set_cpus_allowed() will not do the right thing.
7457         *
7458         * And since this is boot we can forgo the serialization.
7459         */
7460        set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
7461#endif
7462        /*
7463         * We're having a chicken-and-egg problem: even though we are
7464         * holding rq->lock, the CPU isn't yet set to this CPU, so the
7465         * lockdep check in task_group() will fail.
7466         *
7467         * Similar case to sched_fork(). / Alternatively we could
7468         * use task_rq_lock() here and obtain the other rq->lock.
7469         *
7470         * Silence PROVE_RCU
7471         */
7472        rcu_read_lock();
7473        __set_task_cpu(idle, cpu);
7474        rcu_read_unlock();
7475
7476        rq->idle = idle;
7477        rcu_assign_pointer(rq->curr, idle);
7478        idle->on_rq = TASK_ON_RQ_QUEUED;
7479#ifdef CONFIG_SMP
7480        idle->on_cpu = 1;
7481#endif
7482        raw_spin_unlock(&rq->lock);
7483        raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7484
7485        /* Set the preempt count _outside_ the spinlocks! */
7486        init_idle_preempt_count(idle, cpu);
7487
7488        /*
7489         * The idle tasks have their own, simple scheduling class:
7490         */
7491        idle->sched_class = &idle_sched_class;
7492        ftrace_graph_init_idle_task(idle, cpu);
7493        vtime_init_idle(idle, cpu);
7494#ifdef CONFIG_SMP
7495        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7496#endif
7497}
7498
7499#ifdef CONFIG_SMP
7500
7501int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7502                              const struct cpumask *trial)
7503{
7504        int ret = 1;
7505
7506        if (!cpumask_weight(cur))
7507                return ret;
7508
7509        ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7510
7511        return ret;
7512}
7513
7514int task_can_attach(struct task_struct *p,
7515                    const struct cpumask *cs_cpus_allowed)
7516{
7517        int ret = 0;
7518
7519        /*
7520         * Kthreads which disallow setaffinity shouldn't be moved
7521         * to a new cpuset; we don't want to change their CPU
7522         * affinity and isolating such threads by their set of
7523         * allowed nodes is unnecessary.  Thus, cpusets are not
7524         * applicable for such threads.  This prevents checking for
7525         * success of set_cpus_allowed_ptr() on all attached tasks
7526         * before cpus_mask may be changed.
7527         */
7528        if (p->flags & PF_NO_SETAFFINITY) {
7529                ret = -EINVAL;
7530                goto out;
7531        }
7532
7533        if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
7534                                              cs_cpus_allowed))
7535                ret = dl_task_can_attach(p, cs_cpus_allowed);
7536
7537out:
7538        return ret;
7539}
7540
7541bool sched_smp_initialized __read_mostly;
7542
7543#ifdef CONFIG_NUMA_BALANCING
7544/* Migrate current task p to target_cpu */
7545int migrate_task_to(struct task_struct *p, int target_cpu)
7546{
7547        struct migration_arg arg = { p, target_cpu };
7548        int curr_cpu = task_cpu(p);
7549
7550        if (curr_cpu == target_cpu)
7551                return 0;
7552
7553        if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
7554                return -EINVAL;
7555
7556        /* TODO: This is not properly updating schedstats */
7557
7558        trace_sched_move_numa(p, curr_cpu, target_cpu);
7559        return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7560}
7561
7562/*
7563 * Requeue a task on a given node and accurately track the number of NUMA
7564 * tasks on the runqueues
7565 */
7566void sched_setnuma(struct task_struct *p, int nid)
7567{
7568        bool queued, running;
7569        struct rq_flags rf;
7570        struct rq *rq;
7571
7572        rq = task_rq_lock(p, &rf);
7573        queued = task_on_rq_queued(p);
7574        running = task_current(rq, p);
7575
7576        if (queued)
7577                dequeue_task(rq, p, DEQUEUE_SAVE);
7578        if (running)
7579                put_prev_task(rq, p);
7580
7581        p->numa_preferred_nid = nid;
7582
7583        if (queued)
7584                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7585        if (running)
7586                set_next_task(rq, p);
7587        task_rq_unlock(rq, p, &rf);
7588}
7589#endif /* CONFIG_NUMA_BALANCING */
7590
7591#ifdef CONFIG_HOTPLUG_CPU
7592/*
7593 * Ensure that the idle task is using init_mm right before its CPU goes
7594 * offline.
7595 */
7596void idle_task_exit(void)
7597{
7598        struct mm_struct *mm = current->active_mm;
7599
7600        BUG_ON(cpu_online(smp_processor_id()));
7601        BUG_ON(current != this_rq()->idle);
7602
7603        if (mm != &init_mm) {
7604                switch_mm(mm, &init_mm, current);
7605                finish_arch_post_lock_switch();
7606        }
7607
7608        /* finish_cpu(), as run on the BP, will clean up the active_mm state */
7609}
7610
7611static int __balance_push_cpu_stop(void *arg)
7612{
7613        struct task_struct *p = arg;
7614        struct rq *rq = this_rq();
7615        struct rq_flags rf;
7616        int cpu;
7617
7618        raw_spin_lock_irq(&p->pi_lock);
7619        rq_lock(rq, &rf);
7620
7621        update_rq_clock(rq);
7622
7623        if (task_rq(p) == rq && task_on_rq_queued(p)) {
7624                cpu = select_fallback_rq(rq->cpu, p);
7625                rq = __migrate_task(rq, &rf, p, cpu);
7626        }
7627
7628        rq_unlock(rq, &rf);
7629        raw_spin_unlock_irq(&p->pi_lock);
7630
7631        put_task_struct(p);
7632
7633        return 0;
7634}
7635
7636static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
7637
7638/*
7639 * Ensure we only run per-cpu kthreads once the CPU goes !active.
7640 *
7641 * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it
7642 * only takes effect while the CPU is going down.
7643 */
7644static void balance_push(struct rq *rq)
7645{
7646        struct task_struct *push_task = rq->curr;
7647
7648        lockdep_assert_held(&rq->lock);
7649        SCHED_WARN_ON(rq->cpu != smp_processor_id());
7650
7651        /*
7652         * Ensure the thing is persistent until balance_push_set(.on = false);
7653         */
7654        rq->balance_callback = &balance_push_callback;
7655
7656        /*
7657         * Only active while going offline.
7658         */
7659        if (!cpu_dying(rq->cpu))
7660                return;
7661
7662        /*
7663         * Both the cpu-hotplug and stop task are in this case and are
7664         * required to complete the hotplug process.
7665         *
7666         * XXX: the idle task does not match kthread_is_per_cpu() due to
7667         * histerical raisins.
7668         */
7669        if (rq->idle == push_task ||
7670            kthread_is_per_cpu(push_task) ||
7671            is_migration_disabled(push_task)) {
7672
7673                /*
7674                 * If this is the idle task on the outgoing CPU try to wake
7675                 * up the hotplug control thread which might wait for the
7676                 * last task to vanish. The rcuwait_active() check is
7677                 * accurate here because the waiter is pinned on this CPU
7678                 * and can't obviously be running in parallel.
7679                 *
7680                 * On RT kernels this also has to check whether there are
7681                 * pinned and scheduled out tasks on the runqueue. They
7682                 * need to leave the migrate disabled section first.
7683                 */
7684                if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
7685                    rcuwait_active(&rq->hotplug_wait)) {
7686                        raw_spin_unlock(&rq->lock);
7687                        rcuwait_wake_up(&rq->hotplug_wait);
7688                        raw_spin_lock(&rq->lock);
7689                }
7690                return;
7691        }
7692
7693        get_task_struct(push_task);
7694        /*
7695         * Temporarily drop rq->lock such that we can wake-up the stop task.
7696         * Both preemption and IRQs are still disabled.
7697         */
7698        raw_spin_unlock(&rq->lock);
7699        stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
7700                            this_cpu_ptr(&push_work));
7701        /*
7702         * At this point need_resched() is true and we'll take the loop in
7703         * schedule(). The next pick is obviously going to be the stop task
7704         * which kthread_is_per_cpu() and will push this task away.
7705         */
7706        raw_spin_lock(&rq->lock);
7707}
7708
7709static void balance_push_set(int cpu, bool on)
7710{
7711        struct rq *rq = cpu_rq(cpu);
7712        struct rq_flags rf;
7713
7714        rq_lock_irqsave(rq, &rf);
7715        if (on) {
7716                WARN_ON_ONCE(rq->balance_callback);
7717                rq->balance_callback = &balance_push_callback;
7718        } else if (rq->balance_callback == &balance_push_callback) {
7719                rq->balance_callback = NULL;
7720        }
7721        rq_unlock_irqrestore(rq, &rf);
7722}
7723
7724/*
7725 * Invoked from a CPUs hotplug control thread after the CPU has been marked
7726 * inactive. All tasks which are not per CPU kernel threads are either
7727 * pushed off this CPU now via balance_push() or placed on a different CPU
7728 * during wakeup. Wait until the CPU is quiescent.
7729 */
7730static void balance_hotplug_wait(void)
7731{
7732        struct rq *rq = this_rq();
7733
7734        rcuwait_wait_event(&rq->hotplug_wait,
7735                           rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
7736                           TASK_UNINTERRUPTIBLE);
7737}
7738
7739#else
7740
7741static inline void balance_push(struct rq *rq)
7742{
7743}
7744
7745static inline void balance_push_set(int cpu, bool on)
7746{
7747}
7748
7749static inline void balance_hotplug_wait(void)
7750{
7751}
7752
7753#endif /* CONFIG_HOTPLUG_CPU */
7754
7755void set_rq_online(struct rq *rq)
7756{
7757        if (!rq->online) {
7758                const struct sched_class *class;
7759
7760                cpumask_set_cpu(rq->cpu, rq->rd->online);
7761                rq->online = 1;
7762
7763                for_each_class(class) {
7764                        if (class->rq_online)
7765                                class->rq_online(rq);
7766                }
7767        }
7768}
7769
7770void set_rq_offline(struct rq *rq)
7771{
7772        if (rq->online) {
7773                const struct sched_class *class;
7774
7775                for_each_class(class) {
7776                        if (class->rq_offline)
7777                                class->rq_offline(rq);
7778                }
7779
7780                cpumask_clear_cpu(rq->cpu, rq->rd->online);
7781                rq->online = 0;
7782        }
7783}
7784
7785/*
7786 * used to mark begin/end of suspend/resume:
7787 */
7788static int num_cpus_frozen;
7789
7790/*
7791 * Update cpusets according to cpu_active mask.  If cpusets are
7792 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7793 * around partition_sched_domains().
7794 *
7795 * If we come here as part of a suspend/resume, don't touch cpusets because we
7796 * want to restore them to their original state upon resume anyway.
7797 */
7798static void cpuset_cpu_active(void)
7799{
7800        if (cpuhp_tasks_frozen) {
7801                /*
7802                 * num_cpus_frozen tracks how many CPUs are involved in the
7803                 * suspend/resume sequence. As long as this is not the last online
7804                 * operation in the resume sequence, just build a single sched
7805                 * domain, ignoring cpusets.
7806                 */
7807                partition_sched_domains(1, NULL, NULL);
7808                if (--num_cpus_frozen)
7809                        return;
7810                /*
7811                 * This is the last CPU online operation. So fall through and
7812                 * restore the original sched domains by considering the
7813                 * cpuset configurations.
7814                 */
7815                cpuset_force_rebuild();
7816        }
7817        cpuset_update_active_cpus();
7818}
7819
7820static int cpuset_cpu_inactive(unsigned int cpu)
7821{
7822        if (!cpuhp_tasks_frozen) {
7823                if (dl_cpu_busy(cpu))
7824                        return -EBUSY;
7825                cpuset_update_active_cpus();
7826        } else {
7827                num_cpus_frozen++;
7828                partition_sched_domains(1, NULL, NULL);
7829        }
7830        return 0;
7831}
7832
7833int sched_cpu_activate(unsigned int cpu)
7834{
7835        struct rq *rq = cpu_rq(cpu);
7836        struct rq_flags rf;
7837
7838        /*
7839         * Clear the balance_push callback and prepare to schedule
7840         * regular tasks.
7841         */
7842        balance_push_set(cpu, false);
7843
7844#ifdef CONFIG_SCHED_SMT
7845        /*
7846         * When going up, increment the number of cores with SMT present.
7847         */
7848        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7849                static_branch_inc_cpuslocked(&sched_smt_present);
7850#endif
7851        set_cpu_active(cpu, true);
7852
7853        if (sched_smp_initialized) {
7854                sched_domains_numa_masks_set(cpu);
7855                cpuset_cpu_active();
7856        }
7857
7858        /*
7859         * Put the rq online, if not already. This happens:
7860         *
7861         * 1) In the early boot process, because we build the real domains
7862         *    after all CPUs have been brought up.
7863         *
7864         * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
7865         *    domains.
7866         */
7867        rq_lock_irqsave(rq, &rf);
7868        if (rq->rd) {
7869                BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7870                set_rq_online(rq);
7871        }
7872        rq_unlock_irqrestore(rq, &rf);
7873
7874        return 0;
7875}
7876
7877int sched_cpu_deactivate(unsigned int cpu)
7878{
7879        struct rq *rq = cpu_rq(cpu);
7880        struct rq_flags rf;
7881        int ret;
7882
7883        /*
7884         * Remove the CPU from nohz.idle_cpus_mask to prevent it from
7885         * participating in load balancing while it is not active.
7886         */
7887        nohz_balance_exit_idle(rq);
7888
7889        set_cpu_active(cpu, false);
7890
7891        /*
7892         * From this point forward, this CPU will refuse to run any task that
7893         * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
7894         * push those tasks away until this gets cleared, see
7895         * sched_cpu_dying().
7896         */
7897        balance_push_set(cpu, true);
7898
7899        /*
7900         * We've cleared cpu_active_mask / set balance_push, wait for all
7901         * preempt-disabled and RCU users of this state to go away such that
7902         * all new such users will observe it.
7903         *
7904         * Specifically, we rely on ttwu to no longer target this CPU, see
7905         * ttwu_queue_cond() and is_cpu_allowed().
7906         *
7907         * Do the sync before parking the smpboot threads to take care of the RCU boost case.
7908         */
7909        synchronize_rcu();
7910
7911        rq_lock_irqsave(rq, &rf);
7912        if (rq->rd) {
7913                update_rq_clock(rq);
7914                BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7915                set_rq_offline(rq);
7916        }
7917        rq_unlock_irqrestore(rq, &rf);
7918
7919#ifdef CONFIG_SCHED_SMT
7920        /*
7921         * When going down, decrement the number of cores with SMT present.
7922         */
7923        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7924                static_branch_dec_cpuslocked(&sched_smt_present);
7925#endif
7926
7927        if (!sched_smp_initialized)
7928                return 0;
7929
7930        ret = cpuset_cpu_inactive(cpu);
7931        if (ret) {
7932                balance_push_set(cpu, false);
7933                set_cpu_active(cpu, true);
7934                return ret;
7935        }
7936        sched_domains_numa_masks_clear(cpu);
7937        return 0;
7938}
7939
7940static void sched_rq_cpu_starting(unsigned int cpu)
7941{
7942        struct rq *rq = cpu_rq(cpu);
7943
7944        rq->calc_load_update = calc_load_update;
7945        update_max_interval();
7946}
7947
7948int sched_cpu_starting(unsigned int cpu)
7949{
7950        sched_rq_cpu_starting(cpu);
7951        sched_tick_start(cpu);
7952        return 0;
7953}
7954
7955#ifdef CONFIG_HOTPLUG_CPU
7956
7957/*
7958 * Invoked immediately before the stopper thread is invoked to bring the
7959 * CPU down completely. At this point all per CPU kthreads except the
7960 * hotplug thread (current) and the stopper thread (inactive) have been
7961 * either parked or have been unbound from the outgoing CPU. Ensure that
7962 * any of those which might be on the way out are gone.
7963 *
7964 * If after this point a bound task is being woken on this CPU then the
7965 * responsible hotplug callback has failed to do its job.
7966 * sched_cpu_dying() will catch it with the appropriate fireworks.
7967 */
7968int sched_cpu_wait_empty(unsigned int cpu)
7969{
7970        balance_hotplug_wait();
7971        return 0;
7972}
7973
7974/*
7975 * Since this CPU is going 'away' for a while, fold any nr_active delta we
7976 * might have. Called from the CPU stopper task after ensuring that the
7977 * stopper is the last running task on the CPU, so nr_active count is
7978 * stable. We need to take into account the teardown thread which is
7979 * calling this, so we hand in adjust = 1 to the load calculation.
7980 *
7981 * Also see the comment "Global load-average calculations".
7982 */
7983static void calc_load_migrate(struct rq *rq)
7984{
7985        long delta = calc_load_fold_active(rq, 1);
7986
7987        if (delta)
7988                atomic_long_add(delta, &calc_load_tasks);
7989}
7990
7991static void dump_rq_tasks(struct rq *rq, const char *loglvl)
7992{
7993        struct task_struct *g, *p;
7994        int cpu = cpu_of(rq);
7995
7996        lockdep_assert_held(&rq->lock);
7997
7998        printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
7999        for_each_process_thread(g, p) {
8000                if (task_cpu(p) != cpu)
8001                        continue;
8002
8003                if (!task_on_rq_queued(p))
8004                        continue;
8005
8006                printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8007        }
8008}
8009
8010int sched_cpu_dying(unsigned int cpu)
8011{
8012        struct rq *rq = cpu_rq(cpu);
8013        struct rq_flags rf;
8014
8015        /* Handle pending wakeups and then migrate everything off */
8016        sched_tick_stop(cpu);
8017
8018        rq_lock_irqsave(rq, &rf);
8019        if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8020                WARN(true, "Dying CPU not properly vacated!");
8021                dump_rq_tasks(rq, KERN_WARNING);
8022        }
8023        rq_unlock_irqrestore(rq, &rf);
8024
8025        calc_load_migrate(rq);
8026        update_max_interval();
8027        hrtick_clear(rq);
8028        return 0;
8029}
8030#endif
8031
8032void __init sched_init_smp(void)
8033{
8034        sched_init_numa();
8035
8036        /*
8037         * There's no userspace yet to cause hotplug operations; hence all the
8038         * CPU masks are stable and all blatant races in the below code cannot
8039         * happen.
8040         */
8041        mutex_lock(&sched_domains_mutex);
8042        sched_init_domains(cpu_active_mask);
8043        mutex_unlock(&sched_domains_mutex);
8044
8045        /* Move init over to a non-isolated CPU */
8046        if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
8047                BUG();
8048        sched_init_granularity();
8049
8050        init_sched_rt_class();
8051        init_sched_dl_class();
8052
8053        sched_smp_initialized = true;
8054}
8055
8056static int __init migration_init(void)
8057{
8058        sched_cpu_starting(smp_processor_id());
8059        return 0;
8060}
8061early_initcall(migration_init);
8062
8063#else
8064void __init sched_init_smp(void)
8065{
8066        sched_init_granularity();
8067}
8068#endif /* CONFIG_SMP */
8069
8070int in_sched_functions(unsigned long addr)
8071{
8072        return in_lock_functions(addr) ||
8073                (addr >= (unsigned long)__sched_text_start
8074                && addr < (unsigned long)__sched_text_end);
8075}
8076
8077#ifdef CONFIG_CGROUP_SCHED
8078/*
8079 * Default task group.
8080 * Every task in the system belongs to this group at bootup.
8081 */
8082struct task_group root_task_group;
8083LIST_HEAD(task_groups);
8084
8085/* Cacheline aligned slab cache for task_group */
8086static struct kmem_cache *task_group_cache __read_mostly;
8087#endif
8088
8089DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
8090DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
8091
8092void __init sched_init(void)
8093{
8094        unsigned long ptr = 0;
8095        int i;
8096
8097        /* Make sure the linker didn't screw up */
8098        BUG_ON(&idle_sched_class + 1 != &fair_sched_class ||
8099               &fair_sched_class + 1 != &rt_sched_class ||
8100               &rt_sched_class + 1   != &dl_sched_class);
8101#ifdef CONFIG_SMP
8102        BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
8103#endif
8104
8105        wait_bit_init();
8106
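            /*
             * Size a single contiguous allocation holding root_task_group's
             * per-CPU pointer arrays: two arrays of nr_cpu_ids pointers for
             * the fair class (se[] and cfs_rq[]) and, when enabled, two more
             * for the RT class (rt_se[] and rt_rq[]); ptr is then walked
             * through the chunk below to carve out each array.
             */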
8107#ifdef CONFIG_FAIR_GROUP_SCHED
8108        ptr += 2 * nr_cpu_ids * sizeof(void **);
8109#endif
8110#ifdef CONFIG_RT_GROUP_SCHED
8111        ptr += 2 * nr_cpu_ids * sizeof(void **);
8112#endif
8113        if (ptr) {
8114                ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8115
8116#ifdef CONFIG_FAIR_GROUP_SCHED
8117                root_task_group.se = (struct sched_entity **)ptr;
8118                ptr += nr_cpu_ids * sizeof(void **);
8119
8120                root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8121                ptr += nr_cpu_ids * sizeof(void **);
8122
8123                root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8124                init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
8125#endif /* CONFIG_FAIR_GROUP_SCHED */
8126#ifdef CONFIG_RT_GROUP_SCHED
8127                root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8128                ptr += nr_cpu_ids * sizeof(void **);
8129
8130                root_task_group.rt_rq = (struct rt_rq **)ptr;
8131                ptr += nr_cpu_ids * sizeof(void **);
8132
8133#endif /* CONFIG_RT_GROUP_SCHED */
8134        }
8135#ifdef CONFIG_CPUMASK_OFFSTACK
8136        for_each_possible_cpu(i) {
8137                per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
8138                        cpumask_size(), GFP_KERNEL, cpu_to_node(i));
8139                per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
8140                        cpumask_size(), GFP_KERNEL, cpu_to_node(i));
8141        }
8142#endif /* CONFIG_CPUMASK_OFFSTACK */
8143
8144        init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
8145        init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());
8146
8147#ifdef CONFIG_SMP
8148        init_defrootdomain();
8149#endif
8150
8151#ifdef CONFIG_RT_GROUP_SCHED
8152        init_rt_bandwidth(&root_task_group.rt_bandwidth,
8153                        global_rt_period(), global_rt_runtime());
8154#endif /* CONFIG_RT_GROUP_SCHED */
8155
8156#ifdef CONFIG_CGROUP_SCHED
8157        task_group_cache = KMEM_CACHE(task_group, 0);
8158
8159        list_add(&root_task_group.list, &task_groups);
8160        INIT_LIST_HEAD(&root_task_group.children);
8161        INIT_LIST_HEAD(&root_task_group.siblings);
8162        autogroup_init(&init_task);
8163#endif /* CONFIG_CGROUP_SCHED */
8164
8165        for_each_possible_cpu(i) {
8166                struct rq *rq;
8167
8168                rq = cpu_rq(i);
8169                raw_spin_lock_init(&rq->lock);
8170                rq->nr_running = 0;
8171                rq->calc_load_active = 0;
8172                rq->calc_load_update = jiffies + LOAD_FREQ;
8173                init_cfs_rq(&rq->cfs);
8174                init_rt_rq(&rq->rt);
8175                init_dl_rq(&rq->dl);
8176#ifdef CONFIG_FAIR_GROUP_SCHED
8177                INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8178                rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8179                /*
8180                 * How much CPU bandwidth does root_task_group get?
8181                 *
8182                 * In the case of task-groups formed through the cgroup filesystem, it
8183                 * gets 100% of the CPU resources in the system. This overall
8184                 * system CPU resource is divided among the tasks of
8185                 * root_task_group and its child task-groups in a fair manner,
8186                 * based on each entity's (task or task-group's) weight
8187                 * (se->load.weight).
8188                 *
8189                 * In other words, if root_task_group has 10 tasks of weight
8190                 * 1024 and two child groups A0 and A1 (of weight 1024 each),
8191                 * then A0's share of the CPU resource is:
8192                 *
8193                 *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8194                 *
8195                 * We achieve this by letting root_task_group's tasks sit
8196                 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
8197                 */
8198                init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8199#endif /* CONFIG_FAIR_GROUP_SCHED */
8200
8201                rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
8202#ifdef CONFIG_RT_GROUP_SCHED
8203                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8204#endif
8205#ifdef CONFIG_SMP
8206                rq->sd = NULL;
8207                rq->rd = NULL;
8208                rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
8209                rq->balance_callback = &balance_push_callback;
8210                rq->active_balance = 0;
8211                rq->next_balance = jiffies;
8212                rq->push_cpu = 0;
8213                rq->cpu = i;
8214                rq->online = 0;
8215                rq->idle_stamp = 0;
8216                rq->avg_idle = 2*sysctl_sched_migration_cost;
8217                rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8218
8219                INIT_LIST_HEAD(&rq->cfs_tasks);
8220
8221                rq_attach_root(rq, &def_root_domain);
8222#ifdef CONFIG_NO_HZ_COMMON
8223                rq->last_blocked_load_update_tick = jiffies;
8224                atomic_set(&rq->nohz_flags, 0);
8225
8226                INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8227#endif
8228#ifdef CONFIG_HOTPLUG_CPU
8229                rcuwait_init(&rq->hotplug_wait);
8230#endif
8231#endif /* CONFIG_SMP */
8232                hrtick_rq_init(rq);
8233                atomic_set(&rq->nr_iowait, 0);
8234        }
8235
8236        set_load_weight(&init_task, false);
8237
8238        /*
8239         * The boot idle thread does lazy MMU switching as well:
8240         */
8241        mmgrab(&init_mm);
8242        enter_lazy_tlb(&init_mm, current);
8243
8244        /*
8245         * Make us the idle thread. Technically, schedule() should not be
8246         * called from this thread; however, somewhere below it might be.
8247         * Because we are the idle thread, we just pick up running again
8248         * when this runqueue becomes "idle".
8249         */
8250        init_idle(current, smp_processor_id());
8251
8252        calc_load_update = jiffies + LOAD_FREQ;
8253
8254#ifdef CONFIG_SMP
8255        idle_thread_set_boot_cpu();
8256        balance_push_set(smp_processor_id(), false);
8257#endif
8258        init_sched_fair_class();
8259
8260        init_schedstats();
8261
8262        psi_init();
8263
8264        init_uclamp();
8265
8266        scheduler_running = 1;
8267}
8268
8269#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8270static inline int preempt_count_equals(int preempt_offset)
8271{
8272        int nested = preempt_count() + rcu_preempt_depth();
8273
8274        return (nested == preempt_offset);
8275}
8276
8277void __might_sleep(const char *file, int line, int preempt_offset)
8278{
8279        /*
8280         * Blocking primitives will set (and therefore destroy) current->state;
8281         * since we will exit with TASK_RUNNING, make sure we enter with it,
8282         * otherwise we will destroy state.
8283         */
8284        WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
8285                        "do not call blocking ops when !TASK_RUNNING; "
8286                        "state=%lx set at [<%p>] %pS\n",
8287                        current->state,
8288                        (void *)current->task_state_change,
8289                        (void *)current->task_state_change);
8290
8291        ___might_sleep(file, line, preempt_offset);
8292}
8293EXPORT_SYMBOL(__might_sleep);
8294
8295void ___might_sleep(const char *file, int line, int preempt_offset)
8296{
8297        /* Ratelimiting timestamp: */
8298        static unsigned long prev_jiffy;
8299
8300        unsigned long preempt_disable_ip;
8301
8302        /* WARN_ON_ONCE() by default, no rate limit required: */
8303        rcu_sleep_check();
8304
8305        if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
8306             !is_idle_task(current) && !current->non_block_count) ||
8307            system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8308            oops_in_progress)
8309                return;
8310
8311        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8312                return;
8313        prev_jiffy = jiffies;
8314
8315        /* Save this before calling printk(), since that will clobber it: */
8316        preempt_disable_ip = get_preempt_disable_ip(current);
8317
8318        printk(KERN_ERR
8319                "BUG: sleeping function called from invalid context at %s:%d\n",
8320                        file, line);
8321        printk(KERN_ERR
8322                "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8323                        in_atomic(), irqs_disabled(), current->non_block_count,
8324                        current->pid, current->comm);
8325
8326        if (task_stack_end_corrupted(current))
8327                printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
8328
8329        debug_show_held_locks(current);
8330        if (irqs_disabled())
8331                print_irqtrace_events(current);
8332        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
8333            && !preempt_count_equals(preempt_offset)) {
8334                pr_err("Preemption disabled at:");
8335                print_ip_sym(KERN_ERR, preempt_disable_ip);
8336        }
8337        dump_stack();
8338        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8339}
8340EXPORT_SYMBOL(___might_sleep);
8341
8342void __cant_sleep(const char *file, int line, int preempt_offset)
8343{
8344        static unsigned long prev_jiffy;
8345
8346        if (irqs_disabled())
8347                return;
8348
8349        if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8350                return;
8351
8352        if (preempt_count() > preempt_offset)
8353                return;
8354
8355        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8356                return;
8357        prev_jiffy = jiffies;
8358
8359        printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8360        printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8361                        in_atomic(), irqs_disabled(),
8362                        current->pid, current->comm);
8363
8364        debug_show_held_locks(current);
8365        dump_stack();
8366        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8367}
8368EXPORT_SYMBOL_GPL(__cant_sleep);
8369
8370#ifdef CONFIG_SMP
8371void __cant_migrate(const char *file, int line)
8372{
8373        static unsigned long prev_jiffy;
8374
8375        if (irqs_disabled())
8376                return;
8377
8378        if (is_migration_disabled(current))
8379                return;
8380
8381        if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8382                return;
8383
8384        if (preempt_count() > 0)
8385                return;
8386
8387        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8388                return;
8389        prev_jiffy = jiffies;
8390
8391        pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8392        pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8393               in_atomic(), irqs_disabled(), is_migration_disabled(current),
8394               current->pid, current->comm);
8395
8396        debug_show_held_locks(current);
8397        dump_stack();
8398        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8399}
8400EXPORT_SYMBOL_GPL(__cant_migrate);
8401#endif
8402#endif
8403
8404#ifdef CONFIG_MAGIC_SYSRQ
8405void normalize_rt_tasks(void)
8406{
8407        struct task_struct *g, *p;
8408        struct sched_attr attr = {
8409                .sched_policy = SCHED_NORMAL,
8410        };
8411
8412        read_lock(&tasklist_lock);
8413        for_each_process_thread(g, p) {
8414                /*
8415                 * Only normalize user tasks:
8416                 */
8417                if (p->flags & PF_KTHREAD)
8418                        continue;
8419
8420                p->se.exec_start = 0;
8421                schedstat_set(p->se.statistics.wait_start,  0);
8422                schedstat_set(p->se.statistics.sleep_start, 0);
8423                schedstat_set(p->se.statistics.block_start, 0);
8424
8425                if (!dl_task(p) && !rt_task(p)) {
8426                        /*
8427                         * Renice negative nice level userspace
8428                         * tasks back to 0:
8429                         */
8430                        if (task_nice(p) < 0)
8431                                set_user_nice(p, 0);
8432                        continue;
8433                }
8434
8435                __sched_setscheduler(p, &attr, false, false);
8436        }
8437        read_unlock(&tasklist_lock);
8438}
8439
8440#endif /* CONFIG_MAGIC_SYSRQ */
8441
8442#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
8443/*
8444 * These functions are only useful for the IA64 MCA handling, or kdb.
8445 *
8446 * They can only be called when the whole system has been
8447 * stopped - every CPU needs to be quiescent, and no scheduling
8448 * activity can take place. Using them for anything else would
8449 * be a serious bug, and as a result, they aren't even visible
8450 * under any other configuration.
8451 */
8452
8453/**
8454 * curr_task - return the current task for a given CPU.
8455 * @cpu: the processor in question.
8456 *
8457 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8458 *
8459 * Return: The current task for @cpu.
8460 */
8461struct task_struct *curr_task(int cpu)
8462{
8463        return cpu_curr(cpu);
8464}
8465
8466#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8467
8468#ifdef CONFIG_IA64
8469/**
8470 * ia64_set_curr_task - set the current task for a given CPU.
8471 * @cpu: the processor in question.
8472 * @p: the task pointer to set.
8473 *
8474 * Description: This function must only be used when non-maskable interrupts
8475 * are serviced on a separate stack. It allows the architecture to switch the
8476 * notion of the current task on a CPU in a non-blocking manner. This function
8477 * must be called with all CPUs synchronized and interrupts disabled; the
8478 * caller must save the original value of the current task (see
8479 * curr_task() above) and restore that value before re-enabling interrupts and
8480 * re-starting the system.
8481 *
8482 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8483 */
8484void ia64_set_curr_task(int cpu, struct task_struct *p)
8485{
8486        cpu_curr(cpu) = p;
8487}
8488
8489#endif
8490
8491#ifdef CONFIG_CGROUP_SCHED
8492/* task_group_lock serializes the addition/removal of task groups */
8493static DEFINE_SPINLOCK(task_group_lock);
8494
8495static inline void alloc_uclamp_sched_group(struct task_group *tg,
8496                                            struct task_group *parent)
8497{
8498#ifdef CONFIG_UCLAMP_TASK_GROUP
8499        enum uclamp_id clamp_id;
8500
8501        for_each_clamp_id(clamp_id) {
8502                uclamp_se_set(&tg->uclamp_req[clamp_id],
8503                              uclamp_none(clamp_id), false);
8504                tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
8505        }
8506#endif
8507}
8508
8509static void sched_free_group(struct task_group *tg)
8510{
8511        free_fair_sched_group(tg);
8512        free_rt_sched_group(tg);
8513        autogroup_free(tg);
8514        kmem_cache_free(task_group_cache, tg);
8515}
8516
8517/* allocate runqueue etc for a new task group */
8518struct task_group *sched_create_group(struct task_group *parent)
8519{
8520        struct task_group *tg;
8521
8522        tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
8523        if (!tg)
8524                return ERR_PTR(-ENOMEM);
8525
8526        if (!alloc_fair_sched_group(tg, parent))
8527                goto err;
8528
8529        if (!alloc_rt_sched_group(tg, parent))
8530                goto err;
8531
8532        alloc_uclamp_sched_group(tg, parent);
8533
8534        return tg;
8535
8536err:
8537        sched_free_group(tg);
8538        return ERR_PTR(-ENOMEM);
8539}
8540
8541void sched_online_group(struct task_group *tg, struct task_group *parent)
8542{
8543        unsigned long flags;
8544
8545        spin_lock_irqsave(&task_group_lock, flags);
8546        list_add_rcu(&tg->list, &task_groups);
8547
8548        /* Root should already exist: */
8549        WARN_ON(!parent);
8550
8551        tg->parent = parent;
8552        INIT_LIST_HEAD(&tg->children);
8553        list_add_rcu(&tg->siblings, &parent->children);
8554        spin_unlock_irqrestore(&task_group_lock, flags);
8555
8556        online_fair_sched_group(tg);
8557}
8558
8559/* rcu callback to free various structures associated with a task group */
8560static void sched_free_group_rcu(struct rcu_head *rhp)
8561{
8562        /* Now it should be safe to free those cfs_rqs: */
8563        sched_free_group(container_of(rhp, struct task_group, rcu));
8564}
8565
8566void sched_destroy_group(struct task_group *tg)
8567{
8568        /* Wait for possible concurrent references to cfs_rqs to complete: */
8569        call_rcu(&tg->rcu, sched_free_group_rcu);
8570}
8571
8572void sched_offline_group(struct task_group *tg)
8573{
8574        unsigned long flags;
8575
8576        /* End participation in shares distribution: */
8577        unregister_fair_sched_group(tg);
8578
8579        spin_lock_irqsave(&task_group_lock, flags);
8580        list_del_rcu(&tg->list);
8581        list_del_rcu(&tg->siblings);
8582        spin_unlock_irqrestore(&task_group_lock, flags);
8583}
8584
8585static void sched_change_group(struct task_struct *tsk, int type)
8586{
8587        struct task_group *tg;
8588
8589        /*
8590         * All callers are synchronized by task_rq_lock(); we do not use RCU
8591         * which is pointless here. Thus, we pass "true" to task_css_check()
8592         * to prevent lockdep warnings.
8593         */
8594        tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8595                          struct task_group, css);
8596        tg = autogroup_task_group(tsk, tg);
8597        tsk->sched_task_group = tg;
8598
8599#ifdef CONFIG_FAIR_GROUP_SCHED
8600        if (tsk->sched_class->task_change_group)
8601                tsk->sched_class->task_change_group(tsk, type);
8602        else
8603#endif
8604                set_task_rq(tsk, task_cpu(tsk));
8605}
8606
8607/*
8608 * Change task's runqueue when it moves between groups.
8609 *
8610 * The caller of this function should have put the task in its new group by
8611 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
8612 * its new group.
8613 */
8614void sched_move_task(struct task_struct *tsk)
8615{
8616        int queued, running, queue_flags =
8617                DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
8618        struct rq_flags rf;
8619        struct rq *rq;
8620
8621        rq = task_rq_lock(tsk, &rf);
8622        update_rq_clock(rq);
8623
8624        running = task_current(rq, tsk);
8625        queued = task_on_rq_queued(tsk);
8626
8627        if (queued)
8628                dequeue_task(rq, tsk, queue_flags);
8629        if (running)
8630                put_prev_task(rq, tsk);
8631
8632        sched_change_group(tsk, TASK_MOVE_GROUP);
8633
8634        if (queued)
8635                enqueue_task(rq, tsk, queue_flags);
8636        if (running) {
8637                set_next_task(rq, tsk);
8638                /*
8639                 * After changing group, the running task may have joined a
8640                 * throttled one but it's still the running task. Trigger a
8641                 * resched to make sure that task can still run.
8642                 */
8643                resched_curr(rq);
8644        }
8645
8646        task_rq_unlock(rq, tsk, &rf);
8647}
8648
8649static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
8650{
8651        return css ? container_of(css, struct task_group, css) : NULL;
8652}
8653
8654static struct cgroup_subsys_state *
8655cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8656{
8657        struct task_group *parent = css_tg(parent_css);
8658        struct task_group *tg;
8659
8660        if (!parent) {
8661                /* This is early initialization for the top cgroup */
8662                return &root_task_group.css;
8663        }
8664
8665        tg = sched_create_group(parent);
8666        if (IS_ERR(tg))
8667                return ERR_PTR(-ENOMEM);
8668
8669        return &tg->css;
8670}
8671
8672/* Expose task group only after completing cgroup initialization */
8673static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
8674{
8675        struct task_group *tg = css_tg(css);
8676        struct task_group *parent = css_tg(css->parent);
8677
8678        if (parent)
8679                sched_online_group(tg, parent);
8680
8681#ifdef CONFIG_UCLAMP_TASK_GROUP
8682        /* Propagate the effective uclamp value for the new group */
8683        cpu_util_update_eff(css);
8684#endif
8685
8686        return 0;
8687}
8688
8689static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
8690{
8691        struct task_group *tg = css_tg(css);
8692
8693        sched_offline_group(tg);
8694}
8695
8696static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
8697{
8698        struct task_group *tg = css_tg(css);
8699
8700        /*
8701         * Relies on the RCU grace period between css_released() and this.
8702         */
8703        sched_free_group(tg);
8704}
8705
8706/*
8707 * This is called before wake_up_new_task(), therefore we really only
8708 * have to set its group bits; all the other stuff does not apply.
8709 */
8710static void cpu_cgroup_fork(struct task_struct *task)
8711{
8712        struct rq_flags rf;
8713        struct rq *rq;
8714
8715        rq = task_rq_lock(task, &rf);
8716
8717        update_rq_clock(rq);
8718        sched_change_group(task, TASK_SET_GROUP);
8719
8720        task_rq_unlock(rq, task, &rf);
8721}
8722
8723static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
8724{
8725        struct task_struct *task;
8726        struct cgroup_subsys_state *css;
8727        int ret = 0;
8728
8729        cgroup_taskset_for_each(task, css, tset) {
8730#ifdef CONFIG_RT_GROUP_SCHED
8731                if (!sched_rt_can_attach(css_tg(css), task))
8732                        return -EINVAL;
8733#endif
8734                /*
8735                 * Serialize against wake_up_new_task() such that if it's
8736                 * running, we're sure to observe its full state.
8737                 */
8738                raw_spin_lock_irq(&task->pi_lock);
8739                /*
8740                 * Avoid calling sched_move_task() before wake_up_new_task()
8741                 * has happened. This would lead to problems with PELT, due to
8742                 * move wanting to detach+attach while we're not attached yet.
8743                 */
8744                if (task->state == TASK_NEW)
8745                        ret = -EINVAL;
8746                raw_spin_unlock_irq(&task->pi_lock);
8747
8748                if (ret)
8749                        break;
8750        }
8751        return ret;
8752}
8753
8754static void cpu_cgroup_attach(struct cgroup_taskset *tset)
8755{
8756        struct task_struct *task;
8757        struct cgroup_subsys_state *css;
8758
8759        cgroup_taskset_for_each(task, css, tset)
8760                sched_move_task(task);
8761}
8762
8763#ifdef CONFIG_UCLAMP_TASK_GROUP
8764static void cpu_util_update_eff(struct cgroup_subsys_state *css)
8765{
8766        struct cgroup_subsys_state *top_css = css;
8767        struct uclamp_se *uc_parent = NULL;
8768        struct uclamp_se *uc_se = NULL;
8769        unsigned int eff[UCLAMP_CNT];
8770        enum uclamp_id clamp_id;
8771        unsigned int clamps;
8772
8773        css_for_each_descendant_pre(css, top_css) {
8774                uc_parent = css_tg(css)->parent
8775                        ? css_tg(css)->parent->uclamp : NULL;
8776
8777                for_each_clamp_id(clamp_id) {
8778                        /* Assume effective clamps match requested clamps */
8779                        eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
8780                        /* Cap effective clamps with parent's effective clamps */
8781                        if (uc_parent &&
8782                            eff[clamp_id] > uc_parent[clamp_id].value) {
8783                                eff[clamp_id] = uc_parent[clamp_id].value;
8784                        }
8785                }
8786                /* Ensure protection is always capped by limit */
8787                eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
8788
8789                /* Propagate most restrictive effective clamps */
8790                clamps = 0x0;
8791                uc_se = css_tg(css)->uclamp;
8792                for_each_clamp_id(clamp_id) {
8793                        if (eff[clamp_id] == uc_se[clamp_id].value)
8794                                continue;
8795                        uc_se[clamp_id].value = eff[clamp_id];
8796                        uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
8797                        clamps |= (0x1 << clamp_id);
8798                }
8799                if (!clamps) {
8800                        css = css_rightmost_descendant(css);
8801                        continue;
8802                }
8803
8804                /* Immediately update descendants' RUNNABLE tasks */
8805                uclamp_update_active_tasks(css, clamps);
8806        }
8807}
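    /*
     * A worked example of the propagation above (the numbers are illustrative):
     * if a parent's effective uclamp.max is 512 and a child requests
     * uclamp.max of 800, the child's effective uclamp.max is capped to 512,
     * and its effective uclamp.min is further capped to at most 512. Only
     * clamp_ids whose effective value actually changed are pushed down to
     * descendants' RUNNABLE tasks via uclamp_update_active_tasks().
     */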
8808
8809/*
8810 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
8811 * C expression. Since there is no way to convert a macro argument (N) into a
8812 * character constant, use two levels of macros.
8813 */
8814#define _POW10(exp) ((unsigned int)1e##exp)
8815#define POW10(exp) _POW10(exp)
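    /* For example, POW10(2) expands to ((unsigned int)1e2), i.e. 100. */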
8816
8817struct uclamp_request {
8818#define UCLAMP_PERCENT_SHIFT    2
8819#define UCLAMP_PERCENT_SCALE    (100 * POW10(UCLAMP_PERCENT_SHIFT))
8820        s64 percent;
8821        u64 util;
8822        int ret;
8823};
8824
8825static inline struct uclamp_request
8826capacity_from_percent(char *buf)
8827{
8828        struct uclamp_request req = {
8829                .percent = UCLAMP_PERCENT_SCALE,
8830                .util = SCHED_CAPACITY_SCALE,
8831                .ret = 0,
8832        };
8833
8834        buf = strim(buf);
8835        if (strcmp(buf, "max")) {
8836                req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
8837                                             &req.percent);
8838                if (req.ret)
8839                        return req;
8840                if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
8841                        req.ret = -ERANGE;
8842                        return req;
8843                }
8844
8845                req.util = req.percent << SCHED_CAPACITY_SHIFT;
8846                req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
8847        }
8848
8849        return req;
8850}
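    /*
     * A worked example of the conversion above, assuming SCHED_CAPACITY_SHIFT
     * is 10 (SCHED_CAPACITY_SCALE == 1024): writing "50" is parsed with
     * UCLAMP_PERCENT_SHIFT == 2 fractional digits to percent == 5000, so
     * util == DIV_ROUND_CLOSEST_ULL(5000 << 10, 10000) == 512, i.e. half of
     * the capacity scale; writing "max" leaves util at SCHED_CAPACITY_SCALE.
     */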
8851
8852static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
8853                                size_t nbytes, loff_t off,
8854                                enum uclamp_id clamp_id)
8855{
8856        struct uclamp_request req;
8857        struct task_group *tg;
8858
8859        req = capacity_from_percent(buf);
8860        if (req.ret)
8861                return req.ret;
8862
8863        static_branch_enable(&sched_uclamp_used);
8864
8865        mutex_lock(&uclamp_mutex);
8866        rcu_read_lock();
8867
8868        tg = css_tg(of_css(of));
8869        if (tg->uclamp_req[clamp_id].value != req.util)
8870                uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
8871
8872        /*
8873         * Because the conversion rounding is not recoverable, we keep track
8874         * of the exact requested value.
8875         */
8876        tg->uclamp_pct[clamp_id] = req.percent;
8877
8878        /* Update effective clamps to track the most restrictive value */
8879        cpu_util_update_eff(of_css(of));
8880
8881        rcu_read_unlock();
8882        mutex_unlock(&uclamp_mutex);
8883
8884        return nbytes;
8885}
8886
8887static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
8888                                    char *buf, size_t nbytes,
8889                                    loff_t off)
8890{
8891        return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
8892}
8893
8894static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
8895                                    char *buf, size_t nbytes,
8896                                    loff_t off)
8897{
8898        return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
8899}
8900
8901static inline void cpu_uclamp_print(struct seq_file *sf,
8902                                    enum uclamp_id clamp_id)
8903{
8904        struct task_group *tg;
8905        u64 util_clamp;
8906        u64 percent;
8907        u32 rem;
8908
8909        rcu_read_lock();
8910        tg = css_tg(seq_css(sf));
8911        util_clamp = tg->uclamp_req[clamp_id].value;
8912        rcu_read_unlock();
8913
8914        if (util_clamp == SCHED_CAPACITY_SCALE) {
8915                seq_puts(sf, "max\n");
8916                return;
8917        }
8918
8919        percent = tg->uclamp_pct[clamp_id];
8920        percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
8921        seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
8922}
8923
8924static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
8925{
8926        cpu_uclamp_print(sf, UCLAMP_MIN);
8927        return 0;
8928}
8929
8930static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
8931{
8932        cpu_uclamp_print(sf, UCLAMP_MAX);
8933        return 0;
8934}
8935#endif /* CONFIG_UCLAMP_TASK_GROUP */
8936
8937#ifdef CONFIG_FAIR_GROUP_SCHED
8938static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8939                                struct cftype *cftype, u64 shareval)
8940{
8941        if (shareval > scale_load_down(ULONG_MAX))
8942                shareval = MAX_SHARES;
8943        return sched_group_set_shares(css_tg(css), scale_load(shareval));
8944}
8945
8946static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8947                               struct cftype *cft)
8948{
8949        struct task_group *tg = css_tg(css);
8950
8951        return (u64) scale_load_down(tg->shares);
8952}
8953
8954#ifdef CONFIG_CFS_BANDWIDTH
8955static DEFINE_MUTEX(cfs_constraints_mutex);
8956
8957const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8958static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8959/* More than 203 days if BW_SHIFT equals 20. */
8960static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
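    /*
     * The "203 days" figure works out as follows, assuming MAX_BW is
     * ((1ULL << (64 - BW_SHIFT)) - 1) microseconds: with BW_SHIFT == 20 that
     * is roughly 2^44 us ~= 1.76e13 us ~= 1.76e7 s ~= 203.6 days.
     */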
8961
8962static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8963
8964static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8965{
8966        int i, ret = 0, runtime_enabled, runtime_was_enabled;
8967        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8968
8969        if (tg == &root_task_group)
8970                return -EINVAL;
8971
8972        /*
8973         * Ensure we have at least some amount of bandwidth every period.  This is
8974         * to prevent reaching a state of large arrears when throttled via
8975         * entity_tick() resulting in prolonged exit starvation.
8976         */
8977        if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8978                return -EINVAL;
8979
8980        /*
8981         * Likewise, bound things on the other side by preventing insane quota
8982         * periods.  This also allows us to normalize in computing quota
8983         * feasibility.
8984         */
8985        if (period > max_cfs_quota_period)
8986                return -EINVAL;
8987
8988        /*
8989         * Bound quota to defend quota against overflow during bandwidth shift.
8990         */
8991        if (quota != RUNTIME_INF && quota > max_cfs_runtime)
8992                return -EINVAL;
8993
8994        /*
8995         * Prevent race between setting of cfs_rq->runtime_enabled and
8996         * unthrottle_offline_cfs_rqs().
8997         */
8998        get_online_cpus();
8999        mutex_lock(&cfs_constraints_mutex);
9000        ret = __cfs_schedulable(tg, period, quota);
9001        if (ret)
9002                goto out_unlock;
9003
9004        runtime_enabled = quota != RUNTIME_INF;
9005        runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9006        /*
9007         * If we need to toggle cfs_bandwidth_used, off->on must occur
9008         * before making related changes, and on->off must occur afterwards
9009         */
9010        if (runtime_enabled && !runtime_was_enabled)
9011                cfs_bandwidth_usage_inc();
9012        raw_spin_lock_irq(&cfs_b->lock);
9013        cfs_b->period = ns_to_ktime(period);
9014        cfs_b->quota = quota;
9015
9016        __refill_cfs_bandwidth_runtime(cfs_b);
9017
9018        /* Restart the period timer (if active) to handle new period expiry: */
9019        if (runtime_enabled)
9020                start_cfs_bandwidth(cfs_b);
9021
9022        raw_spin_unlock_irq(&cfs_b->lock);
9023
9024        for_each_online_cpu(i) {
9025                struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9026                struct rq *rq = cfs_rq->rq;
9027                struct rq_flags rf;
9028
9029                rq_lock_irq(rq, &rf);
9030                cfs_rq->runtime_enabled = runtime_enabled;
9031                cfs_rq->runtime_remaining = 0;
9032
9033                if (cfs_rq->throttled)
9034                        unthrottle_cfs_rq(cfs_rq);
9035                rq_unlock_irq(rq, &rf);
9036        }
9037        if (runtime_was_enabled && !runtime_enabled)
9038                cfs_bandwidth_usage_dec();
9039out_unlock:
9040        mutex_unlock(&cfs_constraints_mutex);
9041        put_online_cpus();
9042
9043        return ret;
9044}
9045
9046static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
9047{
9048        u64 quota, period;
9049
9050        period = ktime_to_ns(tg->cfs_bandwidth.period);
9051        if (cfs_quota_us < 0)
9052                quota = RUNTIME_INF;
9053        else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
9054                quota = (u64)cfs_quota_us * NSEC_PER_USEC;
9055        else
9056                return -EINVAL;
9057
9058        return tg_set_cfs_bandwidth(tg, period, quota);
9059}
9060
9061static long tg_get_cfs_quota(struct task_group *tg)
9062{
9063        u64 quota_us;
9064
9065        if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9066                return -1;
9067
9068        quota_us = tg->cfs_bandwidth.quota;
9069        do_div(quota_us, NSEC_PER_USEC);
9070
9071        return quota_us;
9072}
9073
9074static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
9075{
9076        u64 quota, period;
9077
9078        if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
9079                return -EINVAL;
9080
9081        period = (u64)cfs_period_us * NSEC_PER_USEC;
9082        quota = tg->cfs_bandwidth.quota;
9083
9084        return tg_set_cfs_bandwidth(tg, period, quota);
9085}
9086
9087static long tg_get_cfs_period(struct task_group *tg)
9088{
9089        u64 cfs_period_us;
9090
9091        cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9092        do_div(cfs_period_us, NSEC_PER_USEC);
9093
9094        return cfs_period_us;
9095}
9096
9097static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
9098                                  struct cftype *cft)
9099{
9100        return tg_get_cfs_quota(css_tg(css));
9101}
9102
9103static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
9104                                   struct cftype *cftype, s64 cfs_quota_us)
9105{
9106        return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
9107}
9108
9109static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
9110                                   struct cftype *cft)
9111{
9112        return tg_get_cfs_period(css_tg(css));
9113}
9114
9115static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
9116                                    struct cftype *cftype, u64 cfs_period_us)
9117{
9118        return tg_set_cfs_period(css_tg(css), cfs_period_us);
9119}
9120
9121struct cfs_schedulable_data {
9122        struct task_group *tg;
9123        u64 period, quota;
9124};
9125
9126/*
9127 * normalize group quota/period to be quota/max_period
9128 * note: units are usecs
9129 */
9130static u64 normalize_cfs_quota(struct task_group *tg,
9131                               struct cfs_schedulable_data *d)
9132{
9133        u64 quota, period;
9134
9135        if (tg == d->tg) {
9136                period = d->period;
9137                quota = d->quota;
9138        } else {
9139                period = tg_get_cfs_period(tg);
9140                quota = tg_get_cfs_quota(tg);
9141        }
9142
9143        /* note: these should typically be equivalent */
9144        if (quota == RUNTIME_INF || quota == -1)
9145                return RUNTIME_INF;
9146
9147        return to_ratio(period, quota);
9148}
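    /*
     * For example, assuming to_ratio(period, runtime) evaluates to
     * (runtime << BW_SHIFT) / period: quota 50000us with period 100000us
     * normalizes to about (1 << 20) / 2 == 524288, i.e. half a CPU,
     * regardless of the period the group is actually configured with.
     */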
9149
9150static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9151{
9152        struct cfs_schedulable_data *d = data;
9153        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9154        s64 quota = 0, parent_quota = -1;
9155
9156        if (!tg->parent) {
9157                quota = RUNTIME_INF;
9158        } else {
9159                struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9160
9161                quota = normalize_cfs_quota(tg, d);
9162                parent_quota = parent_b->hierarchical_quota;
9163
9164                /*
9165                 * Ensure max(child_quota) <= parent_quota.  On cgroup2,
9166                 * always take the min.  On cgroup1, only inherit when no
9167                 * limit is set:
9168                 */
9169                if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9170                        quota = min(quota, parent_quota);
9171                } else {
9172                        if (quota == RUNTIME_INF)
9173                                quota = parent_quota;
9174                        else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9175                                return -EINVAL;
9176                }
9177        }
9178        cfs_b->hierarchical_quota = quota;
9179
9180        return 0;
9181}
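    /*
     * Illustration of the rules above (the numbers are only examples): with a
     * parent quota of 50ms per 100ms period, a child requesting 80ms/100ms
     * ends up with a hierarchical_quota of min(80%, 50%) == 50% on cgroup2,
     * whereas on cgroup1 the same request fails with -EINVAL; a cgroup1 child
     * with no limit set (RUNTIME_INF) simply inherits the parent's 50%.
     */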
9182
9183static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9184{
9185        int ret;
9186        struct cfs_schedulable_data data = {
9187                .tg = tg,
9188                .period = period,
9189                .quota = quota,
9190        };
9191
9192        if (quota != RUNTIME_INF) {
9193                do_div(data.period, NSEC_PER_USEC);
9194                do_div(data.quota, NSEC_PER_USEC);
9195        }
9196
9197        rcu_read_lock();
9198        ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9199        rcu_read_unlock();
9200
9201        return ret;
9202}
9203
9204static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9205{
9206        struct task_group *tg = css_tg(seq_css(sf));
9207        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9208
9209        seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9210        seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9211        seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9212
9213        if (schedstat_enabled() && tg != &root_task_group) {
9214                u64 ws = 0;
9215                int i;
9216
9217                for_each_possible_cpu(i)
9218                        ws += schedstat_val(tg->se[i]->statistics.wait_sum);
9219
9220                seq_printf(sf, "wait_sum %llu\n", ws);
9221        }
9222
9223        return 0;
9224}
9225#endif /* CONFIG_CFS_BANDWIDTH */
9226#endif /* CONFIG_FAIR_GROUP_SCHED */
9227
9228#ifdef CONFIG_RT_GROUP_SCHED
9229static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9230                                struct cftype *cft, s64 val)
9231{
9232        return sched_group_set_rt_runtime(css_tg(css), val);
9233}
9234
9235static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9236                               struct cftype *cft)
9237{
9238        return sched_group_rt_runtime(css_tg(css));
9239}
9240
9241static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9242                                    struct cftype *cftype, u64 rt_period_us)
9243{
9244        return sched_group_set_rt_period(css_tg(css), rt_period_us);
9245}
9246
9247static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9248                                   struct cftype *cft)
9249{
9250        return sched_group_rt_period(css_tg(css));
9251}
9252#endif /* CONFIG_RT_GROUP_SCHED */
9253
9254static struct cftype cpu_legacy_files[] = {
9255#ifdef CONFIG_FAIR_GROUP_SCHED
9256        {
9257                .name = "shares",
9258                .read_u64 = cpu_shares_read_u64,
9259                .write_u64 = cpu_shares_write_u64,
9260        },
9261#endif
9262#ifdef CONFIG_CFS_BANDWIDTH
9263        {
9264                .name = "cfs_quota_us",
9265                .read_s64 = cpu_cfs_quota_read_s64,
9266                .write_s64 = cpu_cfs_quota_write_s64,
9267        },
9268        {
9269                .name = "cfs_period_us",
9270                .read_u64 = cpu_cfs_period_read_u64,
9271                .write_u64 = cpu_cfs_period_write_u64,
9272        },
9273        {
9274                .name = "stat",
9275                .seq_show = cpu_cfs_stat_show,
9276        },
9277#endif
9278#ifdef CONFIG_RT_GROUP_SCHED
9279        {
9280                .name = "rt_runtime_us",
9281                .read_s64 = cpu_rt_runtime_read,
9282                .write_s64 = cpu_rt_runtime_write,
9283        },
9284        {
9285                .name = "rt_period_us",
9286                .read_u64 = cpu_rt_period_read_uint,
9287                .write_u64 = cpu_rt_period_write_uint,
9288        },
9289#endif
9290#ifdef CONFIG_UCLAMP_TASK_GROUP
9291        {
9292                .name = "uclamp.min",
9293                .flags = CFTYPE_NOT_ON_ROOT,
9294                .seq_show = cpu_uclamp_min_show,
9295                .write = cpu_uclamp_min_write,
9296        },
9297        {
9298                .name = "uclamp.max",
9299                .flags = CFTYPE_NOT_ON_ROOT,
9300                .seq_show = cpu_uclamp_max_show,
9301                .write = cpu_uclamp_max_write,
9302        },
9303#endif
9304        { }     /* Terminate */
9305};
9306
9307static int cpu_extra_stat_show(struct seq_file *sf,
9308                               struct cgroup_subsys_state *css)
9309{
9310#ifdef CONFIG_CFS_BANDWIDTH
9311        {
9312                struct task_group *tg = css_tg(css);
9313                struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9314                u64 throttled_usec;
9315
9316                throttled_usec = cfs_b->throttled_time;
9317                do_div(throttled_usec, NSEC_PER_USEC);
9318
9319                seq_printf(sf, "nr_periods %d\n"
9320                           "nr_throttled %d\n"
9321                           "throttled_usec %llu\n",
9322                           cfs_b->nr_periods, cfs_b->nr_throttled,
9323                           throttled_usec);
9324        }
9325#endif
9326        return 0;
9327}
9328
9329#ifdef CONFIG_FAIR_GROUP_SCHED
9330static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
9331                               struct cftype *cft)
9332{
9333        struct task_group *tg = css_tg(css);
9334        u64 weight = scale_load_down(tg->shares);
9335
9336        return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
9337}
9338
9339static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
9340                                struct cftype *cft, u64 weight)
9341{
9342        /*
9343         * cgroup weight knobs should use the common MIN, DFL and MAX
9344         * values which are 1, 100 and 10000 respectively.  While it loses
9345         * a bit of range on both ends, it maps pretty well onto the shares
9346         * value used by the scheduler and the round-trip conversions preserve
9347         * the original value over the entire range.
9348         */
9349        if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
9350                return -ERANGE;
9351
9352        weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
9353
9354        return sched_group_set_shares(css_tg(css), scale_load(weight));
9355}
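    /*
     * Round-trip examples for the mapping above: cpu.weight 100 (the cgroup
     * default) maps to shares of 1024, 1 maps to 10 and 10000 maps to 102400;
     * cpu_weight_read_u64() multiplies by CGROUP_WEIGHT_DFL and divides by
     * 1024 again, so the original weight is recovered over the whole
     * [1, 10000] range.
     */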
9356
9357static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
9358                                    struct cftype *cft)
9359{
9360        unsigned long weight = scale_load_down(css_tg(css)->shares);
9361        int last_delta = INT_MAX;
9362        int prio, delta;
9363
9364        /* find the closest nice value to the current weight */
9365        for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
9366                delta = abs(sched_prio_to_weight[prio] - weight);
9367                if (delta >= last_delta)
9368                        break;
9369                last_delta = delta;
9370        }
9371
9372        return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
9373}
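    /*
     * For example, a group at the default shares of 1024 matches
     * sched_prio_to_weight[20] exactly, so the loop above breaks at
     * prio == 21 and PRIO_TO_NICE(20 + MAX_RT_PRIO) reports nice 0; weights
     * that fall between two table entries report the nice level whose weight
     * is closest.
     */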
9374
9375static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
9376                                     struct cftype *cft, s64 nice)
9377{
9378        unsigned long weight;
9379        int idx;
9380
9381        if (nice < MIN_NICE || nice > MAX_NICE)
9382                return -ERANGE;
9383
9384        idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
9385        idx = array_index_nospec(idx, 40);
9386        weight = sched_prio_to_weight[idx];
9387
9388        return sched_group_set_shares(css_tg(css), scale_load(weight));
9389}
9390#endif
9391
9392static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
9393                                                  long period, long quota)
9394{
9395        if (quota < 0)
9396                seq_puts(sf, "max");
9397        else
9398                seq_printf(sf, "%ld", quota);
9399
9400        seq_printf(sf, " %ld\n", period);
9401}
9402
9403/* caller should put the current value in *@periodp before calling */
9404static int __maybe_unused cpu_period_quota_parse(char *buf,
9405                                                 u64 *periodp, u64 *quotap)
9406{
9407        char tok[21];   /* U64_MAX */
9408
9409        if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
9410                return -EINVAL;
9411
9412        *periodp *= NSEC_PER_USEC;
9413
9414        if (sscanf(tok, "%llu", quotap))
9415                *quotap *= NSEC_PER_USEC;
9416        else if (!strcmp(tok, "max"))
9417                *quotap = RUNTIME_INF;
9418        else
9419                return -EINVAL;
9420
9421        return 0;
9422}
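    /*
     * Example inputs in the cgroup2 "cpu.max" format parsed above:
     * "max 100000" requests an unlimited quota with a 100ms period,
     * "50000 100000" requests a 50ms quota per 100ms period, and a bare
     * "50000" changes only the quota while keeping the period the caller
     * preloaded into *@periodp.
     */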
9423
9424#ifdef CONFIG_CFS_BANDWIDTH
9425static int cpu_max_show(struct seq_file *sf, void *v)
9426{
9427        struct task_group *tg = css_tg(seq_css(sf));
9428
9429        cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
9430        return 0;
9431}
9432
9433static ssize_t cpu_max_write(struct kernfs_open_file *of,
9434                             char *buf, size_t nbytes, loff_t off)
9435{
9436        struct task_group *tg = css_tg(of_css(of));
9437        u64 period = tg_get_cfs_period(tg);
9438        u64 quota;
9439        int ret;
9440
9441        ret = cpu_period_quota_parse(buf, &period, &quota);
9442        if (!ret)
9443                ret = tg_set_cfs_bandwidth(tg, period, quota);
9444        return ret ?: nbytes;
9445}
9446#endif
9447
9448static struct cftype cpu_files[] = {
9449#ifdef CONFIG_FAIR_GROUP_SCHED
9450        {
9451                .name = "weight",
9452                .flags = CFTYPE_NOT_ON_ROOT,
9453                .read_u64 = cpu_weight_read_u64,
9454                .write_u64 = cpu_weight_write_u64,
9455        },
9456        {
9457                .name = "weight.nice",
9458                .flags = CFTYPE_NOT_ON_ROOT,
9459                .read_s64 = cpu_weight_nice_read_s64,
9460                .write_s64 = cpu_weight_nice_write_s64,
9461        },
9462#endif
9463#ifdef CONFIG_CFS_BANDWIDTH
9464        {
9465                .name = "max",
9466                .flags = CFTYPE_NOT_ON_ROOT,
9467                .seq_show = cpu_max_show,
9468                .write = cpu_max_write,
9469        },
9470#endif
9471#ifdef CONFIG_UCLAMP_TASK_GROUP
9472        {
9473                .name = "uclamp.min",
9474                .flags = CFTYPE_NOT_ON_ROOT,
9475                .seq_show = cpu_uclamp_min_show,
9476                .write = cpu_uclamp_min_write,
9477        },
9478        {
9479                .name = "uclamp.max",
9480                .flags = CFTYPE_NOT_ON_ROOT,
9481                .seq_show = cpu_uclamp_max_show,
9482                .write = cpu_uclamp_max_write,
9483        },
9484#endif
9485        { }     /* terminate */
9486};
9487
9488struct cgroup_subsys cpu_cgrp_subsys = {
9489        .css_alloc      = cpu_cgroup_css_alloc,
9490        .css_online     = cpu_cgroup_css_online,
9491        .css_released   = cpu_cgroup_css_released,
9492        .css_free       = cpu_cgroup_css_free,
9493        .css_extra_stat_show = cpu_extra_stat_show,
9494        .fork           = cpu_cgroup_fork,
9495        .can_attach     = cpu_cgroup_can_attach,
9496        .attach         = cpu_cgroup_attach,
9497        .legacy_cftypes = cpu_legacy_files,
9498        .dfl_cftypes    = cpu_files,
9499        .early_init     = true,
9500        .threaded       = true,
9501};
9502
9503#endif  /* CONFIG_CGROUP_SCHED */
9504
9505void dump_cpu_task(int cpu)
9506{
9507        pr_info("Task dump for CPU %d:\n", cpu);
9508        sched_show_task(cpu_curr(cpu));
9509}
9510
9511/*
9512 * Nice levels are multiplicative, with a gentle 10% change for every
9513 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
9514 * nice 1, it will get ~10% less CPU time than another CPU-bound task
9515 * that remained on nice 0.
9516 *
9517 * The "10% effect" is relative and cumulative: from _any_ nice level,
9518 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
9519 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
9520 * If a task goes up by ~10% and another task goes down by ~10% then
9521 * the relative distance between them is ~25%.)
9522 */
9523const int sched_prio_to_weight[40] = {
9524 /* -20 */     88761,     71755,     56483,     46273,     36291,
9525 /* -15 */     29154,     23254,     18705,     14949,     11916,
9526 /* -10 */      9548,      7620,      6100,      4904,      3906,
9527 /*  -5 */      3121,      2501,      1991,      1586,      1277,
9528 /*   0 */      1024,       820,       655,       526,       423,
9529 /*   5 */       335,       272,       215,       172,       137,
9530 /*  10 */       110,        87,        70,        56,        45,
9531 /*  15 */        36,        29,        23,        18,        15,
9532};
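    /*
     * The ~1.25 step can be read straight off the table: 1024 / 820 ~= 1.25
     * between nice 0 and nice 1, and 88761 / 71755 ~= 1.24 between nice -20
     * and nice -19, so each nice step shifts roughly 10% of CPU time between
     * two otherwise identical competitors.
     */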
9533
9534/*
9535 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
9536 *
9537 * In cases where the weight does not change often, we can use the
9538 * precalculated inverse to speed up arithmetics by turning divisions
9539 * into multiplications:
9540 */
9541const u32 sched_prio_to_wmult[40] = {
9542 /* -20 */     48388,     59856,     76040,     92818,    118348,
9543 /* -15 */    147320,    184698,    229616,    287308,    360437,
9544 /* -10 */    449829,    563644,    704093,    875809,   1099582,
9545 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
9546 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
9547 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
9548 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
9549 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
9550};
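    /*
     * Sanity check for the table above: 2^32 / 1024 == 4194304, the nice-0
     * entry, so a division by a weight can be replaced by a multiplication
     * with its precomputed inverse followed by a right shift of 32.
     */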
9551
9552void call_trace_sched_update_nr_running(struct rq *rq, int count)
9553{
9554        trace_sched_update_nr_running_tp(rq, count);
9555}
9556