linux/kernel/sched/deadline.c
   1/*
   2 * Deadline Scheduling Class (SCHED_DEADLINE)
   3 *
   4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
   5 *
   6 * Tasks that periodically execute their instances for less than their
   7 * runtime won't miss any of their deadlines.
   8 * Tasks that are not periodic or sporadic or that try to execute more
   9 * than their reserved bandwidth will be slowed down (and may potentially
  10 * miss some of their deadlines), and won't affect any other task.
  11 *
  12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
  13 *                    Juri Lelli <juri.lelli@gmail.com>,
  14 *                    Michael Trimarchi <michael@amarulasolutions.com>,
  15 *                    Fabio Checconi <fchecconi@gmail.com>
  16 */
  17#include "sched.h"
  18
  19#include <linux/slab.h>
  20
  21struct dl_bandwidth def_dl_bandwidth;
  22
  23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  24{
  25        return container_of(dl_se, struct task_struct, dl);
  26}
  27
  28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  29{
  30        return container_of(dl_rq, struct rq, dl);
  31}
  32
  33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  34{
  35        struct task_struct *p = dl_task_of(dl_se);
  36        struct rq *rq = task_rq(p);
  37
  38        return &rq->dl;
  39}
  40
  41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  42{
  43        return !RB_EMPTY_NODE(&dl_se->rb_node);
  44}
  45
  46static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
  47{
  48        struct sched_dl_entity *dl_se = &p->dl;
  49
  50        return dl_rq->rb_leftmost == &dl_se->rb_node;
  51}
  52
  53void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
  54{
  55        raw_spin_lock_init(&dl_b->dl_runtime_lock);
  56        dl_b->dl_period = period;
  57        dl_b->dl_runtime = runtime;
  58}
  59
  60void init_dl_bw(struct dl_bw *dl_b)
  61{
  62        raw_spin_lock_init(&dl_b->lock);
  63        raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
  64        if (global_rt_runtime() == RUNTIME_INF)
  65                dl_b->bw = -1;
  66        else
  67                dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
  68        raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
  69        dl_b->total_bw = 0;
  70}
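
     /*
      * As an example, with the default rt sysctl values (a runtime of
      * 950000us over a 1000000us period), to_ratio() encodes roughly 0.95
      * as a fixed-point fraction, so dl_b->bw admits -deadline tasks up to
      * about 95% of the available CPU bandwidth before sched_setattr()
      * starts failing admission control.
      */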
  71
  72void init_dl_rq(struct dl_rq *dl_rq)
  73{
  74        dl_rq->rb_root = RB_ROOT;
  75
  76#ifdef CONFIG_SMP
  77        /* zero means no -deadline tasks */
  78        dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
  79
  80        dl_rq->dl_nr_migratory = 0;
  81        dl_rq->overloaded = 0;
  82        dl_rq->pushable_dl_tasks_root = RB_ROOT;
  83#else
  84        init_dl_bw(&dl_rq->dl_bw);
  85#endif
  86}
  87
  88#ifdef CONFIG_SMP
  89
  90static inline int dl_overloaded(struct rq *rq)
  91{
  92        return atomic_read(&rq->rd->dlo_count);
  93}
  94
  95static inline void dl_set_overload(struct rq *rq)
  96{
  97        if (!rq->online)
  98                return;
  99
 100        cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
 101        /*
 102         * Must be visible before the overload count is
 103         * set (as in sched_rt.c).
 104         *
 105         * Matched by the barrier in pull_dl_task().
 106         */
 107        smp_wmb();
 108        atomic_inc(&rq->rd->dlo_count);
 109}
 110
 111static inline void dl_clear_overload(struct rq *rq)
 112{
 113        if (!rq->online)
 114                return;
 115
 116        atomic_dec(&rq->rd->dlo_count);
 117        cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 118}
 119
 120static void update_dl_migration(struct dl_rq *dl_rq)
 121{
 122        if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 123                if (!dl_rq->overloaded) {
 124                        dl_set_overload(rq_of_dl_rq(dl_rq));
 125                        dl_rq->overloaded = 1;
 126                }
 127        } else if (dl_rq->overloaded) {
 128                dl_clear_overload(rq_of_dl_rq(dl_rq));
 129                dl_rq->overloaded = 0;
 130        }
 131}
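
     /*
      * Concretely: a dl_rq with two or more queued -deadline tasks, at
      * least one of which is allowed to run on more than one CPU, gets
      * flagged overloaded above, making it a candidate source for
      * pull_dl_task() on other runqueues.
      */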
 132
 133static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 134{
 135        struct task_struct *p = dl_task_of(dl_se);
 136
 137        if (p->nr_cpus_allowed > 1)
 138                dl_rq->dl_nr_migratory++;
 139
 140        update_dl_migration(dl_rq);
 141}
 142
 143static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 144{
 145        struct task_struct *p = dl_task_of(dl_se);
 146
 147        if (p->nr_cpus_allowed > 1)
 148                dl_rq->dl_nr_migratory--;
 149
 150        update_dl_migration(dl_rq);
 151}
 152
 153/*
  154 * The list of pushable -deadline tasks is not a plist, like in
  155 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 156 */
 157static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 158{
 159        struct dl_rq *dl_rq = &rq->dl;
 160        struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
 161        struct rb_node *parent = NULL;
 162        struct task_struct *entry;
 163        int leftmost = 1;
 164
 165        BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 166
 167        while (*link) {
 168                parent = *link;
 169                entry = rb_entry(parent, struct task_struct,
 170                                 pushable_dl_tasks);
 171                if (dl_entity_preempt(&p->dl, &entry->dl))
 172                        link = &parent->rb_left;
 173                else {
 174                        link = &parent->rb_right;
 175                        leftmost = 0;
 176                }
 177        }
 178
 179        if (leftmost)
 180                dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
 181
 182        rb_link_node(&p->pushable_dl_tasks, parent, link);
 183        rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 184}
 185
 186static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 187{
 188        struct dl_rq *dl_rq = &rq->dl;
 189
 190        if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 191                return;
 192
 193        if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
 194                struct rb_node *next_node;
 195
 196                next_node = rb_next(&p->pushable_dl_tasks);
 197                dl_rq->pushable_dl_tasks_leftmost = next_node;
 198        }
 199
 200        rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 201        RB_CLEAR_NODE(&p->pushable_dl_tasks);
 202}
 203
 204static inline int has_pushable_dl_tasks(struct rq *rq)
 205{
 206        return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
 207}
 208
 209static int push_dl_task(struct rq *rq);
 210
 211static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 212{
 213        return dl_task(prev);
 214}
 215
 216static DEFINE_PER_CPU(struct callback_head, dl_push_head);
 217static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
 218
 219static void push_dl_tasks(struct rq *);
 220static void pull_dl_task(struct rq *);
 221
 222static inline void queue_push_tasks(struct rq *rq)
 223{
 224        if (!has_pushable_dl_tasks(rq))
 225                return;
 226
 227        queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
 228}
 229
 230static inline void queue_pull_task(struct rq *rq)
 231{
 232        queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
 233}
 234
 235static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 236
 237static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 238{
 239        struct rq *later_rq = NULL;
 240        bool fallback = false;
 241
 242        later_rq = find_lock_later_rq(p, rq);
 243
 244        if (!later_rq) {
 245                int cpu;
 246
 247                /*
 248                 * If we cannot preempt any rq, fall back to pick any
 249                 * online cpu.
 250                 */
 251                fallback = true;
 252                cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
 253                if (cpu >= nr_cpu_ids) {
 254                        /*
  255                         * Failed to find any suitable cpu.
 256                         * The task will never come back!
 257                         */
 258                        BUG_ON(dl_bandwidth_enabled());
 259
 260                        /*
 261                         * If admission control is disabled we
 262                         * try a little harder to let the task
 263                         * run.
 264                         */
 265                        cpu = cpumask_any(cpu_active_mask);
 266                }
 267                later_rq = cpu_rq(cpu);
 268                double_lock_balance(rq, later_rq);
 269        }
 270
 271        /*
 272         * By now the task is replenished and enqueued; migrate it.
 273         */
 274        deactivate_task(rq, p, 0);
 275        set_task_cpu(p, later_rq->cpu);
 276        activate_task(later_rq, p, 0);
 277
 278        if (!fallback)
 279                resched_curr(later_rq);
 280
 281        double_unlock_balance(later_rq, rq);
 282
 283        return later_rq;
 284}
 285
 286#else
 287
 288static inline
 289void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 290{
 291}
 292
 293static inline
 294void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 295{
 296}
 297
 298static inline
 299void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 300{
 301}
 302
 303static inline
 304void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 305{
 306}
 307
 308static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 309{
 310        return false;
 311}
 312
 313static inline void pull_dl_task(struct rq *rq)
 314{
 315}
 316
 317static inline void queue_push_tasks(struct rq *rq)
 318{
 319}
 320
 321static inline void queue_pull_task(struct rq *rq)
 322{
 323}
 324#endif /* CONFIG_SMP */
 325
 326static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 327static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 328static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
 329                                  int flags);
 330
 331/*
 332 * We are being explicitly informed that a new instance is starting,
 333 * and this means that:
 334 *  - the absolute deadline of the entity has to be placed at
 335 *    current time + relative deadline;
 336 *  - the runtime of the entity has to be set to the maximum value.
 337 *
  338 * The capability of specifying such an event is useful whenever a -deadline
  339 * entity wants to (try to!) synchronize its behaviour with that of the
  340 * scheduler, and to (try to!) reconcile itself with its own scheduling
 341 * parameters.
 342 */
 343static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
 344                                       struct sched_dl_entity *pi_se)
 345{
 346        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 347        struct rq *rq = rq_of_dl_rq(dl_rq);
 348
 349        WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
 350
 351        /*
 352         * We use the regular wall clock time to set deadlines in the
 353         * future; in fact, we must consider execution overheads (time
 354         * spent on hardirq context, etc.).
 355         */
 356        dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 357        dl_se->runtime = pi_se->dl_runtime;
 358        dl_se->dl_new = 0;
 359}
 360
 361/*
 362 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
  363 * possibility of an entity lasting more than what it declared, and thus
 364 * exhausting its runtime.
 365 *
 366 * Here we are interested in making runtime overrun possible, but we do
  367 * not want an entity which is misbehaving to affect the scheduling of all
 368 * other entities.
 369 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 370 * is used, in order to confine each entity within its own bandwidth.
 371 *
 372 * This function deals exactly with that, and ensures that when the runtime
  373 * of an entity is replenished, its deadline is also postponed. That ensures
  374 * the overrunning entity can't interfere with other entities in the system and
  375 * can't make them miss their deadlines. Reasons why this kind of overrun
  376 * could happen are, typically, an entity voluntarily trying to exceed its
  377 * runtime, or having underestimated it during sched_setattr().
 378 */
 379static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 380                                struct sched_dl_entity *pi_se)
 381{
 382        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 383        struct rq *rq = rq_of_dl_rq(dl_rq);
 384
 385        BUG_ON(pi_se->dl_runtime <= 0);
 386
 387        /*
 388         * This could be the case for a !-dl task that is boosted.
 389         * Just go with full inherited parameters.
 390         */
 391        if (dl_se->dl_deadline == 0) {
 392                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 393                dl_se->runtime = pi_se->dl_runtime;
 394        }
 395
 396        /*
 397         * We keep moving the deadline away until we get some
 398         * available runtime for the entity. This ensures correct
 399         * handling of situations where the runtime overrun is
  400         * arbitrarily large.
 401         */
 402        while (dl_se->runtime <= 0) {
 403                dl_se->deadline += pi_se->dl_period;
 404                dl_se->runtime += pi_se->dl_runtime;
 405        }
 406
 407        /*
 408         * At this point, the deadline really should be "in
 409         * the future" with respect to rq->clock. If it's
 410         * not, we are, for some reason, lagging too much!
  411         * Anyway, after having warned userspace about that,
  412         * we still try to keep things running by
 413         * resetting the deadline and the budget of the
 414         * entity.
 415         */
 416        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
  417                 printk_deferred_once("sched: DL replenish lagged too much\n");
 418                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 419                dl_se->runtime = pi_se->dl_runtime;
 420        }
 421
 422        if (dl_se->dl_yielded)
 423                dl_se->dl_yielded = 0;
 424        if (dl_se->dl_throttled)
 425                dl_se->dl_throttled = 0;
 426}
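
     /*
      * A worked example of the loop above, assuming dl_runtime = 10ms and
      * dl_period = 100ms: an entity that overran down to runtime = -3ms is
      * left with runtime = 7ms and its deadline pushed 100ms away after one
      * iteration, while a -25ms overrun takes three iterations, ending with
      * runtime = 5ms and the deadline moved 300ms away.
      */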
 427
 428/*
 429 * Here we check if --at time t-- an entity (which is probably being
 430 * [re]activated or, in general, enqueued) can use its remaining runtime
 431 * and its current deadline _without_ exceeding the bandwidth it is
 432 * assigned (function returns true if it can't). We are in fact applying
 433 * one of the CBS rules: when a task wakes up, if the residual runtime
 434 * over residual deadline fits within the allocated bandwidth, then we
 435 * can keep the current (absolute) deadline and residual budget without
 436 * disrupting the schedulability of the system. Otherwise, we should
 437 * refill the runtime and set the deadline a period in the future,
 438 * because keeping the current (absolute) deadline of the task would
 439 * result in breaking guarantees promised to other tasks (refer to
  440 * Documentation/scheduler/sched-deadline.txt for more information).
 441 *
 442 * This function returns true if:
 443 *
 444 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 445 *
 446 * IOW we can't recycle current parameters.
 447 *
 448 * Notice that the bandwidth check is done against the period. For
  449 * a task with deadline equal to period this is the same as using
 450 * dl_deadline instead of dl_period in the equation above.
 451 */
 452static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 453                               struct sched_dl_entity *pi_se, u64 t)
 454{
 455        u64 left, right;
 456
 457        /*
 458         * left and right are the two sides of the equation above,
 459         * after a bit of shuffling to use multiplications instead
 460         * of divisions.
 461         *
 462         * Note that none of the time values involved in the two
 463         * multiplications are absolute: dl_deadline and dl_runtime
 464         * are the relative deadline and the maximum runtime of each
 465         * instance, runtime is the runtime left for the last instance
 466         * and (deadline - t), since t is rq->clock, is the time left
 467         * to the (absolute) deadline. Even if overflowing the u64 type
 468         * is very unlikely to occur in both cases, here we scale down
  469         * as we want to avoid that risk at all. Scaling down by 10 bits
  470         * means that we reduce granularity to about 1us. We are fine with it,
 471         * since this is only a true/false check and, anyway, thinking
 472         * of anything below microseconds resolution is actually fiction
 473         * (but still we want to give the user that illusion >;).
 474         */
 475        left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 476        right = ((dl_se->deadline - t) >> DL_SCALE) *
 477                (pi_se->dl_runtime >> DL_SCALE);
 478
 479        return dl_time_before(right, left);
 480}
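
     /*
      * Numerically, assuming dl_runtime = 10ms, dl_period = 100ms, a
      * residual runtime of 4ms and an absolute deadline 50ms away:
      * 4/50 = 0.08 is below 10/100 = 0.1, the function returns false and
      * the current deadline and runtime can be reused. With only 20ms left
      * to the deadline the ratio becomes 0.2 > 0.1, the entity would
      * overflow its bandwidth and update_dl_entity() gives it a fresh
      * deadline and a full runtime instead.
      */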
 481
 482/*
 483 * When a -deadline entity is queued back on the runqueue, its runtime and
 484 * deadline might need updating.
 485 *
 486 * The policy here is that we update the deadline of the entity only if:
 487 *  - the current deadline is in the past,
 488 *  - using the remaining runtime with the current deadline would make
 489 *    the entity exceed its bandwidth.
 490 */
 491static void update_dl_entity(struct sched_dl_entity *dl_se,
 492                             struct sched_dl_entity *pi_se)
 493{
 494        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 495        struct rq *rq = rq_of_dl_rq(dl_rq);
 496
 497        /*
 498         * The arrival of a new instance needs special treatment, i.e.,
 499         * the actual scheduling parameters have to be "renewed".
 500         */
 501        if (dl_se->dl_new) {
 502                setup_new_dl_entity(dl_se, pi_se);
 503                return;
 504        }
 505
 506        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 507            dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
 508                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 509                dl_se->runtime = pi_se->dl_runtime;
 510        }
 511}
 512
 513/*
 514 * If the entity depleted all its runtime, and if we want it to sleep
 515 * while waiting for some new execution time to become available, we
 516 * set the bandwidth enforcement timer to the replenishment instant
 517 * and try to activate it.
 518 *
 519 * Notice that it is important for the caller to know if the timer
 520 * actually started or not (i.e., the replenishment instant is in
 521 * the future or in the past).
 522 */
 523static int start_dl_timer(struct task_struct *p)
 524{
 525        struct sched_dl_entity *dl_se = &p->dl;
 526        struct hrtimer *timer = &dl_se->dl_timer;
 527        struct rq *rq = task_rq(p);
 528        ktime_t now, act;
 529        s64 delta;
 530
 531        lockdep_assert_held(&rq->lock);
 532
 533        /*
 534         * We want the timer to fire at the deadline, but considering
 535         * that it is actually coming from rq->clock and not from
 536         * hrtimer's time base reading.
 537         */
 538        act = ns_to_ktime(dl_se->deadline);
 539        now = hrtimer_cb_get_time(timer);
 540        delta = ktime_to_ns(now) - rq_clock(rq);
 541        act = ktime_add_ns(act, delta);
 542
 543        /*
 544         * If the expiry time already passed, e.g., because the value
 545         * chosen as the deadline is too small, don't even try to
 546         * start the timer in the past!
 547         */
 548        if (ktime_us_delta(act, now) < 0)
 549                return 0;
 550
 551        /*
 552         * !enqueued will guarantee another callback; even if one is already in
 553         * progress. This ensures a balanced {get,put}_task_struct().
 554         *
 555         * The race against __run_timer() clearing the enqueued state is
 556         * harmless because we're holding task_rq()->lock, therefore the timer
 557         * expiring after we've done the check will wait on its task_rq_lock()
 558         * and observe our state.
 559         */
 560        if (!hrtimer_is_queued(timer)) {
 561                get_task_struct(p);
 562                hrtimer_start(timer, act, HRTIMER_MODE_ABS);
 563        }
 564
 565        return 1;
 566}
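
     /*
      * The delta computed above only translates between clock bases: if,
      * say, the hrtimer base currently reads 2ms ahead of rq_clock(), the
      * programmed expiry is the deadline plus those 2ms, so the timer still
      * fires at the instant the deadline is reached as measured by
      * rq_clock().
      */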
 567
 568/*
 569 * This is the bandwidth enforcement timer callback. If here, we know
 570 * a task is not on its dl_rq, since the fact that the timer was running
 571 * means the task is throttled and needs a runtime replenishment.
 572 *
  573 * However, what we actually do depends on whether the task is active
  574 * (it is on its rq) or has been removed from there by a call to
 575 * dequeue_task_dl(). In the former case we must issue the runtime
 576 * replenishment and add the task back to the dl_rq; in the latter, we just
 577 * do nothing but clearing dl_throttled, so that runtime and deadline
 578 * updating (and the queueing back to dl_rq) will be done by the
 579 * next call to enqueue_task_dl().
 580 */
 581static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 582{
 583        struct sched_dl_entity *dl_se = container_of(timer,
 584                                                     struct sched_dl_entity,
 585                                                     dl_timer);
 586        struct task_struct *p = dl_task_of(dl_se);
 587        unsigned long flags;
 588        struct rq *rq;
 589
 590        rq = task_rq_lock(p, &flags);
 591
 592        /*
 593         * The task might have changed its scheduling policy to something
  594         * different than SCHED_DEADLINE (through switched_from_dl()).
 595         */
 596        if (!dl_task(p)) {
 597                __dl_clear_params(p);
 598                goto unlock;
 599        }
 600
 601        /*
 602         * This is possible if switched_from_dl() raced against a running
 603         * callback that took the above !dl_task() path and we've since then
 604         * switched back into SCHED_DEADLINE.
 605         *
 606         * There's nothing to do except drop our task reference.
 607         */
 608        if (dl_se->dl_new)
 609                goto unlock;
 610
 611        /*
 612         * The task might have been boosted by someone else and might be in the
  613         * boosting/deboosting path; it's not throttled.
 614         */
 615        if (dl_se->dl_boosted)
 616                goto unlock;
 617
 618        /*
 619         * Spurious timer due to start_dl_timer() race; or we already received
 620         * a replenishment from rt_mutex_setprio().
 621         */
 622        if (!dl_se->dl_throttled)
 623                goto unlock;
 624
 625        sched_clock_tick();
 626        update_rq_clock(rq);
 627
 628        /*
 629         * If the throttle happened during sched-out; like:
 630         *
 631         *   schedule()
 632         *     deactivate_task()
 633         *       dequeue_task_dl()
 634         *         update_curr_dl()
 635         *           start_dl_timer()
 636         *         __dequeue_task_dl()
 637         *     prev->on_rq = 0;
 638         *
 639         * We can be both throttled and !queued. Replenish the counter
 640         * but do not enqueue -- wait for our wakeup to do that.
 641         */
 642        if (!task_on_rq_queued(p)) {
 643                replenish_dl_entity(dl_se, dl_se);
 644                goto unlock;
 645        }
 646
 647        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 648        if (dl_task(rq->curr))
 649                check_preempt_curr_dl(rq, p, 0);
 650        else
 651                resched_curr(rq);
 652
 653#ifdef CONFIG_SMP
 654        /*
 655         * Perform balancing operations here; after the replenishments.  We
 656         * cannot drop rq->lock before this, otherwise the assertion in
 657         * start_dl_timer() about not missing updates is not true.
 658         *
 659         * If we find that the rq the task was on is no longer available, we
 660         * need to select a new rq.
 661         *
 662         * XXX figure out if select_task_rq_dl() deals with offline cpus.
 663         */
 664        if (unlikely(!rq->online))
 665                rq = dl_task_offline_migration(rq, p);
 666
 667        /*
 668         * Queueing this task back might have overloaded rq, check if we need
 669         * to kick someone away.
 670         */
 671        if (has_pushable_dl_tasks(rq)) {
 672                /*
  673                 * Nothing relies on rq->lock after this, so it's safe to drop
 674                 * rq->lock.
 675                 */
 676                lockdep_unpin_lock(&rq->lock);
 677                push_dl_task(rq);
 678                lockdep_pin_lock(&rq->lock);
 679        }
 680#endif
 681
 682unlock:
 683        task_rq_unlock(rq, p, &flags);
 684
 685        /*
 686         * This can free the task_struct, including this hrtimer, do not touch
 687         * anything related to that after this.
 688         */
 689        put_task_struct(p);
 690
 691        return HRTIMER_NORESTART;
 692}
 693
 694void init_dl_task_timer(struct sched_dl_entity *dl_se)
 695{
 696        struct hrtimer *timer = &dl_se->dl_timer;
 697
 698        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 699        timer->function = dl_task_timer;
 700}
 701
 702static
 703int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 704{
 705        return (dl_se->runtime <= 0);
 706}
 707
 708extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
 709
 710/*
 711 * Update the current task's runtime statistics (provided it is still
 712 * a -deadline task and has not been removed from the dl_rq).
 713 */
 714static void update_curr_dl(struct rq *rq)
 715{
 716        struct task_struct *curr = rq->curr;
 717        struct sched_dl_entity *dl_se = &curr->dl;
 718        u64 delta_exec;
 719
 720        if (!dl_task(curr) || !on_dl_rq(dl_se))
 721                return;
 722
 723        /*
 724         * Consumed budget is computed considering the time as
 725         * observed by schedulable tasks (excluding time spent
 726         * in hardirq context, etc.). Deadlines are instead
 727         * computed using hard walltime. This seems to be the more
 728         * natural solution, but the full ramifications of this
 729         * approach need further study.
 730         */
 731        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 732        if (unlikely((s64)delta_exec <= 0))
 733                return;
 734
 735        schedstat_set(curr->se.statistics.exec_max,
 736                      max(curr->se.statistics.exec_max, delta_exec));
 737
 738        curr->se.sum_exec_runtime += delta_exec;
 739        account_group_exec_runtime(curr, delta_exec);
 740
 741        curr->se.exec_start = rq_clock_task(rq);
 742        cpuacct_charge(curr, delta_exec);
 743
 744        sched_rt_avg_update(rq, delta_exec);
 745
 746        dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
 747        if (dl_runtime_exceeded(dl_se)) {
 748                dl_se->dl_throttled = 1;
 749                __dequeue_task_dl(rq, curr, 0);
 750                if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
 751                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 752
 753                if (!is_leftmost(curr, &rq->dl))
 754                        resched_curr(rq);
 755        }
 756
 757        /*
 758         * Because -- for now -- we share the rt bandwidth, we need to
 759         * account our runtime there too, otherwise actual rt tasks
 760         * would be able to exceed the shared quota.
 761         *
 762         * Account to the root rt group for now.
 763         *
 764         * The solution we're working towards is having the RT groups scheduled
 765         * using deadline servers -- however there's a few nasties to figure
 766         * out before that can happen.
 767         */
 768        if (rt_bandwidth_enabled()) {
 769                struct rt_rq *rt_rq = &rq->rt;
 770
 771                raw_spin_lock(&rt_rq->rt_runtime_lock);
 772                /*
 773                 * We'll let actual RT tasks worry about the overflow here, we
  774                 * have our own CBS to keep us in line; only account when RT
 775                 * bandwidth is relevant.
 776                 */
 777                if (sched_rt_bandwidth_account(rt_rq))
 778                        rt_rq->rt_time += delta_exec;
 779                raw_spin_unlock(&rt_rq->rt_runtime_lock);
 780        }
 781}
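
     /*
      * Tying the above together: a task with dl_runtime = 10ms that has run
      * for 12ms of its current instance ends up with runtime = -2ms, gets
      * dl_throttled set, is dequeued, and normally stays off the dl_rq
      * until dl_task_timer() fires at its current absolute deadline and
      * replenishes it.
      */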
 782
 783#ifdef CONFIG_SMP
 784
 785static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
 786
 787static inline u64 next_deadline(struct rq *rq)
 788{
 789        struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
 790
 791        if (next && dl_prio(next->prio))
 792                return next->dl.deadline;
 793        else
 794                return 0;
 795}
 796
 797static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 798{
 799        struct rq *rq = rq_of_dl_rq(dl_rq);
 800
 801        if (dl_rq->earliest_dl.curr == 0 ||
 802            dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
 803                /*
 804                 * If the dl_rq had no -deadline tasks, or if the new task
 805                 * has shorter deadline than the current one on dl_rq, we
 806                 * know that the previous earliest becomes our next earliest,
 807                 * as the new task becomes the earliest itself.
 808                 */
 809                dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
 810                dl_rq->earliest_dl.curr = deadline;
 811                cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
 812        } else if (dl_rq->earliest_dl.next == 0 ||
 813                   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
 814                /*
  815                 * On the other hand, if the new -deadline task has
  816                 * a later deadline than the earliest one on dl_rq, but
 817                 * it is earlier than the next (if any), we must
 818                 * recompute the next-earliest.
 819                 */
 820                dl_rq->earliest_dl.next = next_deadline(rq);
 821        }
 822}
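
     /*
      * For example, with earliest_dl.curr = 100 and earliest_dl.next = 150
      * (in rq clock time), enqueueing a task with deadline 90 makes it the
      * new earliest (curr = 90, next = 100), while enqueueing one with
      * deadline 120 leaves curr alone and only refreshes the cached
      * next-earliest to 120.
      */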
 823
 824static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 825{
 826        struct rq *rq = rq_of_dl_rq(dl_rq);
 827
 828        /*
 829         * Since we may have removed our earliest (and/or next earliest)
 830         * task we must recompute them.
 831         */
 832        if (!dl_rq->dl_nr_running) {
 833                dl_rq->earliest_dl.curr = 0;
 834                dl_rq->earliest_dl.next = 0;
 835                cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
 836        } else {
 837                struct rb_node *leftmost = dl_rq->rb_leftmost;
 838                struct sched_dl_entity *entry;
 839
 840                entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 841                dl_rq->earliest_dl.curr = entry->deadline;
 842                dl_rq->earliest_dl.next = next_deadline(rq);
 843                cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
 844        }
 845}
 846
 847#else
 848
 849static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 850static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 851
 852#endif /* CONFIG_SMP */
 853
 854static inline
 855void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 856{
 857        int prio = dl_task_of(dl_se)->prio;
 858        u64 deadline = dl_se->deadline;
 859
 860        WARN_ON(!dl_prio(prio));
 861        dl_rq->dl_nr_running++;
 862        add_nr_running(rq_of_dl_rq(dl_rq), 1);
 863
 864        inc_dl_deadline(dl_rq, deadline);
 865        inc_dl_migration(dl_se, dl_rq);
 866}
 867
 868static inline
 869void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 870{
 871        int prio = dl_task_of(dl_se)->prio;
 872
 873        WARN_ON(!dl_prio(prio));
 874        WARN_ON(!dl_rq->dl_nr_running);
 875        dl_rq->dl_nr_running--;
 876        sub_nr_running(rq_of_dl_rq(dl_rq), 1);
 877
 878        dec_dl_deadline(dl_rq, dl_se->deadline);
 879        dec_dl_migration(dl_se, dl_rq);
 880}
 881
 882static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
 883{
 884        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 885        struct rb_node **link = &dl_rq->rb_root.rb_node;
 886        struct rb_node *parent = NULL;
 887        struct sched_dl_entity *entry;
 888        int leftmost = 1;
 889
 890        BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
 891
 892        while (*link) {
 893                parent = *link;
 894                entry = rb_entry(parent, struct sched_dl_entity, rb_node);
 895                if (dl_time_before(dl_se->deadline, entry->deadline))
 896                        link = &parent->rb_left;
 897                else {
 898                        link = &parent->rb_right;
 899                        leftmost = 0;
 900                }
 901        }
 902
 903        if (leftmost)
 904                dl_rq->rb_leftmost = &dl_se->rb_node;
 905
 906        rb_link_node(&dl_se->rb_node, parent, link);
 907        rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
 908
 909        inc_dl_tasks(dl_se, dl_rq);
 910}
 911
 912static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
 913{
 914        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 915
 916        if (RB_EMPTY_NODE(&dl_se->rb_node))
 917                return;
 918
 919        if (dl_rq->rb_leftmost == &dl_se->rb_node) {
 920                struct rb_node *next_node;
 921
 922                next_node = rb_next(&dl_se->rb_node);
 923                dl_rq->rb_leftmost = next_node;
 924        }
 925
 926        rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
 927        RB_CLEAR_NODE(&dl_se->rb_node);
 928
 929        dec_dl_tasks(dl_se, dl_rq);
 930}
 931
 932static void
 933enqueue_dl_entity(struct sched_dl_entity *dl_se,
 934                  struct sched_dl_entity *pi_se, int flags)
 935{
 936        BUG_ON(on_dl_rq(dl_se));
 937
 938        /*
 939         * If this is a wakeup or a new instance, the scheduling
 940         * parameters of the task might need updating. Otherwise,
 941         * we want a replenishment of its runtime.
 942         */
 943        if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
 944                update_dl_entity(dl_se, pi_se);
 945        else if (flags & ENQUEUE_REPLENISH)
 946                replenish_dl_entity(dl_se, pi_se);
 947
 948        __enqueue_dl_entity(dl_se);
 949}
 950
 951static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
 952{
 953        __dequeue_dl_entity(dl_se);
 954}
 955
 956static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 957{
 958        struct task_struct *pi_task = rt_mutex_get_top_task(p);
 959        struct sched_dl_entity *pi_se = &p->dl;
 960
 961        /*
 962         * Use the scheduling parameters of the top pi-waiter
 963         * task if we have one and its (absolute) deadline is
  964         * smaller than ours... otherwise we keep our runtime and
 965         * deadline.
 966         */
 967        if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
 968                pi_se = &pi_task->dl;
 969        } else if (!dl_prio(p->normal_prio)) {
 970                /*
 971                 * Special case in which we have a !SCHED_DEADLINE task
  972                 * that is going to be deboosted, but exceeds its
  973                 * runtime while doing so. No point in replenishing
  974                 * it, as it's going to return to its original
 975                 * scheduling class after this.
 976                 */
 977                BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
 978                return;
 979        }
 980
 981        /*
 982         * If p is throttled, we do nothing. In fact, if it exhausted
 983         * its budget it needs a replenishment and, since it now is on
 984         * its rq, the bandwidth timer callback (which clearly has not
 985         * run yet) will take care of this.
 986         */
 987        if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
 988                return;
 989
 990        enqueue_dl_entity(&p->dl, pi_se, flags);
 991
 992        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 993                enqueue_pushable_dl_task(rq, p);
 994}
 995
 996static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 997{
 998        dequeue_dl_entity(&p->dl);
 999        dequeue_pushable_dl_task(rq, p);
1000}
1001
1002static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1003{
1004        update_curr_dl(rq);
1005        __dequeue_task_dl(rq, p, flags);
1006}
1007
1008/*
 1009 * The yield semantic for -deadline tasks is:
1010 *
 1011 *   get off the CPU until our next instance, with
1012 *   a new runtime. This is of little use now, since we
1013 *   don't have a bandwidth reclaiming mechanism. Anyway,
1014 *   bandwidth reclaiming is planned for the future, and
1015 *   yield_task_dl will indicate that some spare budget
 1016 *   is available for other task instances to use.
1017 */
1018static void yield_task_dl(struct rq *rq)
1019{
1020        struct task_struct *p = rq->curr;
1021
1022        /*
1023         * We make the task go to sleep until its current deadline by
1024         * forcing its runtime to zero. This way, update_curr_dl() stops
1025         * it and the bandwidth timer will wake it up and will give it
1026         * new scheduling parameters (thanks to dl_yielded=1).
1027         */
1028        if (p->dl.runtime > 0) {
1029                rq->curr->dl.dl_yielded = 1;
1030                p->dl.runtime = 0;
1031        }
1032        update_rq_clock(rq);
1033        update_curr_dl(rq);
1034        /*
1035         * Tell update_rq_clock() that we've just updated,
1036         * so we don't do microscopic update in schedule()
1037         * and double the fastpath cost.
1038         */
1039        rq_clock_skip_update(rq, true);
1040}
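
     /*
      * From userspace this path is reached via sched_yield(): a -deadline
      * task that finishes its job early can call it to give up whatever
      * runtime is left, sleeping until the replenishment timer fires at its
      * current absolute deadline and hands it a full runtime plus a
      * deadline one period further away.
      */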
1041
1042#ifdef CONFIG_SMP
1043
1044static int find_later_rq(struct task_struct *task);
1045
1046static int
1047select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1048{
1049        struct task_struct *curr;
1050        struct rq *rq;
1051
1052        if (sd_flag != SD_BALANCE_WAKE)
1053                goto out;
1054
1055        rq = cpu_rq(cpu);
1056
1057        rcu_read_lock();
1058        curr = READ_ONCE(rq->curr); /* unlocked access */
1059
1060        /*
1061         * If we are dealing with a -deadline task, we must
1062         * decide where to wake it up.
1063         * If it has a later deadline and the current task
1064         * on this rq can't move (provided the waking task
1065         * can!) we prefer to send it somewhere else. On the
1066         * other hand, if it has a shorter deadline, we
1067         * try to make it stay here, it might be important.
1068         */
1069        if (unlikely(dl_task(curr)) &&
1070            (curr->nr_cpus_allowed < 2 ||
1071             !dl_entity_preempt(&p->dl, &curr->dl)) &&
1072            (p->nr_cpus_allowed > 1)) {
1073                int target = find_later_rq(p);
1074
1075                if (target != -1 &&
1076                                (dl_time_before(p->dl.deadline,
1077                                        cpu_rq(target)->dl.earliest_dl.curr) ||
1078                                (cpu_rq(target)->dl.dl_nr_running == 0)))
1079                        cpu = target;
1080        }
1081        rcu_read_unlock();
1082
1083out:
1084        return cpu;
1085}
1086
1087static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1088{
1089        /*
1090         * Current can't be migrated, useless to reschedule,
1091         * let's hope p can move out.
1092         */
1093        if (rq->curr->nr_cpus_allowed == 1 ||
1094            cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1095                return;
1096
1097        /*
1098         * p is migratable, so let's not schedule it and
1099         * see if it is pushed or pulled somewhere else.
1100         */
1101        if (p->nr_cpus_allowed != 1 &&
1102            cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1103                return;
1104
1105        resched_curr(rq);
1106}
1107
1108#endif /* CONFIG_SMP */
1109
1110/*
1111 * Only called when both the current and waking task are -deadline
1112 * tasks.
1113 */
1114static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1115                                  int flags)
1116{
1117        if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1118                resched_curr(rq);
1119                return;
1120        }
1121
1122#ifdef CONFIG_SMP
1123        /*
1124         * In the unlikely case current and p have the same deadline
1125         * let us try to decide what's the best thing to do...
1126         */
1127        if ((p->dl.deadline == rq->curr->dl.deadline) &&
1128            !test_tsk_need_resched(rq->curr))
1129                check_preempt_equal_dl(rq, p);
1130#endif /* CONFIG_SMP */
1131}
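
     /*
      * E.g. if current owns an absolute deadline 80ms from now and p wakes
      * up with one 50ms from now, dl_entity_preempt() is true and current
      * is rescheduled; with equal deadlines the SMP branch above instead
      * checks whether either task could migrate elsewhere before forcing a
      * reschedule.
      */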
1132
1133#ifdef CONFIG_SCHED_HRTICK
1134static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1135{
1136        hrtick_start(rq, p->dl.runtime);
1137}
1138#else /* !CONFIG_SCHED_HRTICK */
1139static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1140{
1141}
1142#endif
1143
1144static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1145                                                   struct dl_rq *dl_rq)
1146{
1147        struct rb_node *left = dl_rq->rb_leftmost;
1148
1149        if (!left)
1150                return NULL;
1151
1152        return rb_entry(left, struct sched_dl_entity, rb_node);
1153}
1154
1155struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1156{
1157        struct sched_dl_entity *dl_se;
1158        struct task_struct *p;
1159        struct dl_rq *dl_rq;
1160
1161        dl_rq = &rq->dl;
1162
1163        if (need_pull_dl_task(rq, prev)) {
1164                /*
1165                 * This is OK, because current is on_cpu, which avoids it being
 1166                 * picked for load-balance; preemption/IRQs are still
 1167                 * disabled, avoiding further scheduler activity on it; and we're
1168                 * being very careful to re-start the picking loop.
1169                 */
1170                lockdep_unpin_lock(&rq->lock);
1171                pull_dl_task(rq);
1172                lockdep_pin_lock(&rq->lock);
1173                /*
 1174                 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1175                 * means a stop task can slip in, in which case we need to
1176                 * re-start task selection.
1177                 */
1178                if (rq->stop && task_on_rq_queued(rq->stop))
1179                        return RETRY_TASK;
1180        }
1181
1182        /*
1183         * When prev is DL, we may throttle it in put_prev_task().
1184         * So, we update time before we check for dl_nr_running.
1185         */
1186        if (prev->sched_class == &dl_sched_class)
1187                update_curr_dl(rq);
1188
1189        if (unlikely(!dl_rq->dl_nr_running))
1190                return NULL;
1191
1192        put_prev_task(rq, prev);
1193
1194        dl_se = pick_next_dl_entity(rq, dl_rq);
1195        BUG_ON(!dl_se);
1196
1197        p = dl_task_of(dl_se);
1198        p->se.exec_start = rq_clock_task(rq);
1199
1200        /* Running task will never be pushed. */
 1201        dequeue_pushable_dl_task(rq, p);
1202
1203        if (hrtick_enabled(rq))
1204                start_hrtick_dl(rq, p);
1205
1206        queue_push_tasks(rq);
1207
1208        return p;
1209}
1210
1211static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1212{
1213        update_curr_dl(rq);
1214
1215        if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1216                enqueue_pushable_dl_task(rq, p);
1217}
1218
1219static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1220{
1221        update_curr_dl(rq);
1222
1223        /*
1224         * Even when we have runtime, update_curr_dl() might have resulted in us
1225         * not being the leftmost task anymore. In that case NEED_RESCHED will
1226         * be set and schedule() will start a new hrtick for the next task.
1227         */
1228        if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1229            is_leftmost(p, &rq->dl))
1230                start_hrtick_dl(rq, p);
1231}
1232
1233static void task_fork_dl(struct task_struct *p)
1234{
1235        /*
1236         * SCHED_DEADLINE tasks cannot fork and this is achieved through
1237         * sched_fork()
1238         */
1239}
1240
1241static void task_dead_dl(struct task_struct *p)
1242{
1243        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1244
1245        /*
1246         * Since we are TASK_DEAD we won't slip out of the domain!
1247         */
1248        raw_spin_lock_irq(&dl_b->lock);
1249        /* XXX we should retain the bw until 0-lag */
1250        dl_b->total_bw -= p->dl.dl_bw;
1251        raw_spin_unlock_irq(&dl_b->lock);
1252}
1253
1254static void set_curr_task_dl(struct rq *rq)
1255{
1256        struct task_struct *p = rq->curr;
1257
1258        p->se.exec_start = rq_clock_task(rq);
1259
1260        /* You can't push away the running task */
1261        dequeue_pushable_dl_task(rq, p);
1262}
1263
1264#ifdef CONFIG_SMP
1265
1266/* Only try algorithms three times */
1267#define DL_MAX_TRIES 3
1268
1269static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1270{
1271        if (!task_running(rq, p) &&
1272            cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1273                return 1;
1274        return 0;
1275}
1276
1277/* Returns the second earliest -deadline task, NULL otherwise */
1278static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
1279{
1280        struct rb_node *next_node = rq->dl.rb_leftmost;
1281        struct sched_dl_entity *dl_se;
1282        struct task_struct *p = NULL;
1283
1284next_node:
1285        next_node = rb_next(next_node);
1286        if (next_node) {
1287                dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
1288                p = dl_task_of(dl_se);
1289
1290                if (pick_dl_task(rq, p, cpu))
1291                        return p;
1292
1293                goto next_node;
1294        }
1295
1296        return NULL;
1297}
1298
1299/*
 1300 * Return the earliest pushable task on this rq that is suitable to be
 1301 * executed on the given CPU, or NULL if there is none:
1302 */
1303static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1304{
1305        struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1306        struct task_struct *p = NULL;
1307
1308        if (!has_pushable_dl_tasks(rq))
1309                return NULL;
1310
1311next_node:
1312        if (next_node) {
1313                p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1314
1315                if (pick_dl_task(rq, p, cpu))
1316                        return p;
1317
1318                next_node = rb_next(next_node);
1319                goto next_node;
1320        }
1321
1322        return NULL;
1323}
1324
1325static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1326
1327static int find_later_rq(struct task_struct *task)
1328{
1329        struct sched_domain *sd;
1330        struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1331        int this_cpu = smp_processor_id();
1332        int best_cpu, cpu = task_cpu(task);
1333
1334        /* Make sure the mask is initialized first */
1335        if (unlikely(!later_mask))
1336                return -1;
1337
1338        if (task->nr_cpus_allowed == 1)
1339                return -1;
1340
1341        /*
1342         * We have to consider system topology and task affinity
1343         * first, then we can look for a suitable cpu.
1344         */
1345        best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1346                        task, later_mask);
1347        if (best_cpu == -1)
1348                return -1;
1349
1350        /*
1351         * If we are here, some target has been found,
1352         * the most suitable of which is cached in best_cpu.
 1353         * That is, among the runqueues whose current tasks
 1354         * have a later deadline than this task's, best_cpu is
 1355         * the one with the latest such deadline.
1356         *
1357         * Now we check how well this matches with task's
1358         * affinity and system topology.
1359         *
 1360         * The last cpu where the task ran is our first
1361         * guess, since it is most likely cache-hot there.
1362         */
1363        if (cpumask_test_cpu(cpu, later_mask))
1364                return cpu;
1365        /*
1366         * Check if this_cpu is to be skipped (i.e., it is
1367         * not in the mask) or not.
1368         */
1369        if (!cpumask_test_cpu(this_cpu, later_mask))
1370                this_cpu = -1;
1371
1372        rcu_read_lock();
1373        for_each_domain(cpu, sd) {
1374                if (sd->flags & SD_WAKE_AFFINE) {
1375
1376                        /*
1377                         * If possible, preempting this_cpu is
1378                         * cheaper than migrating.
1379                         */
1380                        if (this_cpu != -1 &&
1381                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1382                                rcu_read_unlock();
1383                                return this_cpu;
1384                        }
1385
1386                        /*
1387                         * Last chance: if best_cpu is valid and is
1388                         * in the mask, that becomes our choice.
1389                         */
1390                        if (best_cpu < nr_cpu_ids &&
1391                            cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1392                                rcu_read_unlock();
1393                                return best_cpu;
1394                        }
1395                }
1396        }
1397        rcu_read_unlock();
1398
1399        /*
1400         * At this point, all our guesses failed, we just return
 1401 * 'something', and let the caller sort things out.
1402         */
1403        if (this_cpu != -1)
1404                return this_cpu;
1405
1406        cpu = cpumask_any(later_mask);
1407        if (cpu < nr_cpu_ids)
1408                return cpu;
1409
1410        return -1;
1411}
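
     /*
      * Summing up the heuristics above, the preference order is: the cpu
      * the task last ran on, then this_cpu or best_cpu when they sit in a
      * wake-affine domain of that cpu, then this_cpu anyway, and finally
      * any remaining cpu in later_mask, falling back to -1 when the task
      * cannot migrate or no later rq exists.
      */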
1412
1413/* Locks the rq it finds */
1414static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1415{
1416        struct rq *later_rq = NULL;
1417        int tries;
1418        int cpu;
1419
1420        for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1421                cpu = find_later_rq(task);
1422
1423                if ((cpu == -1) || (cpu == rq->cpu))
1424                        break;
1425
1426                later_rq = cpu_rq(cpu);
1427
1428                if (later_rq->dl.dl_nr_running &&
1429                    !dl_time_before(task->dl.deadline,
1430                                        later_rq->dl.earliest_dl.curr)) {
1431                        /*
1432                         * Target rq has tasks of equal or earlier deadline,
1433                         * retrying does not release any lock and is unlikely
1434                         * to yield a different result.
1435                         */
1436                        later_rq = NULL;
1437                        break;
1438                }
1439
1440                /* Retry if something changed. */
1441                if (double_lock_balance(rq, later_rq)) {
1442                        if (unlikely(task_rq(task) != rq ||
1443                                     !cpumask_test_cpu(later_rq->cpu,
1444                                                       &task->cpus_allowed) ||
1445                                     task_running(rq, task) ||
1446                                     !task_on_rq_queued(task))) {
1447                                double_unlock_balance(rq, later_rq);
1448                                later_rq = NULL;
1449                                break;
1450                        }
1451                }
1452
1453                /*
1454                 * If the rq we found has no -deadline task, or
1455                 * its earliest one has a later deadline than our
1456                 * task, the rq is a good one.
1457                 */
1458                if (!later_rq->dl.dl_nr_running ||
1459                    dl_time_before(task->dl.deadline,
1460                                   later_rq->dl.earliest_dl.curr))
1461                        break;
1462
1463                /* Otherwise we try again. */
1464                double_unlock_balance(rq, later_rq);
1465                later_rq = NULL;
1466        }
1467
1468        return later_rq;
1469}
1470
1471static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1472{
1473        struct task_struct *p;
1474
1475        if (!has_pushable_dl_tasks(rq))
1476                return NULL;
1477
1478        p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1479                     struct task_struct, pushable_dl_tasks);
1480
1481        BUG_ON(rq->cpu != task_cpu(p));
1482        BUG_ON(task_current(rq, p));
1483        BUG_ON(p->nr_cpus_allowed <= 1);
1484
1485        BUG_ON(!task_on_rq_queued(p));
1486        BUG_ON(!dl_task(p));
1487
1488        return p;
1489}
1490
1491/*
 1492 * See if the non-running -deadline tasks on this rq
1493 * can be sent to some other CPU where they can preempt
1494 * and start executing.
1495 */
1496static int push_dl_task(struct rq *rq)
1497{
1498        struct task_struct *next_task;
1499        struct rq *later_rq;
1500        int ret = 0;
1501
1502        if (!rq->dl.overloaded)
1503                return 0;
1504
1505        next_task = pick_next_pushable_dl_task(rq);
1506        if (!next_task)
1507                return 0;
1508
1509retry:
1510        if (unlikely(next_task == rq->curr)) {
1511                WARN_ON(1);
1512                return 0;
1513        }
1514
1515        /*
1516         * If next_task preempts rq->curr, and rq->curr
1517         * can move away, it makes sense to just reschedule
1518         * without going further in pushing next_task.
1519         */
1520        if (dl_task(rq->curr) &&
1521            dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1522            rq->curr->nr_cpus_allowed > 1) {
1523                resched_curr(rq);
1524                return 0;
1525        }
1526
1527        /* We might release rq lock */
1528        get_task_struct(next_task);
1529
1530        /* Will lock the rq it'll find */
1531        later_rq = find_lock_later_rq(next_task, rq);
1532        if (!later_rq) {
1533                struct task_struct *task;
1534
1535                /*
1536                 * We must check all this again, since
1537                 * find_lock_later_rq releases rq->lock and it is
1538                 * then possible that next_task has migrated.
1539                 */
1540                task = pick_next_pushable_dl_task(rq);
1541                if (task_cpu(next_task) == rq->cpu && task == next_task) {
1542                        /*
1543                         * The task is still there. We don't try
1544                         * again, some other cpu will pull it when ready.
1545                         */
1546                        goto out;
1547                }
1548
1549                if (!task)
1550                        /* No more tasks */
1551                        goto out;
1552
1553                put_task_struct(next_task);
1554                next_task = task;
1555                goto retry;
1556        }
1557
1558        deactivate_task(rq, next_task, 0);
1559        set_task_cpu(next_task, later_rq->cpu);
1560        activate_task(later_rq, next_task, 0);
1561        ret = 1;
1562
1563        resched_curr(later_rq);
1564
1565        double_unlock_balance(rq, later_rq);
1566
1567out:
1568        put_task_struct(next_task);
1569
1570        return ret;
1571}
1572
1573static void push_dl_tasks(struct rq *rq)
1574{
1575        /* push_dl_task() will return true if it moved a -deadline task */
1576        while (push_dl_task(rq))
1577                ;
1578}
1579
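/*
 * Look at the runqueues flagged in the root domain's dlo_mask and pull
 * over a pushable task whose deadline is earlier than the earliest
 * -deadline task we are running here (or any pushable task at all, if
 * this rq has no -deadline tasks).
 */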
1580static void pull_dl_task(struct rq *this_rq)
1581{
1582        int this_cpu = this_rq->cpu, cpu;
1583        struct task_struct *p;
1584        bool resched = false;
1585        struct rq *src_rq;
1586        u64 dmin = LONG_MAX;
1587
1588        if (likely(!dl_overloaded(this_rq)))
1589                return;
1590
1591        /*
1592         * Match the barrier from dl_set_overload(); this guarantees that if we
1593         * see overloaded we must also see the dlo_mask bit.
1594         */
1595        smp_rmb();
1596
1597        for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1598                if (this_cpu == cpu)
1599                        continue;
1600
1601                src_rq = cpu_rq(cpu);
1602
1603                /*
1604                 * It looks racy, and it is! However, as in sched_rt.c,
1605                 * we are fine with this.
1606                 */
1607                if (this_rq->dl.dl_nr_running &&
1608                    dl_time_before(this_rq->dl.earliest_dl.curr,
1609                                   src_rq->dl.earliest_dl.next))
1610                        continue;
1611
1612                /* Might drop this_rq->lock */
1613                double_lock_balance(this_rq, src_rq);
1614
1615                /*
1616                 * If there are no more pullable tasks on the
1617                 * rq, we're done with it.
1618                 */
1619                if (src_rq->dl.dl_nr_running <= 1)
1620                        goto skip;
1621
1622                p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1623
1624                /*
1625                 * We found a task to be pulled if:
1626                 *  - it preempts our current task (if there is one),
1627                 *  - it will preempt the last one we pulled (if any).
1628                 */
1629                if (p && dl_time_before(p->dl.deadline, dmin) &&
1630                    (!this_rq->dl.dl_nr_running ||
1631                     dl_time_before(p->dl.deadline,
1632                                    this_rq->dl.earliest_dl.curr))) {
1633                        WARN_ON(p == src_rq->curr);
1634                        WARN_ON(!task_on_rq_queued(p));
1635
1636                        /*
1637                         * Don't pull p if its deadline is earlier than that of
1638                         * src_rq's current task: it will get to run there soon.
1639                         */
1640                        if (dl_time_before(p->dl.deadline,
1641                                           src_rq->curr->dl.deadline))
1642                                goto skip;
1643
1644                        resched = true;
1645
1646                        deactivate_task(src_rq, p, 0);
1647                        set_task_cpu(p, this_cpu);
1648                        activate_task(this_rq, p, 0);
1649                        dmin = p->dl.deadline;
1650
1651                        /* Is there any other task even earlier? */
1652                }
1653skip:
1654                double_unlock_balance(this_rq, src_rq);
1655        }
1656
1657        if (resched)
1658                resched_curr(this_rq);
1659}
1660
1661/*
1662 * Since the task is not running and a reschedule is not going to happen
1663 * anytime soon on its runqueue, we try pushing it away now.
1664 */
1665static void task_woken_dl(struct rq *rq, struct task_struct *p)
1666{
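        /*
         * Try to push p somewhere else if it is not going to run here
         * soon: p can migrate, rq->curr is a -deadline task that is not
         * already due to reschedule, and either p does not preempt
         * rq->curr or rq->curr is pinned here and cannot move out of
         * p's way.
         */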
1667        if (!task_running(rq, p) &&
1668            !test_tsk_need_resched(rq->curr) &&
1669            p->nr_cpus_allowed > 1 &&
1670            dl_task(rq->curr) &&
1671            (rq->curr->nr_cpus_allowed < 2 ||
1672             !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1673                push_dl_tasks(rq);
1674        }
1675}
1676
1677static void set_cpus_allowed_dl(struct task_struct *p,
1678                                const struct cpumask *new_mask)
1679{
1680        struct root_domain *src_rd;
1681        struct rq *rq;
1682
1683        BUG_ON(!dl_task(p));
1684
1685        rq = task_rq(p);
1686        src_rd = rq->rd;
1687        /*
1688         * Migrating a SCHED_DEADLINE task between exclusive
1689         * cpusets (different root_domains) entails a bandwidth
1690         * update. We already made space for us in the destination
1691         * domain (see cpuset_can_attach()).
1692         */
1693        if (!cpumask_intersects(src_rd->span, new_mask)) {
1694                struct dl_bw *src_dl_b;
1695
1696                src_dl_b = dl_bw_of(cpu_of(rq));
1697                /*
1698                 * We now free resources of the root_domain we are migrating
1699                 * off. In the worst case, sched_setattr() may temporarily fail
1700                 * until we complete the update.
1701                 */
1702                raw_spin_lock(&src_dl_b->lock);
1703                __dl_clear(src_dl_b, p->dl.dl_bw);
1704                raw_spin_unlock(&src_dl_b->lock);
1705        }
1706
1707        set_cpus_allowed_common(p, new_mask);
1708}
1709
1710/* Assumes rq->lock is held */
1711static void rq_online_dl(struct rq *rq)
1712{
1713        if (rq->dl.overloaded)
1714                dl_set_overload(rq);
1715
1716        cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1717        if (rq->dl.dl_nr_running > 0)
1718                cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1719}
1720
1721/* Assumes rq->lock is held */
1722static void rq_offline_dl(struct rq *rq)
1723{
1724        if (rq->dl.overloaded)
1725                dl_clear_overload(rq);
1726
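        /* Remove this CPU from the cpudl heap and from the free-CPU mask. */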
1727        cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1728        cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1729}
1730
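/*
 * Allocate the per-CPU cpumasks used when searching for a CPU with a
 * later deadline to push a -deadline task to.
 */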
1731void __init init_sched_dl_class(void)
1732{
1733        unsigned int i;
1734
1735        for_each_possible_cpu(i)
1736                zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1737                                        GFP_KERNEL, cpu_to_node(i));
1738}
1739
1740#endif /* CONFIG_SMP */
1741
1742static void switched_from_dl(struct rq *rq, struct task_struct *p)
1743{
1744        /*
1745         * Start the deadline timer; if we switch back to dl before this we'll
1746         * continue consuming our current CBS slice. If we stay outside of
1747         * SCHED_DEADLINE until the deadline passes, the timer will reset the
1748         * task.
1749         */
1750        if (!start_dl_timer(p))
1751                __dl_clear_params(p);
1752
1753        /*
1754         * Since this might be the only -deadline task on the rq,
1755         * this is the right place to try to pull some other one
1756         * from an overloaded cpu, if any.
1757         */
1758        if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1759                return;
1760
1761        queue_pull_task(rq);
1762}
1763
1764/*
1765 * When switching to -deadline, we may overload the rq, then
1766 * we try to push someone off, if possible.
1767 */
1768static void switched_to_dl(struct rq *rq, struct task_struct *p)
1769{
1770        if (task_on_rq_queued(p) && rq->curr != p) {
1771#ifdef CONFIG_SMP
1772                if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
1773                        queue_push_tasks(rq);
1774#else
1775                if (dl_task(rq->curr))
1776                        check_preempt_curr_dl(rq, p, 0);
1777                else
1778                        resched_curr(rq);
1779#endif
1780        }
1781}
1782
1783/*
1784 * If the scheduling parameters of a -deadline task changed,
1785 * a push or pull operation might be needed.
1786 */
1787static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1788                            int oldprio)
1789{
1790        if (task_on_rq_queued(p) || rq->curr == p) {
1791#ifdef CONFIG_SMP
1792                /*
1793                 * This might be too much, but unfortunately
1794                 * we don't have the old deadline value, and
1795                 * we can't tell whether the task increased
1796                 * or lowered its priority, so...
1797                 */
1798                if (!rq->dl.overloaded)
1799                        queue_pull_task(rq);
1800
1801                /*
1802                 * If we now have an earlier deadline task than p,
1803                 * then reschedule, provided p is still on this
1804                 * runqueue.
1805                 */
1806                if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1807                        resched_curr(rq);
1808#else
1809                /*
1810                 * Again, we don't know if p has an earlier
1811                 * or later deadline, so let's blindly set a
1812                 * (maybe not needed) rescheduling point.
1813                 */
1814                resched_curr(rq);
1815#endif /* CONFIG_SMP */
1816        } else
1817                switched_to_dl(rq, p);
1818}
1819
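/*
 * All the scheduling class hooks the core scheduler uses to deal with
 * SCHED_DEADLINE tasks.
 */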
1820const struct sched_class dl_sched_class = {
1821        .next                   = &rt_sched_class,
1822        .enqueue_task           = enqueue_task_dl,
1823        .dequeue_task           = dequeue_task_dl,
1824        .yield_task             = yield_task_dl,
1825
1826        .check_preempt_curr     = check_preempt_curr_dl,
1827
1828        .pick_next_task         = pick_next_task_dl,
1829        .put_prev_task          = put_prev_task_dl,
1830
1831#ifdef CONFIG_SMP
1832        .select_task_rq         = select_task_rq_dl,
1833        .set_cpus_allowed       = set_cpus_allowed_dl,
1834        .rq_online              = rq_online_dl,
1835        .rq_offline             = rq_offline_dl,
1836        .task_woken             = task_woken_dl,
1837#endif
1838
1839        .set_curr_task          = set_curr_task_dl,
1840        .task_tick              = task_tick_dl,
1841        .task_fork              = task_fork_dl,
1842        .task_dead              = task_dead_dl,
1843
1844        .prio_changed           = prio_changed_dl,
1845        .switched_from          = switched_from_dl,
1846        .switched_to            = switched_to_dl,
1847
1848        .update_curr            = update_curr_dl,
1849};
1850
1851#ifdef CONFIG_SCHED_DEBUG
1852extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1853
1854void print_dl_stats(struct seq_file *m, int cpu)
1855{
1856        print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1857}
1858#endif /* CONFIG_SCHED_DEBUG */
1859