linux/kernel/sched/deadline.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Deadline Scheduling Class (SCHED_DEADLINE)
   4 *
   5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
   6 *
   7 * Tasks that periodically execute their instances for less than their
   8 * runtime won't miss any of their deadlines.
   9 * Tasks that are not periodic or sporadic or that try to execute more
  10 * than their reserved bandwidth will be slowed down (and may potentially
  11 * miss some of their deadlines), and won't affect any other task.
  12 *
  13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
  14 *                    Juri Lelli <juri.lelli@gmail.com>,
  15 *                    Michael Trimarchi <michael@amarulasolutions.com>,
  16 *                    Fabio Checconi <fchecconi@gmail.com>
  17 */
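/*
 * Illustrative userspace sketch (example values only, not part of the
 * original file): a task typically asks for a SCHED_DEADLINE reservation
 * with sched_setattr(), e.g. 10ms of runtime every 100ms, with the
 * relative deadline equal to the period:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *	syscall(SYS_sched_setattr, 0, &attr, 0);
 *
 * All three parameters are in nanoseconds. Such a task is throttled by
 * the CBS below if it tries to run for more than 10ms per 100ms period.
 */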
  18#include "sched.h"
  19
  20struct dl_bandwidth def_dl_bandwidth;
  21
  22static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  23{
  24        return container_of(dl_se, struct task_struct, dl);
  25}
  26
  27static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  28{
  29        return container_of(dl_rq, struct rq, dl);
  30}
  31
  32static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  33{
  34        struct task_struct *p = dl_task_of(dl_se);
  35        struct rq *rq = task_rq(p);
  36
  37        return &rq->dl;
  38}
  39
  40static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  41{
  42        return !RB_EMPTY_NODE(&dl_se->rb_node);
  43}
  44
  45#ifdef CONFIG_SMP
  46static inline struct dl_bw *dl_bw_of(int i)
  47{
  48        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  49                         "sched RCU must be held");
  50        return &cpu_rq(i)->rd->dl_bw;
  51}
  52
  53static inline int dl_bw_cpus(int i)
  54{
  55        struct root_domain *rd = cpu_rq(i)->rd;
  56        int cpus = 0;
  57
  58        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  59                         "sched RCU must be held");
  60        for_each_cpu_and(i, rd->span, cpu_active_mask)
  61                cpus++;
  62
  63        return cpus;
  64}
  65#else
  66static inline struct dl_bw *dl_bw_of(int i)
  67{
  68        return &cpu_rq(i)->dl.dl_bw;
  69}
  70
  71static inline int dl_bw_cpus(int i)
  72{
  73        return 1;
  74}
  75#endif
  76
  77static inline
  78void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
  79{
  80        u64 old = dl_rq->running_bw;
  81
  82        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
  83        dl_rq->running_bw += dl_bw;
  84        SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
  85        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
  86        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
  87        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
  88}
  89
  90static inline
  91void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
  92{
  93        u64 old = dl_rq->running_bw;
  94
  95        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
  96        dl_rq->running_bw -= dl_bw;
  97        SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
  98        if (dl_rq->running_bw > old)
  99                dl_rq->running_bw = 0;
 100        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
 101        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
 102}
 103
 104static inline
 105void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 106{
 107        u64 old = dl_rq->this_bw;
 108
 109        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 110        dl_rq->this_bw += dl_bw;
 111        SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
 112}
 113
 114static inline
 115void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 116{
 117        u64 old = dl_rq->this_bw;
 118
 119        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 120        dl_rq->this_bw -= dl_bw;
 121        SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
 122        if (dl_rq->this_bw > old)
 123                dl_rq->this_bw = 0;
 124        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
 125}
 126
 127static inline
 128void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 129{
 130        if (!dl_entity_is_special(dl_se))
 131                __add_rq_bw(dl_se->dl_bw, dl_rq);
 132}
 133
 134static inline
 135void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 136{
 137        if (!dl_entity_is_special(dl_se))
 138                __sub_rq_bw(dl_se->dl_bw, dl_rq);
 139}
 140
 141static inline
 142void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 143{
 144        if (!dl_entity_is_special(dl_se))
 145                __add_running_bw(dl_se->dl_bw, dl_rq);
 146}
 147
 148static inline
 149void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 150{
 151        if (!dl_entity_is_special(dl_se))
 152                __sub_running_bw(dl_se->dl_bw, dl_rq);
 153}
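/*
 * Note on units (an illustrative aside, not from the original source):
 * dl_bw, running_bw and this_bw are utilizations in fixed point, i.e.
 * (runtime << BW_SHIFT) / period with BW_SHIFT = 20. For example, a
 * 10ms / 100ms reservation contributes
 *
 *	dl_bw = (10 << 20) / 100 ~= 104857 ~= 0.1 * 2^20
 *
 * to this_bw while it hangs off this runqueue and to running_bw while
 * it is "active contending", so a runqueue with running_bw == 524288 is
 * using half a CPU worth of deadline bandwidth.
 */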
 154
 155void dl_change_utilization(struct task_struct *p, u64 new_bw)
 156{
 157        struct rq *rq;
 158
 159        BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
 160
 161        if (task_on_rq_queued(p))
 162                return;
 163
 164        rq = task_rq(p);
 165        if (p->dl.dl_non_contending) {
 166                sub_running_bw(&p->dl, &rq->dl);
 167                p->dl.dl_non_contending = 0;
 168                /*
 169                 * If the timer handler is currently running and the
 170                 * timer cannot be cancelled, inactive_task_timer()
  171                 * will see that dl_non_contending is not set, and
 172                 * will not touch the rq's active utilization,
 173                 * so we are still safe.
 174                 */
 175                if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
 176                        put_task_struct(p);
 177        }
 178        __sub_rq_bw(p->dl.dl_bw, &rq->dl);
 179        __add_rq_bw(new_bw, &rq->dl);
 180}
 181
 182/*
 183 * The utilization of a task cannot be immediately removed from
 184 * the rq active utilization (running_bw) when the task blocks.
 185 * Instead, we have to wait for the so called "0-lag time".
 186 *
 187 * If a task blocks before the "0-lag time", a timer (the inactive
 188 * timer) is armed, and running_bw is decreased when the timer
 189 * fires.
 190 *
 191 * If the task wakes up again before the inactive timer fires,
 192 * the timer is cancelled, whereas if the task wakes up after the
 193 * inactive timer fired (and running_bw has been decreased) the
 194 * task's utilization has to be added to running_bw again.
 195 * A flag in the deadline scheduling entity (dl_non_contending)
 196 * is used to avoid race conditions between the inactive timer handler
 197 * and task wakeups.
 198 *
 199 * The following diagram shows how running_bw is updated. A task is
 200 * "ACTIVE" when its utilization contributes to running_bw; an
 201 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 202 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 203 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 204 * time already passed, which does not contribute to running_bw anymore.
 205 *                              +------------------+
 206 *             wakeup           |    ACTIVE        |
 207 *          +------------------>+   contending     |
 208 *          | add_running_bw    |                  |
 209 *          |                   +----+------+------+
 210 *          |                        |      ^
 211 *          |                dequeue |      |
 212 * +--------+-------+                |      |
 213 * |                |   t >= 0-lag   |      | wakeup
 214 * |    INACTIVE    |<---------------+      |
 215 * |                | sub_running_bw |      |
 216 * +--------+-------+                |      |
 217 *          ^                        |      |
 218 *          |              t < 0-lag |      |
 219 *          |                        |      |
 220 *          |                        V      |
 221 *          |                   +----+------+------+
 222 *          | sub_running_bw    |    ACTIVE        |
 223 *          +-------------------+                  |
 224 *            inactive timer    |  non contending  |
 225 *            fired             +------------------+
 226 *
 227 * The task_non_contending() function is invoked when a task
 228 * blocks, and checks if the 0-lag time already passed or
 229 * not (in the first case, it directly updates running_bw;
 230 * in the second case, it arms the inactive timer).
 231 *
 232 * The task_contending() function is invoked when a task wakes
 233 * up, and checks if the task is still in the "ACTIVE non contending"
 234 * state or not (in the second case, it updates running_bw).
 235 */
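/*
 * Worked example (illustrative numbers): let dl_runtime = 10ms and
 * dl_period = 100ms, and suppose the task blocks at t = 200ms with 4ms
 * of runtime left and an absolute deadline at t = 250ms. Its 0-lag time
 * is
 *
 *	250ms - (4ms * 100ms / 10ms) = 210ms,
 *
 * still in the future, so the inactive timer is armed to fire 10ms
 * later and running_bw keeps the task's bandwidth until then. Had 6ms
 * of runtime been left, the 0-lag time (250ms - 60ms = 190ms) would
 * already have passed and running_bw would be decreased right away.
 */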
 236static void task_non_contending(struct task_struct *p)
 237{
 238        struct sched_dl_entity *dl_se = &p->dl;
 239        struct hrtimer *timer = &dl_se->inactive_timer;
 240        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 241        struct rq *rq = rq_of_dl_rq(dl_rq);
 242        s64 zerolag_time;
 243
 244        /*
 245         * If this is a non-deadline task that has been boosted,
 246         * do nothing
 247         */
 248        if (dl_se->dl_runtime == 0)
 249                return;
 250
 251        if (dl_entity_is_special(dl_se))
 252                return;
 253
 254        WARN_ON(hrtimer_active(&dl_se->inactive_timer));
 255        WARN_ON(dl_se->dl_non_contending);
 256
 257        zerolag_time = dl_se->deadline -
 258                 div64_long((dl_se->runtime * dl_se->dl_period),
 259                        dl_se->dl_runtime);
 260
 261        /*
 262         * Using relative times instead of the absolute "0-lag time"
  263                 * simplifies the code
 264         */
 265        zerolag_time -= rq_clock(rq);
 266
 267        /*
 268         * If the "0-lag time" already passed, decrease the active
 269         * utilization now, instead of starting a timer
 270         */
 271        if (zerolag_time < 0) {
 272                if (dl_task(p))
 273                        sub_running_bw(dl_se, dl_rq);
 274                if (!dl_task(p) || p->state == TASK_DEAD) {
 275                        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
 276
 277                        if (p->state == TASK_DEAD)
 278                                sub_rq_bw(&p->dl, &rq->dl);
 279                        raw_spin_lock(&dl_b->lock);
 280                        __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 281                        __dl_clear_params(p);
 282                        raw_spin_unlock(&dl_b->lock);
 283                }
 284
 285                return;
 286        }
 287
 288        dl_se->dl_non_contending = 1;
 289        get_task_struct(p);
 290        hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
 291}
 292
 293static void task_contending(struct sched_dl_entity *dl_se, int flags)
 294{
 295        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 296
 297        /*
 298         * If this is a non-deadline task that has been boosted,
 299         * do nothing
 300         */
 301        if (dl_se->dl_runtime == 0)
 302                return;
 303
 304        if (flags & ENQUEUE_MIGRATED)
 305                add_rq_bw(dl_se, dl_rq);
 306
 307        if (dl_se->dl_non_contending) {
 308                dl_se->dl_non_contending = 0;
 309                /*
 310                 * If the timer handler is currently running and the
 311                 * timer cannot be cancelled, inactive_task_timer()
  312                 * will see that dl_non_contending is not set, and
 313                 * will not touch the rq's active utilization,
 314                 * so we are still safe.
 315                 */
 316                if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
 317                        put_task_struct(dl_task_of(dl_se));
 318        } else {
 319                /*
 320                 * Since "dl_non_contending" is not set, the
 321                 * task's utilization has already been removed from
  322                 * active utilization (either when the task blocked,
  323                 * or when the "inactive timer" fired).
 324                 * So, add it back.
 325                 */
 326                add_running_bw(dl_se, dl_rq);
 327        }
 328}
 329
 330static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 331{
 332        struct sched_dl_entity *dl_se = &p->dl;
 333
 334        return dl_rq->root.rb_leftmost == &dl_se->rb_node;
 335}
 336
 337void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
 338{
 339        raw_spin_lock_init(&dl_b->dl_runtime_lock);
 340        dl_b->dl_period = period;
 341        dl_b->dl_runtime = runtime;
 342}
 343
 344void init_dl_bw(struct dl_bw *dl_b)
 345{
 346        raw_spin_lock_init(&dl_b->lock);
 347        raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
 348        if (global_rt_runtime() == RUNTIME_INF)
 349                dl_b->bw = -1;
 350        else
 351                dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
 352        raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
 353        dl_b->total_bw = 0;
 354}
 355
 356void init_dl_rq(struct dl_rq *dl_rq)
 357{
 358        dl_rq->root = RB_ROOT_CACHED;
 359
 360#ifdef CONFIG_SMP
 361        /* zero means no -deadline tasks */
 362        dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
 363
 364        dl_rq->dl_nr_migratory = 0;
 365        dl_rq->overloaded = 0;
 366        dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
 367#else
 368        init_dl_bw(&dl_rq->dl_bw);
 369#endif
 370
 371        dl_rq->running_bw = 0;
 372        dl_rq->this_bw = 0;
 373        init_dl_rq_bw_ratio(dl_rq);
 374}
 375
 376#ifdef CONFIG_SMP
 377
 378static inline int dl_overloaded(struct rq *rq)
 379{
 380        return atomic_read(&rq->rd->dlo_count);
 381}
 382
 383static inline void dl_set_overload(struct rq *rq)
 384{
 385        if (!rq->online)
 386                return;
 387
 388        cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
 389        /*
 390         * Must be visible before the overload count is
 391         * set (as in sched_rt.c).
 392         *
 393         * Matched by the barrier in pull_dl_task().
 394         */
 395        smp_wmb();
 396        atomic_inc(&rq->rd->dlo_count);
 397}
 398
 399static inline void dl_clear_overload(struct rq *rq)
 400{
 401        if (!rq->online)
 402                return;
 403
 404        atomic_dec(&rq->rd->dlo_count);
 405        cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 406}
 407
 408static void update_dl_migration(struct dl_rq *dl_rq)
 409{
 410        if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 411                if (!dl_rq->overloaded) {
 412                        dl_set_overload(rq_of_dl_rq(dl_rq));
 413                        dl_rq->overloaded = 1;
 414                }
 415        } else if (dl_rq->overloaded) {
 416                dl_clear_overload(rq_of_dl_rq(dl_rq));
 417                dl_rq->overloaded = 0;
 418        }
 419}
 420
 421static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 422{
 423        struct task_struct *p = dl_task_of(dl_se);
 424
 425        if (p->nr_cpus_allowed > 1)
 426                dl_rq->dl_nr_migratory++;
 427
 428        update_dl_migration(dl_rq);
 429}
 430
 431static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 432{
 433        struct task_struct *p = dl_task_of(dl_se);
 434
 435        if (p->nr_cpus_allowed > 1)
 436                dl_rq->dl_nr_migratory--;
 437
 438        update_dl_migration(dl_rq);
 439}
 440
 441/*
 442 * The list of pushable -deadline task is not a plist, like in
 443 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 444 */
 445static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 446{
 447        struct dl_rq *dl_rq = &rq->dl;
 448        struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
 449        struct rb_node *parent = NULL;
 450        struct task_struct *entry;
 451        bool leftmost = true;
 452
 453        BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 454
 455        while (*link) {
 456                parent = *link;
 457                entry = rb_entry(parent, struct task_struct,
 458                                 pushable_dl_tasks);
 459                if (dl_entity_preempt(&p->dl, &entry->dl))
 460                        link = &parent->rb_left;
 461                else {
 462                        link = &parent->rb_right;
 463                        leftmost = false;
 464                }
 465        }
 466
 467        if (leftmost)
 468                dl_rq->earliest_dl.next = p->dl.deadline;
 469
 470        rb_link_node(&p->pushable_dl_tasks, parent, link);
 471        rb_insert_color_cached(&p->pushable_dl_tasks,
 472                               &dl_rq->pushable_dl_tasks_root, leftmost);
 473}
 474
 475static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 476{
 477        struct dl_rq *dl_rq = &rq->dl;
 478
 479        if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 480                return;
 481
 482        if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
 483                struct rb_node *next_node;
 484
 485                next_node = rb_next(&p->pushable_dl_tasks);
 486                if (next_node) {
 487                        dl_rq->earliest_dl.next = rb_entry(next_node,
 488                                struct task_struct, pushable_dl_tasks)->dl.deadline;
 489                }
 490        }
 491
 492        rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 493        RB_CLEAR_NODE(&p->pushable_dl_tasks);
 494}
 495
 496static inline int has_pushable_dl_tasks(struct rq *rq)
 497{
 498        return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
 499}
 500
 501static int push_dl_task(struct rq *rq);
 502
 503static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 504{
 505        return dl_task(prev);
 506}
 507
 508static DEFINE_PER_CPU(struct callback_head, dl_push_head);
 509static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
 510
 511static void push_dl_tasks(struct rq *);
 512static void pull_dl_task(struct rq *);
 513
 514static inline void deadline_queue_push_tasks(struct rq *rq)
 515{
 516        if (!has_pushable_dl_tasks(rq))
 517                return;
 518
 519        queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
 520}
 521
 522static inline void deadline_queue_pull_task(struct rq *rq)
 523{
 524        queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
 525}
 526
 527static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 528
 529static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 530{
 531        struct rq *later_rq = NULL;
 532
 533        later_rq = find_lock_later_rq(p, rq);
 534        if (!later_rq) {
 535                int cpu;
 536
 537                /*
 538                 * If we cannot preempt any rq, fall back to pick any
 539                 * online CPU:
 540                 */
 541                cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
 542                if (cpu >= nr_cpu_ids) {
 543                        /*
 544                         * Failed to find any suitable CPU.
 545                         * The task will never come back!
 546                         */
 547                        BUG_ON(dl_bandwidth_enabled());
 548
 549                        /*
 550                         * If admission control is disabled we
 551                         * try a little harder to let the task
 552                         * run.
 553                         */
 554                        cpu = cpumask_any(cpu_active_mask);
 555                }
 556                later_rq = cpu_rq(cpu);
 557                double_lock_balance(rq, later_rq);
 558        }
 559
 560        set_task_cpu(p, later_rq->cpu);
 561        double_unlock_balance(later_rq, rq);
 562
 563        return later_rq;
 564}
 565
 566#else
 567
 568static inline
 569void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 570{
 571}
 572
 573static inline
 574void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 575{
 576}
 577
 578static inline
 579void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 580{
 581}
 582
 583static inline
 584void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 585{
 586}
 587
 588static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 589{
 590        return false;
 591}
 592
 593static inline void pull_dl_task(struct rq *rq)
 594{
 595}
 596
 597static inline void deadline_queue_push_tasks(struct rq *rq)
 598{
 599}
 600
 601static inline void deadline_queue_pull_task(struct rq *rq)
 602{
 603}
 604#endif /* CONFIG_SMP */
 605
 606static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 607static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 608static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
 609
 610/*
 611 * We are being explicitly informed that a new instance is starting,
 612 * and this means that:
 613 *  - the absolute deadline of the entity has to be placed at
 614 *    current time + relative deadline;
 615 *  - the runtime of the entity has to be set to the maximum value.
 616 *
  617 * The capability of specifying such an event is useful whenever a -deadline
  618 * entity wants to (try to!) synchronize its behaviour with that of the
  619 * scheduler, and to (try to!) reconcile itself with its own scheduling
 620 * parameters.
 621 */
 622static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
 623{
 624        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 625        struct rq *rq = rq_of_dl_rq(dl_rq);
 626
 627        WARN_ON(dl_se->dl_boosted);
 628        WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
 629
 630        /*
 631         * We are racing with the deadline timer. So, do nothing because
 632         * the deadline timer handler will take care of properly recharging
 633         * the runtime and postponing the deadline
 634         */
 635        if (dl_se->dl_throttled)
 636                return;
 637
 638        /*
 639         * We use the regular wall clock time to set deadlines in the
 640         * future; in fact, we must consider execution overheads (time
 641         * spent on hardirq context, etc.).
 642         */
 643        dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
 644        dl_se->runtime = dl_se->dl_runtime;
 645}
 646
 647/*
 648 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
  649 * possibility of an entity lasting more than what it declared, and thus
 650 * exhausting its runtime.
 651 *
 652 * Here we are interested in making runtime overrun possible, but we do
  653 * not want an entity which is misbehaving to affect the scheduling of all
 654 * other entities.
 655 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 656 * is used, in order to confine each entity within its own bandwidth.
 657 *
 658 * This function deals exactly with that, and ensures that when the runtime
  659 * of an entity is replenished, its deadline is also postponed. That ensures
  660 * the overrunning entity can't interfere with other entities in the system and
  661 * can't make them miss their deadlines. Reasons why this kind of overrun
  662 * could happen are, typically, an entity voluntarily trying to exceed its
  663 * runtime, or just having underestimated it during sched_setattr().
 664 */
 665static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 666                                struct sched_dl_entity *pi_se)
 667{
 668        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 669        struct rq *rq = rq_of_dl_rq(dl_rq);
 670
 671        BUG_ON(pi_se->dl_runtime <= 0);
 672
 673        /*
 674         * This could be the case for a !-dl task that is boosted.
 675         * Just go with full inherited parameters.
 676         */
 677        if (dl_se->dl_deadline == 0) {
 678                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 679                dl_se->runtime = pi_se->dl_runtime;
 680        }
 681
 682        if (dl_se->dl_yielded && dl_se->runtime > 0)
 683                dl_se->runtime = 0;
 684
 685        /*
 686         * We keep moving the deadline away until we get some
 687         * available runtime for the entity. This ensures correct
 688         * handling of situations where the runtime overrun is
  689         * arbitrarily large.
 690         */
 691        while (dl_se->runtime <= 0) {
 692                dl_se->deadline += pi_se->dl_period;
 693                dl_se->runtime += pi_se->dl_runtime;
 694        }
 695
 696        /*
 697         * At this point, the deadline really should be "in
 698         * the future" with respect to rq->clock. If it's
 699         * not, we are, for some reason, lagging too much!
  700         * Anyway, after having warned userspace about that,
  701         * we still try to keep things running by
 702         * resetting the deadline and the budget of the
 703         * entity.
 704         */
 705        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 706                printk_deferred_once("sched: DL replenish lagged too much\n");
 707                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 708                dl_se->runtime = pi_se->dl_runtime;
 709        }
 710
 711        if (dl_se->dl_yielded)
 712                dl_se->dl_yielded = 0;
 713        if (dl_se->dl_throttled)
 714                dl_se->dl_throttled = 0;
 715}
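/*
 * Worked example (illustrative numbers): with dl_runtime = 10ms and
 * dl_period = 100ms, suppose the entity gets here with runtime = -25ms
 * (it overran) and an absolute deadline at t = 300ms. The loop above
 * iterates three times:
 *
 *	runtime : -25ms -> -15ms ->  -5ms ->  +5ms
 *	deadline: 300ms -> 400ms -> 500ms -> 600ms
 *
 * i.e. the 25ms of overrun is paid back by postponing the deadline by
 * three periods, so the other entities' guarantees are preserved.
 */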
 716
 717/*
 718 * Here we check if --at time t-- an entity (which is probably being
 719 * [re]activated or, in general, enqueued) can use its remaining runtime
 720 * and its current deadline _without_ exceeding the bandwidth it is
 721 * assigned (function returns true if it can't). We are in fact applying
 722 * one of the CBS rules: when a task wakes up, if the residual runtime
 723 * over residual deadline fits within the allocated bandwidth, then we
 724 * can keep the current (absolute) deadline and residual budget without
 725 * disrupting the schedulability of the system. Otherwise, we should
 726 * refill the runtime and set the deadline a period in the future,
 727 * because keeping the current (absolute) deadline of the task would
 728 * result in breaking guarantees promised to other tasks (refer to
  729 * Documentation/scheduler/sched-deadline.txt for more information).
 730 *
 731 * This function returns true if:
 732 *
 733 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 734 *
 735 * IOW we can't recycle current parameters.
 736 *
 737 * Notice that the bandwidth check is done against the deadline. For
  738 * tasks with deadline equal to period this is the same as using
 739 * dl_period instead of dl_deadline in the equation above.
 740 */
 741static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 742                               struct sched_dl_entity *pi_se, u64 t)
 743{
 744        u64 left, right;
 745
 746        /*
 747         * left and right are the two sides of the equation above,
 748         * after a bit of shuffling to use multiplications instead
 749         * of divisions.
 750         *
 751         * Note that none of the time values involved in the two
 752         * multiplications are absolute: dl_deadline and dl_runtime
 753         * are the relative deadline and the maximum runtime of each
 754         * instance, runtime is the runtime left for the last instance
 755         * and (deadline - t), since t is rq->clock, is the time left
 756         * to the (absolute) deadline. Even if overflowing the u64 type
 757         * is very unlikely to occur in both cases, here we scale down
 758         * as we want to avoid that risk at all. Scaling down by 10
 759         * means that we reduce granularity to 1us. We are fine with it,
 760         * since this is only a true/false check and, anyway, thinking
 761         * of anything below microseconds resolution is actually fiction
 762         * (but still we want to give the user that illusion >;).
 763         */
 764        left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 765        right = ((dl_se->deadline - t) >> DL_SCALE) *
 766                (pi_se->dl_runtime >> DL_SCALE);
 767
 768        return dl_time_before(right, left);
 769}
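/*
 * Worked example (illustrative numbers): take dl_runtime = 10ms and
 * dl_deadline = 100ms, i.e. a bandwidth of 0.1. A task waking at time t
 * with 6ms of runtime left and 30ms to its current absolute deadline
 * has a residual density of
 *
 *	6ms / 30ms = 0.2 > 0.1  -> overflow: refill and push the deadline,
 *
 * whereas with 80ms left to the deadline 6ms / 80ms = 0.075 <= 0.1 and
 * the current runtime and deadline can be kept. The code above checks
 * the equivalent cross-multiplied form
 * dl_deadline * runtime vs (deadline - t) * dl_runtime, scaled down by
 * DL_SCALE so the products stay well within 64 bits.
 */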
 770
 771/*
  772 * Revised wakeup rule [1]: For self-suspending tasks, rather than
  773 * re-initializing the task's runtime and deadline, the revised wakeup
  774 * rule adjusts the task's runtime to prevent the task from overrunning
  775 * its density.
 776 *
 777 * Reasoning: a task may overrun the density if:
 778 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 779 *
 780 * Therefore, runtime can be adjusted to:
 781 *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
 782 *
  783 * so that the task runs at the maximum density it can
  784 * use without breaking any rule.
 785 *
 786 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 787 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 788 */
 789static void
 790update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
 791{
 792        u64 laxity = dl_se->deadline - rq_clock(rq);
 793
 794        /*
 795         * If the task has deadline < period, and the deadline is in the past,
 796         * it should already be throttled before this check.
 797         *
 798         * See update_dl_entity() comments for further details.
 799         */
 800        WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
 801
 802        dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
 803}
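/*
 * Worked example (illustrative numbers): a constrained task with
 * dl_runtime = 10ms and dl_deadline = 50ms has dl_density ~= 0.2 (in
 * the BW_SHIFT fixed point). If it wakes up with 20ms of laxity left to
 * its current deadline, its runtime is clamped to
 *
 *	0.2 * 20ms = 4ms,
 *
 * so that even running flat out until the deadline it cannot exceed the
 * density it declared.
 */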
 804
 805/*
 806 * Regarding the deadline, a task with implicit deadline has a relative
 807 * deadline == relative period. A task with constrained deadline has a
 808 * relative deadline <= relative period.
 809 *
 810 * We support constrained deadline tasks. However, there are some restrictions
 811 * applied only for tasks which do not have an implicit deadline. See
 812 * update_dl_entity() to know more about such restrictions.
 813 *
  814 * dl_is_implicit() returns true if the task has an implicit deadline.
 815 */
 816static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
 817{
 818        return dl_se->dl_deadline == dl_se->dl_period;
 819}
 820
 821/*
 822 * When a deadline entity is placed in the runqueue, its runtime and deadline
 823 * might need to be updated. This is done by a CBS wake up rule. There are two
 824 * different rules: 1) the original CBS; and 2) the Revisited CBS.
 825 *
 826 * When the task is starting a new period, the Original CBS is used. In this
 827 * case, the runtime is replenished and a new absolute deadline is set.
 828 *
  829 * When a task is queued before the beginning of the next period, using the
  830 * remaining runtime and deadline could make the entity overflow; see
  831 * dl_entity_overflow() for more about runtime overflow. When such a case
  832 * is detected, the runtime and deadline need to be updated.
 833 *
 834 * If the task has an implicit deadline, i.e., deadline == period, the Original
  835 * CBS is applied: the runtime is replenished and a new absolute deadline is
 836 * set, as in the previous cases.
 837 *
 838 * However, the Original CBS does not work properly for tasks with
 839 * deadline < period, which are said to have a constrained deadline. By
 840 * applying the Original CBS, a constrained deadline task would be able to run
 841 * runtime/deadline in a period. With deadline < period, the task would
 842 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 843 *
  844 * In order to prevent this misbehaviour, the Revisited CBS is used for
 845 * constrained deadline tasks when a runtime overflow is detected. In the
 846 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 847 * the remaining runtime of the task is reduced to avoid runtime overflow.
  848 * Please refer to the comments above the update_dl_revised_wakeup() function
  849 * to find more about the Revised CBS rule.
 850 */
 851static void update_dl_entity(struct sched_dl_entity *dl_se,
 852                             struct sched_dl_entity *pi_se)
 853{
 854        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 855        struct rq *rq = rq_of_dl_rq(dl_rq);
 856
 857        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 858            dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
 859
 860                if (unlikely(!dl_is_implicit(dl_se) &&
 861                             !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
 862                             !dl_se->dl_boosted)){
 863                        update_dl_revised_wakeup(dl_se, rq);
 864                        return;
 865                }
 866
 867                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 868                dl_se->runtime = pi_se->dl_runtime;
 869        }
 870}
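/*
 * Tying the two rules together (an illustrative recap): in the
 * 6ms / 30ms overflow example from dl_entity_overflow(), an
 * implicit-deadline task gets the Original CBS treatment above, i.e. a
 * fresh dl_runtime and a deadline one dl_deadline in the future. A
 * constrained-deadline task in the same situation, not boosted and with
 * its deadline still ahead, takes the update_dl_revised_wakeup() path
 * and only has its runtime clamped.
 */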
 871
 872static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
 873{
 874        return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
 875}
 876
 877/*
 878 * If the entity depleted all its runtime, and if we want it to sleep
 879 * while waiting for some new execution time to become available, we
 880 * set the bandwidth replenishment timer to the replenishment instant
 881 * and try to activate it.
 882 *
 883 * Notice that it is important for the caller to know if the timer
 884 * actually started or not (i.e., the replenishment instant is in
 885 * the future or in the past).
 886 */
 887static int start_dl_timer(struct task_struct *p)
 888{
 889        struct sched_dl_entity *dl_se = &p->dl;
 890        struct hrtimer *timer = &dl_se->dl_timer;
 891        struct rq *rq = task_rq(p);
 892        ktime_t now, act;
 893        s64 delta;
 894
 895        lockdep_assert_held(&rq->lock);
 896
 897        /*
 898         * We want the timer to fire at the deadline, but considering
 899         * that it is actually coming from rq->clock and not from
 900         * hrtimer's time base reading.
 901         */
 902        act = ns_to_ktime(dl_next_period(dl_se));
 903        now = hrtimer_cb_get_time(timer);
 904        delta = ktime_to_ns(now) - rq_clock(rq);
 905        act = ktime_add_ns(act, delta);
 906
 907        /*
 908         * If the expiry time already passed, e.g., because the value
 909         * chosen as the deadline is too small, don't even try to
 910         * start the timer in the past!
 911         */
 912        if (ktime_us_delta(act, now) < 0)
 913                return 0;
 914
 915        /*
 916         * !enqueued will guarantee another callback; even if one is already in
 917         * progress. This ensures a balanced {get,put}_task_struct().
 918         *
 919         * The race against __run_timer() clearing the enqueued state is
 920         * harmless because we're holding task_rq()->lock, therefore the timer
 921         * expiring after we've done the check will wait on its task_rq_lock()
 922         * and observe our state.
 923         */
 924        if (!hrtimer_is_queued(timer)) {
 925                get_task_struct(p);
 926                hrtimer_start(timer, act, HRTIMER_MODE_ABS);
 927        }
 928
 929        return 1;
 930}
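/*
 * Illustrative example of the clock-base translation above: suppose the
 * next period starts at rq_clock = 500ms, the current rq_clock is 480ms
 * and the hrtimer base currently reads 1000ms. Then delta = 520ms and
 * act = 1020ms, i.e. the timer is armed 20ms in the future in hrtimer
 * time, exactly the distance to the next period in rq_clock time.
 */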
 931
 932/*
 933 * This is the bandwidth enforcement timer callback. If here, we know
 934 * a task is not on its dl_rq, since the fact that the timer was running
 935 * means the task is throttled and needs a runtime replenishment.
 936 *
 937 * However, what we actually do depends on the fact the task is active,
 938 * (it is on its rq) or has been removed from there by a call to
 939 * dequeue_task_dl(). In the former case we must issue the runtime
 940 * replenishment and add the task back to the dl_rq; in the latter, we just
 941 * do nothing but clearing dl_throttled, so that runtime and deadline
 942 * updating (and the queueing back to dl_rq) will be done by the
 943 * next call to enqueue_task_dl().
 944 */
 945static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 946{
 947        struct sched_dl_entity *dl_se = container_of(timer,
 948                                                     struct sched_dl_entity,
 949                                                     dl_timer);
 950        struct task_struct *p = dl_task_of(dl_se);
 951        struct rq_flags rf;
 952        struct rq *rq;
 953
 954        rq = task_rq_lock(p, &rf);
 955
 956        /*
 957         * The task might have changed its scheduling policy to something
 958         * different than SCHED_DEADLINE (through switched_from_dl()).
 959         */
 960        if (!dl_task(p))
 961                goto unlock;
 962
 963        /*
 964         * The task might have been boosted by someone else and might be in the
  965         * boosting/deboosting path; it's not throttled.
 966         */
 967        if (dl_se->dl_boosted)
 968                goto unlock;
 969
 970        /*
 971         * Spurious timer due to start_dl_timer() race; or we already received
 972         * a replenishment from rt_mutex_setprio().
 973         */
 974        if (!dl_se->dl_throttled)
 975                goto unlock;
 976
 977        sched_clock_tick();
 978        update_rq_clock(rq);
 979
 980        /*
 981         * If the throttle happened during sched-out; like:
 982         *
 983         *   schedule()
 984         *     deactivate_task()
 985         *       dequeue_task_dl()
 986         *         update_curr_dl()
 987         *           start_dl_timer()
 988         *         __dequeue_task_dl()
 989         *     prev->on_rq = 0;
 990         *
 991         * We can be both throttled and !queued. Replenish the counter
 992         * but do not enqueue -- wait for our wakeup to do that.
 993         */
 994        if (!task_on_rq_queued(p)) {
 995                replenish_dl_entity(dl_se, dl_se);
 996                goto unlock;
 997        }
 998
 999#ifdef CONFIG_SMP
1000        if (unlikely(!rq->online)) {
1001                /*
1002                 * If the runqueue is no longer available, migrate the
1003                 * task elsewhere. This necessarily changes rq.
1004                 */
1005                lockdep_unpin_lock(&rq->lock, rf.cookie);
1006                rq = dl_task_offline_migration(rq, p);
1007                rf.cookie = lockdep_pin_lock(&rq->lock);
1008                update_rq_clock(rq);
1009
1010                /*
1011                 * Now that the task has been migrated to the new RQ and we
1012                 * have that locked, proceed as normal and enqueue the task
1013                 * there.
1014                 */
1015        }
1016#endif
1017
1018        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1019        if (dl_task(rq->curr))
1020                check_preempt_curr_dl(rq, p, 0);
1021        else
1022                resched_curr(rq);
1023
1024#ifdef CONFIG_SMP
1025        /*
1026         * Queueing this task back might have overloaded rq, check if we need
1027         * to kick someone away.
1028         */
1029        if (has_pushable_dl_tasks(rq)) {
1030                /*
 1031                 * Nothing relies on rq->lock after this, so it's safe to drop
1032                 * rq->lock.
1033                 */
1034                rq_unpin_lock(rq, &rf);
1035                push_dl_task(rq);
1036                rq_repin_lock(rq, &rf);
1037        }
1038#endif
1039
1040unlock:
1041        task_rq_unlock(rq, p, &rf);
1042
1043        /*
1044         * This can free the task_struct, including this hrtimer, do not touch
1045         * anything related to that after this.
1046         */
1047        put_task_struct(p);
1048
1049        return HRTIMER_NORESTART;
1050}
1051
1052void init_dl_task_timer(struct sched_dl_entity *dl_se)
1053{
1054        struct hrtimer *timer = &dl_se->dl_timer;
1055
1056        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1057        timer->function = dl_task_timer;
1058}
1059
1060/*
1061 * During the activation, CBS checks if it can reuse the current task's
1062 * runtime and period. If the deadline of the task is in the past, CBS
1063 * cannot use the runtime, and so it replenishes the task. This rule
1064 * works fine for implicit deadline tasks (deadline == period), and the
1065 * CBS was designed for implicit deadline tasks. However, a task with
 1066 * constrained deadline (deadline < period) might be awakened after the
1067 * deadline, but before the next period. In this case, replenishing the
1068 * task would allow it to run for runtime / deadline. As in this case
1069 * deadline < period, CBS enables a task to run for more than the
1070 * runtime / period. In a very loaded system, this can cause a domino
1071 * effect, making other tasks miss their deadlines.
1072 *
1073 * To avoid this problem, in the activation of a constrained deadline
1074 * task after the deadline but before the next period, throttle the
 1075 * task and set the replenishing timer to the beginning of the next period,
1076 * unless it is boosted.
1077 */
1078static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1079{
1080        struct task_struct *p = dl_task_of(dl_se);
1081        struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1082
1083        if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1084            dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1085                if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1086                        return;
1087                dl_se->dl_throttled = 1;
1088                if (dl_se->runtime > 0)
1089                        dl_se->runtime = 0;
1090        }
1091}
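/*
 * Worked example (illustrative numbers): a constrained task with
 * dl_runtime = 5ms, dl_deadline = 10ms and dl_period = 100ms was
 * admitted for 5ms / 100ms = 5% of a CPU. If a wakeup falling between
 * its deadline and the start of the next period were simply
 * replenished, the task could approach 5ms every 10ms, i.e. 50%, far
 * more than it was admitted for. Throttling it until the next period,
 * as above, keeps it within the 5% it declared.
 */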
1092
1093static
1094int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1095{
1096        return (dl_se->runtime <= 0);
1097}
1098
1099extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1100
1101/*
1102 * This function implements the GRUB accounting rule:
1103 * according to the GRUB reclaiming algorithm, the runtime is
1104 * not decreased as "dq = -dt", but as
1105 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1106 * where u is the utilization of the task, Umax is the maximum reclaimable
1107 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1108 * as the difference between the "total runqueue utilization" and the
1109 * runqueue active utilization, and Uextra is the (per runqueue) extra
1110 * reclaimable utilization.
1111 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1112 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1113 * BW_SHIFT.
 1114 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 1115 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1116 * Since delta is a 64 bit variable, to have an overflow its value
1117 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1118 * So, overflow is not an issue here.
1119 */
1120static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1121{
1122        u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1123        u64 u_act;
1124        u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1125
1126        /*
1127         * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1128         * we compare u_inact + rq->dl.extra_bw with
1129         * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1130         * u_inact + rq->dl.extra_bw can be larger than
 1131         * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
1132         * leading to wrong results)
1133         */
1134        if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1135                u_act = u_act_min;
1136        else
1137                u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1138
1139        return (delta * u_act) >> BW_SHIFT;
1140}
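/*
 * Worked example (illustrative numbers): with the default
 * Umax ~= 0.95 (from the RT bandwidth limit), a task with utilization
 * u = 0.25 has u_act_min = u / Umax ~= 0.26. If Uinact + Uextra = 0.25
 * on this runqueue, the second branch applies, u_act = 1 - 0.25 = 0.75,
 * and 10ms of execution is charged as 7.5ms of runtime. If instead
 * Uinact + Uextra = 0.9 (almost everything is reclaimable), u_act is
 * clamped to ~0.26 and the same 10ms only consumes ~2.6ms, letting the
 * task reclaim the otherwise unused bandwidth.
 */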
1141
1142/*
1143 * Update the current task's runtime statistics (provided it is still
1144 * a -deadline task and has not been removed from the dl_rq).
1145 */
1146static void update_curr_dl(struct rq *rq)
1147{
1148        struct task_struct *curr = rq->curr;
1149        struct sched_dl_entity *dl_se = &curr->dl;
1150        u64 delta_exec, scaled_delta_exec;
1151        int cpu = cpu_of(rq);
1152        u64 now;
1153
1154        if (!dl_task(curr) || !on_dl_rq(dl_se))
1155                return;
1156
1157        /*
1158         * Consumed budget is computed considering the time as
1159         * observed by schedulable tasks (excluding time spent
1160         * in hardirq context, etc.). Deadlines are instead
1161         * computed using hard walltime. This seems to be the more
1162         * natural solution, but the full ramifications of this
1163         * approach need further study.
1164         */
1165        now = rq_clock_task(rq);
1166        delta_exec = now - curr->se.exec_start;
1167        if (unlikely((s64)delta_exec <= 0)) {
1168                if (unlikely(dl_se->dl_yielded))
1169                        goto throttle;
1170                return;
1171        }
1172
1173        schedstat_set(curr->se.statistics.exec_max,
1174                      max(curr->se.statistics.exec_max, delta_exec));
1175
1176        curr->se.sum_exec_runtime += delta_exec;
1177        account_group_exec_runtime(curr, delta_exec);
1178
1179        curr->se.exec_start = now;
1180        cgroup_account_cputime(curr, delta_exec);
1181
1182        sched_rt_avg_update(rq, delta_exec);
1183
1184        if (dl_entity_is_special(dl_se))
1185                return;
1186
1187        /*
1188         * For tasks that participate in GRUB, we implement GRUB-PA: the
1189         * spare reclaimed bandwidth is used to clock down frequency.
1190         *
1191         * For the others, we still need to scale reservation parameters
1192         * according to current frequency and CPU maximum capacity.
1193         */
1194        if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1195                scaled_delta_exec = grub_reclaim(delta_exec,
1196                                                 rq,
1197                                                 &curr->dl);
1198        } else {
1199                unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1200                unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
1201
1202                scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1203                scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1204        }
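        /*
         * Illustrative example of the scaling above: on a CPU running
         * at half of its maximum frequency (scale_freq = 512) and with
         * full capacity (scale_cpu = 1024), a 10ms delta_exec is
         * accounted as 5ms of consumed runtime, so the budget tracks
         * the work actually done rather than pure wall time.
         */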
1205
1206        dl_se->runtime -= scaled_delta_exec;
1207
1208throttle:
1209        if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1210                dl_se->dl_throttled = 1;
1211
1212                /* If requested, inform the user about runtime overruns. */
1213                if (dl_runtime_exceeded(dl_se) &&
1214                    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1215                        dl_se->dl_overrun = 1;
1216
1217                __dequeue_task_dl(rq, curr, 0);
1218                if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1219                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1220
1221                if (!is_leftmost(curr, &rq->dl))
1222                        resched_curr(rq);
1223        }
1224
1225        /*
1226         * Because -- for now -- we share the rt bandwidth, we need to
1227         * account our runtime there too, otherwise actual rt tasks
1228         * would be able to exceed the shared quota.
1229         *
1230         * Account to the root rt group for now.
1231         *
1232         * The solution we're working towards is having the RT groups scheduled
1233         * using deadline servers -- however there's a few nasties to figure
1234         * out before that can happen.
1235         */
1236        if (rt_bandwidth_enabled()) {
1237                struct rt_rq *rt_rq = &rq->rt;
1238
1239                raw_spin_lock(&rt_rq->rt_runtime_lock);
1240                /*
1241                 * We'll let actual RT tasks worry about the overflow here, we
 1242                 * have our own CBS to keep us in line; only account when RT
1243                 * bandwidth is relevant.
1244                 */
1245                if (sched_rt_bandwidth_account(rt_rq))
1246                        rt_rq->rt_time += delta_exec;
1247                raw_spin_unlock(&rt_rq->rt_runtime_lock);
1248        }
1249}
1250
1251static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1252{
1253        struct sched_dl_entity *dl_se = container_of(timer,
1254                                                     struct sched_dl_entity,
1255                                                     inactive_timer);
1256        struct task_struct *p = dl_task_of(dl_se);
1257        struct rq_flags rf;
1258        struct rq *rq;
1259
1260        rq = task_rq_lock(p, &rf);
1261
1262        sched_clock_tick();
1263        update_rq_clock(rq);
1264
1265        if (!dl_task(p) || p->state == TASK_DEAD) {
1266                struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1267
1268                if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1269                        sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1270                        sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1271                        dl_se->dl_non_contending = 0;
1272                }
1273
1274                raw_spin_lock(&dl_b->lock);
1275                __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1276                raw_spin_unlock(&dl_b->lock);
1277                __dl_clear_params(p);
1278
1279                goto unlock;
1280        }
1281        if (dl_se->dl_non_contending == 0)
1282                goto unlock;
1283
1284        sub_running_bw(dl_se, &rq->dl);
1285        dl_se->dl_non_contending = 0;
1286unlock:
1287        task_rq_unlock(rq, p, &rf);
1288        put_task_struct(p);
1289
1290        return HRTIMER_NORESTART;
1291}
1292
1293void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1294{
1295        struct hrtimer *timer = &dl_se->inactive_timer;
1296
1297        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1298        timer->function = inactive_task_timer;
1299}
1300
1301#ifdef CONFIG_SMP
1302
1303static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1304{
1305        struct rq *rq = rq_of_dl_rq(dl_rq);
1306
1307        if (dl_rq->earliest_dl.curr == 0 ||
1308            dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1309                dl_rq->earliest_dl.curr = deadline;
1310                cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1311        }
1312}
1313
1314static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1315{
1316        struct rq *rq = rq_of_dl_rq(dl_rq);
1317
1318        /*
1319         * Since we may have removed our earliest (and/or next earliest)
1320         * task we must recompute them.
1321         */
1322        if (!dl_rq->dl_nr_running) {
1323                dl_rq->earliest_dl.curr = 0;
1324                dl_rq->earliest_dl.next = 0;
1325                cpudl_clear(&rq->rd->cpudl, rq->cpu);
1326        } else {
1327                struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1328                struct sched_dl_entity *entry;
1329
1330                entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1331                dl_rq->earliest_dl.curr = entry->deadline;
1332                cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1333        }
1334}
1335
1336#else
1337
1338static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1339static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1340
1341#endif /* CONFIG_SMP */
1342
1343static inline
1344void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1345{
1346        int prio = dl_task_of(dl_se)->prio;
1347        u64 deadline = dl_se->deadline;
1348
1349        WARN_ON(!dl_prio(prio));
1350        dl_rq->dl_nr_running++;
1351        add_nr_running(rq_of_dl_rq(dl_rq), 1);
1352
1353        inc_dl_deadline(dl_rq, deadline);
1354        inc_dl_migration(dl_se, dl_rq);
1355}
1356
1357static inline
1358void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1359{
1360        int prio = dl_task_of(dl_se)->prio;
1361
1362        WARN_ON(!dl_prio(prio));
1363        WARN_ON(!dl_rq->dl_nr_running);
1364        dl_rq->dl_nr_running--;
1365        sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1366
1367        dec_dl_deadline(dl_rq, dl_se->deadline);
1368        dec_dl_migration(dl_se, dl_rq);
1369}
1370
1371static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1372{
1373        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1374        struct rb_node **link = &dl_rq->root.rb_root.rb_node;
1375        struct rb_node *parent = NULL;
1376        struct sched_dl_entity *entry;
1377        int leftmost = 1;
1378
1379        BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1380
1381        while (*link) {
1382                parent = *link;
1383                entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1384                if (dl_time_before(dl_se->deadline, entry->deadline))
1385                        link = &parent->rb_left;
1386                else {
1387                        link = &parent->rb_right;
1388                        leftmost = 0;
1389                }
1390        }
1391
1392        rb_link_node(&dl_se->rb_node, parent, link);
1393        rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
1394
1395        inc_dl_tasks(dl_se, dl_rq);
1396}
1397
1398static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1399{
1400        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1401
1402        if (RB_EMPTY_NODE(&dl_se->rb_node))
1403                return;
1404
1405        rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1406        RB_CLEAR_NODE(&dl_se->rb_node);
1407
1408        dec_dl_tasks(dl_se, dl_rq);
1409}
1410
1411static void
1412enqueue_dl_entity(struct sched_dl_entity *dl_se,
1413                  struct sched_dl_entity *pi_se, int flags)
1414{
1415        BUG_ON(on_dl_rq(dl_se));
1416
1417        /*
1418         * If this is a wakeup or a new instance, the scheduling
1419         * parameters of the task might need updating. Otherwise,
1420         * we want a replenishment of its runtime.
1421         */
1422        if (flags & ENQUEUE_WAKEUP) {
1423                task_contending(dl_se, flags);
1424                update_dl_entity(dl_se, pi_se);
1425        } else if (flags & ENQUEUE_REPLENISH) {
1426                replenish_dl_entity(dl_se, pi_se);
1427        } else if ((flags & ENQUEUE_RESTORE) &&
1428                  dl_time_before(dl_se->deadline,
1429                                 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1430                setup_new_dl_entity(dl_se);
1431        }
1432
1433        __enqueue_dl_entity(dl_se);
1434}
1435
1436static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1437{
1438        __dequeue_dl_entity(dl_se);
1439}
1440
1441static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1442{
1443        struct task_struct *pi_task = rt_mutex_get_top_task(p);
1444        struct sched_dl_entity *pi_se = &p->dl;
1445
1446        /*
1447         * Use the scheduling parameters of the top pi-waiter task if:
1448         * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1449         * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1450         *   smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1451         *   boosted due to a SCHED_DEADLINE pi-waiter).
1452         * Otherwise we keep our runtime and deadline.
1453         */
1454        if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1455                pi_se = &pi_task->dl;
1456        } else if (!dl_prio(p->normal_prio)) {
1457                /*
1458                 * Special case in which we have a !SCHED_DEADLINE task
1459                 * that is going to be deboosted, but exceeds its
1460                 * runtime while doing so. No point in replenishing
1461                 * it, as it's going to return back to its original
1462                 * scheduling class after this.
1463                 */
1464                BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1465                return;
1466        }
1467
1468        /*
1469         * Check if a constrained deadline task was activated
1470         * after the deadline but before the next period.
1471         * If that is the case, the task will be throttled and
1472         * the replenishment timer will be set to the next period.
1473         */
1474        if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1475                dl_check_constrained_dl(&p->dl);
1476
1477        if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1478                add_rq_bw(&p->dl, &rq->dl);
1479                add_running_bw(&p->dl, &rq->dl);
1480        }
1481
1482        /*
1483         * If p is throttled, we do not enqueue it. In fact, if it exhausted
1484         * its budget it needs a replenishment and, since it now is on
1485         * its rq, the bandwidth timer callback (which clearly has not
1486         * run yet) will take care of this.
1487         * However, the active utilization does not depend on the fact
1488         * that the task is on the runqueue or not (but depends on the
1489         * task's state - in GRUB parlance, "inactive" vs "active contending").
1490         * In other words, even if a task is throttled its utilization must
1491         * be counted in the active utilization; hence, we need to call
1492         * add_running_bw().
1493         */
1494        if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1495                if (flags & ENQUEUE_WAKEUP)
1496                        task_contending(&p->dl, flags);
1497
1498                return;
1499        }
1500
1501        enqueue_dl_entity(&p->dl, pi_se, flags);
1502
1503        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1504                enqueue_pushable_dl_task(rq, p);
1505}
1506
1507static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1508{
1509        dequeue_dl_entity(&p->dl);
1510        dequeue_pushable_dl_task(rq, p);
1511}
1512
1513static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1514{
1515        update_curr_dl(rq);
1516        __dequeue_task_dl(rq, p, flags);
1517
1518        if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1519                sub_running_bw(&p->dl, &rq->dl);
1520                sub_rq_bw(&p->dl, &rq->dl);
1521        }
1522
1523        /*
1524         * This check allows us to start the inactive timer (or to immediately
1525         * decrease the active utilization, if needed) in two cases:
1526         * when the task blocks and when it is terminating
1527         * (p->state == TASK_DEAD). We can handle the two cases in the same
1528         * way, because from GRUB's point of view the same thing is happening
1529         * (the task moves from "active contending" to "active non contending"
1530         * or "inactive").
1531         */
1532        if (flags & DEQUEUE_SLEEP)
1533                task_non_contending(p);
1534}
1535
1536/*
1537 * Yield task semantic for -deadline tasks is:
1538 *
1539 *   get off the CPU until our next instance, with
1540 *   a new runtime. This is of little use now, since we
1541 *   don't have a bandwidth reclaiming mechanism. Anyway,
1542 *   bandwidth reclaiming is planned for the future, and
1543 *   yield_task_dl will indicate that some spare budget
1544 *   is available for other task instances to use.
1545 */
1546static void yield_task_dl(struct rq *rq)
1547{
1548        /*
1549         * We make the task go to sleep until its current deadline by
1550         * forcing its runtime to zero. This way, update_curr_dl() stops
1551         * it and the bandwidth timer will wake it up and will give it
1552         * new scheduling parameters (thanks to dl_yielded=1).
1553         */
1554        rq->curr->dl.dl_yielded = 1;
1555
1556        update_rq_clock(rq);
1557        update_curr_dl(rq);
1558        /*
1559         * Tell update_rq_clock() that we've just updated,
1560         * so we don't do microscopic update in schedule()
1561         * and double the fastpath cost.
1562         */
1563        rq_clock_skip_update(rq);
1564}
1565
1566#ifdef CONFIG_SMP
1567
1568static int find_later_rq(struct task_struct *task);
1569
1570static int
1571select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1572{
1573        struct task_struct *curr;
1574        struct rq *rq;
1575
1576        if (sd_flag != SD_BALANCE_WAKE)
1577                goto out;
1578
1579        rq = cpu_rq(cpu);
1580
1581        rcu_read_lock();
1582        curr = READ_ONCE(rq->curr); /* unlocked access */
1583
1584        /*
1585         * If we are dealing with a -deadline task, we must
1586         * decide where to wake it up.
1587         * If it has a later deadline and the current task
1588         * on this rq can't move (provided the waking task
1589         * can!) we prefer to send it somewhere else. On the
1590         * other hand, if it has a shorter deadline, we
1591         * try to make it stay here, it might be important.
1592         */
1593        if (unlikely(dl_task(curr)) &&
1594            (curr->nr_cpus_allowed < 2 ||
1595             !dl_entity_preempt(&p->dl, &curr->dl)) &&
1596            (p->nr_cpus_allowed > 1)) {
1597                int target = find_later_rq(p);
1598
1599                if (target != -1 &&
1600                                (dl_time_before(p->dl.deadline,
1601                                        cpu_rq(target)->dl.earliest_dl.curr) ||
1602                                (cpu_rq(target)->dl.dl_nr_running == 0)))
1603                        cpu = target;
1604        }
1605        rcu_read_unlock();
1606
1607out:
1608        return cpu;
1609}
1610
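    /*
     * A waking task is being migrated to a different CPU: drop its
     * utilization contribution from the old rq here; the new rq will
     * account for it when the task is enqueued there.
     */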
1611static void migrate_task_rq_dl(struct task_struct *p)
1612{
1613        struct rq *rq;
1614
1615        if (p->state != TASK_WAKING)
1616                return;
1617
1618        rq = task_rq(p);
1619        /*
1620         * Since p->state == TASK_WAKING, set_task_cpu() has been called
1621         * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1622         * rq->lock is not... So, lock it
1623         */
1624        raw_spin_lock(&rq->lock);
1625        if (p->dl.dl_non_contending) {
1626                sub_running_bw(&p->dl, &rq->dl);
1627                p->dl.dl_non_contending = 0;
1628                /*
1629                 * If the timer handler is currently running and the
1630                 * timer cannot be cancelled, inactive_task_timer()
1631                 * will see that dl_non_contending is not set, and
1632                 * will not touch the rq's active utilization,
1633                 * so we are still safe.
1634                 */
1635                if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1636                        put_task_struct(p);
1637        }
1638        sub_rq_bw(&p->dl, &rq->dl);
1639        raw_spin_unlock(&rq->lock);
1640}
1641
1642static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1643{
1644        /*
1645         * Current can't be migrated, useless to reschedule,
1646         * let's hope p can move out.
1647         */
1648        if (rq->curr->nr_cpus_allowed == 1 ||
1649            !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1650                return;
1651
1652        /*
1653         * p is migratable, so let's not schedule it and
1654         * see if it is pushed or pulled somewhere else.
1655         */
1656        if (p->nr_cpus_allowed != 1 &&
1657            cpudl_find(&rq->rd->cpudl, p, NULL))
1658                return;
1659
1660        resched_curr(rq);
1661}
1662
1663#endif /* CONFIG_SMP */
1664
1665/*
1666 * Only called when both the current and waking task are -deadline
1667 * tasks.
1668 */
1669static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1670                                  int flags)
1671{
1672        if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1673                resched_curr(rq);
1674                return;
1675        }
1676
1677#ifdef CONFIG_SMP
1678        /*
1679         * In the unlikely case current and p have the same deadline
1680         * let us try to decide what's the best thing to do...
1681         */
1682        if ((p->dl.deadline == rq->curr->dl.deadline) &&
1683            !test_tsk_need_resched(rq->curr))
1684                check_preempt_equal_dl(rq, p);
1685#endif /* CONFIG_SMP */
1686}
1687
1688#ifdef CONFIG_SCHED_HRTICK
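    /* Program the hrtick to fire once the task's remaining runtime is consumed. */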
1689static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1690{
1691        hrtick_start(rq, p->dl.runtime);
1692}
1693#else /* !CONFIG_SCHED_HRTICK */
1694static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1695{
1696}
1697#endif
1698
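    /* Return the leftmost (earliest deadline) entity on the dl_rq, if any. */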
1699static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1700                                                   struct dl_rq *dl_rq)
1701{
1702        struct rb_node *left = rb_first_cached(&dl_rq->root);
1703
1704        if (!left)
1705                return NULL;
1706
1707        return rb_entry(left, struct sched_dl_entity, rb_node);
1708}
1709
1710static struct task_struct *
1711pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1712{
1713        struct sched_dl_entity *dl_se;
1714        struct task_struct *p;
1715        struct dl_rq *dl_rq;
1716
1717        dl_rq = &rq->dl;
1718
1719        if (need_pull_dl_task(rq, prev)) {
1720                /*
1721                 * This is OK, because current is on_cpu, which avoids it being
1722                 * picked for load-balance and preemption/IRQs are still
1723                 * disabled avoiding further scheduler activity on it and we're
1724                 * being very careful to re-start the picking loop.
1725                 */
1726                rq_unpin_lock(rq, rf);
1727                pull_dl_task(rq);
1728                rq_repin_lock(rq, rf);
1729                /*
1730                 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1731                 * means a stop task can slip in, in which case we need to
1732                 * re-start task selection.
1733                 */
1734                if (rq->stop && task_on_rq_queued(rq->stop))
1735                        return RETRY_TASK;
1736        }
1737
1738        /*
1739         * When prev is DL, we may throttle it in put_prev_task().
1740         * So, we update time before we check for dl_nr_running.
1741         */
1742        if (prev->sched_class == &dl_sched_class)
1743                update_curr_dl(rq);
1744
1745        if (unlikely(!dl_rq->dl_nr_running))
1746                return NULL;
1747
1748        put_prev_task(rq, prev);
1749
1750        dl_se = pick_next_dl_entity(rq, dl_rq);
1751        BUG_ON(!dl_se);
1752
1753        p = dl_task_of(dl_se);
1754        p->se.exec_start = rq_clock_task(rq);
1755
1756        /* Running task will never be pushed. */
1757        dequeue_pushable_dl_task(rq, p);
1758
1759        if (hrtick_enabled(rq))
1760                start_hrtick_dl(rq, p);
1761
1762        deadline_queue_push_tasks(rq);
1763
1764        return p;
1765}
1766
1767static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1768{
1769        update_curr_dl(rq);
1770
1771        if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1772                enqueue_pushable_dl_task(rq, p);
1773}
1774
1775/*
1776 * scheduler tick hitting a task of our scheduling class.
1777 *
1778 * NOTE: This function can be called remotely by the tick offload that
1779 * goes along full dynticks. Therefore no local assumption can be made
1780 * and everything must be accessed through the @rq and @curr passed in
1781 * parameters.
1782 */
1783static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1784{
1785        update_curr_dl(rq);
1786
1787        /*
1788         * Even when we have runtime, update_curr_dl() might have resulted in us
1789         * not being the leftmost task anymore. In that case NEED_RESCHED will
1790         * be set and schedule() will start a new hrtick for the next task.
1791         */
1792        if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1793            is_leftmost(p, &rq->dl))
1794                start_hrtick_dl(rq, p);
1795}
1796
1797static void task_fork_dl(struct task_struct *p)
1798{
1799        /*
1800         * SCHED_DEADLINE tasks cannot fork and this is achieved through
1801         * sched_fork()
1802         */
1803}
1804
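    /*
     * p became the running task: restart its exec clock and take it out
     * of the pushable tasks tree (the running task is never pushed).
     */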
1805static void set_curr_task_dl(struct rq *rq)
1806{
1807        struct task_struct *p = rq->curr;
1808
1809        p->se.exec_start = rq_clock_task(rq);
1810
1811        /* You can't push away the running task */
1812        dequeue_pushable_dl_task(rq, p);
1813}
1814
1815#ifdef CONFIG_SMP
1816
1817/* Only try algorithms three times */
1818#define DL_MAX_TRIES 3
1819
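    /* Return 1 if @p is not running and @cpu is in its affinity mask, 0 otherwise. */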
1820static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1821{
1822        if (!task_running(rq, p) &&
1823            cpumask_test_cpu(cpu, &p->cpus_allowed))
1824                return 1;
1825        return 0;
1826}
1827
1828/*
1829 * Return the earliest pushable task on this rq that is suitable to be
1830 * executed on the CPU, or NULL otherwise:
1831 */
1832static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1833{
1834        struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1835        struct task_struct *p = NULL;
1836
1837        if (!has_pushable_dl_tasks(rq))
1838                return NULL;
1839
1840next_node:
1841        if (next_node) {
1842                p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1843
1844                if (pick_dl_task(rq, p, cpu))
1845                        return p;
1846
1847                next_node = rb_next(next_node);
1848                goto next_node;
1849        }
1850
1851        return NULL;
1852}
1853
1854static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1855
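    /*
     * Find a CPU @task could be pushed to: one whose earliest deadline is
     * later than @task's (as computed by cpudl_find()), preferring CPUs
     * that are topologically close to where the task last ran. Returns -1
     * if no suitable CPU exists.
     */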
1856static int find_later_rq(struct task_struct *task)
1857{
1858        struct sched_domain *sd;
1859        struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1860        int this_cpu = smp_processor_id();
1861        int cpu = task_cpu(task);
1862
1863        /* Make sure the mask is initialized first */
1864        if (unlikely(!later_mask))
1865                return -1;
1866
1867        if (task->nr_cpus_allowed == 1)
1868                return -1;
1869
1870        /*
1871         * We have to consider system topology and task affinity
1872         * first, then we can look for a suitable CPU.
1873         */
1874        if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1875                return -1;
1876
1877        /*
1878         * If we are here, some targets have been found, including
1879         * the most suitable one: among the runqueues whose current
1880         * tasks have a later deadline than the task's, the rq with
1881         * the latest possible one.
1882         *
1883         * Now we check how well this matches with task's
1884         * affinity and system topology.
1885         *
1886         * The last CPU where the task ran is our first
1887         * guess, since it is most likely cache-hot there.
1888         */
1889        if (cpumask_test_cpu(cpu, later_mask))
1890                return cpu;
1891        /*
1892         * Check if this_cpu is to be skipped (i.e., it is
1893         * not in the mask) or not.
1894         */
1895        if (!cpumask_test_cpu(this_cpu, later_mask))
1896                this_cpu = -1;
1897
1898        rcu_read_lock();
1899        for_each_domain(cpu, sd) {
1900                if (sd->flags & SD_WAKE_AFFINE) {
1901                        int best_cpu;
1902
1903                        /*
1904                         * If possible, preempting this_cpu is
1905                         * cheaper than migrating.
1906                         */
1907                        if (this_cpu != -1 &&
1908                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1909                                rcu_read_unlock();
1910                                return this_cpu;
1911                        }
1912
1913                        best_cpu = cpumask_first_and(later_mask,
1914                                                        sched_domain_span(sd));
1915                        /*
1916                         * Last chance: if a CPU in both later_mask and
1917                         * the current sd span is valid, that becomes our
1918                         * choice. Of course, the latest possible CPU is
1919                         * already under consideration through later_mask.
1920                         */
1921                        if (best_cpu < nr_cpu_ids) {
1922                                rcu_read_unlock();
1923                                return best_cpu;
1924                        }
1925                }
1926        }
1927        rcu_read_unlock();
1928
1929        /*
1930         * At this point, all our guesses failed, we just return
1931         * 'something', and let the caller sort the things out.
1932         */
1933        if (this_cpu != -1)
1934                return this_cpu;
1935
1936        cpu = cpumask_any(later_mask);
1937        if (cpu < nr_cpu_ids)
1938                return cpu;
1939
1940        return -1;
1941}
1942
1943/* Locks the rq it finds */
1944static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1945{
1946        struct rq *later_rq = NULL;
1947        int tries;
1948        int cpu;
1949
1950        for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1951                cpu = find_later_rq(task);
1952
1953                if ((cpu == -1) || (cpu == rq->cpu))
1954                        break;
1955
1956                later_rq = cpu_rq(cpu);
1957
1958                if (later_rq->dl.dl_nr_running &&
1959                    !dl_time_before(task->dl.deadline,
1960                                        later_rq->dl.earliest_dl.curr)) {
1961                        /*
1962                         * Target rq has tasks of equal or earlier deadline,
1963                         * retrying does not release any lock and is unlikely
1964                         * to yield a different result.
1965                         */
1966                        later_rq = NULL;
1967                        break;
1968                }
1969
1970                /* Retry if something changed. */
1971                if (double_lock_balance(rq, later_rq)) {
1972                        if (unlikely(task_rq(task) != rq ||
1973                                     !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
1974                                     task_running(rq, task) ||
1975                                     !dl_task(task) ||
1976                                     !task_on_rq_queued(task))) {
1977                                double_unlock_balance(rq, later_rq);
1978                                later_rq = NULL;
1979                                break;
1980                        }
1981                }
1982
1983                /*
1984                 * If the rq we found has no -deadline task, or
1985                 * its earliest one has a later deadline than our
1986                 * task, the rq is a good one.
1987                 */
1988                if (!later_rq->dl.dl_nr_running ||
1989                    dl_time_before(task->dl.deadline,
1990                                   later_rq->dl.earliest_dl.curr))
1991                        break;
1992
1993                /* Otherwise we try again. */
1994                double_unlock_balance(rq, later_rq);
1995                later_rq = NULL;
1996        }
1997
1998        return later_rq;
1999}
2000
2001static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2002{
2003        struct task_struct *p;
2004
2005        if (!has_pushable_dl_tasks(rq))
2006                return NULL;
2007
2008        p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2009                     struct task_struct, pushable_dl_tasks);
2010
2011        BUG_ON(rq->cpu != task_cpu(p));
2012        BUG_ON(task_current(rq, p));
2013        BUG_ON(p->nr_cpus_allowed <= 1);
2014
2015        BUG_ON(!task_on_rq_queued(p));
2016        BUG_ON(!dl_task(p));
2017
2018        return p;
2019}
2020
2021/*
2022 * See if the non-running -deadline tasks on this rq
2023 * can be sent to some other CPU where they can preempt
2024 * and start executing.
2025 */
2026static int push_dl_task(struct rq *rq)
2027{
2028        struct task_struct *next_task;
2029        struct rq *later_rq;
2030        int ret = 0;
2031
2032        if (!rq->dl.overloaded)
2033                return 0;
2034
2035        next_task = pick_next_pushable_dl_task(rq);
2036        if (!next_task)
2037                return 0;
2038
2039retry:
2040        if (unlikely(next_task == rq->curr)) {
2041                WARN_ON(1);
2042                return 0;
2043        }
2044
2045        /*
2046         * If next_task preempts rq->curr, and rq->curr
2047         * can move away, it makes sense to just reschedule
2048         * without going further in pushing next_task.
2049         */
2050        if (dl_task(rq->curr) &&
2051            dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2052            rq->curr->nr_cpus_allowed > 1) {
2053                resched_curr(rq);
2054                return 0;
2055        }
2056
2057        /* We might release rq lock */
2058        get_task_struct(next_task);
2059
2060        /* Will lock the rq it'll find */
2061        later_rq = find_lock_later_rq(next_task, rq);
2062        if (!later_rq) {
2063                struct task_struct *task;
2064
2065                /*
2066                 * We must check all this again, since
2067                 * find_lock_later_rq releases rq->lock and it is
2068                 * then possible that next_task has migrated.
2069                 */
2070                task = pick_next_pushable_dl_task(rq);
2071                if (task == next_task) {
2072                        /*
2073                         * The task is still there. We don't try
2074                         * again, some other CPU will pull it when ready.
2075                         */
2076                        goto out;
2077                }
2078
2079                if (!task)
2080                        /* No more tasks */
2081                        goto out;
2082
2083                put_task_struct(next_task);
2084                next_task = task;
2085                goto retry;
2086        }
2087
2088        deactivate_task(rq, next_task, 0);
2089        sub_running_bw(&next_task->dl, &rq->dl);
2090        sub_rq_bw(&next_task->dl, &rq->dl);
2091        set_task_cpu(next_task, later_rq->cpu);
2092        add_rq_bw(&next_task->dl, &later_rq->dl);
2093        add_running_bw(&next_task->dl, &later_rq->dl);
2094        activate_task(later_rq, next_task, 0);
2095        ret = 1;
2096
2097        resched_curr(later_rq);
2098
2099        double_unlock_balance(rq, later_rq);
2100
2101out:
2102        put_task_struct(next_task);
2103
2104        return ret;
2105}
2106
2107static void push_dl_tasks(struct rq *rq)
2108{
2109        /* push_dl_task() will return true if it moved a -deadline task */
2110        while (push_dl_task(rq))
2111                ;
2112}
2113
2114static void pull_dl_task(struct rq *this_rq)
2115{
2116        int this_cpu = this_rq->cpu, cpu;
2117        struct task_struct *p;
2118        bool resched = false;
2119        struct rq *src_rq;
2120        u64 dmin = LONG_MAX;
2121
2122        if (likely(!dl_overloaded(this_rq)))
2123                return;
2124
2125        /*
2126         * Match the barrier from dl_set_overload(); this guarantees that if we
2127         * see overloaded we must also see the dlo_mask bit.
2128         */
2129        smp_rmb();
2130
2131        for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2132                if (this_cpu == cpu)
2133                        continue;
2134
2135                src_rq = cpu_rq(cpu);
2136
2137                /*
2138                 * It looks racy, and it is! However, as in sched_rt.c,
2139                 * we are fine with this.
2140                 */
2141                if (this_rq->dl.dl_nr_running &&
2142                    dl_time_before(this_rq->dl.earliest_dl.curr,
2143                                   src_rq->dl.earliest_dl.next))
2144                        continue;
2145
2146                /* Might drop this_rq->lock */
2147                double_lock_balance(this_rq, src_rq);
2148
2149                /*
2150                 * If there are no more pullable tasks on the
2151                 * rq, we're done with it.
2152                 */
2153                if (src_rq->dl.dl_nr_running <= 1)
2154                        goto skip;
2155
2156                p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2157
2158                /*
2159                 * We found a task to be pulled if:
2160                 *  - it preempts our current (if there's one),
2161                 *  - it will preempt the last one we pulled (if any).
2162                 */
2163                if (p && dl_time_before(p->dl.deadline, dmin) &&
2164                    (!this_rq->dl.dl_nr_running ||
2165                     dl_time_before(p->dl.deadline,
2166                                    this_rq->dl.earliest_dl.curr))) {
2167                        WARN_ON(p == src_rq->curr);
2168                        WARN_ON(!task_on_rq_queued(p));
2169
2170                        /*
2171                         * Then we pull iff p has actually an earlier
2172                         * deadline than the current task of its runqueue.
2173                         */
2174                        if (dl_time_before(p->dl.deadline,
2175                                           src_rq->curr->dl.deadline))
2176                                goto skip;
2177
2178                        resched = true;
2179
2180                        deactivate_task(src_rq, p, 0);
2181                        sub_running_bw(&p->dl, &src_rq->dl);
2182                        sub_rq_bw(&p->dl, &src_rq->dl);
2183                        set_task_cpu(p, this_cpu);
2184                        add_rq_bw(&p->dl, &this_rq->dl);
2185                        add_running_bw(&p->dl, &this_rq->dl);
2186                        activate_task(this_rq, p, 0);
2187                        dmin = p->dl.deadline;
2188
2189                        /* Is there any other task even earlier? */
2190                }
2191skip:
2192                double_unlock_balance(this_rq, src_rq);
2193        }
2194
2195        if (resched)
2196                resched_curr(this_rq);
2197}
2198
2199/*
2200 * Since the task is not running and a reschedule is not going to happen
2201 * anytime soon on its runqueue, we try pushing it away now.
2202 */
2203static void task_woken_dl(struct rq *rq, struct task_struct *p)
2204{
2205        if (!task_running(rq, p) &&
2206            !test_tsk_need_resched(rq->curr) &&
2207            p->nr_cpus_allowed > 1 &&
2208            dl_task(rq->curr) &&
2209            (rq->curr->nr_cpus_allowed < 2 ||
2210             !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2211                push_dl_tasks(rq);
2212        }
2213}
2214
2215static void set_cpus_allowed_dl(struct task_struct *p,
2216                                const struct cpumask *new_mask)
2217{
2218        struct root_domain *src_rd;
2219        struct rq *rq;
2220
2221        BUG_ON(!dl_task(p));
2222
2223        rq = task_rq(p);
2224        src_rd = rq->rd;
2225        /*
2226         * Migrating a SCHED_DEADLINE task between exclusive
2227         * cpusets (different root_domains) entails a bandwidth
2228         * update. We already made space for us in the destination
2229         * domain (see cpuset_can_attach()).
2230         */
2231        if (!cpumask_intersects(src_rd->span, new_mask)) {
2232                struct dl_bw *src_dl_b;
2233
2234                src_dl_b = dl_bw_of(cpu_of(rq));
2235                /*
2236                 * We now free resources of the root_domain we are migrating
2237                 * off. In the worst case, sched_setattr() may temporarily fail
2238                 * until we complete the update.
2239                 */
2240                raw_spin_lock(&src_dl_b->lock);
2241                __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2242                raw_spin_unlock(&src_dl_b->lock);
2243        }
2244
2245        set_cpus_allowed_common(p, new_mask);
2246}
2247
2248/* Assumes rq->lock is held */
2249static void rq_online_dl(struct rq *rq)
2250{
2251        if (rq->dl.overloaded)
2252                dl_set_overload(rq);
2253
2254        cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2255        if (rq->dl.dl_nr_running > 0)
2256                cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2257}
2258
2259/* Assumes rq->lock is held */
2260static void rq_offline_dl(struct rq *rq)
2261{
2262        if (rq->dl.overloaded)
2263                dl_clear_overload(rq);
2264
2265        cpudl_clear(&rq->rd->cpudl, rq->cpu);
2266        cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2267}
2268
2269void __init init_sched_dl_class(void)
2270{
2271        unsigned int i;
2272
2273        for_each_possible_cpu(i)
2274                zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2275                                        GFP_KERNEL, cpu_to_node(i));
2276}
2277
2278#endif /* CONFIG_SMP */
2279
2280static void switched_from_dl(struct rq *rq, struct task_struct *p)
2281{
2282        /*
2283         * task_non_contending() can start the "inactive timer" (if the 0-lag
2284         * time is in the future). If the task switches back to dl before
2285         * the "inactive timer" fires, it can continue to consume its current
2286         * runtime using its current deadline. If it stays outside of
2287         * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2288         * will reset the task parameters.
2289         */
2290        if (task_on_rq_queued(p) && p->dl.dl_runtime)
2291                task_non_contending(p);
2292
2293        if (!task_on_rq_queued(p))
2294                sub_rq_bw(&p->dl, &rq->dl);
2295
2296        /*
2297         * We cannot use inactive_task_timer() to invoke sub_running_bw()
2298         * at the 0-lag time, because the task could have been migrated
2299         * while it was SCHED_OTHER in the meantime.
2300         */
2301        if (p->dl.dl_non_contending)
2302                p->dl.dl_non_contending = 0;
2303
2304        /*
2305         * Since this might be the only -deadline task on the rq,
2306         * this is the right place to try to pull some other one
2307         * from an overloaded CPU, if any.
2308         */
2309        if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2310                return;
2311
2312        deadline_queue_pull_task(rq);
2313}
2314
2315/*
2316 * When switching to -deadline, we may overload the rq, then
2317 * we try to push someone off, if possible.
2318 */
2319static void switched_to_dl(struct rq *rq, struct task_struct *p)
2320{
2321        if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2322                put_task_struct(p);
2323
2324        /* If p is not queued we will update its parameters at next wakeup. */
2325        if (!task_on_rq_queued(p)) {
2326                add_rq_bw(&p->dl, &rq->dl);
2327
2328                return;
2329        }
2330
2331        if (rq->curr != p) {
2332#ifdef CONFIG_SMP
2333                if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2334                        deadline_queue_push_tasks(rq);
2335#endif
2336                if (dl_task(rq->curr))
2337                        check_preempt_curr_dl(rq, p, 0);
2338                else
2339                        resched_curr(rq);
2340        }
2341}
2342
2343/*
2344 * If the scheduling parameters of a -deadline task changed,
2345 * a push or pull operation might be needed.
2346 */
2347static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2348                            int oldprio)
2349{
2350        if (task_on_rq_queued(p) || rq->curr == p) {
2351#ifdef CONFIG_SMP
2352                /*
2353                 * This might be too much, but unfortunately
2354                 * we don't have the old deadline value, and
2355                 * we can't tell whether the task is increasing
2356                 * or lowering its prio, so...
2357                 */
2358                if (!rq->dl.overloaded)
2359                        deadline_queue_pull_task(rq);
2360
2361                /*
2362                 * If we now have an earlier deadline task than p,
2363                 * then reschedule, provided p is still on this
2364                 * runqueue.
2365                 */
2366                if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2367                        resched_curr(rq);
2368#else
2369                /*
2370                 * Again, we don't know if p has an earlier
2371                 * or later deadline, so let's blindly set a
2372                 * (maybe not needed) rescheduling point.
2373                 */
2374                resched_curr(rq);
2375#endif /* CONFIG_SMP */
2376        }
2377}
2378
2379const struct sched_class dl_sched_class = {
2380        .next                   = &rt_sched_class,
2381        .enqueue_task           = enqueue_task_dl,
2382        .dequeue_task           = dequeue_task_dl,
2383        .yield_task             = yield_task_dl,
2384
2385        .check_preempt_curr     = check_preempt_curr_dl,
2386
2387        .pick_next_task         = pick_next_task_dl,
2388        .put_prev_task          = put_prev_task_dl,
2389
2390#ifdef CONFIG_SMP
2391        .select_task_rq         = select_task_rq_dl,
2392        .migrate_task_rq        = migrate_task_rq_dl,
2393        .set_cpus_allowed       = set_cpus_allowed_dl,
2394        .rq_online              = rq_online_dl,
2395        .rq_offline             = rq_offline_dl,
2396        .task_woken             = task_woken_dl,
2397#endif
2398
2399        .set_curr_task          = set_curr_task_dl,
2400        .task_tick              = task_tick_dl,
2401        .task_fork              = task_fork_dl,
2402
2403        .prio_changed           = prio_changed_dl,
2404        .switched_from          = switched_from_dl,
2405        .switched_to            = switched_to_dl,
2406
2407        .update_curr            = update_curr_dl,
2408};
2409
2410int sched_dl_global_validate(void)
2411{
2412        u64 runtime = global_rt_runtime();
2413        u64 period = global_rt_period();
2414        u64 new_bw = to_ratio(period, runtime);
2415        struct dl_bw *dl_b;
2416        int cpu, ret = 0;
2417        unsigned long flags;
2418
2419        /*
2420         * Here we want to check that the bandwidth is not being set to a
2421         * value smaller than the currently allocated bandwidth in
2422         * any of the root_domains.
2423         *
2424         * FIXME: Cycling on all the CPUs is overdoing it, but simpler than
2425         * cycling on root_domains... Discussion on different/better
2426         * solutions is welcome!
2427         */
2428        for_each_possible_cpu(cpu) {
2429                rcu_read_lock_sched();
2430                dl_b = dl_bw_of(cpu);
2431
2432                raw_spin_lock_irqsave(&dl_b->lock, flags);
2433                if (new_bw < dl_b->total_bw)
2434                        ret = -EBUSY;
2435                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2436
2437                rcu_read_unlock_sched();
2438
2439                if (ret)
2440                        break;
2441        }
2442
2443        return ret;
2444}
2445
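    /*
     * Cache the ratios used by GRUB reclaiming. For example, with the default
     * 95% RT bandwidth (sched_rt_runtime_us = 950000 over a 1s period),
     * bw_ratio is ~1.0526 in RATIO_SHIFT fixed point and extra_bw is ~0.95
     * in BW_SHIFT fixed point.
     */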
2446void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2447{
2448        if (global_rt_runtime() == RUNTIME_INF) {
2449                dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2450                dl_rq->extra_bw = 1 << BW_SHIFT;
2451        } else {
2452                dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2453                          global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2454                dl_rq->extra_bw = to_ratio(global_rt_period(),
2455                                                    global_rt_runtime());
2456        }
2457}
2458
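    /*
     * Apply a new global bandwidth limit: update the admission limit of
     * every dl_bw and refresh the per-rq GRUB ratios.
     */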
2459void sched_dl_do_global(void)
2460{
2461        u64 new_bw = -1;
2462        struct dl_bw *dl_b;
2463        int cpu;
2464        unsigned long flags;
2465
2466        def_dl_bandwidth.dl_period = global_rt_period();
2467        def_dl_bandwidth.dl_runtime = global_rt_runtime();
2468
2469        if (global_rt_runtime() != RUNTIME_INF)
2470                new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2471
2472        /*
2473         * FIXME: As above...
2474         */
2475        for_each_possible_cpu(cpu) {
2476                rcu_read_lock_sched();
2477                dl_b = dl_bw_of(cpu);
2478
2479                raw_spin_lock_irqsave(&dl_b->lock, flags);
2480                dl_b->bw = new_bw;
2481                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2482
2483                rcu_read_unlock_sched();
2484                init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2485        }
2486}
2487
2488/*
2489 * We must be sure that accepting a new task (or allowing changing the
2490 * parameters of an existing one) is consistent with the bandwidth
2491 * constraints. If so, this function also updates the currently
2492 * allocated bandwidth accordingly, to reflect the new situation.
2493 *
2494 * This function is called while holding p's rq->lock.
2495 */
2496int sched_dl_overflow(struct task_struct *p, int policy,
2497                      const struct sched_attr *attr)
2498{
2499        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2500        u64 period = attr->sched_period ?: attr->sched_deadline;
2501        u64 runtime = attr->sched_runtime;
2502        u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2503        int cpus, err = -1;
2504
2505        if (attr->sched_flags & SCHED_FLAG_SUGOV)
2506                return 0;
2507
2508        /* !deadline task may carry old deadline bandwidth */
2509        if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2510                return 0;
2511
2512        /*
2513         * Whether a task enters, leaves, or stays -deadline but changes
2514         * its parameters, we may need to update accordingly the total
2515         * allocated bandwidth of the container.
2516         */
2517        raw_spin_lock(&dl_b->lock);
2518        cpus = dl_bw_cpus(task_cpu(p));
2519        if (dl_policy(policy) && !task_has_dl_policy(p) &&
2520            !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2521                if (hrtimer_active(&p->dl.inactive_timer))
2522                        __dl_sub(dl_b, p->dl.dl_bw, cpus);
2523                __dl_add(dl_b, new_bw, cpus);
2524                err = 0;
2525        } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2526                   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2527                /*
2528                 * XXX this is slightly incorrect: when the task
2529                 * utilization decreases, we should delay the total
2530                 * utilization change until the task's 0-lag point.
2531                 * But this would require setting the task's "inactive
2532                 * timer" when the task is not inactive.
2533                 */
2534                __dl_sub(dl_b, p->dl.dl_bw, cpus);
2535                __dl_add(dl_b, new_bw, cpus);
2536                dl_change_utilization(p, new_bw);
2537                err = 0;
2538        } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2539                /*
2540                 * Do not decrease the total deadline utilization here,
2541                 * switched_from_dl() will take care to do it at the correct
2542                 * (0-lag) time.
2543                 */
2544                err = 0;
2545        }
2546        raw_spin_unlock(&dl_b->lock);
2547
2548        return err;
2549}
2550
2551/*
2552 * This function initializes the sched_dl_entity of a task that is
2553 * becoming SCHED_DEADLINE.
2554 *
2555 * Only the static values are considered here, the actual runtime and the
2556 * absolute deadline will be properly calculated when the task is enqueued
2557 * for the first time with its new policy.
2558 */
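    /*
     * For example (illustrative values only): sched_runtime = 10ms,
     * sched_deadline = 30ms and sched_period = 100ms give dl_bw = 10/100
     * and dl_density = 10/30, both in BW_SHIFT fixed point.
     */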
2559void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2560{
2561        struct sched_dl_entity *dl_se = &p->dl;
2562
2563        dl_se->dl_runtime = attr->sched_runtime;
2564        dl_se->dl_deadline = attr->sched_deadline;
2565        dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2566        dl_se->flags = attr->sched_flags;
2567        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2568        dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2569}
2570
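    /*
     * Fill @attr with @p's current -deadline parameters (the inverse of
     * __setparam_dl()).
     */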
2571void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2572{
2573        struct sched_dl_entity *dl_se = &p->dl;
2574
2575        attr->sched_priority = p->rt_priority;
2576        attr->sched_runtime = dl_se->dl_runtime;
2577        attr->sched_deadline = dl_se->dl_deadline;
2578        attr->sched_period = dl_se->dl_period;
2579        attr->sched_flags = dl_se->flags;
2580}
2581
2582/*
2583 * This function validates the new parameters of a -deadline task.
2584 * We ask for the deadline to be non-zero and greater than or equal
2585 * to the runtime, and for the period to be either zero or greater
2586 * than or equal to the deadline. Furthermore, we have to be sure that
2587 * user parameters are above the internal resolution of 1us (we
2588 * check sched_runtime only since it is always the smaller one) and
2589 * below 2^63 ns (we have to check both sched_deadline and
2590 * sched_period, as the latter can be zero).
2591 */
2592bool __checkparam_dl(const struct sched_attr *attr)
2593{
2594        /* special dl tasks don't actually use any parameter */
2595        if (attr->sched_flags & SCHED_FLAG_SUGOV)
2596                return true;
2597
2598        /* deadline != 0 */
2599        if (attr->sched_deadline == 0)
2600                return false;
2601
2602        /*
2603         * Since we truncate DL_SCALE bits, make sure we're at least
2604         * that big.
2605         */
2606        if (attr->sched_runtime < (1ULL << DL_SCALE))
2607                return false;
2608
2609        /*
2610         * Since we use the MSB for wrap-around and sign issues, make
2611         * sure it's not set (mind that period can be equal to zero).
2612         */
2613        if (attr->sched_deadline & (1ULL << 63) ||
2614            attr->sched_period & (1ULL << 63))
2615                return false;
2616
2617        /* runtime <= deadline <= period (if period != 0) */
2618        if ((attr->sched_period != 0 &&
2619             attr->sched_period < attr->sched_deadline) ||
2620            attr->sched_deadline < attr->sched_runtime)
2621                return false;
2622
2623        return true;
2624}
2625
2626/*
2627 * This function clears the sched_dl_entity static params.
2628 */
2629void __dl_clear_params(struct task_struct *p)
2630{
2631        struct sched_dl_entity *dl_se = &p->dl;
2632
2633        dl_se->dl_runtime               = 0;
2634        dl_se->dl_deadline              = 0;
2635        dl_se->dl_period                = 0;
2636        dl_se->flags                    = 0;
2637        dl_se->dl_bw                    = 0;
2638        dl_se->dl_density               = 0;
2639
2640        dl_se->dl_throttled             = 0;
2641        dl_se->dl_yielded               = 0;
2642        dl_se->dl_non_contending        = 0;
2643        dl_se->dl_overrun               = 0;
2644}
2645
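    /*
     * Return true if @attr describes static parameters different from those
     * currently in use by @p.
     */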
2646bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2647{
2648        struct sched_dl_entity *dl_se = &p->dl;
2649
2650        if (dl_se->dl_runtime != attr->sched_runtime ||
2651            dl_se->dl_deadline != attr->sched_deadline ||
2652            dl_se->dl_period != attr->sched_period ||
2653            dl_se->flags != attr->sched_flags)
2654                return true;
2655
2656        return false;
2657}
2658
2659#ifdef CONFIG_SMP
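    /*
     * cpuset attach path: try to reserve @p's bandwidth in the destination
     * root domain; fails with -EBUSY if it does not fit.
     */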
2660int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2661{
2662        unsigned int dest_cpu;
2663        struct dl_bw *dl_b;
2664        bool overflow;
2665        int cpus, ret;
2666        unsigned long flags;
2667
2668        dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2669
2670        rcu_read_lock_sched();
2671        dl_b = dl_bw_of(dest_cpu);
2672        raw_spin_lock_irqsave(&dl_b->lock, flags);
2673        cpus = dl_bw_cpus(dest_cpu);
2674        overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2675        if (overflow) {
2676                ret = -EBUSY;
2677        } else {
2678                /*
2679                 * We reserve space for this task in the destination
2680                 * root_domain, as we can't fail after this point.
2681                 * We will free resources in the source root_domain
2682                 * later on (see set_cpus_allowed_dl()).
2683                 */
2684                __dl_add(dl_b, p->dl.dl_bw, cpus);
2685                ret = 0;
2686        }
2687        raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2688        rcu_read_unlock_sched();
2689
2690        return ret;
2691}
2692
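    /*
     * Check whether the (smaller) cpumask @trial would still provide enough
     * bandwidth for the -deadline tasks currently admitted in @cur's domain.
     */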
2693int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2694                                 const struct cpumask *trial)
2695{
2696        int ret = 1, trial_cpus;
2697        struct dl_bw *cur_dl_b;
2698        unsigned long flags;
2699
2700        rcu_read_lock_sched();
2701        cur_dl_b = dl_bw_of(cpumask_any(cur));
2702        trial_cpus = cpumask_weight(trial);
2703
2704        raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2705        if (cur_dl_b->bw != -1 &&
2706            cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2707                ret = 0;
2708        raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2709        rcu_read_unlock_sched();
2710
2711        return ret;
2712}
2713
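    /*
     * Used on CPU hotplug: return true if the root domain @cpu belongs to has
     * admitted more -deadline bandwidth than its active CPUs can serve.
     */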
2714bool dl_cpu_busy(unsigned int cpu)
2715{
2716        unsigned long flags;
2717        struct dl_bw *dl_b;
2718        bool overflow;
2719        int cpus;
2720
2721        rcu_read_lock_sched();
2722        dl_b = dl_bw_of(cpu);
2723        raw_spin_lock_irqsave(&dl_b->lock, flags);
2724        cpus = dl_bw_cpus(cpu);
2725        overflow = __dl_overflow(dl_b, cpus, 0, 0);
2726        raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2727        rcu_read_unlock_sched();
2728
2729        return overflow;
2730}
2731#endif
2732
2733#ifdef CONFIG_SCHED_DEBUG
2734void print_dl_stats(struct seq_file *m, int cpu)
2735{
2736        print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2737}
2738#endif /* CONFIG_SCHED_DEBUG */
2739