linux/kernel/sched/deadline.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Deadline Scheduling Class (SCHED_DEADLINE)
   4 *
   5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
   6 *
   7 * Tasks that periodically execute their instances for less than their
   8 * runtime won't miss any of their deadlines.
   9 * Tasks that are not periodic or sporadic or that try to execute more
  10 * than their reserved bandwidth will be slowed down (and may potentially
  11 * miss some of their deadlines), and won't affect any other task.
  12 *
  13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
  14 *                    Juri Lelli <juri.lelli@gmail.com>,
  15 *                    Michael Trimarchi <michael@amarulasolutions.com>,
  16 *                    Fabio Checconi <fchecconi@gmail.com>
  17 */
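/*
 * For illustration only (a rough userspace sketch, not part of this file):
 * a task typically enters SCHED_DEADLINE via sched_setattr(), declaring in
 * nanoseconds the runtime, (relative) deadline and period that the CBS code
 * below enforces. Assuming the raw syscall is used, since glibc provides no
 * wrapper:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	// 10 ms budget
 *		.sched_deadline	=  30 * 1000 * 1000,	// 30 ms relative deadline
 *		.sched_period	= 100 * 1000 * 1000,	// 100 ms period
 *	};
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))	// 0 == current task
 *		perror("sched_setattr");
 *
 * Admission control then checks runtime/period against the bandwidth still
 * available before the task is accepted.
 */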
  18#include "sched.h"
  19#include "pelt.h"
  20
  21struct dl_bandwidth def_dl_bandwidth;
  22
  23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  24{
  25        return container_of(dl_se, struct task_struct, dl);
  26}
  27
  28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  29{
  30        return container_of(dl_rq, struct rq, dl);
  31}
  32
  33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  34{
  35        struct task_struct *p = dl_task_of(dl_se);
  36        struct rq *rq = task_rq(p);
  37
  38        return &rq->dl;
  39}
  40
  41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  42{
  43        return !RB_EMPTY_NODE(&dl_se->rb_node);
  44}
  45
  46#ifdef CONFIG_RT_MUTEXES
  47static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
  48{
  49        return dl_se->pi_se;
  50}
  51
  52static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
  53{
  54        return pi_of(dl_se) != dl_se;
  55}
  56#else
  57static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
  58{
  59        return dl_se;
  60}
  61
  62static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
  63{
  64        return false;
  65}
  66#endif
  67
  68#ifdef CONFIG_SMP
  69static inline struct dl_bw *dl_bw_of(int i)
  70{
  71        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  72                         "sched RCU must be held");
  73        return &cpu_rq(i)->rd->dl_bw;
  74}
  75
  76static inline int dl_bw_cpus(int i)
  77{
  78        struct root_domain *rd = cpu_rq(i)->rd;
  79        int cpus;
  80
  81        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  82                         "sched RCU must be held");
  83
  84        if (cpumask_subset(rd->span, cpu_active_mask))
  85                return cpumask_weight(rd->span);
  86
  87        cpus = 0;
  88
  89        for_each_cpu_and(i, rd->span, cpu_active_mask)
  90                cpus++;
  91
  92        return cpus;
  93}
  94
  95static inline unsigned long __dl_bw_capacity(int i)
  96{
  97        struct root_domain *rd = cpu_rq(i)->rd;
  98        unsigned long cap = 0;
  99
 100        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 101                         "sched RCU must be held");
 102
 103        for_each_cpu_and(i, rd->span, cpu_active_mask)
 104                cap += capacity_orig_of(i);
 105
 106        return cap;
 107}
 108
 109/*
 110 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
  111 * of the CPU the task is running on rather than rd's \Sum CPU capacity.
 112 */
 113static inline unsigned long dl_bw_capacity(int i)
 114{
 115        if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
 116            capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
 117                return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
 118        } else {
 119                return __dl_bw_capacity(i);
 120        }
 121}
 122
 123static inline bool dl_bw_visited(int cpu, u64 gen)
 124{
 125        struct root_domain *rd = cpu_rq(cpu)->rd;
 126
 127        if (rd->visit_gen == gen)
 128                return true;
 129
 130        rd->visit_gen = gen;
 131        return false;
 132}
 133#else
 134static inline struct dl_bw *dl_bw_of(int i)
 135{
 136        return &cpu_rq(i)->dl.dl_bw;
 137}
 138
 139static inline int dl_bw_cpus(int i)
 140{
 141        return 1;
 142}
 143
 144static inline unsigned long dl_bw_capacity(int i)
 145{
 146        return SCHED_CAPACITY_SCALE;
 147}
 148
 149static inline bool dl_bw_visited(int cpu, u64 gen)
 150{
 151        return false;
 152}
 153#endif
 154
 155static inline
 156void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
 157{
 158        u64 old = dl_rq->running_bw;
 159
 160        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
 161        dl_rq->running_bw += dl_bw;
 162        SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
 163        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
 164        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
 165        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
 166}
 167
 168static inline
 169void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
 170{
 171        u64 old = dl_rq->running_bw;
 172
 173        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
 174        dl_rq->running_bw -= dl_bw;
 175        SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
 176        if (dl_rq->running_bw > old)
 177                dl_rq->running_bw = 0;
 178        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
 179        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
 180}
 181
 182static inline
 183void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 184{
 185        u64 old = dl_rq->this_bw;
 186
 187        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
 188        dl_rq->this_bw += dl_bw;
 189        SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
 190}
 191
 192static inline
 193void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 194{
 195        u64 old = dl_rq->this_bw;
 196
 197        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
 198        dl_rq->this_bw -= dl_bw;
 199        SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
 200        if (dl_rq->this_bw > old)
 201                dl_rq->this_bw = 0;
 202        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
 203}
 204
 205static inline
 206void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 207{
 208        if (!dl_entity_is_special(dl_se))
 209                __add_rq_bw(dl_se->dl_bw, dl_rq);
 210}
 211
 212static inline
 213void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 214{
 215        if (!dl_entity_is_special(dl_se))
 216                __sub_rq_bw(dl_se->dl_bw, dl_rq);
 217}
 218
 219static inline
 220void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 221{
 222        if (!dl_entity_is_special(dl_se))
 223                __add_running_bw(dl_se->dl_bw, dl_rq);
 224}
 225
 226static inline
 227void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 228{
 229        if (!dl_entity_is_special(dl_se))
 230                __sub_running_bw(dl_se->dl_bw, dl_rq);
 231}
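/*
 * For the helpers above, dl_bw, running_bw and this_bw are fixed-point
 * fractions of a CPU scaled by 2^BW_SHIFT (BW_SHIFT is 20, so BW_UNIT
 * represents 100% of one CPU). A rough worked example with made-up
 * parameters:
 *
 *	dl_runtime = 10 ms, dl_period = 100 ms
 *	dl_bw = to_ratio(dl_period, dl_runtime)
 *	      = (10 ms << BW_SHIFT) / 100 ms  ~=  0.1 * 2^20  ~=  104858
 *
 * running_bw and this_bw are sums of such per-entity dl_bw values, which
 * is why running_bw <= this_bw is asserted above.
 */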
 232
 233static void dl_change_utilization(struct task_struct *p, u64 new_bw)
 234{
 235        struct rq *rq;
 236
 237        BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
 238
 239        if (task_on_rq_queued(p))
 240                return;
 241
 242        rq = task_rq(p);
 243        if (p->dl.dl_non_contending) {
 244                sub_running_bw(&p->dl, &rq->dl);
 245                p->dl.dl_non_contending = 0;
 246                /*
 247                 * If the timer handler is currently running and the
 248                 * timer cannot be canceled, inactive_task_timer()
  249                 * will see that dl_non_contending is not set, and
 250                 * will not touch the rq's active utilization,
 251                 * so we are still safe.
 252                 */
 253                if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
 254                        put_task_struct(p);
 255        }
 256        __sub_rq_bw(p->dl.dl_bw, &rq->dl);
 257        __add_rq_bw(new_bw, &rq->dl);
 258}
 259
 260/*
 261 * The utilization of a task cannot be immediately removed from
 262 * the rq active utilization (running_bw) when the task blocks.
 263 * Instead, we have to wait for the so called "0-lag time".
 264 *
 265 * If a task blocks before the "0-lag time", a timer (the inactive
 266 * timer) is armed, and running_bw is decreased when the timer
 267 * fires.
 268 *
 269 * If the task wakes up again before the inactive timer fires,
 270 * the timer is canceled, whereas if the task wakes up after the
 271 * inactive timer fired (and running_bw has been decreased) the
 272 * task's utilization has to be added to running_bw again.
 273 * A flag in the deadline scheduling entity (dl_non_contending)
 274 * is used to avoid race conditions between the inactive timer handler
 275 * and task wakeups.
 276 *
 277 * The following diagram shows how running_bw is updated. A task is
 278 * "ACTIVE" when its utilization contributes to running_bw; an
 279 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 280 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 281 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
  282 * time has already passed and which does not contribute to running_bw anymore.
 283 *                              +------------------+
 284 *             wakeup           |    ACTIVE        |
 285 *          +------------------>+   contending     |
 286 *          | add_running_bw    |                  |
 287 *          |                   +----+------+------+
 288 *          |                        |      ^
 289 *          |                dequeue |      |
 290 * +--------+-------+                |      |
 291 * |                |   t >= 0-lag   |      | wakeup
 292 * |    INACTIVE    |<---------------+      |
 293 * |                | sub_running_bw |      |
 294 * +--------+-------+                |      |
 295 *          ^                        |      |
 296 *          |              t < 0-lag |      |
 297 *          |                        |      |
 298 *          |                        V      |
 299 *          |                   +----+------+------+
 300 *          | sub_running_bw    |    ACTIVE        |
 301 *          +-------------------+                  |
 302 *            inactive timer    |  non contending  |
 303 *            fired             +------------------+
 304 *
 305 * The task_non_contending() function is invoked when a task
  306 * blocks, and checks whether the 0-lag time has already passed or
 307 * not (in the first case, it directly updates running_bw;
 308 * in the second case, it arms the inactive timer).
 309 *
 310 * The task_contending() function is invoked when a task wakes
 311 * up, and checks if the task is still in the "ACTIVE non contending"
 312 * state or not (in the second case, it updates running_bw).
 313 */
 314static void task_non_contending(struct task_struct *p)
 315{
 316        struct sched_dl_entity *dl_se = &p->dl;
 317        struct hrtimer *timer = &dl_se->inactive_timer;
 318        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 319        struct rq *rq = rq_of_dl_rq(dl_rq);
 320        s64 zerolag_time;
 321
 322        /*
 323         * If this is a non-deadline task that has been boosted,
 324         * do nothing
 325         */
 326        if (dl_se->dl_runtime == 0)
 327                return;
 328
 329        if (dl_entity_is_special(dl_se))
 330                return;
 331
 332        WARN_ON(dl_se->dl_non_contending);
 333
 334        zerolag_time = dl_se->deadline -
 335                 div64_long((dl_se->runtime * dl_se->dl_period),
 336                        dl_se->dl_runtime);
 337
 338        /*
 339         * Using relative times instead of the absolute "0-lag time"
  340 * allows us to simplify the code
 341         */
 342        zerolag_time -= rq_clock(rq);
 343
 344        /*
 345         * If the "0-lag time" already passed, decrease the active
 346         * utilization now, instead of starting a timer
 347         */
 348        if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
 349                if (dl_task(p))
 350                        sub_running_bw(dl_se, dl_rq);
 351                if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
 352                        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
 353
 354                        if (READ_ONCE(p->__state) == TASK_DEAD)
 355                                sub_rq_bw(&p->dl, &rq->dl);
 356                        raw_spin_lock(&dl_b->lock);
 357                        __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 358                        __dl_clear_params(p);
 359                        raw_spin_unlock(&dl_b->lock);
 360                }
 361
 362                return;
 363        }
 364
 365        dl_se->dl_non_contending = 1;
 366        get_task_struct(p);
 367        hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
 368}
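/*
 * A rough numeric example of the 0-lag computation above (made-up values):
 * with dl_runtime = 10 ms and dl_period = 100 ms (bandwidth 0.1), a task
 * blocking with 2 ms of runtime left and its absolute deadline 10 ms away
 * has
 *
 *	zerolag_time = deadline - (2 ms * 100 ms) / 10 ms
 *		     = deadline - 20 ms = now - 10 ms
 *
 * i.e. the 0-lag time has already passed, so running_bw is decreased right
 * away. Had the deadline been 30 ms away instead, the 0-lag time would be
 * 10 ms in the future and the inactive timer would be armed for that
 * instant.
 */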
 369
 370static void task_contending(struct sched_dl_entity *dl_se, int flags)
 371{
 372        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 373
 374        /*
 375         * If this is a non-deadline task that has been boosted,
 376         * do nothing
 377         */
 378        if (dl_se->dl_runtime == 0)
 379                return;
 380
 381        if (flags & ENQUEUE_MIGRATED)
 382                add_rq_bw(dl_se, dl_rq);
 383
 384        if (dl_se->dl_non_contending) {
 385                dl_se->dl_non_contending = 0;
 386                /*
 387                 * If the timer handler is currently running and the
 388                 * timer cannot be canceled, inactive_task_timer()
  389                 * will see that dl_non_contending is not set, and
 390                 * will not touch the rq's active utilization,
 391                 * so we are still safe.
 392                 */
 393                if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
 394                        put_task_struct(dl_task_of(dl_se));
 395        } else {
 396                /*
 397                 * Since "dl_non_contending" is not set, the
 398                 * task's utilization has already been removed from
  399                 * active utilization (either when the task blocked or
  400                 * when the "inactive timer" fired).
 401                 * So, add it back.
 402                 */
 403                add_running_bw(dl_se, dl_rq);
 404        }
 405}
 406
 407static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 408{
 409        struct sched_dl_entity *dl_se = &p->dl;
 410
 411        return dl_rq->root.rb_leftmost == &dl_se->rb_node;
 412}
 413
 414static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
 415
 416void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
 417{
 418        raw_spin_lock_init(&dl_b->dl_runtime_lock);
 419        dl_b->dl_period = period;
 420        dl_b->dl_runtime = runtime;
 421}
 422
 423void init_dl_bw(struct dl_bw *dl_b)
 424{
 425        raw_spin_lock_init(&dl_b->lock);
 426        raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
 427        if (global_rt_runtime() == RUNTIME_INF)
 428                dl_b->bw = -1;
 429        else
 430                dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
 431        raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
 432        dl_b->total_bw = 0;
 433}
 434
 435void init_dl_rq(struct dl_rq *dl_rq)
 436{
 437        dl_rq->root = RB_ROOT_CACHED;
 438
 439#ifdef CONFIG_SMP
 440        /* zero means no -deadline tasks */
 441        dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
 442
 443        dl_rq->dl_nr_migratory = 0;
 444        dl_rq->overloaded = 0;
 445        dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
 446#else
 447        init_dl_bw(&dl_rq->dl_bw);
 448#endif
 449
 450        dl_rq->running_bw = 0;
 451        dl_rq->this_bw = 0;
 452        init_dl_rq_bw_ratio(dl_rq);
 453}
 454
 455#ifdef CONFIG_SMP
 456
 457static inline int dl_overloaded(struct rq *rq)
 458{
 459        return atomic_read(&rq->rd->dlo_count);
 460}
 461
 462static inline void dl_set_overload(struct rq *rq)
 463{
 464        if (!rq->online)
 465                return;
 466
 467        cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
 468        /*
 469         * Must be visible before the overload count is
 470         * set (as in sched_rt.c).
 471         *
 472         * Matched by the barrier in pull_dl_task().
 473         */
 474        smp_wmb();
 475        atomic_inc(&rq->rd->dlo_count);
 476}
 477
 478static inline void dl_clear_overload(struct rq *rq)
 479{
 480        if (!rq->online)
 481                return;
 482
 483        atomic_dec(&rq->rd->dlo_count);
 484        cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 485}
 486
 487static void update_dl_migration(struct dl_rq *dl_rq)
 488{
 489        if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 490                if (!dl_rq->overloaded) {
 491                        dl_set_overload(rq_of_dl_rq(dl_rq));
 492                        dl_rq->overloaded = 1;
 493                }
 494        } else if (dl_rq->overloaded) {
 495                dl_clear_overload(rq_of_dl_rq(dl_rq));
 496                dl_rq->overloaded = 0;
 497        }
 498}
 499
 500static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 501{
 502        struct task_struct *p = dl_task_of(dl_se);
 503
 504        if (p->nr_cpus_allowed > 1)
 505                dl_rq->dl_nr_migratory++;
 506
 507        update_dl_migration(dl_rq);
 508}
 509
 510static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 511{
 512        struct task_struct *p = dl_task_of(dl_se);
 513
 514        if (p->nr_cpus_allowed > 1)
 515                dl_rq->dl_nr_migratory--;
 516
 517        update_dl_migration(dl_rq);
 518}
 519
 520#define __node_2_pdl(node) \
 521        rb_entry((node), struct task_struct, pushable_dl_tasks)
 522
 523static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
 524{
 525        return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
 526}
 527
 528/*
  529 * The list of pushable -deadline tasks is not a plist like in
  530 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 531 */
 532static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 533{
 534        struct rb_node *leftmost;
 535
 536        BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 537
 538        leftmost = rb_add_cached(&p->pushable_dl_tasks,
 539                                 &rq->dl.pushable_dl_tasks_root,
 540                                 __pushable_less);
 541        if (leftmost)
 542                rq->dl.earliest_dl.next = p->dl.deadline;
 543}
 544
 545static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 546{
 547        struct dl_rq *dl_rq = &rq->dl;
 548        struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
 549        struct rb_node *leftmost;
 550
 551        if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 552                return;
 553
 554        leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
 555        if (leftmost)
 556                dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
 557
 558        RB_CLEAR_NODE(&p->pushable_dl_tasks);
 559}
 560
 561static inline int has_pushable_dl_tasks(struct rq *rq)
 562{
 563        return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
 564}
 565
 566static int push_dl_task(struct rq *rq);
 567
 568static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 569{
 570        return rq->online && dl_task(prev);
 571}
 572
 573static DEFINE_PER_CPU(struct callback_head, dl_push_head);
 574static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
 575
 576static void push_dl_tasks(struct rq *);
 577static void pull_dl_task(struct rq *);
 578
 579static inline void deadline_queue_push_tasks(struct rq *rq)
 580{
 581        if (!has_pushable_dl_tasks(rq))
 582                return;
 583
 584        queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
 585}
 586
 587static inline void deadline_queue_pull_task(struct rq *rq)
 588{
 589        queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
 590}
 591
 592static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 593
 594static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 595{
 596        struct rq *later_rq = NULL;
 597        struct dl_bw *dl_b;
 598
 599        later_rq = find_lock_later_rq(p, rq);
 600        if (!later_rq) {
 601                int cpu;
 602
 603                /*
  604                 * If we cannot preempt any rq, fall back to picking any
 605                 * online CPU:
 606                 */
 607                cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
 608                if (cpu >= nr_cpu_ids) {
 609                        /*
 610                         * Failed to find any suitable CPU.
 611                         * The task will never come back!
 612                         */
 613                        BUG_ON(dl_bandwidth_enabled());
 614
 615                        /*
 616                         * If admission control is disabled we
 617                         * try a little harder to let the task
 618                         * run.
 619                         */
 620                        cpu = cpumask_any(cpu_active_mask);
 621                }
 622                later_rq = cpu_rq(cpu);
 623                double_lock_balance(rq, later_rq);
 624        }
 625
 626        if (p->dl.dl_non_contending || p->dl.dl_throttled) {
 627                /*
 628                 * Inactive timer is armed (or callback is running, but
 629                 * waiting for us to release rq locks). In any case, when it
  630                 * waiting for us to release rq locks). In any case, when it
  631                 * fires (or continues), it will see the running_bw of this
 632                 */
 633                sub_running_bw(&p->dl, &rq->dl);
 634                sub_rq_bw(&p->dl, &rq->dl);
 635
 636                add_rq_bw(&p->dl, &later_rq->dl);
 637                add_running_bw(&p->dl, &later_rq->dl);
 638        } else {
 639                sub_rq_bw(&p->dl, &rq->dl);
 640                add_rq_bw(&p->dl, &later_rq->dl);
 641        }
 642
 643        /*
 644         * And we finally need to fixup root_domain(s) bandwidth accounting,
 645         * since p is still hanging out in the old (now moved to default) root
 646         * domain.
 647         */
 648        dl_b = &rq->rd->dl_bw;
 649        raw_spin_lock(&dl_b->lock);
 650        __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
 651        raw_spin_unlock(&dl_b->lock);
 652
 653        dl_b = &later_rq->rd->dl_bw;
 654        raw_spin_lock(&dl_b->lock);
 655        __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
 656        raw_spin_unlock(&dl_b->lock);
 657
 658        set_task_cpu(p, later_rq->cpu);
 659        double_unlock_balance(later_rq, rq);
 660
 661        return later_rq;
 662}
 663
 664#else
 665
 666static inline
 667void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 668{
 669}
 670
 671static inline
 672void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 673{
 674}
 675
 676static inline
 677void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 678{
 679}
 680
 681static inline
 682void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 683{
 684}
 685
 686static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 687{
 688        return false;
 689}
 690
 691static inline void pull_dl_task(struct rq *rq)
 692{
 693}
 694
 695static inline void deadline_queue_push_tasks(struct rq *rq)
 696{
 697}
 698
 699static inline void deadline_queue_pull_task(struct rq *rq)
 700{
 701}
 702#endif /* CONFIG_SMP */
 703
 704static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 705static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 706static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
 707
 708/*
 709 * We are being explicitly informed that a new instance is starting,
 710 * and this means that:
 711 *  - the absolute deadline of the entity has to be placed at
 712 *    current time + relative deadline;
 713 *  - the runtime of the entity has to be set to the maximum value.
 714 *
  715 * The capability of specifying such an event is useful whenever a -deadline
  716 * entity wants to (try to!) synchronize its behaviour with the scheduler's,
  717 * and to (try to!) reconcile itself with its own scheduling
 718 * parameters.
 719 */
 720static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
 721{
 722        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 723        struct rq *rq = rq_of_dl_rq(dl_rq);
 724
 725        WARN_ON(is_dl_boosted(dl_se));
 726        WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
 727
 728        /*
 729         * We are racing with the deadline timer. So, do nothing because
 730         * the deadline timer handler will take care of properly recharging
 731         * the runtime and postponing the deadline
 732         */
 733        if (dl_se->dl_throttled)
 734                return;
 735
 736        /*
 737         * We use the regular wall clock time to set deadlines in the
 738         * future; in fact, we must consider execution overheads (time
 739         * spent on hardirq context, etc.).
 740         */
 741        dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
 742        dl_se->runtime = dl_se->dl_runtime;
 743}
 744
 745/*
 746 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
  747 * possibility of an entity lasting more than what it declared, and thus
 748 * exhausting its runtime.
 749 *
 750 * Here we are interested in making runtime overrun possible, but we do
  751 * not want an entity that is misbehaving to affect the scheduling of all
 752 * other entities.
 753 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 754 * is used, in order to confine each entity within its own bandwidth.
 755 *
 756 * This function deals exactly with that, and ensures that when the runtime
  757 * of an entity is replenished, its deadline is also postponed. That ensures
  758 * the overrunning entity can't interfere with other entities in the system and
  759 * can't make them miss their deadlines. Reasons why this kind of overrun
  760 * could happen are, typically, an entity voluntarily trying to exceed its
  761 * runtime, or simply having underestimated it during sched_setattr().
 762 */
 763static void replenish_dl_entity(struct sched_dl_entity *dl_se)
 764{
 765        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 766        struct rq *rq = rq_of_dl_rq(dl_rq);
 767
 768        BUG_ON(pi_of(dl_se)->dl_runtime <= 0);
 769
 770        /*
 771         * This could be the case for a !-dl task that is boosted.
 772         * Just go with full inherited parameters.
 773         */
 774        if (dl_se->dl_deadline == 0) {
 775                dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
 776                dl_se->runtime = pi_of(dl_se)->dl_runtime;
 777        }
 778
 779        if (dl_se->dl_yielded && dl_se->runtime > 0)
 780                dl_se->runtime = 0;
 781
 782        /*
 783         * We keep moving the deadline away until we get some
 784         * available runtime for the entity. This ensures correct
 785         * handling of situations where the runtime overrun is
  786 * arbitrarily large.
 787         */
 788        while (dl_se->runtime <= 0) {
 789                dl_se->deadline += pi_of(dl_se)->dl_period;
 790                dl_se->runtime += pi_of(dl_se)->dl_runtime;
 791        }
 792
 793        /*
 794         * At this point, the deadline really should be "in
 795         * the future" with respect to rq->clock. If it's
 796         * not, we are, for some reason, lagging too much!
  797 * Anyway, after having warned userspace about that,
  798 * we still try to keep things running by
 799         * resetting the deadline and the budget of the
 800         * entity.
 801         */
 802        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 803                printk_deferred_once("sched: DL replenish lagged too much\n");
 804                dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
 805                dl_se->runtime = pi_of(dl_se)->dl_runtime;
 806        }
 807
 808        if (dl_se->dl_yielded)
 809                dl_se->dl_yielded = 0;
 810        if (dl_se->dl_throttled)
 811                dl_se->dl_throttled = 0;
 812}
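/*
 * A rough numeric example of the replenishment loop above (made-up values):
 * with dl_runtime = 10 ms and dl_period = 100 ms, an entity throttled with
 * runtime == -25 ms takes three iterations:
 *
 *	runtime:  -25 -> -15 -> -5 -> +5 ms
 *	deadline: pushed 3 * 100 ms = 300 ms into the future
 *
 * so an arbitrarily large overrun simply translates into a deadline far in
 * the future, and the entity keeps its long-term bandwidth of
 * runtime/period = 10%.
 */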
 813
 814/*
 815 * Here we check if --at time t-- an entity (which is probably being
 816 * [re]activated or, in general, enqueued) can use its remaining runtime
 817 * and its current deadline _without_ exceeding the bandwidth it is
 818 * assigned (function returns true if it can't). We are in fact applying
 819 * one of the CBS rules: when a task wakes up, if the residual runtime
 820 * over residual deadline fits within the allocated bandwidth, then we
 821 * can keep the current (absolute) deadline and residual budget without
 822 * disrupting the schedulability of the system. Otherwise, we should
 823 * refill the runtime and set the deadline a period in the future,
 824 * because keeping the current (absolute) deadline of the task would
 825 * result in breaking guarantees promised to other tasks (refer to
 826 * Documentation/scheduler/sched-deadline.rst for more information).
 827 *
 828 * This function returns true if:
 829 *
 830 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 831 *
 832 * IOW we can't recycle current parameters.
 833 *
 834 * Notice that the bandwidth check is done against the deadline. For
  835 * tasks with deadline equal to period this is the same as using
 836 * dl_period instead of dl_deadline in the equation above.
 837 */
 838static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
 839{
 840        u64 left, right;
 841
 842        /*
 843         * left and right are the two sides of the equation above,
 844         * after a bit of shuffling to use multiplications instead
 845         * of divisions.
 846         *
 847         * Note that none of the time values involved in the two
 848         * multiplications are absolute: dl_deadline and dl_runtime
 849         * are the relative deadline and the maximum runtime of each
 850         * instance, runtime is the runtime left for the last instance
 851         * and (deadline - t), since t is rq->clock, is the time left
 852         * to the (absolute) deadline. Even if overflowing the u64 type
 853         * is very unlikely to occur in both cases, here we scale down
  854 * as we want to avoid that risk at all. Scaling down by 10 bits
  855 * (DL_SCALE) means that we reduce granularity to 1us. We are fine
  856 * with it, since this is only a true/false check and, anyway,
  857 * thinking of anything below microsecond resolution is actually fiction
 858         * (but still we want to give the user that illusion >;).
 859         */
 860        left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 861        right = ((dl_se->deadline - t) >> DL_SCALE) *
 862                (pi_of(dl_se)->dl_runtime >> DL_SCALE);
 863
 864        return dl_time_before(right, left);
 865}
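/*
 * A worked example of the check above, with made-up numbers and ignoring
 * the DL_SCALE shifts: dl_runtime = 10 ms, dl_deadline = 100 ms (bandwidth
 * 0.1). A task waking up with runtime = 5 ms left and 20 ms until its old
 * absolute deadline gives
 *
 *	left  = 100 ms *  5 ms = 500
 *	right =  20 ms * 10 ms = 200
 *
 * right < left, i.e. 5/20 = 0.25 > 0.1, so the old parameters cannot be
 * reused and the deadline and runtime are refreshed. With 60 ms left until
 * the deadline instead, 5/60 ~= 0.083 <= 0.1 and the current deadline and
 * runtime are kept.
 */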
 866
 867/*
  868 * Revised wakeup rule [1]: For self-suspending tasks, rather than
  869 * re-initializing the task's runtime and deadline, the revised wakeup
  870 * rule adjusts the task's runtime to avoid the task overrunning its
  871 * density.
 872 *
 873 * Reasoning: a task may overrun the density if:
 874 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 875 *
 876 * Therefore, runtime can be adjusted to:
 877 *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
 878 *
  879 * This way, runtime will be equal to the maximum density
 880 * the task can use without breaking any rule.
 881 *
 882 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 883 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 884 */
 885static void
 886update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
 887{
 888        u64 laxity = dl_se->deadline - rq_clock(rq);
 889
 890        /*
 891         * If the task has deadline < period, and the deadline is in the past,
 892         * it should already be throttled before this check.
 893         *
 894         * See update_dl_entity() comments for further details.
 895         */
 896        WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
 897
 898        dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
 899}
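/*
 * For example, with made-up numbers: a constrained task with
 * dl_runtime = 10 ms and dl_deadline = 50 ms has dl_density = 0.2. If it
 * wakes up 30 ms before its current absolute deadline (laxity = 30 ms),
 * the revised rule trims its runtime to 0.2 * 30 ms = 6 ms, so that
 * runtime / laxity never exceeds the declared density.
 */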
 900
 901/*
 902 * Regarding the deadline, a task with implicit deadline has a relative
 903 * deadline == relative period. A task with constrained deadline has a
 904 * relative deadline <= relative period.
 905 *
 906 * We support constrained deadline tasks. However, there are some restrictions
  907 * that apply only to tasks which do not have an implicit deadline. See
 908 * update_dl_entity() to know more about such restrictions.
 909 *
  910 * dl_is_implicit() returns true if the task has an implicit deadline.
 911 */
 912static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
 913{
 914        return dl_se->dl_deadline == dl_se->dl_period;
 915}
 916
 917/*
 918 * When a deadline entity is placed in the runqueue, its runtime and deadline
 919 * might need to be updated. This is done by a CBS wake up rule. There are two
 920 * different rules: 1) the original CBS; and 2) the Revisited CBS.
 921 *
 922 * When the task is starting a new period, the Original CBS is used. In this
 923 * case, the runtime is replenished and a new absolute deadline is set.
 924 *
  925 * When a task is queued before the beginning of the next period, using the
  926 * remaining runtime and deadline could make the entity overflow; see
  927 * dl_entity_overflow() to find out more about runtime overflow. When such a
  928 * case is detected, the runtime and deadline need to be updated.
 929 *
 930 * If the task has an implicit deadline, i.e., deadline == period, the Original
  931 * CBS is applied: the runtime is replenished and a new absolute deadline is
 932 * set, as in the previous cases.
 933 *
 934 * However, the Original CBS does not work properly for tasks with
 935 * deadline < period, which are said to have a constrained deadline. By
 936 * applying the Original CBS, a constrained deadline task would be able to run
 937 * runtime/deadline in a period. With deadline < period, the task would
 938 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 939 *
  940 * In order to prevent this misbehavior, the Revisited CBS is used for
 941 * constrained deadline tasks when a runtime overflow is detected. In the
 942 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 943 * the remaining runtime of the task is reduced to avoid runtime overflow.
  944 * Please refer to the comments in the update_dl_revised_wakeup() function to
  945 * find out more about the Revised CBS rule.
 946 */
 947static void update_dl_entity(struct sched_dl_entity *dl_se)
 948{
 949        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 950        struct rq *rq = rq_of_dl_rq(dl_rq);
 951
 952        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 953            dl_entity_overflow(dl_se, rq_clock(rq))) {
 954
 955                if (unlikely(!dl_is_implicit(dl_se) &&
 956                             !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
 957                             !is_dl_boosted(dl_se))) {
 958                        update_dl_revised_wakeup(dl_se, rq);
 959                        return;
 960                }
 961
 962                dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
 963                dl_se->runtime = pi_of(dl_se)->dl_runtime;
 964        }
 965}
 966
 967static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
 968{
 969        return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
 970}
 971
 972/*
 973 * If the entity depleted all its runtime, and if we want it to sleep
 974 * while waiting for some new execution time to become available, we
 975 * set the bandwidth replenishment timer to the replenishment instant
 976 * and try to activate it.
 977 *
 978 * Notice that it is important for the caller to know if the timer
 979 * actually started or not (i.e., the replenishment instant is in
 980 * the future or in the past).
 981 */
 982static int start_dl_timer(struct task_struct *p)
 983{
 984        struct sched_dl_entity *dl_se = &p->dl;
 985        struct hrtimer *timer = &dl_se->dl_timer;
 986        struct rq *rq = task_rq(p);
 987        ktime_t now, act;
 988        s64 delta;
 989
 990        lockdep_assert_rq_held(rq);
 991
 992        /*
  993         * We want the timer to fire at the deadline, but considering
  994         * that the deadline is actually computed from rq->clock and not
  995         * from the hrtimer's time base reading.
 996         */
 997        act = ns_to_ktime(dl_next_period(dl_se));
 998        now = hrtimer_cb_get_time(timer);
 999        delta = ktime_to_ns(now) - rq_clock(rq);
1000        act = ktime_add_ns(act, delta);
1001
1002        /*
1003         * If the expiry time already passed, e.g., because the value
1004         * chosen as the deadline is too small, don't even try to
1005         * start the timer in the past!
1006         */
1007        if (ktime_us_delta(act, now) < 0)
1008                return 0;
1009
1010        /*
1011         * !enqueued will guarantee another callback; even if one is already in
1012         * progress. This ensures a balanced {get,put}_task_struct().
1013         *
1014         * The race against __run_timer() clearing the enqueued state is
1015         * harmless because we're holding task_rq()->lock, therefore the timer
1016         * expiring after we've done the check will wait on its task_rq_lock()
1017         * and observe our state.
1018         */
1019        if (!hrtimer_is_queued(timer)) {
1020                get_task_struct(p);
1021                hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1022        }
1023
1024        return 1;
1025}
1026
1027/*
1028 * This is the bandwidth enforcement timer callback. If here, we know
1029 * a task is not on its dl_rq, since the fact that the timer was running
1030 * means the task is throttled and needs a runtime replenishment.
1031 *
 1032 * However, what we actually do depends on whether the task is active
 1033 * (it is on its rq) or has been removed from there by a call to
 1034 * dequeue_task_dl(). In the former case we must issue the runtime
 1035 * replenishment and add the task back to the dl_rq; in the latter, we just
 1036 * do nothing but clear dl_throttled, so that runtime and deadline
1037 * updating (and the queueing back to dl_rq) will be done by the
1038 * next call to enqueue_task_dl().
1039 */
1040static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1041{
1042        struct sched_dl_entity *dl_se = container_of(timer,
1043                                                     struct sched_dl_entity,
1044                                                     dl_timer);
1045        struct task_struct *p = dl_task_of(dl_se);
1046        struct rq_flags rf;
1047        struct rq *rq;
1048
1049        rq = task_rq_lock(p, &rf);
1050
1051        /*
1052         * The task might have changed its scheduling policy to something
1053         * different than SCHED_DEADLINE (through switched_from_dl()).
1054         */
1055        if (!dl_task(p))
1056                goto unlock;
1057
1058        /*
1059         * The task might have been boosted by someone else and might be in the
 1060         * boosting/deboosting path; it's not throttled.
1061         */
1062        if (is_dl_boosted(dl_se))
1063                goto unlock;
1064
1065        /*
1066         * Spurious timer due to start_dl_timer() race; or we already received
1067         * a replenishment from rt_mutex_setprio().
1068         */
1069        if (!dl_se->dl_throttled)
1070                goto unlock;
1071
1072        sched_clock_tick();
1073        update_rq_clock(rq);
1074
1075        /*
1076         * If the throttle happened during sched-out; like:
1077         *
1078         *   schedule()
1079         *     deactivate_task()
1080         *       dequeue_task_dl()
1081         *         update_curr_dl()
1082         *           start_dl_timer()
1083         *         __dequeue_task_dl()
1084         *     prev->on_rq = 0;
1085         *
1086         * We can be both throttled and !queued. Replenish the counter
1087         * but do not enqueue -- wait for our wakeup to do that.
1088         */
1089        if (!task_on_rq_queued(p)) {
1090                replenish_dl_entity(dl_se);
1091                goto unlock;
1092        }
1093
1094#ifdef CONFIG_SMP
1095        if (unlikely(!rq->online)) {
1096                /*
1097                 * If the runqueue is no longer available, migrate the
1098                 * task elsewhere. This necessarily changes rq.
1099                 */
1100                lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1101                rq = dl_task_offline_migration(rq, p);
1102                rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1103                update_rq_clock(rq);
1104
1105                /*
1106                 * Now that the task has been migrated to the new RQ and we
1107                 * have that locked, proceed as normal and enqueue the task
1108                 * there.
1109                 */
1110        }
1111#endif
1112
1113        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1114        if (dl_task(rq->curr))
1115                check_preempt_curr_dl(rq, p, 0);
1116        else
1117                resched_curr(rq);
1118
1119#ifdef CONFIG_SMP
1120        /*
1121         * Queueing this task back might have overloaded rq, check if we need
1122         * to kick someone away.
1123         */
1124        if (has_pushable_dl_tasks(rq)) {
1125                /*
 1126                 * Nothing relies on rq->lock after this, so it's safe to drop
1127                 * rq->lock.
1128                 */
1129                rq_unpin_lock(rq, &rf);
1130                push_dl_task(rq);
1131                rq_repin_lock(rq, &rf);
1132        }
1133#endif
1134
1135unlock:
1136        task_rq_unlock(rq, p, &rf);
1137
1138        /*
1139         * This can free the task_struct, including this hrtimer, do not touch
1140         * anything related to that after this.
1141         */
1142        put_task_struct(p);
1143
1144        return HRTIMER_NORESTART;
1145}
1146
1147void init_dl_task_timer(struct sched_dl_entity *dl_se)
1148{
1149        struct hrtimer *timer = &dl_se->dl_timer;
1150
1151        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1152        timer->function = dl_task_timer;
1153}
1154
1155/*
1156 * During the activation, CBS checks if it can reuse the current task's
1157 * runtime and period. If the deadline of the task is in the past, CBS
1158 * cannot use the runtime, and so it replenishes the task. This rule
1159 * works fine for implicit deadline tasks (deadline == period), and the
1160 * CBS was designed for implicit deadline tasks. However, a task with
1161 * constrained deadline (deadline < period) might be awakened after the
1162 * deadline, but before the next period. In this case, replenishing the
1163 * task would allow it to run for runtime / deadline. As in this case
1164 * deadline < period, CBS enables a task to run for more than the
1165 * runtime / period. In a very loaded system, this can cause a domino
1166 * effect, making other tasks miss their deadlines.
1167 *
1168 * To avoid this problem, in the activation of a constrained deadline
1169 * task after the deadline but before the next period, throttle the
 1170 * task and set the replenishing timer to the beginning of the next period,
1171 * unless it is boosted.
1172 */
1173static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1174{
1175        struct task_struct *p = dl_task_of(dl_se);
1176        struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1177
1178        if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1179            dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1180                if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
1181                        return;
1182                dl_se->dl_throttled = 1;
1183                if (dl_se->runtime > 0)
1184                        dl_se->runtime = 0;
1185        }
1186}
1187
1188static
1189int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1190{
1191        return (dl_se->runtime <= 0);
1192}
1193
1194extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1195
1196/*
1197 * This function implements the GRUB accounting rule:
1198 * according to the GRUB reclaiming algorithm, the runtime is
1199 * not decreased as "dq = -dt", but as
1200 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1201 * where u is the utilization of the task, Umax is the maximum reclaimable
1202 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1203 * as the difference between the "total runqueue utilization" and the
1204 * runqueue active utilization, and Uextra is the (per runqueue) extra
1205 * reclaimable utilization.
1206 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1207 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1208 * BW_SHIFT.
1209 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 1210 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1211 * Since delta is a 64 bit variable, to have an overflow its value
1212 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1213 * So, overflow is not an issue here.
1214 */
1215static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1216{
1217        u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1218        u64 u_act;
1219        u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1220
1221        /*
1222         * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1223         * we compare u_inact + rq->dl.extra_bw with
1224         * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1225         * u_inact + rq->dl.extra_bw can be larger than
 1226         * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative,
1227         * leading to wrong results)
1228         */
1229        if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1230                u_act = u_act_min;
1231        else
1232                u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1233
1234        return (delta * u_act) >> BW_SHIFT;
1235}
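/*
 * A rough GRUB example with made-up values, assuming Umax == 1 so that the
 * bw_ratio scaling is the identity: with this_bw = 0.5, running_bw = 0.3
 * and extra_bw = 0.3 we get u_inact = 0.2 and
 *
 *	1 - u_inact - extra_bw = 0.5
 *
 * For a task with u = 0.25, u_act_min = 0.25; since
 * u_inact + extra_bw = 0.5 <= 1 - u_act_min = 0.75, u_act = 0.5 and the
 * task is charged only half of the elapsed wall-clock time, reclaiming the
 * bandwidth left unused by the other reservations.
 */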
1236
1237/*
1238 * Update the current task's runtime statistics (provided it is still
1239 * a -deadline task and has not been removed from the dl_rq).
1240 */
1241static void update_curr_dl(struct rq *rq)
1242{
1243        struct task_struct *curr = rq->curr;
1244        struct sched_dl_entity *dl_se = &curr->dl;
1245        u64 delta_exec, scaled_delta_exec;
1246        int cpu = cpu_of(rq);
1247        u64 now;
1248
1249        if (!dl_task(curr) || !on_dl_rq(dl_se))
1250                return;
1251
1252        /*
1253         * Consumed budget is computed considering the time as
1254         * observed by schedulable tasks (excluding time spent
1255         * in hardirq context, etc.). Deadlines are instead
1256         * computed using hard walltime. This seems to be the more
1257         * natural solution, but the full ramifications of this
1258         * approach need further study.
1259         */
1260        now = rq_clock_task(rq);
1261        delta_exec = now - curr->se.exec_start;
1262        if (unlikely((s64)delta_exec <= 0)) {
1263                if (unlikely(dl_se->dl_yielded))
1264                        goto throttle;
1265                return;
1266        }
1267
1268        schedstat_set(curr->stats.exec_max,
1269                      max(curr->stats.exec_max, delta_exec));
1270
1271        trace_sched_stat_runtime(curr, delta_exec, 0);
1272
1273        curr->se.sum_exec_runtime += delta_exec;
1274        account_group_exec_runtime(curr, delta_exec);
1275
1276        curr->se.exec_start = now;
1277        cgroup_account_cputime(curr, delta_exec);
1278
1279        if (dl_entity_is_special(dl_se))
1280                return;
1281
1282        /*
1283         * For tasks that participate in GRUB, we implement GRUB-PA: the
1284         * spare reclaimed bandwidth is used to clock down frequency.
1285         *
1286         * For the others, we still need to scale reservation parameters
1287         * according to current frequency and CPU maximum capacity.
1288         */
1289        if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1290                scaled_delta_exec = grub_reclaim(delta_exec,
1291                                                 rq,
1292                                                 &curr->dl);
1293        } else {
1294                unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1295                unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1296
1297                scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1298                scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1299        }
1300
1301        dl_se->runtime -= scaled_delta_exec;
1302
1303throttle:
1304        if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1305                dl_se->dl_throttled = 1;
1306
1307                /* If requested, inform the user about runtime overruns. */
1308                if (dl_runtime_exceeded(dl_se) &&
1309                    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1310                        dl_se->dl_overrun = 1;
1311
1312                __dequeue_task_dl(rq, curr, 0);
1313                if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
1314                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1315
1316                if (!is_leftmost(curr, &rq->dl))
1317                        resched_curr(rq);
1318        }
1319
1320        /*
1321         * Because -- for now -- we share the rt bandwidth, we need to
1322         * account our runtime there too, otherwise actual rt tasks
1323         * would be able to exceed the shared quota.
1324         *
1325         * Account to the root rt group for now.
1326         *
1327         * The solution we're working towards is having the RT groups scheduled
1328         * using deadline servers -- however there's a few nasties to figure
1329         * out before that can happen.
1330         */
1331        if (rt_bandwidth_enabled()) {
1332                struct rt_rq *rt_rq = &rq->rt;
1333
1334                raw_spin_lock(&rt_rq->rt_runtime_lock);
1335                /*
 1336                 * We'll let actual RT tasks worry about the overflow here; we
 1337                 * have our own CBS to keep us in line. Only account when RT
 1338                 * bandwidth is relevant.
1339                 */
1340                if (sched_rt_bandwidth_account(rt_rq))
1341                        rt_rq->rt_time += delta_exec;
1342                raw_spin_unlock(&rt_rq->rt_runtime_lock);
1343        }
1344}
1345
1346static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1347{
1348        struct sched_dl_entity *dl_se = container_of(timer,
1349                                                     struct sched_dl_entity,
1350                                                     inactive_timer);
1351        struct task_struct *p = dl_task_of(dl_se);
1352        struct rq_flags rf;
1353        struct rq *rq;
1354
1355        rq = task_rq_lock(p, &rf);
1356
1357        sched_clock_tick();
1358        update_rq_clock(rq);
1359
1360        if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1361                struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1362
1363                if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1364                        sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1365                        sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1366                        dl_se->dl_non_contending = 0;
1367                }
1368
1369                raw_spin_lock(&dl_b->lock);
1370                __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1371                raw_spin_unlock(&dl_b->lock);
1372                __dl_clear_params(p);
1373
1374                goto unlock;
1375        }
1376        if (dl_se->dl_non_contending == 0)
1377                goto unlock;
1378
1379        sub_running_bw(dl_se, &rq->dl);
1380        dl_se->dl_non_contending = 0;
1381unlock:
1382        task_rq_unlock(rq, p, &rf);
1383        put_task_struct(p);
1384
1385        return HRTIMER_NORESTART;
1386}
1387
1388void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1389{
1390        struct hrtimer *timer = &dl_se->inactive_timer;
1391
1392        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1393        timer->function = inactive_task_timer;
1394}
1395
1396#ifdef CONFIG_SMP
1397
1398static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1399{
1400        struct rq *rq = rq_of_dl_rq(dl_rq);
1401
1402        if (dl_rq->earliest_dl.curr == 0 ||
1403            dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1404                if (dl_rq->earliest_dl.curr == 0)
1405                        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1406                dl_rq->earliest_dl.curr = deadline;
1407                cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1408        }
1409}
1410
1411static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1412{
1413        struct rq *rq = rq_of_dl_rq(dl_rq);
1414
1415        /*
1416         * Since we may have removed our earliest (and/or next earliest)
1417         * task we must recompute them.
1418         */
1419        if (!dl_rq->dl_nr_running) {
1420                dl_rq->earliest_dl.curr = 0;
1421                dl_rq->earliest_dl.next = 0;
1422                cpudl_clear(&rq->rd->cpudl, rq->cpu);
1423                cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1424        } else {
1425                struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1426                struct sched_dl_entity *entry;
1427
1428                entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1429                dl_rq->earliest_dl.curr = entry->deadline;
1430                cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1431        }
1432}
1433
1434#else
1435
1436static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1437static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1438
1439#endif /* CONFIG_SMP */
1440
1441static inline
1442void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1443{
1444        int prio = dl_task_of(dl_se)->prio;
1445        u64 deadline = dl_se->deadline;
1446
1447        WARN_ON(!dl_prio(prio));
1448        dl_rq->dl_nr_running++;
1449        add_nr_running(rq_of_dl_rq(dl_rq), 1);
1450
1451        inc_dl_deadline(dl_rq, deadline);
1452        inc_dl_migration(dl_se, dl_rq);
1453}
1454
1455static inline
1456void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1457{
1458        int prio = dl_task_of(dl_se)->prio;
1459
1460        WARN_ON(!dl_prio(prio));
1461        WARN_ON(!dl_rq->dl_nr_running);
1462        dl_rq->dl_nr_running--;
1463        sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1464
1465        dec_dl_deadline(dl_rq, dl_se->deadline);
1466        dec_dl_migration(dl_se, dl_rq);
1467}
1468
1469#define __node_2_dle(node) \
1470        rb_entry((node), struct sched_dl_entity, rb_node)
1471
1472static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1473{
1474        return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1475}
1476
1477static inline struct sched_statistics *
1478__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
1479{
1480        return &dl_task_of(dl_se)->stats;
1481}
1482
1483static inline void
1484update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1485{
1486        struct sched_statistics *stats;
1487
1488        if (!schedstat_enabled())
1489                return;
1490
1491        stats = __schedstats_from_dl_se(dl_se);
1492        __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1493}
1494
1495static inline void
1496update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1497{
1498        struct sched_statistics *stats;
1499
1500        if (!schedstat_enabled())
1501                return;
1502
1503        stats = __schedstats_from_dl_se(dl_se);
1504        __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1505}
1506
1507static inline void
1508update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1509{
1510        struct sched_statistics *stats;
1511
1512        if (!schedstat_enabled())
1513                return;
1514
1515        stats = __schedstats_from_dl_se(dl_se);
1516        __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1517}
1518
1519static inline void
1520update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1521                        int flags)
1522{
1523        if (!schedstat_enabled())
1524                return;
1525
1526        if (flags & ENQUEUE_WAKEUP)
1527                update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
1528}
1529
1530static inline void
1531update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1532                        int flags)
1533{
1534        struct task_struct *p = dl_task_of(dl_se);
1535
1536        if (!schedstat_enabled())
1537                return;
1538
1539        if ((flags & DEQUEUE_SLEEP)) {
1540                unsigned int state;
1541
1542                state = READ_ONCE(p->__state);
1543                if (state & TASK_INTERRUPTIBLE)
1544                        __schedstat_set(p->stats.sleep_start,
1545                                        rq_clock(rq_of_dl_rq(dl_rq)));
1546
1547                if (state & TASK_UNINTERRUPTIBLE)
1548                        __schedstat_set(p->stats.block_start,
1549                                        rq_clock(rq_of_dl_rq(dl_rq)));
1550        }
1551}
1552
1553static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1554{
1555        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1556
1557        BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1558
1559        rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
1560
1561        inc_dl_tasks(dl_se, dl_rq);
1562}
1563
1564static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1565{
1566        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1567
1568        if (RB_EMPTY_NODE(&dl_se->rb_node))
1569                return;
1570
1571        rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1572
1573        RB_CLEAR_NODE(&dl_se->rb_node);
1574
1575        dec_dl_tasks(dl_se, dl_rq);
1576}
1577
1578static void
1579enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1580{
1581        BUG_ON(on_dl_rq(dl_se));
1582
1583        update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
1584
1585        /*
1586         * If this is a wakeup or a new instance, the scheduling
1587         * parameters of the task might need updating. Otherwise,
1588         * we want a replenishment of its runtime.
1589         */
1590        if (flags & ENQUEUE_WAKEUP) {
1591                task_contending(dl_se, flags);
1592                update_dl_entity(dl_se);
1593        } else if (flags & ENQUEUE_REPLENISH) {
1594                replenish_dl_entity(dl_se);
1595        } else if ((flags & ENQUEUE_RESTORE) &&
1596                  dl_time_before(dl_se->deadline,
1597                                 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1598                setup_new_dl_entity(dl_se);
1599        }
1600
1601        __enqueue_dl_entity(dl_se);
1602}
1603
1604static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1605{
1606        __dequeue_dl_entity(dl_se);
1607}
1608
1609static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1610{
1611        if (is_dl_boosted(&p->dl)) {
1612                /*
1613                 * Because of delays in the detection of the overrun of a
1614                 * thread's runtime, it might be the case that a thread
1615                 * goes to sleep in a rt mutex with negative runtime. As
1616                 * a consequence, the thread will be throttled.
1617                 *
1618                 * While waiting for the mutex, this thread can also be
1619                 * boosted via PI, resulting in a thread that is throttled
1620                 * and boosted at the same time.
1621                 *
1622                 * In this case, the boost overrides the throttle.
1623                 */
1624                if (p->dl.dl_throttled) {
1625                        /*
1626                         * The replenish timer needs to be canceled. No
1627                         * problem if it fires concurrently: boosted threads
1628                         * are ignored in dl_task_timer().
1629                         */
1630                        hrtimer_try_to_cancel(&p->dl.dl_timer);
1631                        p->dl.dl_throttled = 0;
1632                }
1633        } else if (!dl_prio(p->normal_prio)) {
1634                /*
1635                 * Special case in which we have a !SCHED_DEADLINE task that is going
1636                 * to be deboosted, but exceeds its runtime while doing so. No point in
1637                 * replenishing it, as it's going to return to its original
1638                 * scheduling class after this. If it has been throttled, we need to
1639                 * clear the flag, otherwise the task may wake up as throttled after
1640                 * being boosted again with no means to replenish the runtime and clear
1641                 * the throttle.
1642                 */
1643                p->dl.dl_throttled = 0;
1644                BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
1645                return;
1646        }
1647
1648        /*
1649         * Check if a constrained deadline task was activated
1650         * after the deadline but before the next period.
1651         * If that is the case, the task will be throttled and
1652         * the replenishment timer will be set to the next period.
1653         */
1654        if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1655                dl_check_constrained_dl(&p->dl);
1656
1657        if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1658                add_rq_bw(&p->dl, &rq->dl);
1659                add_running_bw(&p->dl, &rq->dl);
1660        }
1661
1662        /*
1663         * If p is throttled, we do not enqueue it. In fact, if it exhausted
1664         * its budget it needs a replenishment and, since it now is on
1665         * its rq, the bandwidth timer callback (which clearly has not
1666         * run yet) will take care of this.
1667         * However, the active utilization does not depend on the fact
1668         * that the task is on the runqueue or not (but depends on the
1669         * task's state - in GRUB parlance, "inactive" vs "active contending").
1670         * In other words, even if a task is throttled its utilization must
1671         * be counted in the active utilization; hence, we need to call
1672         * add_running_bw().
1673         */
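        /*
         * A hedged, illustrative example (numbers are hypothetical): a task
         * with dl_runtime = 10ms and dl_period = 100ms contributes
         * dl_bw = to_ratio(100ms, 10ms), i.e. roughly 0.1 in BW_SHIFT fixed
         * point, to running_bw. If it overruns and gets throttled, that ~10%
         * share keeps being counted until the task truly becomes "inactive"
         * (its 0-lag time passes), so GRUB reclaiming and the frequency
         * selection driven by running_bw see a consistent active utilization.
         */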
1674        if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1675                if (flags & ENQUEUE_WAKEUP)
1676                        task_contending(&p->dl, flags);
1677
1678                return;
1679        }
1680
1681        check_schedstat_required();
1682        update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
1683
1684        enqueue_dl_entity(&p->dl, flags);
1685
1686        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1687                enqueue_pushable_dl_task(rq, p);
1688}
1689
1690static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1691{
1692        update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
1693        dequeue_dl_entity(&p->dl);
1694        dequeue_pushable_dl_task(rq, p);
1695}
1696
1697static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1698{
1699        update_curr_dl(rq);
1700        __dequeue_task_dl(rq, p, flags);
1701
1702        if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1703                sub_running_bw(&p->dl, &rq->dl);
1704                sub_rq_bw(&p->dl, &rq->dl);
1705        }
1706
1707        /*
1708         * This check allows us to start the inactive timer (or to immediately
1709         * decrease the active utilization, if needed) in two cases:
1710         * when the task blocks and when it is terminating
1711         * (p->__state == TASK_DEAD). We can handle the two cases in the same
1712         * way, because from GRUB's point of view the same thing is happening
1713         * (the task moves from "active contending" to "active non contending"
1714         * or "inactive")
1715         */
1716        if (flags & DEQUEUE_SLEEP)
1717                task_non_contending(p);
1718}
1719
1720/*
1721 * Yield task semantics for -deadline tasks:
1722 *
1723 *   get off the CPU until our next instance, with
1724 *   a new runtime. This is of little use now, since we
1725 *   don't have a bandwidth reclaiming mechanism. Anyway,
1726 *   bandwidth reclaiming is planned for the future, and
1727 *   yield_task_dl will indicate that some spare budget
1728 *   is available for other task instances to use.
1729 */
1730static void yield_task_dl(struct rq *rq)
1731{
1732        /*
1733         * We make the task go to sleep until its current deadline by
1734         * forcing its runtime to zero. This way, update_curr_dl() stops
1735         * it and the bandwidth timer will wake it up and will give it
1736         * new scheduling parameters (thanks to dl_yielded=1).
1737         */
1738        rq->curr->dl.dl_yielded = 1;
1739
1740        update_rq_clock(rq);
1741        update_curr_dl(rq);
1742        /*
1743         * Tell update_rq_clock() that we've just updated,
1744         * so we don't do a microscopic update in schedule()
1745         * and double the fastpath cost.
1746         */
1747        rq_clock_skip_update(rq);
1748}
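/*
 * Illustrative (hypothetical) userspace usage of the above: a periodic
 * -deadline job that finishes early can give the rest of its budget back
 * until the next instance:
 *
 *	while (!done) {
 *		do_periodic_work();	// hypothetical job body
 *		sched_yield();		// throttle until the next replenishment
 *	}
 *
 * sched_yield() reaches yield_task_dl() through the sched_class hook; the
 * task is then throttled until the bandwidth timer fires and hands it new
 * parameters (dl_yielded).
 */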
1749
1750#ifdef CONFIG_SMP
1751
1752static int find_later_rq(struct task_struct *task);
1753
1754static int
1755select_task_rq_dl(struct task_struct *p, int cpu, int flags)
1756{
1757        struct task_struct *curr;
1758        bool select_rq;
1759        struct rq *rq;
1760
1761        if (!(flags & WF_TTWU))
1762                goto out;
1763
1764        rq = cpu_rq(cpu);
1765
1766        rcu_read_lock();
1767        curr = READ_ONCE(rq->curr); /* unlocked access */
1768
1769        /*
1770         * If we are dealing with a -deadline task, we must
1771         * decide where to wake it up.
1772         * If it has a later deadline or the current task
1773         * on this rq can't move (provided the waking task
1774         * can!), we prefer to send it somewhere else. On the
1775         * other hand, if it has a shorter deadline, we
1776         * try to make it stay here, as it might be important.
1777         */
1778        select_rq = unlikely(dl_task(curr)) &&
1779                    (curr->nr_cpus_allowed < 2 ||
1780                     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1781                    p->nr_cpus_allowed > 1;
1782
1783        /*
1784         * Take the capacity of the CPU into account to
1785         * ensure it fits the requirement of the task.
1786         */
1787        if (static_branch_unlikely(&sched_asym_cpucapacity))
1788                select_rq |= !dl_task_fits_capacity(p, cpu);
1789
1790        if (select_rq) {
1791                int target = find_later_rq(p);
1792
1793                if (target != -1 &&
1794                                (dl_time_before(p->dl.deadline,
1795                                        cpu_rq(target)->dl.earliest_dl.curr) ||
1796                                (cpu_rq(target)->dl.dl_nr_running == 0)))
1797                        cpu = target;
1798        }
1799        rcu_read_unlock();
1800
1801out:
1802        return cpu;
1803}
1804
1805static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1806{
1807        struct rq *rq;
1808
1809        if (READ_ONCE(p->__state) != TASK_WAKING)
1810                return;
1811
1812        rq = task_rq(p);
1813        /*
1814         * Since p->__state == TASK_WAKING, set_task_cpu() has been called
1815         * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1816         * rq->lock is not... So, lock it
1817         */
1818        raw_spin_rq_lock(rq);
1819        if (p->dl.dl_non_contending) {
1820                update_rq_clock(rq);
1821                sub_running_bw(&p->dl, &rq->dl);
1822                p->dl.dl_non_contending = 0;
1823                /*
1824                 * If the timer handler is currently running and the
1825                 * timer cannot be canceled, inactive_task_timer()
1826                 * will see that dl_non_contending is not set, and
1827                 * will not touch the rq's active utilization,
1828                 * so we are still safe.
1829                 */
1830                if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1831                        put_task_struct(p);
1832        }
1833        sub_rq_bw(&p->dl, &rq->dl);
1834        raw_spin_rq_unlock(rq);
1835}
1836
1837static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1838{
1839        /*
1840         * Current can't be migrated, useless to reschedule,
1841         * let's hope p can move out.
1842         */
1843        if (rq->curr->nr_cpus_allowed == 1 ||
1844            !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1845                return;
1846
1847        /*
1848         * p is migratable, so let's not schedule it and
1849         * see if it is pushed or pulled somewhere else.
1850         */
1851        if (p->nr_cpus_allowed != 1 &&
1852            cpudl_find(&rq->rd->cpudl, p, NULL))
1853                return;
1854
1855        resched_curr(rq);
1856}
1857
1858static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1859{
1860        if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1861                /*
1862                 * This is OK, because current is on_cpu, which avoids it being
1863                 * picked for load-balance; preemption/IRQs are still
1864                 * disabled, avoiding further scheduler activity on it; and we've
1865                 * not yet started the picking loop.
1866                 */
1867                rq_unpin_lock(rq, rf);
1868                pull_dl_task(rq);
1869                rq_repin_lock(rq, rf);
1870        }
1871
1872        return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1873}
1874#endif /* CONFIG_SMP */
1875
1876/*
1877 * Only called when both the current and waking task are -deadline
1878 * tasks.
1879 */
1880static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1881                                  int flags)
1882{
1883        if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1884                resched_curr(rq);
1885                return;
1886        }
1887
1888#ifdef CONFIG_SMP
1889        /*
1890         * In the unlikely case current and p have the same deadline
1891         * let us try to decide what's the best thing to do...
1892         */
1893        if ((p->dl.deadline == rq->curr->dl.deadline) &&
1894            !test_tsk_need_resched(rq->curr))
1895                check_preempt_equal_dl(rq, p);
1896#endif /* CONFIG_SMP */
1897}
1898
1899#ifdef CONFIG_SCHED_HRTICK
1900static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1901{
1902        hrtick_start(rq, p->dl.runtime);
1903}
1904#else /* !CONFIG_SCHED_HRTICK */
1905static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1906{
1907}
1908#endif
1909
1910static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1911{
1912        struct sched_dl_entity *dl_se = &p->dl;
1913        struct dl_rq *dl_rq = &rq->dl;
1914
1915        p->se.exec_start = rq_clock_task(rq);
1916        if (on_dl_rq(&p->dl))
1917                update_stats_wait_end_dl(dl_rq, dl_se);
1918
1919        /* You can't push away the running task */
1920        dequeue_pushable_dl_task(rq, p);
1921
1922        if (!first)
1923                return;
1924
1925        if (hrtick_enabled_dl(rq))
1926                start_hrtick_dl(rq, p);
1927
1928        if (rq->curr->sched_class != &dl_sched_class)
1929                update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1930
1931        deadline_queue_push_tasks(rq);
1932}
1933
1934static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1935                                                   struct dl_rq *dl_rq)
1936{
1937        struct rb_node *left = rb_first_cached(&dl_rq->root);
1938
1939        if (!left)
1940                return NULL;
1941
1942        return rb_entry(left, struct sched_dl_entity, rb_node);
1943}
1944
1945static struct task_struct *pick_task_dl(struct rq *rq)
1946{
1947        struct sched_dl_entity *dl_se;
1948        struct dl_rq *dl_rq = &rq->dl;
1949        struct task_struct *p;
1950
1951        if (!sched_dl_runnable(rq))
1952                return NULL;
1953
1954        dl_se = pick_next_dl_entity(rq, dl_rq);
1955        BUG_ON(!dl_se);
1956        p = dl_task_of(dl_se);
1957
1958        return p;
1959}
1960
1961static struct task_struct *pick_next_task_dl(struct rq *rq)
1962{
1963        struct task_struct *p;
1964
1965        p = pick_task_dl(rq);
1966        if (p)
1967                set_next_task_dl(rq, p, true);
1968
1969        return p;
1970}
1971
1972static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1973{
1974        struct sched_dl_entity *dl_se = &p->dl;
1975        struct dl_rq *dl_rq = &rq->dl;
1976
1977        if (on_dl_rq(&p->dl))
1978                update_stats_wait_start_dl(dl_rq, dl_se);
1979
1980        update_curr_dl(rq);
1981
1982        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1983        if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1984                enqueue_pushable_dl_task(rq, p);
1985}
1986
1987/*
1988 * scheduler tick hitting a task of our scheduling class.
1989 *
1990 * NOTE: This function can be called remotely by the tick offload that
1991 * goes along with full dynticks. Therefore no local assumption can be made
1992 * and everything must be accessed through the @rq and @curr passed in
1993 * parameters.
1994 */
1995static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1996{
1997        update_curr_dl(rq);
1998
1999        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2000        /*
2001         * Even when we have runtime, update_curr_dl() might have resulted in us
2002         * not being the leftmost task anymore. In that case NEED_RESCHED will
2003         * be set and schedule() will start a new hrtick for the next task.
2004         */
2005        if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
2006            is_leftmost(p, &rq->dl))
2007                start_hrtick_dl(rq, p);
2008}
2009
2010static void task_fork_dl(struct task_struct *p)
2011{
2012        /*
2013         * SCHED_DEADLINE tasks cannot fork and this is achieved through
2014         * sched_fork()
2015         */
2016}
2017
2018#ifdef CONFIG_SMP
2019
2020/* Only try algorithms three times */
2021#define DL_MAX_TRIES 3
2022
2023static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2024{
2025        if (!task_running(rq, p) &&
2026            cpumask_test_cpu(cpu, &p->cpus_mask))
2027                return 1;
2028        return 0;
2029}
2030
2031/*
2032 * Return the earliest deadline pushable task of this rq that is suitable
2033 * to be executed on the given CPU, or NULL if there is none:
2034 */
2035static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2036{
2037        struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
2038        struct task_struct *p = NULL;
2039
2040        if (!has_pushable_dl_tasks(rq))
2041                return NULL;
2042
2043next_node:
2044        if (next_node) {
2045                p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
2046
2047                if (pick_dl_task(rq, p, cpu))
2048                        return p;
2049
2050                next_node = rb_next(next_node);
2051                goto next_node;
2052        }
2053
2054        return NULL;
2055}
2056
2057static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2058
2059static int find_later_rq(struct task_struct *task)
2060{
2061        struct sched_domain *sd;
2062        struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
2063        int this_cpu = smp_processor_id();
2064        int cpu = task_cpu(task);
2065
2066        /* Make sure the mask is initialized first */
2067        if (unlikely(!later_mask))
2068                return -1;
2069
2070        if (task->nr_cpus_allowed == 1)
2071                return -1;
2072
2073        /*
2074         * We have to consider system topology and task affinity
2075         * first, then we can look for a suitable CPU.
2076         */
2077        if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
2078                return -1;
2079
2080        /*
2081         * If we are here, some targets have been found, including
2082         * the most suitable one: among the runqueues whose current
2083         * tasks have later deadlines than our task's, the rq whose
2084         * current task has the latest deadline of all.
2085         *
2086         * Now we check how well this matches the task's
2087         * affinity and the system topology.
2088         *
2089         * The last CPU where the task ran is our first
2090         * guess, since it is most likely cache-hot there.
2091         */
2092        if (cpumask_test_cpu(cpu, later_mask))
2093                return cpu;
2094        /*
2095         * Check if this_cpu is to be skipped (i.e., it is
2096         * not in the mask) or not.
2097         */
2098        if (!cpumask_test_cpu(this_cpu, later_mask))
2099                this_cpu = -1;
2100
2101        rcu_read_lock();
2102        for_each_domain(cpu, sd) {
2103                if (sd->flags & SD_WAKE_AFFINE) {
2104                        int best_cpu;
2105
2106                        /*
2107                         * If possible, preempting this_cpu is
2108                         * cheaper than migrating.
2109                         */
2110                        if (this_cpu != -1 &&
2111                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2112                                rcu_read_unlock();
2113                                return this_cpu;
2114                        }
2115
2116                        best_cpu = cpumask_any_and_distribute(later_mask,
2117                                                              sched_domain_span(sd));
2118                        /*
2119                         * Last chance: if a CPU in both later_mask
2120                         * and the current sd span is valid, that becomes our
2121                         * choice. Of course, the latest possible CPU is
2122                         * already under consideration through later_mask.
2123                         */
2124                        if (best_cpu < nr_cpu_ids) {
2125                                rcu_read_unlock();
2126                                return best_cpu;
2127                        }
2128                }
2129        }
2130        rcu_read_unlock();
2131
2132        /*
2133         * At this point, all our guesses failed, so we just return
2134         * 'something', and let the caller sort things out.
2135         */
2136        if (this_cpu != -1)
2137                return this_cpu;
2138
2139        cpu = cpumask_any_distribute(later_mask);
2140        if (cpu < nr_cpu_ids)
2141                return cpu;
2142
2143        return -1;
2144}
2145
2146/* Locks the rq it finds */
2147static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2148{
2149        struct rq *later_rq = NULL;
2150        int tries;
2151        int cpu;
2152
2153        for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2154                cpu = find_later_rq(task);
2155
2156                if ((cpu == -1) || (cpu == rq->cpu))
2157                        break;
2158
2159                later_rq = cpu_rq(cpu);
2160
2161                if (later_rq->dl.dl_nr_running &&
2162                    !dl_time_before(task->dl.deadline,
2163                                        later_rq->dl.earliest_dl.curr)) {
2164                        /*
2165                         * Target rq has tasks of equal or earlier deadline,
2166                         * retrying does not release any lock and is unlikely
2167                         * to yield a different result.
2168                         */
2169                        later_rq = NULL;
2170                        break;
2171                }
2172
2173                /* Retry if something changed. */
2174                if (double_lock_balance(rq, later_rq)) {
2175                        if (unlikely(task_rq(task) != rq ||
2176                                     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2177                                     task_running(rq, task) ||
2178                                     !dl_task(task) ||
2179                                     !task_on_rq_queued(task))) {
2180                                double_unlock_balance(rq, later_rq);
2181                                later_rq = NULL;
2182                                break;
2183                        }
2184                }
2185
2186                /*
2187                 * If the rq we found has no -deadline task, or
2188                 * its earliest one has a later deadline than our
2189                 * task, the rq is a good one.
2190                 */
2191                if (!later_rq->dl.dl_nr_running ||
2192                    dl_time_before(task->dl.deadline,
2193                                   later_rq->dl.earliest_dl.curr))
2194                        break;
2195
2196                /* Otherwise we try again. */
2197                double_unlock_balance(rq, later_rq);
2198                later_rq = NULL;
2199        }
2200
2201        return later_rq;
2202}
2203
2204static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2205{
2206        struct task_struct *p;
2207
2208        if (!has_pushable_dl_tasks(rq))
2209                return NULL;
2210
2211        p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2212                     struct task_struct, pushable_dl_tasks);
2213
2214        BUG_ON(rq->cpu != task_cpu(p));
2215        BUG_ON(task_current(rq, p));
2216        BUG_ON(p->nr_cpus_allowed <= 1);
2217
2218        BUG_ON(!task_on_rq_queued(p));
2219        BUG_ON(!dl_task(p));
2220
2221        return p;
2222}
2223
2224/*
2225 * See if the non-running -deadline tasks on this rq
2226 * can be sent to some other CPU where they can preempt
2227 * and start executing.
2228 */
2229static int push_dl_task(struct rq *rq)
2230{
2231        struct task_struct *next_task;
2232        struct rq *later_rq;
2233        int ret = 0;
2234
2235        if (!rq->dl.overloaded)
2236                return 0;
2237
2238        next_task = pick_next_pushable_dl_task(rq);
2239        if (!next_task)
2240                return 0;
2241
2242retry:
2243        if (is_migration_disabled(next_task))
2244                return 0;
2245
2246        if (WARN_ON(next_task == rq->curr))
2247                return 0;
2248
2249        /*
2250         * If next_task preempts rq->curr, and rq->curr
2251         * can move away, it makes sense to just reschedule
2252         * without going further in pushing next_task.
2253         */
2254        if (dl_task(rq->curr) &&
2255            dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2256            rq->curr->nr_cpus_allowed > 1) {
2257                resched_curr(rq);
2258                return 0;
2259        }
2260
2261        /* We might release rq lock */
2262        get_task_struct(next_task);
2263
2264        /* Will lock the rq it'll find */
2265        later_rq = find_lock_later_rq(next_task, rq);
2266        if (!later_rq) {
2267                struct task_struct *task;
2268
2269                /*
2270                 * We must check all this again, since
2271                 * find_lock_later_rq releases rq->lock and it is
2272                 * then possible that next_task has migrated.
2273                 */
2274                task = pick_next_pushable_dl_task(rq);
2275                if (task == next_task) {
2276                        /*
2277                         * The task is still there. We don't try
2278                         * again, some other CPU will pull it when ready.
2279                         */
2280                        goto out;
2281                }
2282
2283                if (!task)
2284                        /* No more tasks */
2285                        goto out;
2286
2287                put_task_struct(next_task);
2288                next_task = task;
2289                goto retry;
2290        }
2291
2292        deactivate_task(rq, next_task, 0);
2293        set_task_cpu(next_task, later_rq->cpu);
2294
2295        /*
2296         * Update the later_rq clock here, because the clock is used
2297         * by the cpufreq_update_util() inside __add_running_bw().
2298         */
2299        update_rq_clock(later_rq);
2300        activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2301        ret = 1;
2302
2303        resched_curr(later_rq);
2304
2305        double_unlock_balance(rq, later_rq);
2306
2307out:
2308        put_task_struct(next_task);
2309
2310        return ret;
2311}
2312
2313static void push_dl_tasks(struct rq *rq)
2314{
2315        /* push_dl_task() will return true if it moved a -deadline task */
2316        while (push_dl_task(rq))
2317                ;
2318}
2319
2320static void pull_dl_task(struct rq *this_rq)
2321{
2322        int this_cpu = this_rq->cpu, cpu;
2323        struct task_struct *p, *push_task;
2324        bool resched = false;
2325        struct rq *src_rq;
2326        u64 dmin = LONG_MAX;
2327
2328        if (likely(!dl_overloaded(this_rq)))
2329                return;
2330
2331        /*
2332         * Match the barrier from dl_set_overloaded; this guarantees that if we
2333         * see overloaded we must also see the dlo_mask bit.
2334         */
2335        smp_rmb();
2336
2337        for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2338                if (this_cpu == cpu)
2339                        continue;
2340
2341                src_rq = cpu_rq(cpu);
2342
2343                /*
2344                 * It looks racy, and it is! However, as in sched_rt.c,
2345                 * we are fine with this.
2346                 */
2347                if (this_rq->dl.dl_nr_running &&
2348                    dl_time_before(this_rq->dl.earliest_dl.curr,
2349                                   src_rq->dl.earliest_dl.next))
2350                        continue;
2351
2352                /* Might drop this_rq->lock */
2353                push_task = NULL;
2354                double_lock_balance(this_rq, src_rq);
2355
2356                /*
2357                 * If there are no more pullable tasks on the
2358                 * rq, we're done with it.
2359                 */
2360                if (src_rq->dl.dl_nr_running <= 1)
2361                        goto skip;
2362
2363                p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2364
2365                /*
2366                 * We found a task to be pulled if:
2367                 *  - it preempts our current (if there's one),
2368                 *  - it will preempt the last one we pulled (if any).
2369                 */
2370                if (p && dl_time_before(p->dl.deadline, dmin) &&
2371                    (!this_rq->dl.dl_nr_running ||
2372                     dl_time_before(p->dl.deadline,
2373                                    this_rq->dl.earliest_dl.curr))) {
2374                        WARN_ON(p == src_rq->curr);
2375                        WARN_ON(!task_on_rq_queued(p));
2376
2377                        /*
2378                         * Don't pull p if it has an earlier deadline than the
2379                         * current task of its runqueue: it is about to run there.
2380                         */
2381                        if (dl_time_before(p->dl.deadline,
2382                                           src_rq->curr->dl.deadline))
2383                                goto skip;
2384
2385                        if (is_migration_disabled(p)) {
2386                                push_task = get_push_task(src_rq);
2387                        } else {
2388                                deactivate_task(src_rq, p, 0);
2389                                set_task_cpu(p, this_cpu);
2390                                activate_task(this_rq, p, 0);
2391                                dmin = p->dl.deadline;
2392                                resched = true;
2393                        }
2394
2395                        /* Is there any other task even earlier? */
2396                }
2397skip:
2398                double_unlock_balance(this_rq, src_rq);
2399
2400                if (push_task) {
2401                        raw_spin_rq_unlock(this_rq);
2402                        stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2403                                            push_task, &src_rq->push_work);
2404                        raw_spin_rq_lock(this_rq);
2405                }
2406        }
2407
2408        if (resched)
2409                resched_curr(this_rq);
2410}
2411
2412/*
2413 * Since the task is not running and a reschedule is not going to happen
2414 * anytime soon on its runqueue, we try pushing it away now.
2415 */
2416static void task_woken_dl(struct rq *rq, struct task_struct *p)
2417{
2418        if (!task_running(rq, p) &&
2419            !test_tsk_need_resched(rq->curr) &&
2420            p->nr_cpus_allowed > 1 &&
2421            dl_task(rq->curr) &&
2422            (rq->curr->nr_cpus_allowed < 2 ||
2423             !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2424                push_dl_tasks(rq);
2425        }
2426}
2427
2428static void set_cpus_allowed_dl(struct task_struct *p,
2429                                const struct cpumask *new_mask,
2430                                u32 flags)
2431{
2432        struct root_domain *src_rd;
2433        struct rq *rq;
2434
2435        BUG_ON(!dl_task(p));
2436
2437        rq = task_rq(p);
2438        src_rd = rq->rd;
2439        /*
2440         * Migrating a SCHED_DEADLINE task between exclusive
2441         * cpusets (different root_domains) entails a bandwidth
2442         * update. We already made space for us in the destination
2443         * domain (see cpuset_can_attach()).
2444         */
2445        if (!cpumask_intersects(src_rd->span, new_mask)) {
2446                struct dl_bw *src_dl_b;
2447
2448                src_dl_b = dl_bw_of(cpu_of(rq));
2449                /*
2450                 * We now free resources of the root_domain we are migrating
2451                 * off. In the worst case, sched_setattr() may temporarily fail
2452                 * until we complete the update.
2453                 */
2454                raw_spin_lock(&src_dl_b->lock);
2455                __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2456                raw_spin_unlock(&src_dl_b->lock);
2457        }
2458
2459        set_cpus_allowed_common(p, new_mask, flags);
2460}
2461
2462/* Assumes rq->lock is held */
2463static void rq_online_dl(struct rq *rq)
2464{
2465        if (rq->dl.overloaded)
2466                dl_set_overload(rq);
2467
2468        cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2469        if (rq->dl.dl_nr_running > 0)
2470                cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2471}
2472
2473/* Assumes rq->lock is held */
2474static void rq_offline_dl(struct rq *rq)
2475{
2476        if (rq->dl.overloaded)
2477                dl_clear_overload(rq);
2478
2479        cpudl_clear(&rq->rd->cpudl, rq->cpu);
2480        cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2481}
2482
2483void __init init_sched_dl_class(void)
2484{
2485        unsigned int i;
2486
2487        for_each_possible_cpu(i)
2488                zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2489                                        GFP_KERNEL, cpu_to_node(i));
2490}
2491
2492void dl_add_task_root_domain(struct task_struct *p)
2493{
2494        struct rq_flags rf;
2495        struct rq *rq;
2496        struct dl_bw *dl_b;
2497
2498        raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2499        if (!dl_task(p)) {
2500                raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2501                return;
2502        }
2503
2504        rq = __task_rq_lock(p, &rf);
2505
2506        dl_b = &rq->rd->dl_bw;
2507        raw_spin_lock(&dl_b->lock);
2508
2509        __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2510
2511        raw_spin_unlock(&dl_b->lock);
2512
2513        task_rq_unlock(rq, p, &rf);
2514}
2515
2516void dl_clear_root_domain(struct root_domain *rd)
2517{
2518        unsigned long flags;
2519
2520        raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2521        rd->dl_bw.total_bw = 0;
2522        raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2523}
2524
2525#endif /* CONFIG_SMP */
2526
2527static void switched_from_dl(struct rq *rq, struct task_struct *p)
2528{
2529        /*
2530         * task_non_contending() can start the "inactive timer" (if the 0-lag
2531         * time is in the future). If the task switches back to dl before
2532         * the "inactive timer" fires, it can continue to consume its current
2533         * runtime using its current deadline. If it stays outside of
2534         * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2535         * will reset the task parameters.
2536         */
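        /*
         * Rough sketch of the "0-lag time" used above (illustrative only,
         * see task_non_contending() for the actual computation): with
         * 'runtime' budget left and bandwidth dl_runtime/dl_period, the
         * 0-lag time is about deadline - runtime * dl_period / dl_runtime,
         * i.e. the instant from which the task's bandwidth can be released
         * without endangering the guarantees of the other -deadline tasks.
         */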
2537        if (task_on_rq_queued(p) && p->dl.dl_runtime)
2538                task_non_contending(p);
2539
2540        if (!task_on_rq_queued(p)) {
2541                /*
2542                 * Inactive timer is armed. However, p is leaving DEADLINE and
2543                 * might migrate away from this rq while continuing to run in
2544                 * some other scheduling class. We need to remove its contribution from
2545                 * this rq running_bw now, or sub_rq_bw (below) will complain.
2546                 */
2547                if (p->dl.dl_non_contending)
2548                        sub_running_bw(&p->dl, &rq->dl);
2549                sub_rq_bw(&p->dl, &rq->dl);
2550        }
2551
2552        /*
2553         * We cannot use inactive_task_timer() to invoke sub_running_bw()
2554         * at the 0-lag time, because the task could have been migrated
2555         * while it was SCHED_OTHER in the meantime.
2556         */
2557        if (p->dl.dl_non_contending)
2558                p->dl.dl_non_contending = 0;
2559
2560        /*
2561         * Since this might be the only -deadline task on the rq,
2562         * this is the right place to try to pull some other one
2563         * from an overloaded CPU, if any.
2564         */
2565        if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2566                return;
2567
2568        deadline_queue_pull_task(rq);
2569}
2570
2571/*
2572 * When switching to -deadline, we may overload the rq, then
2573 * we try to push someone off, if possible.
2574 */
2575static void switched_to_dl(struct rq *rq, struct task_struct *p)
2576{
2577        if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2578                put_task_struct(p);
2579
2580        /* If p is not queued we will update its parameters at next wakeup. */
2581        if (!task_on_rq_queued(p)) {
2582                add_rq_bw(&p->dl, &rq->dl);
2583
2584                return;
2585        }
2586
2587        if (rq->curr != p) {
2588#ifdef CONFIG_SMP
2589                if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2590                        deadline_queue_push_tasks(rq);
2591#endif
2592                if (dl_task(rq->curr))
2593                        check_preempt_curr_dl(rq, p, 0);
2594                else
2595                        resched_curr(rq);
2596        } else {
2597                update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2598        }
2599}
2600
2601/*
2602 * If the scheduling parameters of a -deadline task changed,
2603 * a push or pull operation might be needed.
2604 */
2605static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2606                            int oldprio)
2607{
2608        if (task_on_rq_queued(p) || task_current(rq, p)) {
2609#ifdef CONFIG_SMP
2610                /*
2611                 * This might be too much, but unfortunately
2612                 * we don't have the old deadline value, and
2613                 * we can't tell whether the task is increasing
2614                 * or lowering its prio, so...
2615                 */
2616                if (!rq->dl.overloaded)
2617                        deadline_queue_pull_task(rq);
2618
2619                /*
2620                 * If we now have an earlier deadline task than p,
2621                 * then reschedule, provided p is still on this
2622                 * runqueue.
2623                 */
2624                if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2625                        resched_curr(rq);
2626#else
2627                /*
2628                 * Again, we don't know if p has an earlier
2629                 * or later deadline, so let's blindly set a
2630                 * (maybe not needed) rescheduling point.
2631                 */
2632                resched_curr(rq);
2633#endif /* CONFIG_SMP */
2634        }
2635}
2636
2637DEFINE_SCHED_CLASS(dl) = {
2638
2639        .enqueue_task           = enqueue_task_dl,
2640        .dequeue_task           = dequeue_task_dl,
2641        .yield_task             = yield_task_dl,
2642
2643        .check_preempt_curr     = check_preempt_curr_dl,
2644
2645        .pick_next_task         = pick_next_task_dl,
2646        .put_prev_task          = put_prev_task_dl,
2647        .set_next_task          = set_next_task_dl,
2648
2649#ifdef CONFIG_SMP
2650        .balance                = balance_dl,
2651        .pick_task              = pick_task_dl,
2652        .select_task_rq         = select_task_rq_dl,
2653        .migrate_task_rq        = migrate_task_rq_dl,
2654        .set_cpus_allowed       = set_cpus_allowed_dl,
2655        .rq_online              = rq_online_dl,
2656        .rq_offline             = rq_offline_dl,
2657        .task_woken             = task_woken_dl,
2658        .find_lock_rq           = find_lock_later_rq,
2659#endif
2660
2661        .task_tick              = task_tick_dl,
2662        .task_fork              = task_fork_dl,
2663
2664        .prio_changed           = prio_changed_dl,
2665        .switched_from          = switched_from_dl,
2666        .switched_to            = switched_to_dl,
2667
2668        .update_curr            = update_curr_dl,
2669};
2670
2671/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
2672static u64 dl_generation;
2673
2674int sched_dl_global_validate(void)
2675{
2676        u64 runtime = global_rt_runtime();
2677        u64 period = global_rt_period();
2678        u64 new_bw = to_ratio(period, runtime);
2679        u64 gen = ++dl_generation;
2680        struct dl_bw *dl_b;
2681        int cpu, cpus, ret = 0;
2682        unsigned long flags;
2683
2684        /*
2685         * Here we want to check that the new bandwidth is not being set
2686         * to a value smaller than the bandwidth currently allocated in
2687         * any of the root_domains.
2688         */
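        /*
         * Worked example (hedged, assuming the usual default sysctls): with
         * sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000,
         * new_bw = to_ratio(1s, 950ms), i.e. ~0.95 in BW_SHIFT fixed point.
         * A root domain spanning 4 CPUs may then hold a total_bw of at most
         * ~4 * 0.95; writing a smaller runtime while more than that is
         * already allocated makes the check below fail with -EBUSY.
         */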
2689        for_each_possible_cpu(cpu) {
2690                rcu_read_lock_sched();
2691
2692                if (dl_bw_visited(cpu, gen))
2693                        goto next;
2694
2695                dl_b = dl_bw_of(cpu);
2696                cpus = dl_bw_cpus(cpu);
2697
2698                raw_spin_lock_irqsave(&dl_b->lock, flags);
2699                if (new_bw * cpus < dl_b->total_bw)
2700                        ret = -EBUSY;
2701                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2702
2703next:
2704                rcu_read_unlock_sched();
2705
2706                if (ret)
2707                        break;
2708        }
2709
2710        return ret;
2711}
2712
2713static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2714{
2715        if (global_rt_runtime() == RUNTIME_INF) {
2716                dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2717                dl_rq->extra_bw = 1 << BW_SHIFT;
2718        } else {
2719                dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2720                          global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2721                dl_rq->extra_bw = to_ratio(global_rt_period(),
2722                                                    global_rt_runtime());
2723        }
2724}
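/*
 * For instance (approximate, with the default 95% RT bandwidth and the
 * BW_SHIFT/RATIO_SHIFT fixed-point scales from sched.h): bw_ratio ends up
 * around 1/0.95 in RATIO_SHIFT fixed point and extra_bw around 0.95 in
 * BW_SHIFT fixed point, the values used by the GRUB runtime-reclaiming
 * logic to bound how much bandwidth a reclaiming task may consume.
 */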
2725
2726void sched_dl_do_global(void)
2727{
2728        u64 new_bw = -1;
2729        u64 gen = ++dl_generation;
2730        struct dl_bw *dl_b;
2731        int cpu;
2732        unsigned long flags;
2733
2734        def_dl_bandwidth.dl_period = global_rt_period();
2735        def_dl_bandwidth.dl_runtime = global_rt_runtime();
2736
2737        if (global_rt_runtime() != RUNTIME_INF)
2738                new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2739
2740        for_each_possible_cpu(cpu) {
2741                rcu_read_lock_sched();
2742
2743                if (dl_bw_visited(cpu, gen)) {
2744                        rcu_read_unlock_sched();
2745                        continue;
2746                }
2747
2748                dl_b = dl_bw_of(cpu);
2749
2750                raw_spin_lock_irqsave(&dl_b->lock, flags);
2751                dl_b->bw = new_bw;
2752                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2753
2754                rcu_read_unlock_sched();
2755                init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2756        }
2757}
2758
2759/*
2760 * We must be sure that accepting a new task (or allowing changing the
2761 * parameters of an existing one) is consistent with the bandwidth
2762 * constraints. If so, this function also updates the currently
2763 * allocated bandwidth accordingly, to reflect the new situation.
2764 *
2765 * This function is called while holding p's rq->lock.
2766 */
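/*
 * Illustrative admission example (hypothetical numbers): a task asking for
 * sched_runtime = 30ms over sched_period = 100ms requests new_bw ~= 0.3 of
 * a CPU. Roughly speaking, it is accepted only if the root domain's
 * already-allocated total_bw, plus this 0.3, minus whatever the task was
 * using before, still fits within the per-CPU limit (dl_b->bw) scaled by
 * the domain's CPU capacity; otherwise the request is rejected.
 */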
2767int sched_dl_overflow(struct task_struct *p, int policy,
2768                      const struct sched_attr *attr)
2769{
2770        u64 period = attr->sched_period ?: attr->sched_deadline;
2771        u64 runtime = attr->sched_runtime;
2772        u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2773        int cpus, err = -1, cpu = task_cpu(p);
2774        struct dl_bw *dl_b = dl_bw_of(cpu);
2775        unsigned long cap;
2776
2777        if (attr->sched_flags & SCHED_FLAG_SUGOV)
2778                return 0;
2779
2780        /* !deadline task may carry old deadline bandwidth */
2781        if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2782                return 0;
2783
2784        /*
2785         * Whether a task enters, leaves, or stays -deadline but changes
2786         * its parameters, we may need to update the total allocated
2787         * bandwidth of the container accordingly.
2788         */
2789        raw_spin_lock(&dl_b->lock);
2790        cpus = dl_bw_cpus(cpu);
2791        cap = dl_bw_capacity(cpu);
2792
2793        if (dl_policy(policy) && !task_has_dl_policy(p) &&
2794            !__dl_overflow(dl_b, cap, 0, new_bw)) {
2795                if (hrtimer_active(&p->dl.inactive_timer))
2796                        __dl_sub(dl_b, p->dl.dl_bw, cpus);
2797                __dl_add(dl_b, new_bw, cpus);
2798                err = 0;
2799        } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2800                   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2801                /*
2802                 * XXX this is slightly incorrect: when the task
2803                 * utilization decreases, we should delay the total
2804                 * utilization change until the task's 0-lag point.
2805                 * But this would require setting the task's "inactive
2806                 * timer" when the task is not inactive.
2807                 */
2808                __dl_sub(dl_b, p->dl.dl_bw, cpus);
2809                __dl_add(dl_b, new_bw, cpus);
2810                dl_change_utilization(p, new_bw);
2811                err = 0;
2812        } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2813                /*
2814                 * Do not decrease the total deadline utilization here,
2815                 * switched_from_dl() will take care to do it at the correct
2816                 * (0-lag) time.
2817                 */
2818                err = 0;
2819        }
2820        raw_spin_unlock(&dl_b->lock);
2821
2822        return err;
2823}
2824
2825/*
2826 * This function initializes the sched_dl_entity of a task that is
2827 * becoming SCHED_DEADLINE.
2828 *
2829 * Only the static values are considered here, the actual runtime and the
2830 * absolute deadline will be properly calculated when the task is enqueued
2831 * for the first time with its new policy.
2832 */
2833void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2834{
2835        struct sched_dl_entity *dl_se = &p->dl;
2836
2837        dl_se->dl_runtime = attr->sched_runtime;
2838        dl_se->dl_deadline = attr->sched_deadline;
2839        dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2840        dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
2841        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2842        dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2843}
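/*
 * For illustration, a hypothetical userspace request that ends up here
 * (error handling omitted, values picked arbitrarily):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	// 10 ms
 *		.sched_deadline	= 30 * 1000 * 1000,	// 30 ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100 ms
 *	};
 *	syscall(__NR_sched_setattr, 0, &attr, 0);
 *
 * With these values, dl_bw ends up ~0.1 (runtime/period) and dl_density
 * ~0.33 (runtime/deadline), both in BW_SHIFT fixed point.
 */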
2844
2845void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2846{
2847        struct sched_dl_entity *dl_se = &p->dl;
2848
2849        attr->sched_priority = p->rt_priority;
2850        attr->sched_runtime = dl_se->dl_runtime;
2851        attr->sched_deadline = dl_se->dl_deadline;
2852        attr->sched_period = dl_se->dl_period;
2853        attr->sched_flags &= ~SCHED_DL_FLAGS;
2854        attr->sched_flags |= dl_se->flags;
2855}
2856
2857/*
2858 * Default limits for DL period; on the top end we guard against small util
2859 * tasks still getting ridiculously long effective runtimes; on the bottom end we
2860 * guard against timer DoS.
2861 */
2862unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
2863unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
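/*
 * Both limits are expressed in microseconds (1 << 22 us is ~4.19 s); they
 * are converted to nanoseconds via NSEC_PER_USEC in __checkparam_dl()
 * before being compared against the requested period.
 */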
2864
2865/*
2866 * This function validates the new parameters of a -deadline task.
2867 * We require the deadline to be non-zero and greater than or equal
2868 * to the runtime, and the period to be either zero or greater than
2869 * or equal to the deadline. Furthermore, we have to be sure that
2870 * user parameters are above the internal resolution of 1us (we
2871 * check sched_runtime only since it is always the smaller one) and
2872 * below 2^63 ns (we have to check both sched_deadline and
2873 * sched_period, as the latter can be zero).
2874 */
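/*
 * For example (illustrative values): runtime = 5ms, deadline = 20ms,
 * period = 100ms passes the checks below (5ms <= 20ms <= 100ms and the
 * period lies within the sysctl bounds above), whereas runtime = 30ms with
 * deadline = 20ms, or a runtime below the ~1us internal resolution, is
 * rejected.
 */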
2875bool __checkparam_dl(const struct sched_attr *attr)
2876{
2877        u64 period, max, min;
2878
2879        /* special dl tasks don't actually use any parameter */
2880        if (attr->sched_flags & SCHED_FLAG_SUGOV)
2881                return true;
2882
2883        /* deadline != 0 */
2884        if (attr->sched_deadline == 0)
2885                return false;
2886
2887        /*
2888         * Since we truncate DL_SCALE bits, make sure we're at least
2889         * that big.
2890         */
2891        if (attr->sched_runtime < (1ULL << DL_SCALE))
2892                return false;
2893
2894        /*
2895         * Since we use the MSB for wrap-around and sign issues, make
2896         * sure it's not set (mind that period can be equal to zero).
2897         */
2898        if (attr->sched_deadline & (1ULL << 63) ||
2899            attr->sched_period & (1ULL << 63))
2900                return false;
2901
2902        period = attr->sched_period;
2903        if (!period)
2904                period = attr->sched_deadline;
2905
2906        /* runtime <= deadline <= period (if period != 0) */
2907        if (period < attr->sched_deadline ||
2908            attr->sched_deadline < attr->sched_runtime)
2909                return false;
2910
2911        max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
2912        min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
2913
2914        if (period < min || period > max)
2915                return false;
2916
2917        return true;
2918}
2919
2920/*
2921 * This function clears the sched_dl_entity static params.
2922 */
2923void __dl_clear_params(struct task_struct *p)
2924{
2925        struct sched_dl_entity *dl_se = &p->dl;
2926
2927        dl_se->dl_runtime               = 0;
2928        dl_se->dl_deadline              = 0;
2929        dl_se->dl_period                = 0;
2930        dl_se->flags                    = 0;
2931        dl_se->dl_bw                    = 0;
2932        dl_se->dl_density               = 0;
2933
2934        dl_se->dl_throttled             = 0;
2935        dl_se->dl_yielded               = 0;
2936        dl_se->dl_non_contending        = 0;
2937        dl_se->dl_overrun               = 0;
2938
2939#ifdef CONFIG_RT_MUTEXES
2940        dl_se->pi_se                    = dl_se;
2941#endif
2942}
2943
2944bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2945{
2946        struct sched_dl_entity *dl_se = &p->dl;
2947
2948        if (dl_se->dl_runtime != attr->sched_runtime ||
2949            dl_se->dl_deadline != attr->sched_deadline ||
2950            dl_se->dl_period != attr->sched_period ||
2951            dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
2952                return true;
2953
2954        return false;
2955}
2956
2957#ifdef CONFIG_SMP
2958int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2959{
2960        unsigned long flags, cap;
2961        unsigned int dest_cpu;
2962        struct dl_bw *dl_b;
2963        bool overflow;
2964        int ret;
2965
2966        dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2967
2968        rcu_read_lock_sched();
2969        dl_b = dl_bw_of(dest_cpu);
2970        raw_spin_lock_irqsave(&dl_b->lock, flags);
2971        cap = dl_bw_capacity(dest_cpu);
2972        overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
2973        if (overflow) {
2974                ret = -EBUSY;
2975        } else {
2976                /*
2977                 * We reserve space for this task in the destination
2978                 * root_domain, as we can't fail after this point.
2979                 * We will free resources in the source root_domain
2980                 * later on (see set_cpus_allowed_dl()).
2981                 */
2982                int cpus = dl_bw_cpus(dest_cpu);
2983
2984                __dl_add(dl_b, p->dl.dl_bw, cpus);
2985                ret = 0;
2986        }
2987        raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2988        rcu_read_unlock_sched();
2989
2990        return ret;
2991}
2992
2993int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2994                                 const struct cpumask *trial)
2995{
2996        int ret = 1, trial_cpus;
2997        struct dl_bw *cur_dl_b;
2998        unsigned long flags;
2999
3000        rcu_read_lock_sched();
3001        cur_dl_b = dl_bw_of(cpumask_any(cur));
3002        trial_cpus = cpumask_weight(trial);
3003
3004        raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
3005        if (cur_dl_b->bw != -1 &&
3006            cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
3007                ret = 0;
3008        raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
3009        rcu_read_unlock_sched();
3010
3011        return ret;
3012}
3013
3014bool dl_cpu_busy(unsigned int cpu)
3015{
3016        unsigned long flags, cap;
3017        struct dl_bw *dl_b;
3018        bool overflow;
3019
3020        rcu_read_lock_sched();
3021        dl_b = dl_bw_of(cpu);
3022        raw_spin_lock_irqsave(&dl_b->lock, flags);
3023        cap = dl_bw_capacity(cpu);
3024        overflow = __dl_overflow(dl_b, cap, 0, 0);
3025        raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3026        rcu_read_unlock_sched();
3027
3028        return overflow;
3029}
3030#endif
3031
3032#ifdef CONFIG_SCHED_DEBUG
3033void print_dl_stats(struct seq_file *m, int cpu)
3034{
3035        print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
3036}
3037#endif /* CONFIG_SCHED_DEBUG */
3038