linux/kernel/sched/rt.c
<<
>>
Prefs
   1/*
   2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
   3 * policies)
   4 */
   5
   6#include "sched.h"
   7
   8#include <linux/slab.h>
   9#include <linux/irq_work.h>
  10
  11int sched_rr_timeslice = RR_TIMESLICE;
  12
  13static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
  14
  15struct rt_bandwidth def_rt_bandwidth;
  16
  17static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
  18{
  19        struct rt_bandwidth *rt_b =
  20                container_of(timer, struct rt_bandwidth, rt_period_timer);
  21        int idle = 0;
  22        int overrun;
  23
  24        raw_spin_lock(&rt_b->rt_runtime_lock);
  25        for (;;) {
  26                overrun = hrtimer_forward_now(timer, rt_b->rt_period);
  27                if (!overrun)
  28                        break;
  29
  30                raw_spin_unlock(&rt_b->rt_runtime_lock);
  31                idle = do_sched_rt_period_timer(rt_b, overrun);
  32                raw_spin_lock(&rt_b->rt_runtime_lock);
  33        }
  34        if (idle)
  35                rt_b->rt_period_active = 0;
  36        raw_spin_unlock(&rt_b->rt_runtime_lock);
  37
  38        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
  39}
  40
  41void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
  42{
  43        rt_b->rt_period = ns_to_ktime(period);
  44        rt_b->rt_runtime = runtime;
  45
  46        raw_spin_lock_init(&rt_b->rt_runtime_lock);
  47
  48        hrtimer_init(&rt_b->rt_period_timer,
  49                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  50        rt_b->rt_period_timer.function = sched_rt_period_timer;
  51}
  52
  53static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
  54{
  55        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
  56                return;
  57
  58        raw_spin_lock(&rt_b->rt_runtime_lock);
  59        if (!rt_b->rt_period_active) {
  60                rt_b->rt_period_active = 1;
  61                /*
  62                 * SCHED_DEADLINE updates the bandwidth, as a run away
  63                 * RT task with a DL task could hog a CPU. But DL does
  64                 * not reset the period. If a deadline task was running
  65                 * without an RT task running, it can cause RT tasks to
  66                 * throttle when they start up. Kick the timer right away
  67                 * to update the period.
  68                 */
  69                hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
  70                hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
  71        }
  72        raw_spin_unlock(&rt_b->rt_runtime_lock);
  73}
  74
  75#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
  76static void push_irq_work_func(struct irq_work *work);
  77#endif
  78
  79void init_rt_rq(struct rt_rq *rt_rq)
  80{
  81        struct rt_prio_array *array;
  82        int i;
  83
  84        array = &rt_rq->active;
  85        for (i = 0; i < MAX_RT_PRIO; i++) {
  86                INIT_LIST_HEAD(array->queue + i);
  87                __clear_bit(i, array->bitmap);
  88        }
  89        /* delimiter for bitsearch: */
  90        __set_bit(MAX_RT_PRIO, array->bitmap);
  91
  92#if defined CONFIG_SMP
  93        rt_rq->highest_prio.curr = MAX_RT_PRIO;
  94        rt_rq->highest_prio.next = MAX_RT_PRIO;
  95        rt_rq->rt_nr_migratory = 0;
  96        rt_rq->overloaded = 0;
  97        plist_head_init(&rt_rq->pushable_tasks);
  98
  99#ifdef HAVE_RT_PUSH_IPI
 100        rt_rq->push_flags = 0;
 101        rt_rq->push_cpu = nr_cpu_ids;
 102        raw_spin_lock_init(&rt_rq->push_lock);
 103        init_irq_work(&rt_rq->push_work, push_irq_work_func);
 104#endif
 105#endif /* CONFIG_SMP */
 106        /* We start is dequeued state, because no RT tasks are queued */
 107        rt_rq->rt_queued = 0;
 108
 109        rt_rq->rt_time = 0;
 110        rt_rq->rt_throttled = 0;
 111        rt_rq->rt_runtime = 0;
 112        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
 113}
 114
 115#ifdef CONFIG_RT_GROUP_SCHED
 116static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
 117{
 118        hrtimer_cancel(&rt_b->rt_period_timer);
 119}
 120
 121#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
 122
 123static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 124{
 125#ifdef CONFIG_SCHED_DEBUG
 126        WARN_ON_ONCE(!rt_entity_is_task(rt_se));
 127#endif
 128        return container_of(rt_se, struct task_struct, rt);
 129}
 130
 131static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 132{
 133        return rt_rq->rq;
 134}
 135
 136static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 137{
 138        return rt_se->rt_rq;
 139}
 140
 141static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 142{
 143        struct rt_rq *rt_rq = rt_se->rt_rq;
 144
 145        return rt_rq->rq;
 146}
 147
 148void free_rt_sched_group(struct task_group *tg)
 149{
 150        int i;
 151
 152        if (tg->rt_se)
 153                destroy_rt_bandwidth(&tg->rt_bandwidth);
 154
 155        for_each_possible_cpu(i) {
 156                if (tg->rt_rq)
 157                        kfree(tg->rt_rq[i]);
 158                if (tg->rt_se)
 159                        kfree(tg->rt_se[i]);
 160        }
 161
 162        kfree(tg->rt_rq);
 163        kfree(tg->rt_se);
 164}
 165
 166void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 167                struct sched_rt_entity *rt_se, int cpu,
 168                struct sched_rt_entity *parent)
 169{
 170        struct rq *rq = cpu_rq(cpu);
 171
 172        rt_rq->highest_prio.curr = MAX_RT_PRIO;
 173        rt_rq->rt_nr_boosted = 0;
 174        rt_rq->rq = rq;
 175        rt_rq->tg = tg;
 176
 177        tg->rt_rq[cpu] = rt_rq;
 178        tg->rt_se[cpu] = rt_se;
 179
 180        if (!rt_se)
 181                return;
 182
 183        if (!parent)
 184                rt_se->rt_rq = &rq->rt;
 185        else
 186                rt_se->rt_rq = parent->my_q;
 187
 188        rt_se->my_q = rt_rq;
 189        rt_se->parent = parent;
 190        INIT_LIST_HEAD(&rt_se->run_list);
 191}
 192
 193int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 194{
 195        struct rt_rq *rt_rq;
 196        struct sched_rt_entity *rt_se;
 197        int i;
 198
 199        tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
 200        if (!tg->rt_rq)
 201                goto err;
 202        tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
 203        if (!tg->rt_se)
 204                goto err;
 205
 206        init_rt_bandwidth(&tg->rt_bandwidth,
 207                        ktime_to_ns(def_rt_bandwidth.rt_period), 0);
 208
 209        for_each_possible_cpu(i) {
 210                rt_rq = kzalloc_node(sizeof(struct rt_rq),
 211                                     GFP_KERNEL, cpu_to_node(i));
 212                if (!rt_rq)
 213                        goto err;
 214
 215                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
 216                                     GFP_KERNEL, cpu_to_node(i));
 217                if (!rt_se)
 218                        goto err_free_rq;
 219
 220                init_rt_rq(rt_rq);
 221                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 222                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 223        }
 224
 225        return 1;
 226
 227err_free_rq:
 228        kfree(rt_rq);
 229err:
 230        return 0;
 231}
 232
 233#else /* CONFIG_RT_GROUP_SCHED */
 234
 235#define rt_entity_is_task(rt_se) (1)
 236
 237static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 238{
 239        return container_of(rt_se, struct task_struct, rt);
 240}
 241
 242static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 243{
 244        return container_of(rt_rq, struct rq, rt);
 245}
 246
 247static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 248{
 249        struct task_struct *p = rt_task_of(rt_se);
 250
 251        return task_rq(p);
 252}
 253
 254static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 255{
 256        struct rq *rq = rq_of_rt_se(rt_se);
 257
 258        return &rq->rt;
 259}
 260
 261void free_rt_sched_group(struct task_group *tg) { }
 262
 263int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 264{
 265        return 1;
 266}
 267#endif /* CONFIG_RT_GROUP_SCHED */
 268
 269#ifdef CONFIG_SMP
 270
 271static void pull_rt_task(struct rq *this_rq);
 272
 273static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 274{
 275        /* Try to pull RT tasks here if we lower this rq's prio */
 276        return rq->rt.highest_prio.curr > prev->prio;
 277}
 278
 279static inline int rt_overloaded(struct rq *rq)
 280{
 281        return atomic_read(&rq->rd->rto_count);
 282}
 283
 284static inline void rt_set_overload(struct rq *rq)
 285{
 286        if (!rq->online)
 287                return;
 288
 289        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 290        /*
 291         * Make sure the mask is visible before we set
 292         * the overload count. That is checked to determine
 293         * if we should look at the mask. It would be a shame
 294         * if we looked at the mask, but the mask was not
 295         * updated yet.
 296         *
 297         * Matched by the barrier in pull_rt_task().
 298         */
 299        smp_wmb();
 300        atomic_inc(&rq->rd->rto_count);
 301}
 302
 303static inline void rt_clear_overload(struct rq *rq)
 304{
 305        if (!rq->online)
 306                return;
 307
 308        /* the order here really doesn't matter */
 309        atomic_dec(&rq->rd->rto_count);
 310        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 311}
 312
 313static void update_rt_migration(struct rt_rq *rt_rq)
 314{
 315        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
 316                if (!rt_rq->overloaded) {
 317                        rt_set_overload(rq_of_rt_rq(rt_rq));
 318                        rt_rq->overloaded = 1;
 319                }
 320        } else if (rt_rq->overloaded) {
 321                rt_clear_overload(rq_of_rt_rq(rt_rq));
 322                rt_rq->overloaded = 0;
 323        }
 324}
 325
 326static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 327{
 328        struct task_struct *p;
 329
 330        if (!rt_entity_is_task(rt_se))
 331                return;
 332
 333        p = rt_task_of(rt_se);
 334        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 335
 336        rt_rq->rt_nr_total++;
 337        if (p->nr_cpus_allowed > 1)
 338                rt_rq->rt_nr_migratory++;
 339
 340        update_rt_migration(rt_rq);
 341}
 342
 343static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 344{
 345        struct task_struct *p;
 346
 347        if (!rt_entity_is_task(rt_se))
 348                return;
 349
 350        p = rt_task_of(rt_se);
 351        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 352
 353        rt_rq->rt_nr_total--;
 354        if (p->nr_cpus_allowed > 1)
 355                rt_rq->rt_nr_migratory--;
 356
 357        update_rt_migration(rt_rq);
 358}
 359
 360static inline int has_pushable_tasks(struct rq *rq)
 361{
 362        return !plist_head_empty(&rq->rt.pushable_tasks);
 363}
 364
 365static DEFINE_PER_CPU(struct callback_head, rt_push_head);
 366static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
 367
 368static void push_rt_tasks(struct rq *);
 369static void pull_rt_task(struct rq *);
 370
 371static inline void queue_push_tasks(struct rq *rq)
 372{
 373        if (!has_pushable_tasks(rq))
 374                return;
 375
 376        queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
 377}
 378
 379static inline void queue_pull_task(struct rq *rq)
 380{
 381        queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
 382}
 383
 384static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 385{
 386        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 387        plist_node_init(&p->pushable_tasks, p->prio);
 388        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
 389
 390        /* Update the highest prio pushable task */
 391        if (p->prio < rq->rt.highest_prio.next)
 392                rq->rt.highest_prio.next = p->prio;
 393}
 394
 395static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 396{
 397        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 398
 399        /* Update the new highest prio pushable task */
 400        if (has_pushable_tasks(rq)) {
 401                p = plist_first_entry(&rq->rt.pushable_tasks,
 402                                      struct task_struct, pushable_tasks);
 403                rq->rt.highest_prio.next = p->prio;
 404        } else
 405                rq->rt.highest_prio.next = MAX_RT_PRIO;
 406}
 407
 408#else
 409
 410static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 411{
 412}
 413
 414static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 415{
 416}
 417
 418static inline
 419void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 420{
 421}
 422
 423static inline
 424void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 425{
 426}
 427
 428static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 429{
 430        return false;
 431}
 432
 433static inline void pull_rt_task(struct rq *this_rq)
 434{
 435}
 436
 437static inline void queue_push_tasks(struct rq *rq)
 438{
 439}
 440#endif /* CONFIG_SMP */
 441
 442static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
 443static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
 444
 445static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 446{
 447        return rt_se->on_rq;
 448}
 449
 450#ifdef CONFIG_RT_GROUP_SCHED
 451
 452static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 453{
 454        if (!rt_rq->tg)
 455                return RUNTIME_INF;
 456
 457        return rt_rq->rt_runtime;
 458}
 459
 460static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 461{
 462        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
 463}
 464
 465typedef struct task_group *rt_rq_iter_t;
 466
 467static inline struct task_group *next_task_group(struct task_group *tg)
 468{
 469        do {
 470                tg = list_entry_rcu(tg->list.next,
 471                        typeof(struct task_group), list);
 472        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
 473
 474        if (&tg->list == &task_groups)
 475                tg = NULL;
 476
 477        return tg;
 478}
 479
 480#define for_each_rt_rq(rt_rq, iter, rq)                                 \
 481        for (iter = container_of(&task_groups, typeof(*iter), list);    \
 482                (iter = next_task_group(iter)) &&                       \
 483                (rt_rq = iter->rt_rq[cpu_of(rq)]);)
 484
 485#define for_each_sched_rt_entity(rt_se) \
 486        for (; rt_se; rt_se = rt_se->parent)
 487
 488static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 489{
 490        return rt_se->my_q;
 491}
 492
 493static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
 494static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
 495
 496static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 497{
 498        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 499        struct rq *rq = rq_of_rt_rq(rt_rq);
 500        struct sched_rt_entity *rt_se;
 501
 502        int cpu = cpu_of(rq);
 503
 504        rt_se = rt_rq->tg->rt_se[cpu];
 505
 506        if (rt_rq->rt_nr_running) {
 507                if (!rt_se)
 508                        enqueue_top_rt_rq(rt_rq);
 509                else if (!on_rt_rq(rt_se))
 510                        enqueue_rt_entity(rt_se, 0);
 511
 512                if (rt_rq->highest_prio.curr < curr->prio)
 513                        resched_curr(rq);
 514        }
 515}
 516
 517static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 518{
 519        struct sched_rt_entity *rt_se;
 520        int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 521
 522        rt_se = rt_rq->tg->rt_se[cpu];
 523
 524        if (!rt_se)
 525                dequeue_top_rt_rq(rt_rq);
 526        else if (on_rt_rq(rt_se))
 527                dequeue_rt_entity(rt_se, 0);
 528}
 529
 530static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 531{
 532        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
 533}
 534
 535static int rt_se_boosted(struct sched_rt_entity *rt_se)
 536{
 537        struct rt_rq *rt_rq = group_rt_rq(rt_se);
 538        struct task_struct *p;
 539
 540        if (rt_rq)
 541                return !!rt_rq->rt_nr_boosted;
 542
 543        p = rt_task_of(rt_se);
 544        return p->prio != p->normal_prio;
 545}
 546
 547#ifdef CONFIG_SMP
 548static inline const struct cpumask *sched_rt_period_mask(void)
 549{
 550        return this_rq()->rd->span;
 551}
 552#else
 553static inline const struct cpumask *sched_rt_period_mask(void)
 554{
 555        return cpu_online_mask;
 556}
 557#endif
 558
 559static inline
 560struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 561{
 562        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
 563}
 564
 565static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 566{
 567        return &rt_rq->tg->rt_bandwidth;
 568}
 569
 570#else /* !CONFIG_RT_GROUP_SCHED */
 571
 572static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 573{
 574        return rt_rq->rt_runtime;
 575}
 576
 577static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 578{
 579        return ktime_to_ns(def_rt_bandwidth.rt_period);
 580}
 581
 582typedef struct rt_rq *rt_rq_iter_t;
 583
 584#define for_each_rt_rq(rt_rq, iter, rq) \
 585        for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 586
 587#define for_each_sched_rt_entity(rt_se) \
 588        for (; rt_se; rt_se = NULL)
 589
 590static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 591{
 592        return NULL;
 593}
 594
 595static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 596{
 597        struct rq *rq = rq_of_rt_rq(rt_rq);
 598
 599        if (!rt_rq->rt_nr_running)
 600                return;
 601
 602        enqueue_top_rt_rq(rt_rq);
 603        resched_curr(rq);
 604}
 605
 606static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 607{
 608        dequeue_top_rt_rq(rt_rq);
 609}
 610
 611static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 612{
 613        return rt_rq->rt_throttled;
 614}
 615
 616static inline const struct cpumask *sched_rt_period_mask(void)
 617{
 618        return cpu_online_mask;
 619}
 620
 621static inline
 622struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 623{
 624        return &cpu_rq(cpu)->rt;
 625}
 626
 627static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 628{
 629        return &def_rt_bandwidth;
 630}
 631
 632#endif /* CONFIG_RT_GROUP_SCHED */
 633
 634bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
 635{
 636        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 637
 638        return (hrtimer_active(&rt_b->rt_period_timer) ||
 639                rt_rq->rt_time < rt_b->rt_runtime);
 640}
 641
 642#ifdef CONFIG_SMP
 643/*
 644 * We ran out of runtime, see if we can borrow some from our neighbours.
 645 */
 646static void do_balance_runtime(struct rt_rq *rt_rq)
 647{
 648        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 649        struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
 650        int i, weight;
 651        u64 rt_period;
 652
 653        weight = cpumask_weight(rd->span);
 654
 655        raw_spin_lock(&rt_b->rt_runtime_lock);
 656        rt_period = ktime_to_ns(rt_b->rt_period);
 657        for_each_cpu(i, rd->span) {
 658                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 659                s64 diff;
 660
 661                if (iter == rt_rq)
 662                        continue;
 663
 664                raw_spin_lock(&iter->rt_runtime_lock);
 665                /*
 666                 * Either all rqs have inf runtime and there's nothing to steal
 667                 * or __disable_runtime() below sets a specific rq to inf to
 668                 * indicate its been disabled and disalow stealing.
 669                 */
 670                if (iter->rt_runtime == RUNTIME_INF)
 671                        goto next;
 672
 673                /*
 674                 * From runqueues with spare time, take 1/n part of their
 675                 * spare time, but no more than our period.
 676                 */
 677                diff = iter->rt_runtime - iter->rt_time;
 678                if (diff > 0) {
 679                        diff = div_u64((u64)diff, weight);
 680                        if (rt_rq->rt_runtime + diff > rt_period)
 681                                diff = rt_period - rt_rq->rt_runtime;
 682                        iter->rt_runtime -= diff;
 683                        rt_rq->rt_runtime += diff;
 684                        if (rt_rq->rt_runtime == rt_period) {
 685                                raw_spin_unlock(&iter->rt_runtime_lock);
 686                                break;
 687                        }
 688                }
 689next:
 690                raw_spin_unlock(&iter->rt_runtime_lock);
 691        }
 692        raw_spin_unlock(&rt_b->rt_runtime_lock);
 693}
 694
 695/*
 696 * Ensure this RQ takes back all the runtime it lend to its neighbours.
 697 */
 698static void __disable_runtime(struct rq *rq)
 699{
 700        struct root_domain *rd = rq->rd;
 701        rt_rq_iter_t iter;
 702        struct rt_rq *rt_rq;
 703
 704        if (unlikely(!scheduler_running))
 705                return;
 706
 707        for_each_rt_rq(rt_rq, iter, rq) {
 708                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 709                s64 want;
 710                int i;
 711
 712                raw_spin_lock(&rt_b->rt_runtime_lock);
 713                raw_spin_lock(&rt_rq->rt_runtime_lock);
 714                /*
 715                 * Either we're all inf and nobody needs to borrow, or we're
 716                 * already disabled and thus have nothing to do, or we have
 717                 * exactly the right amount of runtime to take out.
 718                 */
 719                if (rt_rq->rt_runtime == RUNTIME_INF ||
 720                                rt_rq->rt_runtime == rt_b->rt_runtime)
 721                        goto balanced;
 722                raw_spin_unlock(&rt_rq->rt_runtime_lock);
 723
 724                /*
 725                 * Calculate the difference between what we started out with
 726                 * and what we current have, that's the amount of runtime
 727                 * we lend and now have to reclaim.
 728                 */
 729                want = rt_b->rt_runtime - rt_rq->rt_runtime;
 730
 731                /*
 732                 * Greedy reclaim, take back as much as we can.
 733                 */
 734                for_each_cpu(i, rd->span) {
 735                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 736                        s64 diff;
 737
 738                        /*
 739                         * Can't reclaim from ourselves or disabled runqueues.
 740                         */
 741                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 742                                continue;
 743
 744                        raw_spin_lock(&iter->rt_runtime_lock);
 745                        if (want > 0) {
 746                                diff = min_t(s64, iter->rt_runtime, want);
 747                                iter->rt_runtime -= diff;
 748                                want -= diff;
 749                        } else {
 750                                iter->rt_runtime -= want;
 751                                want -= want;
 752                        }
 753                        raw_spin_unlock(&iter->rt_runtime_lock);
 754
 755                        if (!want)
 756                                break;
 757                }
 758
 759                raw_spin_lock(&rt_rq->rt_runtime_lock);
 760                /*
 761                 * We cannot be left wanting - that would mean some runtime
 762                 * leaked out of the system.
 763                 */
 764                BUG_ON(want);
 765balanced:
 766                /*
 767                 * Disable all the borrow logic by pretending we have inf
 768                 * runtime - in which case borrowing doesn't make sense.
 769                 */
 770                rt_rq->rt_runtime = RUNTIME_INF;
 771                rt_rq->rt_throttled = 0;
 772                raw_spin_unlock(&rt_rq->rt_runtime_lock);
 773                raw_spin_unlock(&rt_b->rt_runtime_lock);
 774
 775                /* Make rt_rq available for pick_next_task() */
 776                sched_rt_rq_enqueue(rt_rq);
 777        }
 778}
 779
 780static void __enable_runtime(struct rq *rq)
 781{
 782        rt_rq_iter_t iter;
 783        struct rt_rq *rt_rq;
 784
 785        if (unlikely(!scheduler_running))
 786                return;
 787
 788        /*
 789         * Reset each runqueue's bandwidth settings
 790         */
 791        for_each_rt_rq(rt_rq, iter, rq) {
 792                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 793
 794                raw_spin_lock(&rt_b->rt_runtime_lock);
 795                raw_spin_lock(&rt_rq->rt_runtime_lock);
 796                rt_rq->rt_runtime = rt_b->rt_runtime;
 797                rt_rq->rt_time = 0;
 798                rt_rq->rt_throttled = 0;
 799                raw_spin_unlock(&rt_rq->rt_runtime_lock);
 800                raw_spin_unlock(&rt_b->rt_runtime_lock);
 801        }
 802}
 803
 804static void balance_runtime(struct rt_rq *rt_rq)
 805{
 806        if (!sched_feat(RT_RUNTIME_SHARE))
 807                return;
 808
 809        if (rt_rq->rt_time > rt_rq->rt_runtime) {
 810                raw_spin_unlock(&rt_rq->rt_runtime_lock);
 811                do_balance_runtime(rt_rq);
 812                raw_spin_lock(&rt_rq->rt_runtime_lock);
 813        }
 814}
 815#else /* !CONFIG_SMP */
 816static inline void balance_runtime(struct rt_rq *rt_rq) {}
 817#endif /* CONFIG_SMP */
 818
 819static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 820{
 821        int i, idle = 1, throttled = 0;
 822        const struct cpumask *span;
 823
 824        span = sched_rt_period_mask();
 825#ifdef CONFIG_RT_GROUP_SCHED
 826        /*
 827         * FIXME: isolated CPUs should really leave the root task group,
 828         * whether they are isolcpus or were isolated via cpusets, lest
 829         * the timer run on a CPU which does not service all runqueues,
 830         * potentially leaving other CPUs indefinitely throttled.  If
 831         * isolation is really required, the user will turn the throttle
 832         * off to kill the perturbations it causes anyway.  Meanwhile,
 833         * this maintains functionality for boot and/or troubleshooting.
 834         */
 835        if (rt_b == &root_task_group.rt_bandwidth)
 836                span = cpu_online_mask;
 837#endif
 838        for_each_cpu(i, span) {
 839                int enqueue = 0;
 840                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 841                struct rq *rq = rq_of_rt_rq(rt_rq);
 842
 843                raw_spin_lock(&rq->lock);
 844                if (rt_rq->rt_time) {
 845                        u64 runtime;
 846
 847                        raw_spin_lock(&rt_rq->rt_runtime_lock);
 848                        if (rt_rq->rt_throttled)
 849                                balance_runtime(rt_rq);
 850                        runtime = rt_rq->rt_runtime;
 851                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
 852                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
 853                                rt_rq->rt_throttled = 0;
 854                                enqueue = 1;
 855
 856                                /*
 857                                 * When we're idle and a woken (rt) task is
 858                                 * throttled check_preempt_curr() will set
 859                                 * skip_update and the time between the wakeup
 860                                 * and this unthrottle will get accounted as
 861                                 * 'runtime'.
 862                                 */
 863                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
 864                                        rq_clock_skip_update(rq, false);
 865                        }
 866                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
 867                                idle = 0;
 868                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
 869                } else if (rt_rq->rt_nr_running) {
 870                        idle = 0;
 871                        if (!rt_rq_throttled(rt_rq))
 872                                enqueue = 1;
 873                }
 874                if (rt_rq->rt_throttled)
 875                        throttled = 1;
 876
 877                if (enqueue)
 878                        sched_rt_rq_enqueue(rt_rq);
 879                raw_spin_unlock(&rq->lock);
 880        }
 881
 882        if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
 883                return 1;
 884
 885        return idle;
 886}
 887
 888static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 889{
 890#ifdef CONFIG_RT_GROUP_SCHED
 891        struct rt_rq *rt_rq = group_rt_rq(rt_se);
 892
 893        if (rt_rq)
 894                return rt_rq->highest_prio.curr;
 895#endif
 896
 897        return rt_task_of(rt_se)->prio;
 898}
 899
 900static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 901{
 902        u64 runtime = sched_rt_runtime(rt_rq);
 903
 904        if (rt_rq->rt_throttled)
 905                return rt_rq_throttled(rt_rq);
 906
 907        if (runtime >= sched_rt_period(rt_rq))
 908                return 0;
 909
 910        balance_runtime(rt_rq);
 911        runtime = sched_rt_runtime(rt_rq);
 912        if (runtime == RUNTIME_INF)
 913                return 0;
 914
 915        if (rt_rq->rt_time > runtime) {
 916                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 917
 918                /*
 919                 * Don't actually throttle groups that have no runtime assigned
 920                 * but accrue some time due to boosting.
 921                 */
 922                if (likely(rt_b->rt_runtime)) {
 923                        rt_rq->rt_throttled = 1;
 924                        printk_deferred_once("sched: RT throttling activated\n");
 925                } else {
 926                        /*
 927                         * In case we did anyway, make it go away,
 928                         * replenishment is a joke, since it will replenish us
 929                         * with exactly 0 ns.
 930                         */
 931                        rt_rq->rt_time = 0;
 932                }
 933
 934                if (rt_rq_throttled(rt_rq)) {
 935                        sched_rt_rq_dequeue(rt_rq);
 936                        return 1;
 937                }
 938        }
 939
 940        return 0;
 941}
 942
 943/*
 944 * Update the current task's runtime statistics. Skip current tasks that
 945 * are not in our scheduling class.
 946 */
 947static void update_curr_rt(struct rq *rq)
 948{
 949        struct task_struct *curr = rq->curr;
 950        struct sched_rt_entity *rt_se = &curr->rt;
 951        u64 delta_exec;
 952
 953        if (curr->sched_class != &rt_sched_class)
 954                return;
 955
 956        /* Kick cpufreq (see the comment in linux/cpufreq.h). */
 957        if (cpu_of(rq) == smp_processor_id())
 958                cpufreq_trigger_update(rq_clock(rq));
 959
 960        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 961        if (unlikely((s64)delta_exec <= 0))
 962                return;
 963
 964        schedstat_set(curr->se.statistics.exec_max,
 965                      max(curr->se.statistics.exec_max, delta_exec));
 966
 967        curr->se.sum_exec_runtime += delta_exec;
 968        account_group_exec_runtime(curr, delta_exec);
 969
 970        curr->se.exec_start = rq_clock_task(rq);
 971        cpuacct_charge(curr, delta_exec);
 972
 973        sched_rt_avg_update(rq, delta_exec);
 974
 975        if (!rt_bandwidth_enabled())
 976                return;
 977
 978        for_each_sched_rt_entity(rt_se) {
 979                struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 980
 981                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
 982                        raw_spin_lock(&rt_rq->rt_runtime_lock);
 983                        rt_rq->rt_time += delta_exec;
 984                        if (sched_rt_runtime_exceeded(rt_rq))
 985                                resched_curr(rq);
 986                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
 987                }
 988        }
 989}
 990
 991static void
 992dequeue_top_rt_rq(struct rt_rq *rt_rq)
 993{
 994        struct rq *rq = rq_of_rt_rq(rt_rq);
 995
 996        BUG_ON(&rq->rt != rt_rq);
 997
 998        if (!rt_rq->rt_queued)
 999                return;
1000
1001        BUG_ON(!rq->nr_running);
1002
1003        sub_nr_running(rq, rt_rq->rt_nr_running);
1004        rt_rq->rt_queued = 0;
1005}
1006
1007static void
1008enqueue_top_rt_rq(struct rt_rq *rt_rq)
1009{
1010        struct rq *rq = rq_of_rt_rq(rt_rq);
1011
1012        BUG_ON(&rq->rt != rt_rq);
1013
1014        if (rt_rq->rt_queued)
1015                return;
1016        if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1017                return;
1018
1019        add_nr_running(rq, rt_rq->rt_nr_running);
1020        rt_rq->rt_queued = 1;
1021}
1022
1023#if defined CONFIG_SMP
1024
1025static void
1026inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1027{
1028        struct rq *rq = rq_of_rt_rq(rt_rq);
1029
1030#ifdef CONFIG_RT_GROUP_SCHED
1031        /*
1032         * Change rq's cpupri only if rt_rq is the top queue.
1033         */
1034        if (&rq->rt != rt_rq)
1035                return;
1036#endif
1037        if (rq->online && prio < prev_prio)
1038                cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1039}
1040
1041static void
1042dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1043{
1044        struct rq *rq = rq_of_rt_rq(rt_rq);
1045
1046#ifdef CONFIG_RT_GROUP_SCHED
1047        /*
1048         * Change rq's cpupri only if rt_rq is the top queue.
1049         */
1050        if (&rq->rt != rt_rq)
1051                return;
1052#endif
1053        if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1054                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1055}
1056
1057#else /* CONFIG_SMP */
1058
1059static inline
1060void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1061static inline
1062void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1063
1064#endif /* CONFIG_SMP */
1065
1066#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1067static void
1068inc_rt_prio(struct rt_rq *rt_rq, int prio)
1069{
1070        int prev_prio = rt_rq->highest_prio.curr;
1071
1072        if (prio < prev_prio)
1073                rt_rq->highest_prio.curr = prio;
1074
1075        inc_rt_prio_smp(rt_rq, prio, prev_prio);
1076}
1077
1078static void
1079dec_rt_prio(struct rt_rq *rt_rq, int prio)
1080{
1081        int prev_prio = rt_rq->highest_prio.curr;
1082
1083        if (rt_rq->rt_nr_running) {
1084
1085                WARN_ON(prio < prev_prio);
1086
1087                /*
1088                 * This may have been our highest task, and therefore
1089                 * we may have some recomputation to do
1090                 */
1091                if (prio == prev_prio) {
1092                        struct rt_prio_array *array = &rt_rq->active;
1093
1094                        rt_rq->highest_prio.curr =
1095                                sched_find_first_bit(array->bitmap);
1096                }
1097
1098        } else
1099                rt_rq->highest_prio.curr = MAX_RT_PRIO;
1100
1101        dec_rt_prio_smp(rt_rq, prio, prev_prio);
1102}
1103
1104#else
1105
1106static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1107static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1108
1109#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1110
1111#ifdef CONFIG_RT_GROUP_SCHED
1112
1113static void
1114inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1115{
1116        if (rt_se_boosted(rt_se))
1117                rt_rq->rt_nr_boosted++;
1118
1119        if (rt_rq->tg)
1120                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1121}
1122
1123static void
1124dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1125{
1126        if (rt_se_boosted(rt_se))
1127                rt_rq->rt_nr_boosted--;
1128
1129        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1130}
1131
1132#else /* CONFIG_RT_GROUP_SCHED */
1133
1134static void
1135inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1136{
1137        start_rt_bandwidth(&def_rt_bandwidth);
1138}
1139
1140static inline
1141void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1142
1143#endif /* CONFIG_RT_GROUP_SCHED */
1144
1145static inline
1146unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1147{
1148        struct rt_rq *group_rq = group_rt_rq(rt_se);
1149
1150        if (group_rq)
1151                return group_rq->rt_nr_running;
1152        else
1153                return 1;
1154}
1155
1156static inline
1157unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1158{
1159        struct rt_rq *group_rq = group_rt_rq(rt_se);
1160        struct task_struct *tsk;
1161
1162        if (group_rq)
1163                return group_rq->rr_nr_running;
1164
1165        tsk = rt_task_of(rt_se);
1166
1167        return (tsk->policy == SCHED_RR) ? 1 : 0;
1168}
1169
1170static inline
1171void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1172{
1173        int prio = rt_se_prio(rt_se);
1174
1175        WARN_ON(!rt_prio(prio));
1176        rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1177        rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1178
1179        inc_rt_prio(rt_rq, prio);
1180        inc_rt_migration(rt_se, rt_rq);
1181        inc_rt_group(rt_se, rt_rq);
1182}
1183
1184static inline
1185void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1186{
1187        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1188        WARN_ON(!rt_rq->rt_nr_running);
1189        rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1190        rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1191
1192        dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1193        dec_rt_migration(rt_se, rt_rq);
1194        dec_rt_group(rt_se, rt_rq);
1195}
1196
1197/*
1198 * Change rt_se->run_list location unless SAVE && !MOVE
1199 *
1200 * assumes ENQUEUE/DEQUEUE flags match
1201 */
1202static inline bool move_entity(unsigned int flags)
1203{
1204        if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1205                return false;
1206
1207        return true;
1208}
1209
1210static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1211{
1212        list_del_init(&rt_se->run_list);
1213
1214        if (list_empty(array->queue + rt_se_prio(rt_se)))
1215                __clear_bit(rt_se_prio(rt_se), array->bitmap);
1216
1217        rt_se->on_list = 0;
1218}
1219
1220static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1221{
1222        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1223        struct rt_prio_array *array = &rt_rq->active;
1224        struct rt_rq *group_rq = group_rt_rq(rt_se);
1225        struct list_head *queue = array->queue + rt_se_prio(rt_se);
1226
1227        /*
1228         * Don't enqueue the group if its throttled, or when empty.
1229         * The latter is a consequence of the former when a child group
1230         * get throttled and the current group doesn't have any other
1231         * active members.
1232         */
1233        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1234                if (rt_se->on_list)
1235                        __delist_rt_entity(rt_se, array);
1236                return;
1237        }
1238
1239        if (move_entity(flags)) {
1240                WARN_ON_ONCE(rt_se->on_list);
1241                if (flags & ENQUEUE_HEAD)
1242                        list_add(&rt_se->run_list, queue);
1243                else
1244                        list_add_tail(&rt_se->run_list, queue);
1245
1246                __set_bit(rt_se_prio(rt_se), array->bitmap);
1247                rt_se->on_list = 1;
1248        }
1249        rt_se->on_rq = 1;
1250
1251        inc_rt_tasks(rt_se, rt_rq);
1252}
1253
1254static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1255{
1256        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1257        struct rt_prio_array *array = &rt_rq->active;
1258
1259        if (move_entity(flags)) {
1260                WARN_ON_ONCE(!rt_se->on_list);
1261                __delist_rt_entity(rt_se, array);
1262        }
1263        rt_se->on_rq = 0;
1264
1265        dec_rt_tasks(rt_se, rt_rq);
1266}
1267
1268/*
1269 * Because the prio of an upper entry depends on the lower
1270 * entries, we must remove entries top - down.
1271 */
1272static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1273{
1274        struct sched_rt_entity *back = NULL;
1275
1276        for_each_sched_rt_entity(rt_se) {
1277                rt_se->back = back;
1278                back = rt_se;
1279        }
1280
1281        dequeue_top_rt_rq(rt_rq_of_se(back));
1282
1283        for (rt_se = back; rt_se; rt_se = rt_se->back) {
1284                if (on_rt_rq(rt_se))
1285                        __dequeue_rt_entity(rt_se, flags);
1286        }
1287}
1288
1289static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1290{
1291        struct rq *rq = rq_of_rt_se(rt_se);
1292
1293        dequeue_rt_stack(rt_se, flags);
1294        for_each_sched_rt_entity(rt_se)
1295                __enqueue_rt_entity(rt_se, flags);
1296        enqueue_top_rt_rq(&rq->rt);
1297}
1298
1299static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1300{
1301        struct rq *rq = rq_of_rt_se(rt_se);
1302
1303        dequeue_rt_stack(rt_se, flags);
1304
1305        for_each_sched_rt_entity(rt_se) {
1306                struct rt_rq *rt_rq = group_rt_rq(rt_se);
1307
1308                if (rt_rq && rt_rq->rt_nr_running)
1309                        __enqueue_rt_entity(rt_se, flags);
1310        }
1311        enqueue_top_rt_rq(&rq->rt);
1312}
1313
1314/*
1315 * Adding/removing a task to/from a priority array:
1316 */
1317static void
1318enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1319{
1320        struct sched_rt_entity *rt_se = &p->rt;
1321
1322        if (flags & ENQUEUE_WAKEUP)
1323                rt_se->timeout = 0;
1324
1325        enqueue_rt_entity(rt_se, flags);
1326
1327        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1328                enqueue_pushable_task(rq, p);
1329}
1330
1331static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1332{
1333        struct sched_rt_entity *rt_se = &p->rt;
1334
1335        update_curr_rt(rq);
1336        dequeue_rt_entity(rt_se, flags);
1337
1338        dequeue_pushable_task(rq, p);
1339}
1340
1341/*
1342 * Put task to the head or the end of the run list without the overhead of
1343 * dequeue followed by enqueue.
1344 */
1345static void
1346requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1347{
1348        if (on_rt_rq(rt_se)) {
1349                struct rt_prio_array *array = &rt_rq->active;
1350                struct list_head *queue = array->queue + rt_se_prio(rt_se);
1351
1352                if (head)
1353                        list_move(&rt_se->run_list, queue);
1354                else
1355                        list_move_tail(&rt_se->run_list, queue);
1356        }
1357}
1358
1359static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1360{
1361        struct sched_rt_entity *rt_se = &p->rt;
1362        struct rt_rq *rt_rq;
1363
1364        for_each_sched_rt_entity(rt_se) {
1365                rt_rq = rt_rq_of_se(rt_se);
1366                requeue_rt_entity(rt_rq, rt_se, head);
1367        }
1368}
1369
1370static void yield_task_rt(struct rq *rq)
1371{
1372        requeue_task_rt(rq, rq->curr, 0);
1373}
1374
1375#ifdef CONFIG_SMP
1376static int find_lowest_rq(struct task_struct *task);
1377
1378static int
1379select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1380{
1381        struct task_struct *curr;
1382        struct rq *rq;
1383
1384        /* For anything but wake ups, just return the task_cpu */
1385        if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1386                goto out;
1387
1388        rq = cpu_rq(cpu);
1389
1390        rcu_read_lock();
1391        curr = READ_ONCE(rq->curr); /* unlocked access */
1392
1393        /*
1394         * If the current task on @p's runqueue is an RT task, then
1395         * try to see if we can wake this RT task up on another
1396         * runqueue. Otherwise simply start this RT task
1397         * on its current runqueue.
1398         *
1399         * We want to avoid overloading runqueues. If the woken
1400         * task is a higher priority, then it will stay on this CPU
1401         * and the lower prio task should be moved to another CPU.
1402         * Even though this will probably make the lower prio task
1403         * lose its cache, we do not want to bounce a higher task
1404         * around just because it gave up its CPU, perhaps for a
1405         * lock?
1406         *
1407         * For equal prio tasks, we just let the scheduler sort it out.
1408         *
1409         * Otherwise, just let it ride on the affined RQ and the
1410         * post-schedule router will push the preempted task away
1411         *
1412         * This test is optimistic, if we get it wrong the load-balancer
1413         * will have to sort it out.
1414         */
1415        if (curr && unlikely(rt_task(curr)) &&
1416            (curr->nr_cpus_allowed < 2 ||
1417             curr->prio <= p->prio)) {
1418                int target = find_lowest_rq(p);
1419
1420                /*
1421                 * Don't bother moving it if the destination CPU is
1422                 * not running a lower priority task.
1423                 */
1424                if (target != -1 &&
1425                    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1426                        cpu = target;
1427        }
1428        rcu_read_unlock();
1429
1430out:
1431        return cpu;
1432}
1433
1434static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1435{
1436        /*
1437         * Current can't be migrated, useless to reschedule,
1438         * let's hope p can move out.
1439         */
1440        if (rq->curr->nr_cpus_allowed == 1 ||
1441            !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1442                return;
1443
1444        /*
1445         * p is migratable, so let's not schedule it and
1446         * see if it is pushed or pulled somewhere else.
1447         */
1448        if (p->nr_cpus_allowed != 1
1449            && cpupri_find(&rq->rd->cpupri, p, NULL))
1450                return;
1451
1452        /*
1453         * There appears to be other cpus that can accept
1454         * current and none to run 'p', so lets reschedule
1455         * to try and push current away:
1456         */
1457        requeue_task_rt(rq, p, 1);
1458        resched_curr(rq);
1459}
1460
1461#endif /* CONFIG_SMP */
1462
1463/*
1464 * Preempt the current task with a newly woken task if needed:
1465 */
1466static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1467{
1468        if (p->prio < rq->curr->prio) {
1469                resched_curr(rq);
1470                return;
1471        }
1472
1473#ifdef CONFIG_SMP
1474        /*
1475         * If:
1476         *
1477         * - the newly woken task is of equal priority to the current task
1478         * - the newly woken task is non-migratable while current is migratable
1479         * - current will be preempted on the next reschedule
1480         *
1481         * we should check to see if current can readily move to a different
1482         * cpu.  If so, we will reschedule to allow the push logic to try
1483         * to move current somewhere else, making room for our non-migratable
1484         * task.
1485         */
1486        if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1487                check_preempt_equal_prio(rq, p);
1488#endif
1489}
1490
1491static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1492                                                   struct rt_rq *rt_rq)
1493{
1494        struct rt_prio_array *array = &rt_rq->active;
1495        struct sched_rt_entity *next = NULL;
1496        struct list_head *queue;
1497        int idx;
1498
1499        idx = sched_find_first_bit(array->bitmap);
1500        BUG_ON(idx >= MAX_RT_PRIO);
1501
1502        queue = array->queue + idx;
1503        next = list_entry(queue->next, struct sched_rt_entity, run_list);
1504
1505        return next;
1506}
1507
1508static struct task_struct *_pick_next_task_rt(struct rq *rq)
1509{
1510        struct sched_rt_entity *rt_se;
1511        struct task_struct *p;
1512        struct rt_rq *rt_rq  = &rq->rt;
1513
1514        do {
1515                rt_se = pick_next_rt_entity(rq, rt_rq);
1516                BUG_ON(!rt_se);
1517                rt_rq = group_rt_rq(rt_se);
1518        } while (rt_rq);
1519
1520        p = rt_task_of(rt_se);
1521        p->se.exec_start = rq_clock_task(rq);
1522
1523        return p;
1524}
1525
1526static struct task_struct *
1527pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1528{
1529        struct task_struct *p;
1530        struct rt_rq *rt_rq = &rq->rt;
1531
1532        if (need_pull_rt_task(rq, prev)) {
1533                /*
1534                 * This is OK, because current is on_cpu, which avoids it being
1535                 * picked for load-balance and preemption/IRQs are still
1536                 * disabled avoiding further scheduler activity on it and we're
1537                 * being very careful to re-start the picking loop.
1538                 */
1539                lockdep_unpin_lock(&rq->lock);
1540                pull_rt_task(rq);
1541                lockdep_pin_lock(&rq->lock);
1542                /*
1543                 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1544                 * means a dl or stop task can slip in, in which case we need
1545                 * to re-start task selection.
1546                 */
1547                if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1548                             rq->dl.dl_nr_running))
1549                        return RETRY_TASK;
1550        }
1551
1552        /*
1553         * We may dequeue prev's rt_rq in put_prev_task().
1554         * So, we update time before rt_nr_running check.
1555         */
1556        if (prev->sched_class == &rt_sched_class)
1557                update_curr_rt(rq);
1558
1559        if (!rt_rq->rt_queued)
1560                return NULL;
1561
1562        put_prev_task(rq, prev);
1563
1564        p = _pick_next_task_rt(rq);
1565
1566        /* The running task is never eligible for pushing */
1567        dequeue_pushable_task(rq, p);
1568
1569        queue_push_tasks(rq);
1570
1571        return p;
1572}
1573
1574static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1575{
1576        update_curr_rt(rq);
1577
1578        /*
1579         * The previous task needs to be made eligible for pushing
1580         * if it is still active
1581         */
1582        if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1583                enqueue_pushable_task(rq, p);
1584}
1585
1586#ifdef CONFIG_SMP
1587
1588/* Only try algorithms three times */
1589#define RT_MAX_TRIES 3
1590
1591static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1592{
1593        if (!task_running(rq, p) &&
1594            cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1595                return 1;
1596        return 0;
1597}
1598
1599/*
1600 * Return the highest pushable rq's task, which is suitable to be executed
1601 * on the cpu, NULL otherwise
1602 */
1603static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1604{
1605        struct plist_head *head = &rq->rt.pushable_tasks;
1606        struct task_struct *p;
1607
1608        if (!has_pushable_tasks(rq))
1609                return NULL;
1610
1611        plist_for_each_entry(p, head, pushable_tasks) {
1612                if (pick_rt_task(rq, p, cpu))
1613                        return p;
1614        }
1615
1616        return NULL;
1617}
1618
1619static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1620
1621static int find_lowest_rq(struct task_struct *task)
1622{
1623        struct sched_domain *sd;
1624        struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1625        int this_cpu = smp_processor_id();
1626        int cpu      = task_cpu(task);
1627
1628        /* Make sure the mask is initialized first */
1629        if (unlikely(!lowest_mask))
1630                return -1;
1631
1632        if (task->nr_cpus_allowed == 1)
1633                return -1; /* No other targets possible */
1634
1635        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1636                return -1; /* No targets found */
1637
1638        /*
1639         * At this point we have built a mask of cpus representing the
1640         * lowest priority tasks in the system.  Now we want to elect
1641         * the best one based on our affinity and topology.
1642         *
1643         * We prioritize the last cpu that the task executed on since
1644         * it is most likely cache-hot in that location.
1645         */
1646        if (cpumask_test_cpu(cpu, lowest_mask))
1647                return cpu;
1648
1649        /*
1650         * Otherwise, we consult the sched_domains span maps to figure
1651         * out which cpu is logically closest to our hot cache data.
1652         */
1653        if (!cpumask_test_cpu(this_cpu, lowest_mask))
1654                this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1655
1656        rcu_read_lock();
1657        for_each_domain(cpu, sd) {
1658                if (sd->flags & SD_WAKE_AFFINE) {
1659                        int best_cpu;
1660
1661                        /*
1662                         * "this_cpu" is cheaper to preempt than a
1663                         * remote processor.
1664                         */
1665                        if (this_cpu != -1 &&
1666                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1667                                rcu_read_unlock();
1668                                return this_cpu;
1669                        }
1670
1671                        best_cpu = cpumask_first_and(lowest_mask,
1672                                                     sched_domain_span(sd));
1673                        if (best_cpu < nr_cpu_ids) {
1674                                rcu_read_unlock();
1675                                return best_cpu;
1676                        }
1677                }
1678        }
1679        rcu_read_unlock();
1680
1681        /*
1682         * And finally, if there were no matches within the domains
1683         * just give the caller *something* to work with from the compatible
1684         * locations.
1685         */
1686        if (this_cpu != -1)
1687                return this_cpu;
1688
1689        cpu = cpumask_any(lowest_mask);
1690        if (cpu < nr_cpu_ids)
1691                return cpu;
1692        return -1;
1693}
1694
1695/* Will lock the rq it finds */
1696static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1697{
1698        struct rq *lowest_rq = NULL;
1699        int tries;
1700        int cpu;
1701
1702        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1703                cpu = find_lowest_rq(task);
1704
1705                if ((cpu == -1) || (cpu == rq->cpu))
1706                        break;
1707
1708                lowest_rq = cpu_rq(cpu);
1709
1710                if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1711                        /*
1712                         * Target rq has tasks of equal or higher priority,
1713                         * retrying does not release any lock and is unlikely
1714                         * to yield a different result.
1715                         */
1716                        lowest_rq = NULL;
1717                        break;
1718                }
1719
1720                /* if the prio of this runqueue changed, try again */
1721                if (double_lock_balance(rq, lowest_rq)) {
1722                        /*
1723                         * We had to unlock the run queue. In
1724                         * the mean time, task could have
1725                         * migrated already or had its affinity changed.
1726                         * Also make sure that it wasn't scheduled on its rq.
1727                         */
1728                        if (unlikely(task_rq(task) != rq ||
1729                                     !cpumask_test_cpu(lowest_rq->cpu,
1730                                                       tsk_cpus_allowed(task)) ||
1731                                     task_running(rq, task) ||
1732                                     !rt_task(task) ||
1733                                     !task_on_rq_queued(task))) {
1734
1735                                double_unlock_balance(rq, lowest_rq);
1736                                lowest_rq = NULL;
1737                                break;
1738                        }
1739                }
1740
1741                /* If this rq is still suitable use it. */
1742                if (lowest_rq->rt.highest_prio.curr > task->prio)
1743                        break;
1744
1745                /* try again */
1746                double_unlock_balance(rq, lowest_rq);
1747                lowest_rq = NULL;
1748        }
1749
1750        return lowest_rq;
1751}
1752
1753static struct task_struct *pick_next_pushable_task(struct rq *rq)
1754{
1755        struct task_struct *p;
1756
1757        if (!has_pushable_tasks(rq))
1758                return NULL;
1759
1760        p = plist_first_entry(&rq->rt.pushable_tasks,
1761                              struct task_struct, pushable_tasks);
1762
1763        BUG_ON(rq->cpu != task_cpu(p));
1764        BUG_ON(task_current(rq, p));
1765        BUG_ON(p->nr_cpus_allowed <= 1);
1766
1767        BUG_ON(!task_on_rq_queued(p));
1768        BUG_ON(!rt_task(p));
1769
1770        return p;
1771}
1772
1773/*
1774 * If the current CPU has more than one RT task, see if the non
1775 * running task can migrate over to a CPU that is running a task
1776 * of lesser priority.
1777 */
1778static int push_rt_task(struct rq *rq)
1779{
1780        struct task_struct *next_task;
1781        struct rq *lowest_rq;
1782        int ret = 0;
1783
1784        if (!rq->rt.overloaded)
1785                return 0;
1786
1787        next_task = pick_next_pushable_task(rq);
1788        if (!next_task)
1789                return 0;
1790
1791retry:
1792        if (unlikely(next_task == rq->curr)) {
1793                WARN_ON(1);
1794                return 0;
1795        }
1796
1797        /*
1798         * It's possible that the next_task slipped in of
1799         * higher priority than current. If that's the case
1800         * just reschedule current.
1801         */
1802        if (unlikely(next_task->prio < rq->curr->prio)) {
1803                resched_curr(rq);
1804                return 0;
1805        }
1806
1807        /* We might release rq lock */
1808        get_task_struct(next_task);
1809
1810        /* find_lock_lowest_rq locks the rq if found */
1811        lowest_rq = find_lock_lowest_rq(next_task, rq);
1812        if (!lowest_rq) {
1813                struct task_struct *task;
1814                /*
1815                 * find_lock_lowest_rq releases rq->lock
1816                 * so it is possible that next_task has migrated.
1817                 *
1818                 * We need to make sure that the task is still on the same
1819                 * run-queue and is also still the next task eligible for
1820                 * pushing.
1821                 */
1822                task = pick_next_pushable_task(rq);
1823                if (task_cpu(next_task) == rq->cpu && task == next_task) {
1824                        /*
1825                         * The task hasn't migrated, and is still the next
1826                         * eligible task, but we failed to find a run-queue
1827                         * to push it to.  Do not retry in this case, since
1828                         * other cpus will pull from us when ready.
1829                         */
1830                        goto out;
1831                }
1832
1833                if (!task)
1834                        /* No more tasks, just exit */
1835                        goto out;
1836
1837                /*
1838                 * Something has shifted, try again.
1839                 */
1840                put_task_struct(next_task);
1841                next_task = task;
1842                goto retry;
1843        }
1844
1845        deactivate_task(rq, next_task, 0);
1846        set_task_cpu(next_task, lowest_rq->cpu);
1847        activate_task(lowest_rq, next_task, 0);
1848        ret = 1;
1849
1850        resched_curr(lowest_rq);
1851
1852        double_unlock_balance(rq, lowest_rq);
1853
1854out:
1855        put_task_struct(next_task);
1856
1857        return ret;
1858}
1859
1860static void push_rt_tasks(struct rq *rq)
1861{
1862        /* push_rt_task will return true if it moved an RT */
1863        while (push_rt_task(rq))
1864                ;
1865}
1866
1867#ifdef HAVE_RT_PUSH_IPI
1868/*
1869 * The search for the next cpu always starts at rq->cpu and ends
1870 * when we reach rq->cpu again. It will never return rq->cpu.
1871 * This returns the next cpu to check, or nr_cpu_ids if the loop
1872 * is complete.
1873 *
1874 * rq->rt.push_cpu holds the last cpu returned by this function,
1875 * or if this is the first instance, it must hold rq->cpu.
1876 */
1877static int rto_next_cpu(struct rq *rq)
1878{
1879        int prev_cpu = rq->rt.push_cpu;
1880        int cpu;
1881
1882        cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1883
1884        /*
1885         * If the previous cpu is less than the rq's CPU, then it already
1886         * passed the end of the mask, and has started from the beginning.
1887         * We end if the next CPU is greater or equal to rq's CPU.
1888         */
1889        if (prev_cpu < rq->cpu) {
1890                if (cpu >= rq->cpu)
1891                        return nr_cpu_ids;
1892
1893        } else if (cpu >= nr_cpu_ids) {
1894                /*
1895                 * We passed the end of the mask, start at the beginning.
1896                 * If the result is greater or equal to the rq's CPU, then
1897                 * the loop is finished.
1898                 */
1899                cpu = cpumask_first(rq->rd->rto_mask);
1900                if (cpu >= rq->cpu)
1901                        return nr_cpu_ids;
1902        }
1903        rq->rt.push_cpu = cpu;
1904
1905        /* Return cpu to let the caller know if the loop is finished or not */
1906        return cpu;
1907}
1908
1909static int find_next_push_cpu(struct rq *rq)
1910{
1911        struct rq *next_rq;
1912        int cpu;
1913
1914        while (1) {
1915                cpu = rto_next_cpu(rq);
1916                if (cpu >= nr_cpu_ids)
1917                        break;
1918                next_rq = cpu_rq(cpu);
1919
1920                /* Make sure the next rq can push to this rq */
1921                if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1922                        break;
1923        }
1924
1925        return cpu;
1926}
1927
1928#define RT_PUSH_IPI_EXECUTING           1
1929#define RT_PUSH_IPI_RESTART             2
1930
1931static void tell_cpu_to_push(struct rq *rq)
1932{
1933        int cpu;
1934
1935        if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1936                raw_spin_lock(&rq->rt.push_lock);
1937                /* Make sure it's still executing */
1938                if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1939                        /*
1940                         * Tell the IPI to restart the loop as things have
1941                         * changed since it started.
1942                         */
1943                        rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1944                        raw_spin_unlock(&rq->rt.push_lock);
1945                        return;
1946                }
1947                raw_spin_unlock(&rq->rt.push_lock);
1948        }
1949
1950        /* When here, there's no IPI going around */
1951
1952        rq->rt.push_cpu = rq->cpu;
1953        cpu = find_next_push_cpu(rq);
1954        if (cpu >= nr_cpu_ids)
1955                return;
1956
1957        rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1958
1959        irq_work_queue_on(&rq->rt.push_work, cpu);
1960}
1961
1962/* Called from hardirq context */
1963static void try_to_push_tasks(void *arg)
1964{
1965        struct rt_rq *rt_rq = arg;
1966        struct rq *rq, *src_rq;
1967        int this_cpu;
1968        int cpu;
1969
1970        this_cpu = rt_rq->push_cpu;
1971
1972        /* Paranoid check */
1973        BUG_ON(this_cpu != smp_processor_id());
1974
1975        rq = cpu_rq(this_cpu);
1976        src_rq = rq_of_rt_rq(rt_rq);
1977
1978again:
1979        if (has_pushable_tasks(rq)) {
1980                raw_spin_lock(&rq->lock);
1981                push_rt_task(rq);
1982                raw_spin_unlock(&rq->lock);
1983        }
1984
1985        /* Pass the IPI to the next rt overloaded queue */
1986        raw_spin_lock(&rt_rq->push_lock);
1987        /*
1988         * If the source queue changed since the IPI went out,
1989         * we need to restart the search from that CPU again.
1990         */
1991        if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
1992                rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
1993                rt_rq->push_cpu = src_rq->cpu;
1994        }
1995
1996        cpu = find_next_push_cpu(src_rq);
1997
1998        if (cpu >= nr_cpu_ids)
1999                rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
2000        raw_spin_unlock(&rt_rq->push_lock);
2001
2002        if (cpu >= nr_cpu_ids)
2003                return;
2004
2005        /*
2006         * It is possible that a restart caused this CPU to be
2007         * chosen again. Don't bother with an IPI, just see if we
2008         * have more to push.
2009         */
2010        if (unlikely(cpu == rq->cpu))
2011                goto again;
2012
2013        /* Try the next RT overloaded CPU */
2014        irq_work_queue_on(&rt_rq->push_work, cpu);
2015}
2016
2017static void push_irq_work_func(struct irq_work *work)
2018{
2019        struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
2020
2021        try_to_push_tasks(rt_rq);
2022}
2023#endif /* HAVE_RT_PUSH_IPI */
2024
2025static void pull_rt_task(struct rq *this_rq)
2026{
2027        int this_cpu = this_rq->cpu, cpu;
2028        bool resched = false;
2029        struct task_struct *p;
2030        struct rq *src_rq;
2031
2032        if (likely(!rt_overloaded(this_rq)))
2033                return;
2034
2035        /*
2036         * Match the barrier from rt_set_overloaded; this guarantees that if we
2037         * see overloaded we must also see the rto_mask bit.
2038         */
2039        smp_rmb();
2040
2041#ifdef HAVE_RT_PUSH_IPI
2042        if (sched_feat(RT_PUSH_IPI)) {
2043                tell_cpu_to_push(this_rq);
2044                return;
2045        }
2046#endif
2047
2048        for_each_cpu(cpu, this_rq->rd->rto_mask) {
2049                if (this_cpu == cpu)
2050                        continue;
2051
2052                src_rq = cpu_rq(cpu);
2053
2054                /*
2055                 * Don't bother taking the src_rq->lock if the next highest
2056                 * task is known to be lower-priority than our current task.
2057                 * This may look racy, but if this value is about to go
2058                 * logically higher, the src_rq will push this task away.
2059                 * And if its going logically lower, we do not care
2060                 */
2061                if (src_rq->rt.highest_prio.next >=
2062                    this_rq->rt.highest_prio.curr)
2063                        continue;
2064
2065                /*
2066                 * We can potentially drop this_rq's lock in
2067                 * double_lock_balance, and another CPU could
2068                 * alter this_rq
2069                 */
2070                double_lock_balance(this_rq, src_rq);
2071
2072                /*
2073                 * We can pull only a task, which is pushable
2074                 * on its rq, and no others.
2075                 */
2076                p = pick_highest_pushable_task(src_rq, this_cpu);
2077
2078                /*
2079                 * Do we have an RT task that preempts
2080                 * the to-be-scheduled task?
2081                 */
2082                if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2083                        WARN_ON(p == src_rq->curr);
2084                        WARN_ON(!task_on_rq_queued(p));
2085
2086                        /*
2087                         * There's a chance that p is higher in priority
2088                         * than what's currently running on its cpu.
2089                         * This is just that p is wakeing up and hasn't
2090                         * had a chance to schedule. We only pull
2091                         * p if it is lower in priority than the
2092                         * current task on the run queue
2093                         */
2094                        if (p->prio < src_rq->curr->prio)
2095                                goto skip;
2096
2097                        resched = true;
2098
2099                        deactivate_task(src_rq, p, 0);
2100                        set_task_cpu(p, this_cpu);
2101                        activate_task(this_rq, p, 0);
2102                        /*
2103                         * We continue with the search, just in
2104                         * case there's an even higher prio task
2105                         * in another runqueue. (low likelihood
2106                         * but possible)
2107                         */
2108                }
2109skip:
2110                double_unlock_balance(this_rq, src_rq);
2111        }
2112
2113        if (resched)
2114                resched_curr(this_rq);
2115}
2116
2117/*
2118 * If we are not running and we are not going to reschedule soon, we should
2119 * try to push tasks away now
2120 */
2121static void task_woken_rt(struct rq *rq, struct task_struct *p)
2122{
2123        if (!task_running(rq, p) &&
2124            !test_tsk_need_resched(rq->curr) &&
2125            p->nr_cpus_allowed > 1 &&
2126            (dl_task(rq->curr) || rt_task(rq->curr)) &&
2127            (rq->curr->nr_cpus_allowed < 2 ||
2128             rq->curr->prio <= p->prio))
2129                push_rt_tasks(rq);
2130}
2131
2132/* Assumes rq->lock is held */
2133static void rq_online_rt(struct rq *rq)
2134{
2135        if (rq->rt.overloaded)
2136                rt_set_overload(rq);
2137
2138        __enable_runtime(rq);
2139
2140        cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2141}
2142
2143/* Assumes rq->lock is held */
2144static void rq_offline_rt(struct rq *rq)
2145{
2146        if (rq->rt.overloaded)
2147                rt_clear_overload(rq);
2148
2149        __disable_runtime(rq);
2150
2151        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2152}
2153
2154/*
2155 * When switch from the rt queue, we bring ourselves to a position
2156 * that we might want to pull RT tasks from other runqueues.
2157 */
2158static void switched_from_rt(struct rq *rq, struct task_struct *p)
2159{
2160        /*
2161         * If there are other RT tasks then we will reschedule
2162         * and the scheduling of the other RT tasks will handle
2163         * the balancing. But if we are the last RT task
2164         * we may need to handle the pulling of RT tasks
2165         * now.
2166         */
2167        if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2168                return;
2169
2170        queue_pull_task(rq);
2171}
2172
2173void __init init_sched_rt_class(void)
2174{
2175        unsigned int i;
2176
2177        for_each_possible_cpu(i) {
2178                zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2179                                        GFP_KERNEL, cpu_to_node(i));
2180        }
2181}
2182#endif /* CONFIG_SMP */
2183
2184/*
2185 * When switching a task to RT, we may overload the runqueue
2186 * with RT tasks. In this case we try to push them off to
2187 * other runqueues.
2188 */
2189static void switched_to_rt(struct rq *rq, struct task_struct *p)
2190{
2191        /*
2192         * If we are already running, then there's nothing
2193         * that needs to be done. But if we are not running
2194         * we may need to preempt the current running task.
2195         * If that current running task is also an RT task
2196         * then see if we can move to another run queue.
2197         */
2198        if (task_on_rq_queued(p) && rq->curr != p) {
2199#ifdef CONFIG_SMP
2200                if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2201                        queue_push_tasks(rq);
2202#else
2203                if (p->prio < rq->curr->prio)
2204                        resched_curr(rq);
2205#endif /* CONFIG_SMP */
2206        }
2207}
2208
2209/*
2210 * Priority of the task has changed. This may cause
2211 * us to initiate a push or pull.
2212 */
2213static void
2214prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2215{
2216        if (!task_on_rq_queued(p))
2217                return;
2218
2219        if (rq->curr == p) {
2220#ifdef CONFIG_SMP
2221                /*
2222                 * If our priority decreases while running, we
2223                 * may need to pull tasks to this runqueue.
2224                 */
2225                if (oldprio < p->prio)
2226                        queue_pull_task(rq);
2227
2228                /*
2229                 * If there's a higher priority task waiting to run
2230                 * then reschedule.
2231                 */
2232                if (p->prio > rq->rt.highest_prio.curr)
2233                        resched_curr(rq);
2234#else
2235                /* For UP simply resched on drop of prio */
2236                if (oldprio < p->prio)
2237                        resched_curr(rq);
2238#endif /* CONFIG_SMP */
2239        } else {
2240                /*
2241                 * This task is not running, but if it is
2242                 * greater than the current running task
2243                 * then reschedule.
2244                 */
2245                if (p->prio < rq->curr->prio)
2246                        resched_curr(rq);
2247        }
2248}
2249
2250static void watchdog(struct rq *rq, struct task_struct *p)
2251{
2252        unsigned long soft, hard;
2253
2254        /* max may change after cur was read, this will be fixed next tick */
2255        soft = task_rlimit(p, RLIMIT_RTTIME);
2256        hard = task_rlimit_max(p, RLIMIT_RTTIME);
2257
2258        if (soft != RLIM_INFINITY) {
2259                unsigned long next;
2260
2261                if (p->rt.watchdog_stamp != jiffies) {
2262                        p->rt.timeout++;
2263                        p->rt.watchdog_stamp = jiffies;
2264                }
2265
2266                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2267                if (p->rt.timeout > next)
2268                        p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2269        }
2270}
2271
2272static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2273{
2274        struct sched_rt_entity *rt_se = &p->rt;
2275
2276        update_curr_rt(rq);
2277
2278        watchdog(rq, p);
2279
2280        /*
2281         * RR tasks need a special form of timeslice management.
2282         * FIFO tasks have no timeslices.
2283         */
2284        if (p->policy != SCHED_RR)
2285                return;
2286
2287        if (--p->rt.time_slice)
2288                return;
2289
2290        p->rt.time_slice = sched_rr_timeslice;
2291
2292        /*
2293         * Requeue to the end of queue if we (and all of our ancestors) are not
2294         * the only element on the queue
2295         */
2296        for_each_sched_rt_entity(rt_se) {
2297                if (rt_se->run_list.prev != rt_se->run_list.next) {
2298                        requeue_task_rt(rq, p, 0);
2299                        resched_curr(rq);
2300                        return;
2301                }
2302        }
2303}
2304
2305static void set_curr_task_rt(struct rq *rq)
2306{
2307        struct task_struct *p = rq->curr;
2308
2309        p->se.exec_start = rq_clock_task(rq);
2310
2311        /* The running task is never eligible for pushing */
2312        dequeue_pushable_task(rq, p);
2313}
2314
2315static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2316{
2317        /*
2318         * Time slice is 0 for SCHED_FIFO tasks
2319         */
2320        if (task->policy == SCHED_RR)
2321                return sched_rr_timeslice;
2322        else
2323                return 0;
2324}
2325
2326const struct sched_class rt_sched_class = {
2327        .next                   = &fair_sched_class,
2328        .enqueue_task           = enqueue_task_rt,
2329        .dequeue_task           = dequeue_task_rt,
2330        .yield_task             = yield_task_rt,
2331
2332        .check_preempt_curr     = check_preempt_curr_rt,
2333
2334        .pick_next_task         = pick_next_task_rt,
2335        .put_prev_task          = put_prev_task_rt,
2336
2337#ifdef CONFIG_SMP
2338        .select_task_rq         = select_task_rq_rt,
2339
2340        .set_cpus_allowed       = set_cpus_allowed_common,
2341        .rq_online              = rq_online_rt,
2342        .rq_offline             = rq_offline_rt,
2343        .task_woken             = task_woken_rt,
2344        .switched_from          = switched_from_rt,
2345#endif
2346
2347        .set_curr_task          = set_curr_task_rt,
2348        .task_tick              = task_tick_rt,
2349
2350        .get_rr_interval        = get_rr_interval_rt,
2351
2352        .prio_changed           = prio_changed_rt,
2353        .switched_to            = switched_to_rt,
2354
2355        .update_curr            = update_curr_rt,
2356};
2357
2358#ifdef CONFIG_SCHED_DEBUG
2359extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2360
2361void print_rt_stats(struct seq_file *m, int cpu)
2362{
2363        rt_rq_iter_t iter;
2364        struct rt_rq *rt_rq;
2365
2366        rcu_read_lock();
2367        for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2368                print_rt_rq(m, cpu, rt_rq);
2369        rcu_read_unlock();
2370}
2371#endif /* CONFIG_SCHED_DEBUG */
2372