linux/kernel/sched_fair.c
<<
>>
Prefs
   1/*
   2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
   3 *
   4 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   5 *
   6 *  Interactivity improvements by Mike Galbraith
   7 *  (C) 2007 Mike Galbraith <efault@gmx.de>
   8 *
   9 *  Various enhancements by Dmitry Adamushko.
  10 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
  11 *
  12 *  Group scheduling enhancements by Srivatsa Vaddagiri
  13 *  Copyright IBM Corporation, 2007
  14 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
  15 *
  16 *  Scaled math optimizations by Thomas Gleixner
  17 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
  18 *
  19 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
  20 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  21 */
  22
  23#include <linux/latencytop.h>
  24
  25/*
  26 * Targeted preemption latency for CPU-bound tasks:
  27 * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
  28 *
  29 * NOTE: this latency value is not the same as the concept of
  30 * 'timeslice length' - timeslices in CFS are of variable length
  31 * and have no persistent notion like in traditional, time-slice
  32 * based scheduling concepts.
  33 *
  34 * (to see the precise effective timeslice length of your workload,
  35 *  run vmstat and monitor the context-switches (cs) field)
  36 */
  37unsigned int sysctl_sched_latency = 5000000ULL;
  38
  39/*
  40 * Minimal preemption granularity for CPU-bound tasks:
  41 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  42 */
  43unsigned int sysctl_sched_min_granularity = 1000000ULL;
  44
  45/*
  46 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  47 */
  48static unsigned int sched_nr_latency = 5;
  49
  50/*
  51 * After fork, child runs first. If set to 0 (default) then
  52 * parent will (try to) run first.
  53 */
  54unsigned int sysctl_sched_child_runs_first __read_mostly;
  55
  56/*
  57 * sys_sched_yield() compat mode
  58 *
  59 * This option switches the agressive yield implementation of the
  60 * old scheduler back on.
  61 */
  62unsigned int __read_mostly sysctl_sched_compat_yield;
  63
  64/*
  65 * SCHED_OTHER wake-up granularity.
  66 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  67 *
  68 * This option delays the preemption effects of decoupled workloads
  69 * and reduces their over-scheduling. Synchronous workloads will still
  70 * have immediate wakeup/sleep latencies.
  71 */
  72unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
  73
  74const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  75
  76static const struct sched_class fair_sched_class;
  77
  78/**************************************************************
  79 * CFS operations on generic schedulable entities:
  80 */
  81
  82#ifdef CONFIG_FAIR_GROUP_SCHED
  83
  84/* cpu runqueue to which this cfs_rq is attached */
  85static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
  86{
  87        return cfs_rq->rq;
  88}
  89
  90/* An entity is a task if it doesn't "own" a runqueue */
  91#define entity_is_task(se)      (!se->my_q)
  92
  93static inline struct task_struct *task_of(struct sched_entity *se)
  94{
  95#ifdef CONFIG_SCHED_DEBUG
  96        WARN_ON_ONCE(!entity_is_task(se));
  97#endif
  98        return container_of(se, struct task_struct, se);
  99}
 100
 101/* Walk up scheduling entities hierarchy */
 102#define for_each_sched_entity(se) \
 103                for (; se; se = se->parent)
 104
 105static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 106{
 107        return p->se.cfs_rq;
 108}
 109
 110/* runqueue on which this entity is (to be) queued */
 111static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
 112{
 113        return se->cfs_rq;
 114}
 115
 116/* runqueue "owned" by this group */
 117static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 118{
 119        return grp->my_q;
 120}
 121
 122/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 123 * another cpu ('this_cpu')
 124 */
 125static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 126{
 127        return cfs_rq->tg->cfs_rq[this_cpu];
 128}
 129
 130/* Iterate thr' all leaf cfs_rq's on a runqueue */
 131#define for_each_leaf_cfs_rq(rq, cfs_rq) \
 132        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 133
 134/* Do the two (enqueued) entities belong to the same group ? */
 135static inline int
 136is_same_group(struct sched_entity *se, struct sched_entity *pse)
 137{
 138        if (se->cfs_rq == pse->cfs_rq)
 139                return 1;
 140
 141        return 0;
 142}
 143
 144static inline struct sched_entity *parent_entity(struct sched_entity *se)
 145{
 146        return se->parent;
 147}
 148
 149/* return depth at which a sched entity is present in the hierarchy */
 150static inline int depth_se(struct sched_entity *se)
 151{
 152        int depth = 0;
 153
 154        for_each_sched_entity(se)
 155                depth++;
 156
 157        return depth;
 158}
 159
 160static void
 161find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 162{
 163        int se_depth, pse_depth;
 164
 165        /*
 166         * preemption test can be made between sibling entities who are in the
 167         * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
 168         * both tasks until we find their ancestors who are siblings of common
 169         * parent.
 170         */
 171
 172        /* First walk up until both entities are at same depth */
 173        se_depth = depth_se(*se);
 174        pse_depth = depth_se(*pse);
 175
 176        while (se_depth > pse_depth) {
 177                se_depth--;
 178                *se = parent_entity(*se);
 179        }
 180
 181        while (pse_depth > se_depth) {
 182                pse_depth--;
 183                *pse = parent_entity(*pse);
 184        }
 185
 186        while (!is_same_group(*se, *pse)) {
 187                *se = parent_entity(*se);
 188                *pse = parent_entity(*pse);
 189        }
 190}
 191
 192#else   /* !CONFIG_FAIR_GROUP_SCHED */
 193
 194static inline struct task_struct *task_of(struct sched_entity *se)
 195{
 196        return container_of(se, struct task_struct, se);
 197}
 198
 199static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 200{
 201        return container_of(cfs_rq, struct rq, cfs);
 202}
 203
 204#define entity_is_task(se)      1
 205
 206#define for_each_sched_entity(se) \
 207                for (; se; se = NULL)
 208
 209static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 210{
 211        return &task_rq(p)->cfs;
 212}
 213
 214static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
 215{
 216        struct task_struct *p = task_of(se);
 217        struct rq *rq = task_rq(p);
 218
 219        return &rq->cfs;
 220}
 221
 222/* runqueue "owned" by this group */
 223static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 224{
 225        return NULL;
 226}
 227
 228static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 229{
 230        return &cpu_rq(this_cpu)->cfs;
 231}
 232
 233#define for_each_leaf_cfs_rq(rq, cfs_rq) \
 234                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 235
 236static inline int
 237is_same_group(struct sched_entity *se, struct sched_entity *pse)
 238{
 239        return 1;
 240}
 241
 242static inline struct sched_entity *parent_entity(struct sched_entity *se)
 243{
 244        return NULL;
 245}
 246
 247static inline void
 248find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 249{
 250}
 251
 252#endif  /* CONFIG_FAIR_GROUP_SCHED */
 253
 254
 255/**************************************************************
 256 * Scheduling class tree data structure manipulation methods:
 257 */
 258
 259static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
 260{
 261        s64 delta = (s64)(vruntime - min_vruntime);
 262        if (delta > 0)
 263                min_vruntime = vruntime;
 264
 265        return min_vruntime;
 266}
 267
 268static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 269{
 270        s64 delta = (s64)(vruntime - min_vruntime);
 271        if (delta < 0)
 272                min_vruntime = vruntime;
 273
 274        return min_vruntime;
 275}
 276
 277static inline int entity_before(struct sched_entity *a,
 278                                struct sched_entity *b)
 279{
 280        return (s64)(a->vruntime - b->vruntime) < 0;
 281}
 282
 283static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 284{
 285        return se->vruntime - cfs_rq->min_vruntime;
 286}
 287
 288static void update_min_vruntime(struct cfs_rq *cfs_rq)
 289{
 290        u64 vruntime = cfs_rq->min_vruntime;
 291
 292        if (cfs_rq->curr)
 293                vruntime = cfs_rq->curr->vruntime;
 294
 295        if (cfs_rq->rb_leftmost) {
 296                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
 297                                                   struct sched_entity,
 298                                                   run_node);
 299
 300                if (!cfs_rq->curr)
 301                        vruntime = se->vruntime;
 302                else
 303                        vruntime = min_vruntime(vruntime, se->vruntime);
 304        }
 305
 306        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
 307}
 308
 309/*
 310 * Enqueue an entity into the rb-tree:
 311 */
 312static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 313{
 314        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
 315        struct rb_node *parent = NULL;
 316        struct sched_entity *entry;
 317        s64 key = entity_key(cfs_rq, se);
 318        int leftmost = 1;
 319
 320        /*
 321         * Find the right place in the rbtree:
 322         */
 323        while (*link) {
 324                parent = *link;
 325                entry = rb_entry(parent, struct sched_entity, run_node);
 326                /*
 327                 * We dont care about collisions. Nodes with
 328                 * the same key stay together.
 329                 */
 330                if (key < entity_key(cfs_rq, entry)) {
 331                        link = &parent->rb_left;
 332                } else {
 333                        link = &parent->rb_right;
 334                        leftmost = 0;
 335                }
 336        }
 337
 338        /*
 339         * Maintain a cache of leftmost tree entries (it is frequently
 340         * used):
 341         */
 342        if (leftmost)
 343                cfs_rq->rb_leftmost = &se->run_node;
 344
 345        rb_link_node(&se->run_node, parent, link);
 346        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
 347}
 348
 349static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 350{
 351        if (cfs_rq->rb_leftmost == &se->run_node) {
 352                struct rb_node *next_node;
 353
 354                next_node = rb_next(&se->run_node);
 355                cfs_rq->rb_leftmost = next_node;
 356        }
 357
 358        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 359}
 360
 361static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 362{
 363        struct rb_node *left = cfs_rq->rb_leftmost;
 364
 365        if (!left)
 366                return NULL;
 367
 368        return rb_entry(left, struct sched_entity, run_node);
 369}
 370
 371static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 372{
 373        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 374
 375        if (!last)
 376                return NULL;
 377
 378        return rb_entry(last, struct sched_entity, run_node);
 379}
 380
 381/**************************************************************
 382 * Scheduling class statistics methods:
 383 */
 384
 385#ifdef CONFIG_SCHED_DEBUG
 386int sched_nr_latency_handler(struct ctl_table *table, int write,
 387                void __user *buffer, size_t *lenp,
 388                loff_t *ppos)
 389{
 390        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 391
 392        if (ret || !write)
 393                return ret;
 394
 395        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
 396                                        sysctl_sched_min_granularity);
 397
 398        return 0;
 399}
 400#endif
 401
 402/*
 403 * delta /= w
 404 */
 405static inline unsigned long
 406calc_delta_fair(unsigned long delta, struct sched_entity *se)
 407{
 408        if (unlikely(se->load.weight != NICE_0_LOAD))
 409                delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
 410
 411        return delta;
 412}
 413
 414/*
 415 * The idea is to set a period in which each task runs once.
 416 *
 417 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
 418 * this period because otherwise the slices get too small.
 419 *
 420 * p = (nr <= nl) ? l : l*nr/nl
 421 */
 422static u64 __sched_period(unsigned long nr_running)
 423{
 424        u64 period = sysctl_sched_latency;
 425        unsigned long nr_latency = sched_nr_latency;
 426
 427        if (unlikely(nr_running > nr_latency)) {
 428                period = sysctl_sched_min_granularity;
 429                period *= nr_running;
 430        }
 431
 432        return period;
 433}
 434
 435/*
 436 * We calculate the wall-time slice from the period by taking a part
 437 * proportional to the weight.
 438 *
 439 * s = p*P[w/rw]
 440 */
 441static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 442{
 443        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 444
 445        for_each_sched_entity(se) {
 446                struct load_weight *load;
 447                struct load_weight lw;
 448
 449                cfs_rq = cfs_rq_of(se);
 450                load = &cfs_rq->load;
 451
 452                if (unlikely(!se->on_rq)) {
 453                        lw = cfs_rq->load;
 454
 455                        update_load_add(&lw, se->load.weight);
 456                        load = &lw;
 457                }
 458                slice = calc_delta_mine(slice, se->load.weight, load);
 459        }
 460        return slice;
 461}
 462
 463/*
 464 * We calculate the vruntime slice of a to be inserted task
 465 *
 466 * vs = s/w
 467 */
 468static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 469{
 470        return calc_delta_fair(sched_slice(cfs_rq, se), se);
 471}
 472
 473/*
 474 * Update the current task's runtime statistics. Skip current tasks that
 475 * are not in our scheduling class.
 476 */
 477static inline void
 478__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 479              unsigned long delta_exec)
 480{
 481        unsigned long delta_exec_weighted;
 482
 483        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 484
 485        curr->sum_exec_runtime += delta_exec;
 486        schedstat_add(cfs_rq, exec_clock, delta_exec);
 487        delta_exec_weighted = calc_delta_fair(delta_exec, curr);
 488        curr->vruntime += delta_exec_weighted;
 489        update_min_vruntime(cfs_rq);
 490}
 491
 492static void update_curr(struct cfs_rq *cfs_rq)
 493{
 494        struct sched_entity *curr = cfs_rq->curr;
 495        u64 now = rq_of(cfs_rq)->clock;
 496        unsigned long delta_exec;
 497
 498        if (unlikely(!curr))
 499                return;
 500
 501        /*
 502         * Get the amount of time the current task was running
 503         * since the last time we changed load (this cannot
 504         * overflow on 32 bits):
 505         */
 506        delta_exec = (unsigned long)(now - curr->exec_start);
 507        if (!delta_exec)
 508                return;
 509
 510        __update_curr(cfs_rq, curr, delta_exec);
 511        curr->exec_start = now;
 512
 513        if (entity_is_task(curr)) {
 514                struct task_struct *curtask = task_of(curr);
 515
 516                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
 517                cpuacct_charge(curtask, delta_exec);
 518                account_group_exec_runtime(curtask, delta_exec);
 519        }
 520}
 521
 522static inline void
 523update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 524{
 525        schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 526}
 527
 528/*
 529 * Task is being enqueued - update stats:
 530 */
 531static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 532{
 533        /*
 534         * Are we enqueueing a waiting task? (for current tasks
 535         * a dequeue/enqueue event is a NOP)
 536         */
 537        if (se != cfs_rq->curr)
 538                update_stats_wait_start(cfs_rq, se);
 539}
 540
 541static void
 542update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 543{
 544        schedstat_set(se->wait_max, max(se->wait_max,
 545                        rq_of(cfs_rq)->clock - se->wait_start));
 546        schedstat_set(se->wait_count, se->wait_count + 1);
 547        schedstat_set(se->wait_sum, se->wait_sum +
 548                        rq_of(cfs_rq)->clock - se->wait_start);
 549#ifdef CONFIG_SCHEDSTATS
 550        if (entity_is_task(se)) {
 551                trace_sched_stat_wait(task_of(se),
 552                        rq_of(cfs_rq)->clock - se->wait_start);
 553        }
 554#endif
 555        schedstat_set(se->wait_start, 0);
 556}
 557
 558static inline void
 559update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 560{
 561        /*
 562         * Mark the end of the wait period if dequeueing a
 563         * waiting task:
 564         */
 565        if (se != cfs_rq->curr)
 566                update_stats_wait_end(cfs_rq, se);
 567}
 568
 569/*
 570 * We are picking a new current task - update its stats:
 571 */
 572static inline void
 573update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 574{
 575        /*
 576         * We are starting a new run period:
 577         */
 578        se->exec_start = rq_of(cfs_rq)->clock;
 579}
 580
 581/**************************************************
 582 * Scheduling class queueing methods:
 583 */
 584
 585#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
 586static void
 587add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
 588{
 589        cfs_rq->task_weight += weight;
 590}
 591#else
 592static inline void
 593add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
 594{
 595}
 596#endif
 597
 598static void
 599account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 600{
 601        update_load_add(&cfs_rq->load, se->load.weight);
 602        if (!parent_entity(se))
 603                inc_cpu_load(rq_of(cfs_rq), se->load.weight);
 604        if (entity_is_task(se)) {
 605                add_cfs_task_weight(cfs_rq, se->load.weight);
 606                list_add(&se->group_node, &cfs_rq->tasks);
 607        }
 608        cfs_rq->nr_running++;
 609        se->on_rq = 1;
 610}
 611
 612static void
 613account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 614{
 615        update_load_sub(&cfs_rq->load, se->load.weight);
 616        if (!parent_entity(se))
 617                dec_cpu_load(rq_of(cfs_rq), se->load.weight);
 618        if (entity_is_task(se)) {
 619                add_cfs_task_weight(cfs_rq, -se->load.weight);
 620                list_del_init(&se->group_node);
 621        }
 622        cfs_rq->nr_running--;
 623        se->on_rq = 0;
 624}
 625
 626static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 627{
 628#ifdef CONFIG_SCHEDSTATS
 629        struct task_struct *tsk = NULL;
 630
 631        if (entity_is_task(se))
 632                tsk = task_of(se);
 633
 634        if (se->sleep_start) {
 635                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
 636
 637                if ((s64)delta < 0)
 638                        delta = 0;
 639
 640                if (unlikely(delta > se->sleep_max))
 641                        se->sleep_max = delta;
 642
 643                se->sleep_start = 0;
 644                se->sum_sleep_runtime += delta;
 645
 646                if (tsk) {
 647                        account_scheduler_latency(tsk, delta >> 10, 1);
 648                        trace_sched_stat_sleep(tsk, delta);
 649                }
 650        }
 651        if (se->block_start) {
 652                u64 delta = rq_of(cfs_rq)->clock - se->block_start;
 653
 654                if ((s64)delta < 0)
 655                        delta = 0;
 656
 657                if (unlikely(delta > se->block_max))
 658                        se->block_max = delta;
 659
 660                se->block_start = 0;
 661                se->sum_sleep_runtime += delta;
 662
 663                if (tsk) {
 664                        if (tsk->in_iowait) {
 665                                se->iowait_sum += delta;
 666                                se->iowait_count++;
 667                                trace_sched_stat_iowait(tsk, delta);
 668                        }
 669
 670                        /*
 671                         * Blocking time is in units of nanosecs, so shift by
 672                         * 20 to get a milliseconds-range estimation of the
 673                         * amount of time that the task spent sleeping:
 674                         */
 675                        if (unlikely(prof_on == SLEEP_PROFILING)) {
 676                                profile_hits(SLEEP_PROFILING,
 677                                                (void *)get_wchan(tsk),
 678                                                delta >> 20);
 679                        }
 680                        account_scheduler_latency(tsk, delta >> 10, 0);
 681                }
 682        }
 683#endif
 684}
 685
 686static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 687{
 688#ifdef CONFIG_SCHED_DEBUG
 689        s64 d = se->vruntime - cfs_rq->min_vruntime;
 690
 691        if (d < 0)
 692                d = -d;
 693
 694        if (d > 3*sysctl_sched_latency)
 695                schedstat_inc(cfs_rq, nr_spread_over);
 696#endif
 697}
 698
 699static void
 700place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 701{
 702        u64 vruntime = cfs_rq->min_vruntime;
 703
 704        /*
 705         * The 'current' period is already promised to the current tasks,
 706         * however the extra weight of the new task will slow them down a
 707         * little, place the new task so that it fits in the slot that
 708         * stays open at the end.
 709         */
 710        if (initial && sched_feat(START_DEBIT))
 711                vruntime += sched_vslice(cfs_rq, se);
 712
 713        /* sleeps up to a single latency don't count. */
 714        if (!initial && sched_feat(FAIR_SLEEPERS)) {
 715                unsigned long thresh = sysctl_sched_latency;
 716
 717                /*
 718                 * Convert the sleeper threshold into virtual time.
 719                 * SCHED_IDLE is a special sub-class.  We care about
 720                 * fairness only relative to other SCHED_IDLE tasks,
 721                 * all of which have the same weight.
 722                 */
 723                if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
 724                                 task_of(se)->policy != SCHED_IDLE))
 725                        thresh = calc_delta_fair(thresh, se);
 726
 727                /*
 728                 * Halve their sleep time's effect, to allow
 729                 * for a gentler effect of sleepers:
 730                 */
 731                if (sched_feat(GENTLE_FAIR_SLEEPERS))
 732                        thresh >>= 1;
 733
 734                vruntime -= thresh;
 735        }
 736
 737        /* ensure we never gain time by being placed backwards. */
 738        vruntime = max_vruntime(se->vruntime, vruntime);
 739
 740        se->vruntime = vruntime;
 741}
 742
 743static void
 744enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 745{
 746        /*
 747         * Update run-time statistics of the 'current'.
 748         */
 749        update_curr(cfs_rq);
 750        account_entity_enqueue(cfs_rq, se);
 751
 752        if (wakeup) {
 753                place_entity(cfs_rq, se, 0);
 754                enqueue_sleeper(cfs_rq, se);
 755        }
 756
 757        update_stats_enqueue(cfs_rq, se);
 758        check_spread(cfs_rq, se);
 759        if (se != cfs_rq->curr)
 760                __enqueue_entity(cfs_rq, se);
 761}
 762
 763static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 764{
 765        if (!se || cfs_rq->last == se)
 766                cfs_rq->last = NULL;
 767
 768        if (!se || cfs_rq->next == se)
 769                cfs_rq->next = NULL;
 770}
 771
 772static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 773{
 774        for_each_sched_entity(se)
 775                __clear_buddies(cfs_rq_of(se), se);
 776}
 777
 778static void
 779dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 780{
 781        /*
 782         * Update run-time statistics of the 'current'.
 783         */
 784        update_curr(cfs_rq);
 785
 786        update_stats_dequeue(cfs_rq, se);
 787        if (sleep) {
 788#ifdef CONFIG_SCHEDSTATS
 789                if (entity_is_task(se)) {
 790                        struct task_struct *tsk = task_of(se);
 791
 792                        if (tsk->state & TASK_INTERRUPTIBLE)
 793                                se->sleep_start = rq_of(cfs_rq)->clock;
 794                        if (tsk->state & TASK_UNINTERRUPTIBLE)
 795                                se->block_start = rq_of(cfs_rq)->clock;
 796                }
 797#endif
 798        }
 799
 800        clear_buddies(cfs_rq, se);
 801
 802        if (se != cfs_rq->curr)
 803                __dequeue_entity(cfs_rq, se);
 804        account_entity_dequeue(cfs_rq, se);
 805        update_min_vruntime(cfs_rq);
 806}
 807
 808/*
 809 * Preempt the current task with a newly woken task if needed:
 810 */
 811static void
 812check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 813{
 814        unsigned long ideal_runtime, delta_exec;
 815
 816        ideal_runtime = sched_slice(cfs_rq, curr);
 817        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
 818        if (delta_exec > ideal_runtime) {
 819                resched_task(rq_of(cfs_rq)->curr);
 820                /*
 821                 * The current task ran long enough, ensure it doesn't get
 822                 * re-elected due to buddy favours.
 823                 */
 824                clear_buddies(cfs_rq, curr);
 825                return;
 826        }
 827
 828        /*
 829         * Ensure that a task that missed wakeup preemption by a
 830         * narrow margin doesn't have to wait for a full slice.
 831         * This also mitigates buddy induced latencies under load.
 832         */
 833        if (!sched_feat(WAKEUP_PREEMPT))
 834                return;
 835
 836        if (delta_exec < sysctl_sched_min_granularity)
 837                return;
 838
 839        if (cfs_rq->nr_running > 1) {
 840                struct sched_entity *se = __pick_next_entity(cfs_rq);
 841                s64 delta = curr->vruntime - se->vruntime;
 842
 843                if (delta > ideal_runtime)
 844                        resched_task(rq_of(cfs_rq)->curr);
 845        }
 846}
 847
 848static void
 849set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 850{
 851        /* 'current' is not kept within the tree. */
 852        if (se->on_rq) {
 853                /*
 854                 * Any task has to be enqueued before it get to execute on
 855                 * a CPU. So account for the time it spent waiting on the
 856                 * runqueue.
 857                 */
 858                update_stats_wait_end(cfs_rq, se);
 859                __dequeue_entity(cfs_rq, se);
 860        }
 861
 862        update_stats_curr_start(cfs_rq, se);
 863        cfs_rq->curr = se;
 864#ifdef CONFIG_SCHEDSTATS
 865        /*
 866         * Track our maximum slice length, if the CPU's load is at
 867         * least twice that of our own weight (i.e. dont track it
 868         * when there are only lesser-weight tasks around):
 869         */
 870        if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
 871                se->slice_max = max(se->slice_max,
 872                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
 873        }
 874#endif
 875        se->prev_sum_exec_runtime = se->sum_exec_runtime;
 876}
 877
 878static int
 879wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 880
 881static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 882{
 883        struct sched_entity *se = __pick_next_entity(cfs_rq);
 884        struct sched_entity *left = se;
 885
 886        if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
 887                se = cfs_rq->next;
 888
 889        /*
 890         * Prefer last buddy, try to return the CPU to a preempted task.
 891         */
 892        if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
 893                se = cfs_rq->last;
 894
 895        clear_buddies(cfs_rq, se);
 896
 897        return se;
 898}
 899
 900static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 901{
 902        /*
 903         * If still on the runqueue then deactivate_task()
 904         * was not called and update_curr() has to be done:
 905         */
 906        if (prev->on_rq)
 907                update_curr(cfs_rq);
 908
 909        check_spread(cfs_rq, prev);
 910        if (prev->on_rq) {
 911                update_stats_wait_start(cfs_rq, prev);
 912                /* Put 'current' back into the tree. */
 913                __enqueue_entity(cfs_rq, prev);
 914        }
 915        cfs_rq->curr = NULL;
 916}
 917
 918static void
 919entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 920{
 921        /*
 922         * Update run-time statistics of the 'current'.
 923         */
 924        update_curr(cfs_rq);
 925
 926#ifdef CONFIG_SCHED_HRTICK
 927        /*
 928         * queued ticks are scheduled to match the slice, so don't bother
 929         * validating it and just reschedule.
 930         */
 931        if (queued) {
 932                resched_task(rq_of(cfs_rq)->curr);
 933                return;
 934        }
 935        /*
 936         * don't let the period tick interfere with the hrtick preemption
 937         */
 938        if (!sched_feat(DOUBLE_TICK) &&
 939                        hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
 940                return;
 941#endif
 942
 943        if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
 944                check_preempt_tick(cfs_rq, curr);
 945}
 946
 947/**************************************************
 948 * CFS operations on tasks:
 949 */
 950
 951#ifdef CONFIG_SCHED_HRTICK
 952static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 953{
 954        struct sched_entity *se = &p->se;
 955        struct cfs_rq *cfs_rq = cfs_rq_of(se);
 956
 957        WARN_ON(task_rq(p) != rq);
 958
 959        if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
 960                u64 slice = sched_slice(cfs_rq, se);
 961                u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
 962                s64 delta = slice - ran;
 963
 964                if (delta < 0) {
 965                        if (rq->curr == p)
 966                                resched_task(p);
 967                        return;
 968                }
 969
 970                /*
 971                 * Don't schedule slices shorter than 10000ns, that just
 972                 * doesn't make sense. Rely on vruntime for fairness.
 973                 */
 974                if (rq->curr != p)
 975                        delta = max_t(s64, 10000LL, delta);
 976
 977                hrtick_start(rq, delta);
 978        }
 979}
 980
 981/*
 982 * called from enqueue/dequeue and updates the hrtick when the
 983 * current task is from our class and nr_running is low enough
 984 * to matter.
 985 */
 986static void hrtick_update(struct rq *rq)
 987{
 988        struct task_struct *curr = rq->curr;
 989
 990        if (curr->sched_class != &fair_sched_class)
 991                return;
 992
 993        if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
 994                hrtick_start_fair(rq, curr);
 995}
 996#else /* !CONFIG_SCHED_HRTICK */
 997static inline void
 998hrtick_start_fair(struct rq *rq, struct task_struct *p)
 999{
1000}
1001
1002static inline void hrtick_update(struct rq *rq)
1003{
1004}
1005#endif
1006
1007/*
1008 * The enqueue_task method is called before nr_running is
1009 * increased. Here we update the fair scheduling stats and
1010 * then put the task into the rbtree:
1011 */
1012static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
1013{
1014        struct cfs_rq *cfs_rq;
1015        struct sched_entity *se = &p->se;
1016
1017        for_each_sched_entity(se) {
1018                if (se->on_rq)
1019                        break;
1020                cfs_rq = cfs_rq_of(se);
1021                enqueue_entity(cfs_rq, se, wakeup);
1022                wakeup = 1;
1023        }
1024
1025        hrtick_update(rq);
1026}
1027
1028/*
1029 * The dequeue_task method is called before nr_running is
1030 * decreased. We remove the task from the rbtree and
1031 * update the fair scheduling stats:
1032 */
1033static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
1034{
1035        struct cfs_rq *cfs_rq;
1036        struct sched_entity *se = &p->se;
1037
1038        for_each_sched_entity(se) {
1039                cfs_rq = cfs_rq_of(se);
1040                dequeue_entity(cfs_rq, se, sleep);
1041                /* Don't dequeue parent if it has other entities besides us */
1042                if (cfs_rq->load.weight)
1043                        break;
1044                sleep = 1;
1045        }
1046
1047        hrtick_update(rq);
1048}
1049
1050/*
1051 * sched_yield() support is very simple - we dequeue and enqueue.
1052 *
1053 * If compat_yield is turned on then we requeue to the end of the tree.
1054 */
1055static void yield_task_fair(struct rq *rq)
1056{
1057        struct task_struct *curr = rq->curr;
1058        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1059        struct sched_entity *rightmost, *se = &curr->se;
1060
1061        /*
1062         * Are we the only task in the tree?
1063         */
1064        if (unlikely(cfs_rq->nr_running == 1))
1065                return;
1066
1067        clear_buddies(cfs_rq, se);
1068
1069        if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
1070                update_rq_clock(rq);
1071                /*
1072                 * Update run-time statistics of the 'current'.
1073                 */
1074                update_curr(cfs_rq);
1075
1076                return;
1077        }
1078        /*
1079         * Find the rightmost entry in the rbtree:
1080         */
1081        rightmost = __pick_last_entity(cfs_rq);
1082        /*
1083         * Already in the rightmost position?
1084         */
1085        if (unlikely(!rightmost || entity_before(rightmost, se)))
1086                return;
1087
1088        /*
1089         * Minimally necessary key value to be last in the tree:
1090         * Upon rescheduling, sched_class::put_prev_task() will place
1091         * 'current' within the tree based on its new key value.
1092         */
1093        se->vruntime = rightmost->vruntime + 1;
1094}
1095
1096#ifdef CONFIG_SMP
1097
1098#ifdef CONFIG_FAIR_GROUP_SCHED
1099/*
1100 * effective_load() calculates the load change as seen from the root_task_group
1101 *
1102 * Adding load to a group doesn't make a group heavier, but can cause movement
1103 * of group shares between cpus. Assuming the shares were perfectly aligned one
1104 * can calculate the shift in shares.
1105 *
1106 * The problem is that perfectly aligning the shares is rather expensive, hence
1107 * we try to avoid doing that too often - see update_shares(), which ratelimits
1108 * this change.
1109 *
1110 * We compensate this by not only taking the current delta into account, but
1111 * also considering the delta between when the shares were last adjusted and
1112 * now.
1113 *
1114 * We still saw a performance dip, some tracing learned us that between
1115 * cgroup:/ and cgroup:/foo balancing the number of affine wakeups increased
1116 * significantly. Therefore try to bias the error in direction of failing
1117 * the affine wakeup.
1118 *
1119 */
1120static long effective_load(struct task_group *tg, int cpu,
1121                long wl, long wg)
1122{
1123        struct sched_entity *se = tg->se[cpu];
1124
1125        if (!tg->parent)
1126                return wl;
1127
1128        /*
1129         * By not taking the decrease of shares on the other cpu into
1130         * account our error leans towards reducing the affine wakeups.
1131         */
1132        if (!wl && sched_feat(ASYM_EFF_LOAD))
1133                return wl;
1134
1135        for_each_sched_entity(se) {
1136                long S, rw, s, a, b;
1137                long more_w;
1138
1139                /*
1140                 * Instead of using this increment, also add the difference
1141                 * between when the shares were last updated and now.
1142                 */
1143                more_w = se->my_q->load.weight - se->my_q->rq_weight;
1144                wl += more_w;
1145                wg += more_w;
1146
1147                S = se->my_q->tg->shares;
1148                s = se->my_q->shares;
1149                rw = se->my_q->rq_weight;
1150
1151                a = S*(rw + wl);
1152                b = S*rw + s*wg;
1153
1154                wl = s*(a-b);
1155
1156                if (likely(b))
1157                        wl /= b;
1158
1159                /*
1160                 * Assume the group is already running and will
1161                 * thus already be accounted for in the weight.
1162                 *
1163                 * That is, moving shares between CPUs, does not
1164                 * alter the group weight.
1165                 */
1166                wg = 0;
1167        }
1168
1169        return wl;
1170}
1171
1172#else
1173
1174static inline unsigned long effective_load(struct task_group *tg, int cpu,
1175                unsigned long wl, unsigned long wg)
1176{
1177        return wl;
1178}
1179
1180#endif
1181
1182static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1183{
1184        struct task_struct *curr = current;
1185        unsigned long this_load, load;
1186        int idx, this_cpu, prev_cpu;
1187        unsigned long tl_per_task;
1188        unsigned int imbalance;
1189        struct task_group *tg;
1190        unsigned long weight;
1191        int balanced;
1192
1193        idx       = sd->wake_idx;
1194        this_cpu  = smp_processor_id();
1195        prev_cpu  = task_cpu(p);
1196        load      = source_load(prev_cpu, idx);
1197        this_load = target_load(this_cpu, idx);
1198
1199        if (sync) {
1200               if (sched_feat(SYNC_LESS) &&
1201                   (curr->se.avg_overlap > sysctl_sched_migration_cost ||
1202                    p->se.avg_overlap > sysctl_sched_migration_cost))
1203                       sync = 0;
1204        } else {
1205                if (sched_feat(SYNC_MORE) &&
1206                    (curr->se.avg_overlap < sysctl_sched_migration_cost &&
1207                     p->se.avg_overlap < sysctl_sched_migration_cost))
1208                        sync = 1;
1209        }
1210
1211        /*
1212         * If sync wakeup then subtract the (maximum possible)
1213         * effect of the currently running task from the load
1214         * of the current CPU:
1215         */
1216        if (sync) {
1217                tg = task_group(current);
1218                weight = current->se.load.weight;
1219
1220                this_load += effective_load(tg, this_cpu, -weight, -weight);
1221                load += effective_load(tg, prev_cpu, 0, -weight);
1222        }
1223
1224        tg = task_group(p);
1225        weight = p->se.load.weight;
1226
1227        imbalance = 100 + (sd->imbalance_pct - 100) / 2;
1228
1229        /*
1230         * In low-load situations, where prev_cpu is idle and this_cpu is idle
1231         * due to the sync cause above having dropped this_load to 0, we'll
1232         * always have an imbalance, but there's really nothing you can do
1233         * about that, so that's good too.
1234         *
1235         * Otherwise check if either cpus are near enough in load to allow this
1236         * task to be woken on this_cpu.
1237         */
1238        balanced = !this_load ||
1239                100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
1240                imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
1241
1242        /*
1243         * If the currently running task will sleep within
1244         * a reasonable amount of time then attract this newly
1245         * woken task:
1246         */
1247        if (sync && balanced)
1248                return 1;
1249
1250        schedstat_inc(p, se.nr_wakeups_affine_attempts);
1251        tl_per_task = cpu_avg_load_per_task(this_cpu);
1252
1253        if (balanced ||
1254            (this_load <= load &&
1255             this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
1256                /*
1257                 * This domain has SD_WAKE_AFFINE and
1258                 * p is cache cold in this domain, and
1259                 * there is no bad imbalance.
1260                 */
1261                schedstat_inc(sd, ttwu_move_affine);
1262                schedstat_inc(p, se.nr_wakeups_affine);
1263
1264                return 1;
1265        }
1266        return 0;
1267}
1268
1269/*
1270 * find_idlest_group finds and returns the least busy CPU group within the
1271 * domain.
1272 */
1273static struct sched_group *
1274find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1275                  int this_cpu, int load_idx)
1276{
1277        struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
1278        unsigned long min_load = ULONG_MAX, this_load = 0;
1279        int imbalance = 100 + (sd->imbalance_pct-100)/2;
1280
1281        do {
1282                unsigned long load, avg_load;
1283                int local_group;
1284                int i;
1285
1286                /* Skip over this group if it has no CPUs allowed */
1287                if (!cpumask_intersects(sched_group_cpus(group),
1288                                        &p->cpus_allowed))
1289                        continue;
1290
1291                local_group = cpumask_test_cpu(this_cpu,
1292                                               sched_group_cpus(group));
1293
1294                /* Tally up the load of all CPUs in the group */
1295                avg_load = 0;
1296
1297                for_each_cpu(i, sched_group_cpus(group)) {
1298                        /* Bias balancing toward cpus of our domain */
1299                        if (local_group)
1300                                load = source_load(i, load_idx);
1301                        else
1302                                load = target_load(i, load_idx);
1303
1304                        avg_load += load;
1305                }
1306
1307                /* Adjust by relative CPU power of the group */
1308                avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
1309
1310                if (local_group) {
1311                        this_load = avg_load;
1312                        this = group;
1313                } else if (avg_load < min_load) {
1314                        min_load = avg_load;
1315                        idlest = group;
1316                }
1317        } while (group = group->next, group != sd->groups);
1318
1319        if (!idlest || 100*this_load < imbalance*min_load)
1320                return NULL;
1321        return idlest;
1322}
1323
1324/*
1325 * find_idlest_cpu - find the idlest cpu among the cpus in group.
1326 */
1327static int
1328find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1329{
1330        unsigned long load, min_load = ULONG_MAX;
1331        int idlest = -1;
1332        int i;
1333
1334        /* Traverse only the allowed CPUs */
1335        for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
1336                load = weighted_cpuload(i);
1337
1338                if (load < min_load || (load == min_load && i == this_cpu)) {
1339                        min_load = load;
1340                        idlest = i;
1341                }
1342        }
1343
1344        return idlest;
1345}
1346
1347/*
1348 * sched_balance_self: balance the current task (running on cpu) in domains
1349 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
1350 * SD_BALANCE_EXEC.
1351 *
1352 * Balance, ie. select the least loaded group.
1353 *
1354 * Returns the target CPU number, or the same CPU if no balancing is needed.
1355 *
1356 * preempt must be disabled.
1357 */
1358static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
1359{
1360        struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
1361        int cpu = smp_processor_id();
1362        int prev_cpu = task_cpu(p);
1363        int new_cpu = cpu;
1364        int want_affine = 0;
1365        int want_sd = 1;
1366        int sync = wake_flags & WF_SYNC;
1367
1368        if (sd_flag & SD_BALANCE_WAKE) {
1369                if (sched_feat(AFFINE_WAKEUPS) &&
1370                    cpumask_test_cpu(cpu, &p->cpus_allowed))
1371                        want_affine = 1;
1372                new_cpu = prev_cpu;
1373        }
1374
1375        rcu_read_lock();
1376        for_each_domain(cpu, tmp) {
1377                /*
1378                 * If power savings logic is enabled for a domain, see if we
1379                 * are not overloaded, if so, don't balance wider.
1380                 */
1381                if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
1382                        unsigned long power = 0;
1383                        unsigned long nr_running = 0;
1384                        unsigned long capacity;
1385                        int i;
1386
1387                        for_each_cpu(i, sched_domain_span(tmp)) {
1388                                power += power_of(i);
1389                                nr_running += cpu_rq(i)->cfs.nr_running;
1390                        }
1391
1392                        capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
1393
1394                        if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1395                                nr_running /= 2;
1396
1397                        if (nr_running < capacity)
1398                                want_sd = 0;
1399                }
1400
1401                if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
1402                    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
1403
1404                        affine_sd = tmp;
1405                        want_affine = 0;
1406                }
1407
1408                if (!want_sd && !want_affine)
1409                        break;
1410
1411                if (!(tmp->flags & sd_flag))
1412                        continue;
1413
1414                if (want_sd)
1415                        sd = tmp;
1416        }
1417
1418        if (sched_feat(LB_SHARES_UPDATE)) {
1419                /*
1420                 * Pick the largest domain to update shares over
1421                 */
1422                tmp = sd;
1423                if (affine_sd && (!tmp ||
1424                                  cpumask_weight(sched_domain_span(affine_sd)) >
1425                                  cpumask_weight(sched_domain_span(sd))))
1426                        tmp = affine_sd;
1427
1428                if (tmp)
1429                        update_shares(tmp);
1430        }
1431
1432        if (affine_sd && wake_affine(affine_sd, p, sync)) {
1433                new_cpu = cpu;
1434                goto out;
1435        }
1436
1437        while (sd) {
1438                int load_idx = sd->forkexec_idx;
1439                struct sched_group *group;
1440                int weight;
1441
1442                if (!(sd->flags & sd_flag)) {
1443                        sd = sd->child;
1444                        continue;
1445                }
1446
1447                if (sd_flag & SD_BALANCE_WAKE)
1448                        load_idx = sd->wake_idx;
1449
1450                group = find_idlest_group(sd, p, cpu, load_idx);
1451                if (!group) {
1452                        sd = sd->child;
1453                        continue;
1454                }
1455
1456                new_cpu = find_idlest_cpu(group, p, cpu);
1457                if (new_cpu == -1 || new_cpu == cpu) {
1458                        /* Now try balancing at a lower domain level of cpu */
1459                        sd = sd->child;
1460                        continue;
1461                }
1462
1463                /* Now try balancing at a lower domain level of new_cpu */
1464                cpu = new_cpu;
1465                weight = cpumask_weight(sched_domain_span(sd));
1466                sd = NULL;
1467                for_each_domain(cpu, tmp) {
1468                        if (weight <= cpumask_weight(sched_domain_span(tmp)))
1469                                break;
1470                        if (tmp->flags & sd_flag)
1471                                sd = tmp;
1472                }
1473                /* while loop will break here if sd == NULL */
1474        }
1475
1476out:
1477        rcu_read_unlock();
1478        return new_cpu;
1479}
1480#endif /* CONFIG_SMP */
1481
1482/*
1483 * Adaptive granularity
1484 *
1485 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
1486 * with the limit of wakeup_gran -- when it never does a wakeup.
1487 *
1488 * So the smaller avg_wakeup is the faster we want this task to preempt,
1489 * but we don't want to treat the preemptee unfairly and therefore allow it
1490 * to run for at least the amount of time we'd like to run.
1491 *
1492 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
1493 *
1494 * NOTE: we use *nr_running to scale with load, this nicely matches the
1495 *       degrading latency on load.
1496 */
1497static unsigned long
1498adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1499{
1500        u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1501        u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1502        u64 gran = 0;
1503
1504        if (this_run < expected_wakeup)
1505                gran = expected_wakeup - this_run;
1506
1507        return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1508}
1509
1510static unsigned long
1511wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1512{
1513        unsigned long gran = sysctl_sched_wakeup_granularity;
1514
1515        if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1516                gran = adaptive_gran(curr, se);
1517
1518        /*
1519         * Since its curr running now, convert the gran from real-time
1520         * to virtual-time in his units.
1521         */
1522        if (sched_feat(ASYM_GRAN)) {
1523                /*
1524                 * By using 'se' instead of 'curr' we penalize light tasks, so
1525                 * they get preempted easier. That is, if 'se' < 'curr' then
1526                 * the resulting gran will be larger, therefore penalizing the
1527                 * lighter, if otoh 'se' > 'curr' then the resulting gran will
1528                 * be smaller, again penalizing the lighter task.
1529                 *
1530                 * This is especially important for buddies when the leftmost
1531                 * task is higher priority than the buddy.
1532                 */
1533                if (unlikely(se->load.weight != NICE_0_LOAD))
1534                        gran = calc_delta_fair(gran, se);
1535        } else {
1536                if (unlikely(curr->load.weight != NICE_0_LOAD))
1537                        gran = calc_delta_fair(gran, curr);
1538        }
1539
1540        return gran;
1541}
1542
1543/*
1544 * Should 'se' preempt 'curr'.
1545 *
1546 *             |s1
1547 *        |s2
1548 *   |s3
1549 *         g
1550 *      |<--->|c
1551 *
1552 *  w(c, s1) = -1
1553 *  w(c, s2) =  0
1554 *  w(c, s3) =  1
1555 *
1556 */
1557static int
1558wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1559{
1560        s64 gran, vdiff = curr->vruntime - se->vruntime;
1561
1562        if (vdiff <= 0)
1563                return -1;
1564
1565        gran = wakeup_gran(curr, se);
1566        if (vdiff > gran)
1567                return 1;
1568
1569        return 0;
1570}
1571
1572static void set_last_buddy(struct sched_entity *se)
1573{
1574        if (likely(task_of(se)->policy != SCHED_IDLE)) {
1575                for_each_sched_entity(se)
1576                        cfs_rq_of(se)->last = se;
1577        }
1578}
1579
1580static void set_next_buddy(struct sched_entity *se)
1581{
1582        if (likely(task_of(se)->policy != SCHED_IDLE)) {
1583                for_each_sched_entity(se)
1584                        cfs_rq_of(se)->next = se;
1585        }
1586}
1587
1588/*
1589 * Preempt the current task with a newly woken task if needed:
1590 */
1591static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1592{
1593        struct task_struct *curr = rq->curr;
1594        struct sched_entity *se = &curr->se, *pse = &p->se;
1595        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1596        int sync = wake_flags & WF_SYNC;
1597        int scale = cfs_rq->nr_running >= sched_nr_latency;
1598
1599        update_curr(cfs_rq);
1600
1601        if (unlikely(rt_prio(p->prio))) {
1602                resched_task(curr);
1603                return;
1604        }
1605
1606        if (unlikely(p->sched_class != &fair_sched_class))
1607                return;
1608
1609        if (unlikely(se == pse))
1610                return;
1611
1612        if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
1613                set_next_buddy(pse);
1614
1615        /*
1616         * We can come here with TIF_NEED_RESCHED already set from new task
1617         * wake up path.
1618         */
1619        if (test_tsk_need_resched(curr))
1620                return;
1621
1622        /*
1623         * Batch and idle tasks do not preempt (their preemption is driven by
1624         * the tick):
1625         */
1626        if (unlikely(p->policy != SCHED_NORMAL))
1627                return;
1628
1629        /* Idle tasks are by definition preempted by everybody. */
1630        if (unlikely(curr->policy == SCHED_IDLE)) {
1631                resched_task(curr);
1632                return;
1633        }
1634
1635        if ((sched_feat(WAKEUP_SYNC) && sync) ||
1636            (sched_feat(WAKEUP_OVERLAP) &&
1637             (se->avg_overlap < sysctl_sched_migration_cost &&
1638              pse->avg_overlap < sysctl_sched_migration_cost))) {
1639                resched_task(curr);
1640                return;
1641        }
1642
1643        if (sched_feat(WAKEUP_RUNNING)) {
1644                if (pse->avg_running < se->avg_running) {
1645                        set_next_buddy(pse);
1646                        resched_task(curr);
1647                        return;
1648                }
1649        }
1650
1651        if (!sched_feat(WAKEUP_PREEMPT))
1652                return;
1653
1654        find_matching_se(&se, &pse);
1655
1656        BUG_ON(!pse);
1657
1658        if (wakeup_preempt_entity(se, pse) == 1) {
1659                resched_task(curr);
1660                /*
1661                 * Only set the backward buddy when the current task is still
1662                 * on the rq. This can happen when a wakeup gets interleaved
1663                 * with schedule on the ->pre_schedule() or idle_balance()
1664                 * point, either of which can * drop the rq lock.
1665                 *
1666                 * Also, during early boot the idle thread is in the fair class,
1667                 * for obvious reasons its a bad idea to schedule back to it.
1668                 */
1669                if (unlikely(!se->on_rq || curr == rq->idle))
1670                        return;
1671                if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
1672                        set_last_buddy(se);
1673        }
1674}
1675
1676static struct task_struct *pick_next_task_fair(struct rq *rq)
1677{
1678        struct task_struct *p;
1679        struct cfs_rq *cfs_rq = &rq->cfs;
1680        struct sched_entity *se;
1681
1682        if (unlikely(!cfs_rq->nr_running))
1683                return NULL;
1684
1685        do {
1686                se = pick_next_entity(cfs_rq);
1687                set_next_entity(cfs_rq, se);
1688                cfs_rq = group_cfs_rq(se);
1689        } while (cfs_rq);
1690
1691        p = task_of(se);
1692        hrtick_start_fair(rq, p);
1693
1694        return p;
1695}
1696
1697/*
1698 * Account for a descheduled task:
1699 */
1700static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
1701{
1702        struct sched_entity *se = &prev->se;
1703        struct cfs_rq *cfs_rq;
1704
1705        for_each_sched_entity(se) {
1706                cfs_rq = cfs_rq_of(se);
1707                put_prev_entity(cfs_rq, se);
1708        }
1709}
1710
1711#ifdef CONFIG_SMP
1712/**************************************************
1713 * Fair scheduling class load-balancing methods:
1714 */
1715
1716/*
1717 * Load-balancing iterator. Note: while the runqueue stays locked
1718 * during the whole iteration, the current task might be
1719 * dequeued so the iterator has to be dequeue-safe. Here we
1720 * achieve that by always pre-iterating before returning
1721 * the current task:
1722 */
1723static struct task_struct *
1724__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
1725{
1726        struct task_struct *p = NULL;
1727        struct sched_entity *se;
1728
1729        if (next == &cfs_rq->tasks)
1730                return NULL;
1731
1732        se = list_entry(next, struct sched_entity, group_node);
1733        p = task_of(se);
1734        cfs_rq->balance_iterator = next->next;
1735
1736        return p;
1737}
1738
1739static struct task_struct *load_balance_start_fair(void *arg)
1740{
1741        struct cfs_rq *cfs_rq = arg;
1742
1743        return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
1744}
1745
1746static struct task_struct *load_balance_next_fair(void *arg)
1747{
1748        struct cfs_rq *cfs_rq = arg;
1749
1750        return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
1751}
1752
1753static unsigned long
1754__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1755                unsigned long max_load_move, struct sched_domain *sd,
1756                enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
1757                struct cfs_rq *cfs_rq)
1758{
1759        struct rq_iterator cfs_rq_iterator;
1760
1761        cfs_rq_iterator.start = load_balance_start_fair;
1762        cfs_rq_iterator.next = load_balance_next_fair;
1763        cfs_rq_iterator.arg = cfs_rq;
1764
1765        return balance_tasks(this_rq, this_cpu, busiest,
1766                        max_load_move, sd, idle, all_pinned,
1767                        this_best_prio, &cfs_rq_iterator);
1768}
1769
1770#ifdef CONFIG_FAIR_GROUP_SCHED
1771static unsigned long
1772load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1773                  unsigned long max_load_move,
1774                  struct sched_domain *sd, enum cpu_idle_type idle,
1775                  int *all_pinned, int *this_best_prio)
1776{
1777        long rem_load_move = max_load_move;
1778        int busiest_cpu = cpu_of(busiest);
1779        struct task_group *tg;
1780
1781        rcu_read_lock();
1782        update_h_load(busiest_cpu);
1783
1784        list_for_each_entry_rcu(tg, &task_groups, list) {
1785                struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
1786                unsigned long busiest_h_load = busiest_cfs_rq->h_load;
1787                unsigned long busiest_weight = busiest_cfs_rq->load.weight;
1788                u64 rem_load, moved_load;
1789
1790                /*
1791                 * empty group
1792                 */
1793                if (!busiest_cfs_rq->task_weight)
1794                        continue;
1795
1796                rem_load = (u64)rem_load_move * busiest_weight;
1797                rem_load = div_u64(rem_load, busiest_h_load + 1);
1798
1799                moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
1800                                rem_load, sd, idle, all_pinned, this_best_prio,
1801                                tg->cfs_rq[busiest_cpu]);
1802
1803                if (!moved_load)
1804                        continue;
1805
1806                moved_load *= busiest_h_load;
1807                moved_load = div_u64(moved_load, busiest_weight + 1);
1808
1809                rem_load_move -= moved_load;
1810                if (rem_load_move < 0)
1811                        break;
1812        }
1813        rcu_read_unlock();
1814
1815        return max_load_move - rem_load_move;
1816}
1817#else
1818static unsigned long
1819load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1820                  unsigned long max_load_move,
1821                  struct sched_domain *sd, enum cpu_idle_type idle,
1822                  int *all_pinned, int *this_best_prio)
1823{
1824        return __load_balance_fair(this_rq, this_cpu, busiest,
1825                        max_load_move, sd, idle, all_pinned,
1826                        this_best_prio, &busiest->cfs);
1827}
1828#endif
1829
1830static int
1831move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1832                   struct sched_domain *sd, enum cpu_idle_type idle)
1833{
1834        struct cfs_rq *busy_cfs_rq;
1835        struct rq_iterator cfs_rq_iterator;
1836
1837        cfs_rq_iterator.start = load_balance_start_fair;
1838        cfs_rq_iterator.next = load_balance_next_fair;
1839
1840        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
1841                /*
1842                 * pass busy_cfs_rq argument into
1843                 * load_balance_[start|next]_fair iterators
1844                 */
1845                cfs_rq_iterator.arg = busy_cfs_rq;
1846                if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
1847                                       &cfs_rq_iterator))
1848                    return 1;
1849        }
1850
1851        return 0;
1852}
1853#endif /* CONFIG_SMP */
1854
1855/*
1856 * scheduler tick hitting a task of our scheduling class:
1857 */
1858static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
1859{
1860        struct cfs_rq *cfs_rq;
1861        struct sched_entity *se = &curr->se;
1862
1863        for_each_sched_entity(se) {
1864                cfs_rq = cfs_rq_of(se);
1865                entity_tick(cfs_rq, se, queued);
1866        }
1867}
1868
1869/*
1870 * Share the fairness runtime between parent and child, thus the
1871 * total amount of pressure for CPU stays equal - new tasks
1872 * get a chance to run but frequent forkers are not allowed to
1873 * monopolize the CPU. Note: the parent runqueue is locked,
1874 * the child is not running yet.
1875 */
1876static void task_new_fair(struct rq *rq, struct task_struct *p)
1877{
1878        struct cfs_rq *cfs_rq = task_cfs_rq(p);
1879        struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
1880        int this_cpu = smp_processor_id();
1881
1882        sched_info_queued(p);
1883
1884        update_curr(cfs_rq);
1885        if (curr)
1886                se->vruntime = curr->vruntime;
1887        place_entity(cfs_rq, se, 1);
1888
1889        /* 'curr' will be NULL if the child belongs to a different group */
1890        if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
1891                        curr && entity_before(curr, se)) {
1892                /*
1893                 * Upon rescheduling, sched_class::put_prev_task() will place
1894                 * 'current' within the tree based on its new key value.
1895                 */
1896                swap(curr->vruntime, se->vruntime);
1897                resched_task(rq->curr);
1898        }
1899
1900        enqueue_task_fair(rq, p, 0);
1901}
1902
1903/*
1904 * Priority of the task has changed. Check to see if we preempt
1905 * the current task.
1906 */
1907static void prio_changed_fair(struct rq *rq, struct task_struct *p,
1908                              int oldprio, int running)
1909{
1910        /*
1911         * Reschedule if we are currently running on this runqueue and
1912         * our priority decreased, or if we are not currently running on
1913         * this runqueue and our priority is higher than the current's
1914         */
1915        if (running) {
1916                if (p->prio > oldprio)
1917                        resched_task(rq->curr);
1918        } else
1919                check_preempt_curr(rq, p, 0);
1920}
1921
1922/*
1923 * We switched to the sched_fair class.
1924 */
1925static void switched_to_fair(struct rq *rq, struct task_struct *p,
1926                             int running)
1927{
1928        /*
1929         * We were most likely switched from sched_rt, so
1930         * kick off the schedule if running, otherwise just see
1931         * if we can still preempt the current task.
1932         */
1933        if (running)
1934                resched_task(rq->curr);
1935        else
1936                check_preempt_curr(rq, p, 0);
1937}
1938
1939/* Account for a task changing its policy or group.
1940 *
1941 * This routine is mostly called to set cfs_rq->curr field when a task
1942 * migrates between groups/classes.
1943 */
1944static void set_curr_task_fair(struct rq *rq)
1945{
1946        struct sched_entity *se = &rq->curr->se;
1947
1948        for_each_sched_entity(se)
1949                set_next_entity(cfs_rq_of(se), se);
1950}
1951
1952#ifdef CONFIG_FAIR_GROUP_SCHED
1953static void moved_group_fair(struct task_struct *p)
1954{
1955        struct cfs_rq *cfs_rq = task_cfs_rq(p);
1956
1957        update_curr(cfs_rq);
1958        place_entity(cfs_rq, &p->se, 1);
1959}
1960#endif
1961
1962unsigned int get_rr_interval_fair(struct task_struct *task)
1963{
1964        struct sched_entity *se = &task->se;
1965        unsigned long flags;
1966        struct rq *rq;
1967        unsigned int rr_interval = 0;
1968
1969        /*
1970         * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
1971         * idle runqueue:
1972         */
1973        rq = task_rq_lock(task, &flags);
1974        if (rq->cfs.load.weight)
1975                rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
1976        task_rq_unlock(rq, &flags);
1977
1978        return rr_interval;
1979}
1980
1981/*
1982 * All the scheduling class methods:
1983 */
1984static const struct sched_class fair_sched_class = {
1985        .next                   = &idle_sched_class,
1986        .enqueue_task           = enqueue_task_fair,
1987        .dequeue_task           = dequeue_task_fair,
1988        .yield_task             = yield_task_fair,
1989
1990        .check_preempt_curr     = check_preempt_wakeup,
1991
1992        .pick_next_task         = pick_next_task_fair,
1993        .put_prev_task          = put_prev_task_fair,
1994
1995#ifdef CONFIG_SMP
1996        .select_task_rq         = select_task_rq_fair,
1997
1998        .load_balance           = load_balance_fair,
1999        .move_one_task          = move_one_task_fair,
2000#endif
2001
2002        .set_curr_task          = set_curr_task_fair,
2003        .task_tick              = task_tick_fair,
2004        .task_new               = task_new_fair,
2005
2006        .prio_changed           = prio_changed_fair,
2007        .switched_to            = switched_to_fair,
2008
2009        .get_rr_interval        = get_rr_interval_fair,
2010
2011#ifdef CONFIG_FAIR_GROUP_SCHED
2012        .moved_group            = moved_group_fair,
2013#endif
2014};
2015
2016#ifdef CONFIG_SCHED_DEBUG
2017static void print_cfs_stats(struct seq_file *m, int cpu)
2018{
2019        struct cfs_rq *cfs_rq;
2020
2021        rcu_read_lock();
2022        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
2023                print_cfs_rq(m, cpu, cfs_rq);
2024        rcu_read_unlock();
2025}
2026#endif
2027