linux/block/cfq-iosched.c
/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY          (HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT              (2)

#define CFQ_SLICE_SCALE         (5)
#define CFQ_HW_QUEUE_MIN        (5)
#define CFQ_SERVICE_SHIFT       12

#define CFQQ_SEEK_THR           (sector_t)(8 * 100)
#define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
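/*
 * seek_history (a u32 in struct cfq_queue below) is maintained elsewhere
 * in this file as a sliding window with one bit per recently completed
 * request, a set bit recording a sample whose seek distance exceeded the
 * threshold.  CFQQ_SEEKY() therefore flags a queue once more than
 * 32/8 = 4 of its last 32 samples looked seeky.
 */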

#define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS          IOPRIO_BE_NR
#define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)   ((samples) > 80)
#define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)

/* blkio-related constants */
#define CFQ_WEIGHT_LEGACY_MIN   10
#define CFQ_WEIGHT_LEGACY_DFL   500
#define CFQ_WEIGHT_LEGACY_MAX   1000

struct cfq_ttime {
        unsigned long last_end_request;

        unsigned long ttime_total;
        unsigned long ttime_samples;
        unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
        struct rb_root rb;
        struct rb_node *left;
        unsigned count;
        u64 min_vdisktime;
        struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
                        .ttime = {.last_end_request = jiffies,},}

/*
 * Per process-grouping structure
 */
struct cfq_queue {
        /* reference count */
        int ref;
        /* various state flags, see below */
        unsigned int flags;
        /* parent cfq_data */
        struct cfq_data *cfqd;
        /* service_tree member */
        struct rb_node rb_node;
        /* service_tree key */
        unsigned long rb_key;
        /* prio tree member */
        struct rb_node p_node;
        /* prio tree root we belong to, if any */
        struct rb_root *p_root;
        /* sorted list of pending requests */
        struct rb_root sort_list;
        /* if fifo isn't expired, next request to serve */
        struct request *next_rq;
        /* requests queued in sort_list */
        int queued[2];
        /* currently allocated requests */
        int allocated[2];
        /* fifo list of requests in sort_list */
        struct list_head fifo;

        /* time when queue got scheduled in to dispatch first request. */
        unsigned long dispatch_start;
        unsigned int allocated_slice;
        unsigned int slice_dispatch;
        /* time when first request from queue completed and slice started. */
        unsigned long slice_start;
        unsigned long slice_end;
        long slice_resid;

        /* pending priority requests */
        int prio_pending;
        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;

        /* io prio of this group */
        unsigned short ioprio, org_ioprio;
        unsigned short ioprio_class;

        pid_t pid;

        u32 seek_history;
        sector_t last_request_pos;

        struct cfq_rb_root *service_tree;
        struct cfq_queue *new_cfqq;
        struct cfq_group *cfqg;
        /* Number of sectors dispatched from queue in single dispatch round */
        unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately and uses its own tree, service_tree_idle.
 */
enum wl_class_t {
        BE_WORKLOAD = 0,
        RT_WORKLOAD = 1,
        IDLE_WORKLOAD = 2,
        CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
        ASYNC_WORKLOAD = 0,
        SYNC_NOIDLE_WORKLOAD = 1,
        SYNC_WORKLOAD = 2
};

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
        /* number of ios merged */
        struct blkg_rwstat              merged;
        /* total time spent on device in ns, may not be accurate w/ queueing */
        struct blkg_rwstat              service_time;
        /* total time spent waiting in scheduler queue in ns */
        struct blkg_rwstat              wait_time;
        /* number of IOs queued up */
        struct blkg_rwstat              queued;
        /* total disk time and nr sectors dispatched by this group */
        struct blkg_stat                time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* time not charged to this cgroup */
        struct blkg_stat                unaccounted_time;
        /* sum of number of ios queued across all samples */
        struct blkg_stat                avg_queue_size_sum;
        /* count of samples taken for average */
        struct blkg_stat                avg_queue_size_samples;
        /* how many times this group has been removed from service tree */
        struct blkg_stat                dequeue;
        /* total time spent waiting for it to be assigned a timeslice. */
        struct blkg_stat                group_wait_time;
        /* time spent idling for this blkcg_gq */
        struct blkg_stat                idle_time;
        /* total time with empty current active q with other requests queued */
        struct blkg_stat                empty_time;
        /* fields after this shouldn't be cleared on stat reset */
        uint64_t                        start_group_wait_time;
        uint64_t                        start_idle_time;
        uint64_t                        start_empty_time;
        uint16_t                        flags;
#endif  /* CONFIG_DEBUG_BLK_CGROUP */
#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
};

/* Per-cgroup data */
struct cfq_group_data {
        /* must be the first member */
        struct blkcg_policy_data cpd;

        unsigned int weight;
        unsigned int leaf_weight;
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
        /* must be the first member */
        struct blkg_policy_data pd;

        /* group service_tree member */
        struct rb_node rb_node;

        /* group service_tree key */
        u64 vdisktime;

        /*
         * The number of active cfqgs and sum of their weights under this
         * cfqg.  This covers this cfqg's leaf_weight and all children's
         * weights, but does not cover weights of further descendants.
         *
         * If a cfqg is on the service tree, it's active.  An active cfqg
         * also activates its parent and contributes to the children_weight
         * of the parent.
         */
        int nr_active;
        unsigned int children_weight;

        /*
         * vfraction is the fraction of vdisktime that the tasks in this
         * cfqg are entitled to.  This is determined by compounding the
         * ratios walking up from this cfqg to the root.
         *
         * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
         * vfractions on a service tree is approximately 1.  The sum may
         * deviate a bit due to rounding errors and fluctuations caused by
         * cfqgs entering and leaving the service tree.
         */
        unsigned int vfraction;

        /*
         * There are two weights - (internal) weight is the weight of this
         * cfqg against the sibling cfqgs.  leaf_weight is the weight of
         * this cfqg against the child cfqgs.  For the root cfqg, both
         * weights are kept in sync for backward compatibility.
         */
        unsigned int weight;
        unsigned int new_weight;
        unsigned int dev_weight;

        unsigned int leaf_weight;
        unsigned int new_leaf_weight;
        unsigned int dev_leaf_weight;

        /* number of cfqq currently on this group */
        int nr_cfqq;

        /*
         * Per group busy queues average. Useful for workload slice calc. We
         * create the array for each prio class but at run time it is used
         * only for RT and BE class and slot for IDLE class remains unused.
         * This is primarily done to avoid confusion and a gcc warning.
         */
        unsigned int busy_queues_avg[CFQ_PRIO_NR];
        /*
         * rr lists of queues with requests. We maintain service trees for
         * RT and BE classes. These trees are subdivided in subclasses
         * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
         * class there is no subclassification and all the cfq queues go on
         * a single tree service_tree_idle.
         * Counts are embedded in the cfq_rb_root
         */
        struct cfq_rb_root service_trees[2][3];
        struct cfq_rb_root service_tree_idle;

        unsigned long saved_wl_slice;
        enum wl_type_t saved_wl_type;
        enum wl_class_t saved_wl_class;

        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;
        struct cfq_ttime ttime;
        struct cfqg_stats stats;        /* stats for this cfqg */

        /* async queue for each priority case */
        struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
        struct cfq_queue *async_idle_cfqq;

};
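/*
 * For example, if group R (leaf_weight 500) has two active child groups
 * A and B (weight 500 each), R's children_weight is 500 + 500 + 500 =
 * 1500: tasks in R itself compete with a 500/1500 = 1/3 share, and A
 * and B each get 1/3 of R's service.
 */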

struct cfq_io_cq {
        struct io_cq            icq;            /* must be the first member */
        struct cfq_queue        *cfqq[2];
        struct cfq_ttime        ttime;
        int                     ioprio;         /* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
        uint64_t                blkcg_serial_nr; /* the current blkcg serial */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
        struct request_queue *queue;
        /* Root service tree for cfq_groups */
        struct cfq_rb_root grp_service_tree;
        struct cfq_group *root_group;

        /*
         * The priority currently being served
         */
        enum wl_class_t serving_wl_class;
        enum wl_type_t serving_wl_type;
        unsigned long workload_expires;
        struct cfq_group *serving_group;

        /*
         * Each priority tree is sorted by next_request position.  These
         * trees are used when determining if two or more queues are
         * interleaving requests (see cfq_close_cooperator).
         */
        struct rb_root prio_trees[CFQ_PRIO_LISTS];

        unsigned int busy_queues;
        unsigned int busy_sync_queues;

        int rq_in_driver;
        int rq_in_flight[2];

        /*
         * queue-depth detection
         */
        int rq_queued;
        int hw_tag;
        /*
         * hw_tag can be
         * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
         *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
         *  0 => no NCQ
         */
        int hw_tag_est_depth;
        unsigned int hw_tag_samples;

        /*
         * idle window management
         */
        struct timer_list idle_slice_timer;
        struct work_struct unplug_work;

        struct cfq_queue *active_queue;
        struct cfq_io_cq *active_cic;

        sector_t last_position;

        /*
         * tunables, see top of file
         */
        unsigned int cfq_quantum;
        unsigned int cfq_fifo_expire[2];
        unsigned int cfq_back_penalty;
        unsigned int cfq_back_max;
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
        unsigned int cfq_group_idle;
        unsigned int cfq_latency;
        unsigned int cfq_target_latency;

        /*
         * Fallback dummy cfqq for extreme OOM conditions
         */
        struct cfq_queue oom_cfqq;

        unsigned long last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
static void cfq_put_queue(struct cfq_queue *cfqq);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
                                            enum wl_class_t class,
                                            enum wl_type_t type)
{
        if (!cfqg)
                return NULL;

        if (class == IDLE_WORKLOAD)
                return &cfqg->service_tree_idle;

        return &cfqg->service_trees[class][type];
}

enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
        CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
        CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
        CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
        CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
        CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)                                              \
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
{                                                                       \
        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
}                                                                       \
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
{                                                                       \
        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
}                                                                       \
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
{                                                                       \
        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
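/*
 * Each CFQ_CFQQ_FNS(name) invocation above generates three accessors;
 * for example, CFQ_CFQQ_FNS(on_rr) expands to:
 *
 *   static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *   { cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr); }
 *   static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *   { cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr); }
 *   static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *   { return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0; }
 *
 * so callers flip and test state bits via cfq_mark_cfqq_on_rr(cfqq),
 * cfq_clear_cfqq_on_rr(cfqq) and cfq_cfqq_on_rr(cfqq).
 */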

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
        CFQG_stats_waiting = 0,
        CFQG_stats_idling,
        CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)                                             \
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)     \
{                                                                       \
        stats->flags |= (1 << CFQG_stats_##name);                       \
}                                                                       \
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)    \
{                                                                       \
        stats->flags &= ~(1 << CFQG_stats_##name);                      \
}                                                                       \
static inline int cfqg_stats_##name(struct cfqg_stats *stats)           \
{                                                                       \
        return (stats->flags & (1 << CFQG_stats_##name)) != 0;          \
}                                                                       \

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
        unsigned long long now;

        if (!cfqg_stats_waiting(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                blkg_stat_add(&stats->group_wait_time,
                              now - stats->start_group_wait_time);
        cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
                                                 struct cfq_group *curr_cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        if (cfqg_stats_waiting(stats))
                return;
        if (cfqg == curr_cfqg)
                return;
        stats->start_group_wait_time = sched_clock();
        cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
        unsigned long long now;

        if (!cfqg_stats_empty(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_empty_time))
                blkg_stat_add(&stats->empty_time,
                              now - stats->start_empty_time);
        cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
        blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        if (blkg_rwstat_total(&stats->queued))
                return;

        /*
         * group is already marked empty. This can happen if cfqq got new
         * request in parent group and moved to this group while being added
         * to service tree. Just ignore the event and move on.
         */
        if (cfqg_stats_empty(stats))
                return;

        stats->start_empty_time = sched_clock();
        cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        if (cfqg_stats_idling(stats)) {
                unsigned long long now = sched_clock();

                if (time_after64(now, stats->start_idle_time))
                        blkg_stat_add(&stats->idle_time,
                                      now - stats->start_idle_time);
                cfqg_stats_clear_idling(stats);
        }
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        BUG_ON(cfqg_stats_idling(stats));

        stats->start_idle_time = sched_clock();
        cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        blkg_stat_add(&stats->avg_queue_size_sum,
                      blkg_rwstat_total(&stats->queued));
        blkg_stat_add(&stats->avg_queue_size_samples, 1);
        cfqg_stats_update_group_wait_time(stats);
}

#else   /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static struct cfq_group_data
*cpd_to_cfqgd(struct blkcg_policy_data *cpd)
{
        return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
        return pd_to_blkg(&cfqg->pd);
}

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
        return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
{
        return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
}

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
        struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;

        return pblkg ? blkg_to_cfqg(pblkg) : NULL;
}

static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
                                      struct cfq_group *ancestor)
{
        return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
                                    cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
        return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
        return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  do {                    \
        char __pbuf[128];                                               \
                                                                        \
        blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));  \
        blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
                        cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
                          __pbuf, ##args);                              \
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)  do {                    \
        char __pbuf[128];                                               \
                                                                        \
        blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));          \
        blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);    \
} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
                                            struct cfq_group *curr_cfqg, int rw)
{
        blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
        cfqg_stats_end_empty_time(&cfqg->stats);
        cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
                        unsigned long time, unsigned long unaccounted_time)
{
        blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
        blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
        blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
                        uint64_t start_time, uint64_t io_start_time, int rw)
{
        struct cfqg_stats *stats = &cfqg->stats;
        unsigned long long now = sched_clock();

        if (time_after64(now, io_start_time))
                blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
        if (time_after64(io_start_time, start_time))
                blkg_rwstat_add(&stats->wait_time, rw,
                                io_start_time - start_time);
}

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
        /* queued stats shouldn't be cleared */
        blkg_rwstat_reset(&stats->merged);
        blkg_rwstat_reset(&stats->service_time);
        blkg_rwstat_reset(&stats->wait_time);
        blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_stat_reset(&stats->unaccounted_time);
        blkg_stat_reset(&stats->avg_queue_size_sum);
        blkg_stat_reset(&stats->avg_queue_size_samples);
        blkg_stat_reset(&stats->dequeue);
        blkg_stat_reset(&stats->group_wait_time);
        blkg_stat_reset(&stats->idle_time);
        blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
{
        /* queued stats shouldn't be cleared */
        blkg_rwstat_add_aux(&to->merged, &from->merged);
        blkg_rwstat_add_aux(&to->service_time, &from->service_time);
        blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
        blkg_stat_add_aux(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
        blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
        blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
        blkg_stat_add_aux(&to->dequeue, &from->dequeue);
        blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
        blkg_stat_add_aux(&to->idle_time, &from->idle_time);
        blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
        struct cfq_group *parent = cfqg_parent(cfqg);

        lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

        if (unlikely(!parent))
                return;

        cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
        cfqg_stats_reset(&cfqg->stats);
}

#else   /* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
                                      struct cfq_group *ancestor)
{
        return true;
}
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
        blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
                        cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
                                ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
                        struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
                        unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
                        uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif  /* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)     \
        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
        for (i = 0; i <= IDLE_WORKLOAD; i++) \
                for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
                        : &cfqg->service_tree_idle; \
                        (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
                        (i == IDLE_WORKLOAD && j == 0); \
                        j++, st = i < IDLE_WORKLOAD ? \
                        &cfqg->service_trees[i][j]: NULL) \

static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
        struct cfq_ttime *ttime, bool group_idle)
{
        unsigned long slice;
        if (!sample_valid(ttime->ttime_samples))
                return false;
        if (group_idle)
                slice = cfqd->cfq_group_idle;
        else
                slice = cfqd->cfq_slice_idle;
        return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
        /*
         * If we are not idling on queues and it is a NCQ drive, parallel
         * execution of requests is on and measuring time is not possible
         * in most of the cases until and unless we drive shallower queue
         * depths and that becomes a performance bottleneck. In such cases
         * switch to start providing fairness in terms of number of IOs.
         */
        if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
                return true;
        else
                return false;
}

static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
        if (cfq_class_idle(cfqq))
                return IDLE_WORKLOAD;
        if (cfq_class_rt(cfqq))
                return RT_WORKLOAD;
        return BE_WORKLOAD;
}


static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
        if (!cfq_cfqq_sync(cfqq))
                return ASYNC_WORKLOAD;
        if (!cfq_cfqq_idle_window(cfqq))
                return SYNC_NOIDLE_WORKLOAD;
        return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
                                        struct cfq_data *cfqd,
                                        struct cfq_group *cfqg)
{
        if (wl_class == IDLE_WORKLOAD)
                return cfqg->service_tree_idle.count;

        return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
                cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
                cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
                                        struct cfq_group *cfqg)
{
        return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
                cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
                                       struct cfq_io_cq *cic, struct bio *bio);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
        /* cic->icq is the first member, %NULL will convert to %NULL */
        return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
                                               struct io_context *ioc)
{
        if (ioc)
                return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
        return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
        return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
                                bool is_sync)
{
        cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
        return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
        return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
        if (cfqd->busy_queues) {
                cfq_log(cfqd, "schedule dispatch");
                kblockd_schedule_work(&cfqd->unplug_work);
        }
}

/*
 * Scale the schedule slice based on io priority. Use the sync time slice
 * only if a queue is marked sync and has sync io queued. A sync queue
 * with async io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
                                 unsigned short prio)
{
        const int base_slice = cfqd->cfq_slice[sync];

        WARN_ON(prio >= IOPRIO_BE_NR);

        return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
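/*
 * For example, with HZ = 1000 the default sync base slice is HZ/10 = 100
 * jiffies and base_slice/CFQ_SLICE_SCALE = 20, so the default ioprio 4
 * keeps 100 jiffies, ioprio 0 gets 100 + 20 * 4 = 180, and ioprio 7 gets
 * 100 + 20 * (4 - 7) = 40.
 */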

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

/**
 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 * @charge: disk time being charged
 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 *
 * Scale @charge according to @vfraction, which is in range (0, 1].  The
 * scaling is inversely proportional.
 *
 * scaled = charge / vfraction
 *
 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 */
static inline u64 cfqg_scale_charge(unsigned long charge,
                                    unsigned int vfraction)
{
        u64 c = charge << CFQ_SERVICE_SHIFT;    /* make it fixed point */

        /* charge / vfraction */
        c <<= CFQ_SERVICE_SHIFT;
        do_div(c, vfraction);
        return c;
}
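/*
 * Worked example: a group entitled to 1/4 of the disk has
 * vfraction = (1 << 12) / 4 = 1024.  Charging 20 jiffies:
 *
 *   c = 20 << 12 = 81920;  c <<= 12  ->  335544320;
 *   335544320 / 1024 = 327680 = 80 << 12
 *
 * i.e. 80 jiffies in fixed point: a group owning a quarter of the disk
 * accrues vdisktime four times as fast as one owning all of it.
 */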

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
        s64 delta = (s64)(vdisktime - min_vdisktime);
        if (delta > 0)
                min_vdisktime = vdisktime;

        return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
        s64 delta = (s64)(vdisktime - min_vdisktime);
        if (delta < 0)
                min_vdisktime = vdisktime;

        return min_vdisktime;
}
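/*
 * The signed-delta comparison keeps these helpers correct across u64
 * wraparound: e.g. with min_vdisktime = ULLONG_MAX - 10 and vdisktime = 5,
 * the unsigned difference 5 - (ULLONG_MAX - 10) wraps to 16, so
 * delta = 16 > 0 and 5 is correctly treated as the later time.
 */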

static void update_min_vdisktime(struct cfq_rb_root *st)
{
        struct cfq_group *cfqg;

        if (st->left) {
                cfqg = rb_entry_cfqg(st->left);
                st->min_vdisktime = max_vdisktime(st->min_vdisktime,
                                                  cfqg->vdisktime);
        }
}

/*
 * Get the averaged number of queues of RT/BE priority.
 * The average is updated with a formula that gives more weight to higher
 * numbers, so that it quickly follows sudden increases and decreases slowly.
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
                                        struct cfq_group *cfqg, bool rt)
{
        unsigned min_q, max_q;
        unsigned mult  = cfq_hist_divisor - 1;
        unsigned round = cfq_hist_divisor / 2;
        unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

        min_q = min(cfqg->busy_queues_avg[rt], busy);
        max_q = max(cfqg->busy_queues_avg[rt], busy);
        cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
                cfq_hist_divisor;
        return cfqg->busy_queues_avg[rt];
}
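/*
 * With cfq_hist_divisor = 4 the update is avg' = (3 * max + min + 2) / 4.
 * For example, if avg = 2 and busy jumps to 10, avg' = (30 + 2 + 2) / 4 = 8
 * (the rise is followed almost immediately); if busy then drops to 0,
 * avg' = (24 + 0 + 2) / 4 = 6 (the decay takes several rounds).
 */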

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
        if (cfqd->cfq_latency) {
                /*
                 * interested queues (we consider only the ones with the same
                 * priority class in the cfq group)
                 */
                unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
                                                cfq_class_rt(cfqq));
                unsigned sync_slice = cfqd->cfq_slice[1];
                unsigned expect_latency = sync_slice * iq;
                unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

                if (expect_latency > group_slice) {
                        unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
                        /* scale low_slice according to IO priority
                         * and sync vs async */
                        unsigned low_slice =
                                min(slice, base_low_slice * slice / sync_slice);
                        /* the adapted slice value is scaled to fit all iqs
                         * into the target latency */
                        slice = max(slice * group_slice / expect_latency,
                                    low_slice);
                }
        }
        return slice;
}
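/*
 * For example (HZ = 1000, defaults): slice = sync_slice = 100 jiffies,
 * five interested queues give expect_latency = 500, and a group owning
 * the whole disk has group_slice = cfq_target_latency = 300.  Since
 * 500 > 300, the slice shrinks to 100 * 300 / 500 = 60 jiffies, bounded
 * below by low_slice = min(100, 2 * 8 * 100 / 100) = 16.
 */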

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

        cfqq->slice_start = jiffies;
        cfqq->slice_end = jiffies + slice;
        cfqq->allocated_slice = slice;
        cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
        if (cfq_cfqq_slice_new(cfqq))
                return false;
        if (time_before(jiffies, cfqq->slice_end))
                return false;

        return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
        sector_t s1, s2, d1 = 0, d2 = 0;
        unsigned long back_max;
#define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
        unsigned wrap = 0; /* bit mask: requests behind the disk head? */

        if (rq1 == NULL || rq1 == rq2)
                return rq2;
        if (rq2 == NULL)
                return rq1;

        if (rq_is_sync(rq1) != rq_is_sync(rq2))
                return rq_is_sync(rq1) ? rq1 : rq2;

        if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
                return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

        s1 = blk_rq_pos(rq1);
        s2 = blk_rq_pos(rq2);

        /*
         * by definition, 1KiB is 2 sectors
         */
        back_max = cfqd->cfq_back_max * 2;

        /*
         * Strict one way elevator _except_ in the case where we allow
         * short backward seeks which are biased as twice the cost of a
         * similar forward seek.
         */
        if (s1 >= last)
                d1 = s1 - last;
        else if (s1 + back_max >= last)
                d1 = (last - s1) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ1_WRAP;

        if (s2 >= last)
                d2 = s2 - last;
        else if (s2 + back_max >= last)
                d2 = (last - s2) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ2_WRAP;

        /* Found required data */

        /*
         * By doing switch() on the bit mask "wrap" we avoid having to
         * check two variables for all permutations: --> faster!
         */
        switch (wrap) {
        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
                if (d1 < d2)
                        return rq1;
                else if (d2 < d1)
                        return rq2;
                else {
                        if (s1 >= s2)
                                return rq1;
                        else
                                return rq2;
                }

        case CFQ_RQ2_WRAP:
                return rq1;
        case CFQ_RQ1_WRAP:
                return rq2;
        case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
        default:
                /*
                 * Since both rqs are wrapped,
                 * start with the one that's further behind head
                 * (--> only *one* back seek required),
                 * since back seek takes more time than forward.
                 */
                if (s1 <= s2)
                        return rq1;
                else
                        return rq2;
        }
}
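/*
 * Example with the default cfq_back_penalty of 2: if the head is at
 * sector 10000, rq1 at 10100 and rq2 at 9900, both are 100 sectors away,
 * but the backward candidate is charged d2 = 100 * 2 = 200 while
 * d1 = 100, so the forward request rq1 wins.  A request more than
 * back_max sectors behind the head is marked as wrapped and is only
 * chosen when nothing better exists.
 */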

/*
 * The below implements the leftmost-node cache for the rbtrees.
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
        /* Service tree is empty */
        if (!root->count)
                return NULL;

        if (!root->left)
                root->left = rb_first(&root->rb);

        if (root->left)
                return rb_entry(root->left, struct cfq_queue, rb_node);

        return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
        if (!root->left)
                root->left = rb_first(&root->rb);

        if (root->left)
                return rb_entry_cfqg(root->left);

        return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
        rb_erase(n, root);
        RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
        if (root->left == n)
                root->left = NULL;
        rb_erase_init(n, &root->rb);
        --root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                  struct request *last)
{
        struct rb_node *rbnext = rb_next(&last->rb_node);
        struct rb_node *rbprev = rb_prev(&last->rb_node);
        struct request *next = NULL, *prev = NULL;

        BUG_ON(RB_EMPTY_NODE(&last->rb_node));

        if (rbprev)
                prev = rb_entry_rq(rbprev);

        if (rbnext)
                next = rb_entry_rq(rbnext);
        else {
                rbnext = rb_first(&cfqq->sort_list);
                if (rbnext && rbnext != &last->rb_node)
                        next = rb_entry_rq(rbnext);
        }

        return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
                                      struct cfq_queue *cfqq)
{
        /*
         * just an approximation, should be ok.
         */
        return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
                       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        struct rb_node **node = &st->rb.rb_node;
        struct rb_node *parent = NULL;
        struct cfq_group *__cfqg;
        s64 key = cfqg_key(st, cfqg);
        int left = 1;

        while (*node != NULL) {
                parent = *node;
                __cfqg = rb_entry_cfqg(parent);

                if (key < cfqg_key(st, __cfqg))
                        node = &parent->rb_left;
                else {
                        node = &parent->rb_right;
                        left = 0;
                }
        }

        if (left)
                st->left = &cfqg->rb_node;

        rb_link_node(&cfqg->rb_node, parent, node);
        rb_insert_color(&cfqg->rb_node, &st->rb);
}

/*
 * This has to be called only on activation of cfqg
 */
static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
        if (cfqg->new_weight) {
                cfqg->weight = cfqg->new_weight;
                cfqg->new_weight = 0;
        }
}

static void
cfq_update_group_leaf_weight(struct cfq_group *cfqg)
{
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

        if (cfqg->new_leaf_weight) {
                cfqg->leaf_weight = cfqg->new_leaf_weight;
                cfqg->new_leaf_weight = 0;
        }
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;      /* start with 1 */
        struct cfq_group *pos = cfqg;
        struct cfq_group *parent;
        bool propagate;

        /* add to the service tree */
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

        /*
         * Update leaf_weight.  We cannot update weight at this point
         * because cfqg might already have been activated and is
         * contributing its current weight to the parent's children_weight.
         */
        cfq_update_group_leaf_weight(cfqg);
        __cfq_group_service_tree_add(st, cfqg);

        /*
         * Activate @cfqg and calculate the portion of vfraction @cfqg is
         * entitled to.  vfraction is calculated by walking the tree
         * towards the root calculating the fraction it has at each level.
         * The compounded ratio is how much vfraction @cfqg owns.
         *
         * Start with the proportion tasks in this cfqg has against active
         * children cfqgs - its leaf_weight against children_weight.
         */
        propagate = !pos->nr_active++;
        pos->children_weight += pos->leaf_weight;
        vfr = vfr * pos->leaf_weight / pos->children_weight;

        /*
         * Compound ->weight walking up the tree.  Both activation and
         * vfraction calculation are done in the same loop.  Propagation
         * stops once an already activated node is met.  vfraction
         * calculation should always continue to the root.
         */
        while ((parent = cfqg_parent(pos))) {
                if (propagate) {
                        cfq_update_group_weight(pos);
                        propagate = !parent->nr_active++;
                        parent->children_weight += pos->weight;
                }
                vfr = vfr * pos->weight / parent->children_weight;
                pos = parent;
        }

        cfqg->vfraction = max_t(unsigned, vfr, 1);
}
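/*
 * For example, take a group G with leaf_weight 500 whose children_weight
 * becomes 1000 on activation, under a parent where G's weight 500 is
 * half of children_weight 1000: vfr starts at 1 << 12 = 4096, becomes
 * 4096 * 500 / 1000 = 2048 at G's own level and 2048 * 500 / 1000 = 1024
 * at the parent, i.e. a compounded vfraction of 1/4.
 */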

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
        struct cfq_group *__cfqg;
        struct rb_node *n;

        cfqg->nr_cfqq++;
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                return;

        /*
         * Currently put the group at the end. Later implement something
         * so that groups get lesser vtime based on their weights, so that
         * a group does not lose everything if it was not continuously
         * backlogged.
         */
        n = rb_last(&st->rb);
        if (n) {
                __cfqg = rb_entry_cfqg(n);
                cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
        } else
                cfqg->vdisktime = st->min_vdisktime;
        cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        struct cfq_group *pos = cfqg;
        bool propagate;

        /*
         * Undo activation from cfq_group_service_tree_add().  Deactivate
         * @cfqg and propagate deactivation upwards.
         */
        propagate = !--pos->nr_active;
        pos->children_weight -= pos->leaf_weight;

        while (propagate) {
                struct cfq_group *parent = cfqg_parent(pos);

                /* @pos has 0 nr_active at this point */
                WARN_ON_ONCE(pos->children_weight);
                pos->vfraction = 0;

                if (!parent)
                        break;

                propagate = !--parent->nr_active;
                parent->children_weight -= pos->weight;
                pos = parent;
        }

        /* remove from the service tree */
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;

        BUG_ON(cfqg->nr_cfqq < 1);
        cfqg->nr_cfqq--;

        /* If there are other cfq queues under this group, don't delete it */
        if (cfqg->nr_cfqq)
                return;

        cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
        cfq_group_service_tree_del(st, cfqg);
        cfqg->saved_wl_slice = 0;
        cfqg_stats_update_dequeue(cfqg);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
                                                unsigned int *unaccounted_time)
{
        unsigned int slice_used;

        /*
         * Queue got expired before even a single request completed or
         * got expired immediately after first request completion.
         */
        if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
                /*
                 * Also charge the seek time incurred to the group, otherwise
                 * if there are multiple queues in the group, each can dispatch
                 * a single request on seeky media and cause lots of seek time
                 * and the group will never know it.
                 */
1454                slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1455                                        1);
1456        } else {
1457                slice_used = jiffies - cfqq->slice_start;
1458                if (slice_used > cfqq->allocated_slice) {
1459                        *unaccounted_time = slice_used - cfqq->allocated_slice;
1460                        slice_used = cfqq->allocated_slice;
1461                }
1462                if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1463                        *unaccounted_time += cfqq->slice_start -
1464                                        cfqq->dispatch_start;
1465        }
1466
1467        return slice_used;
1468}
1469
1470static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1471                                struct cfq_queue *cfqq)
1472{
1473        struct cfq_rb_root *st = &cfqd->grp_service_tree;
1474        unsigned int used_sl, charge, unaccounted_sl = 0;
1475        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1476                        - cfqg->service_tree_idle.count;
1477        unsigned int vfr;
1478
1479        BUG_ON(nr_sync < 0);
1480        used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1481
1482        if (iops_mode(cfqd))
1483                charge = cfqq->slice_dispatch;
1484        else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1485                charge = cfqq->allocated_slice;
1486
1487        /*
1488         * Can't update vdisktime while on the service tree, and cfqg->vfraction
1489         * is valid only while on it.  Cache vfr, leave the service tree,
1490         * update vdisktime and go back on.  The re-addition to the tree
1491         * will also update the weights as necessary.
1492         */
1493        vfr = cfqg->vfraction;
1494        cfq_group_service_tree_del(st, cfqg);
1495        cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
1496        cfq_group_service_tree_add(st, cfqg);
1497
1498        /* This group is being expired. Save the context */
1499        if (time_after(cfqd->workload_expires, jiffies)) {
1500                cfqg->saved_wl_slice = cfqd->workload_expires
1501                                                - jiffies;
1502                cfqg->saved_wl_type = cfqd->serving_wl_type;
1503                cfqg->saved_wl_class = cfqd->serving_wl_class;
1504        } else
1505                cfqg->saved_wl_slice = 0;
1506
1507        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1508                                        st->min_vdisktime);
1509        cfq_log_cfqq(cfqq->cfqd, cfqq,
1510                     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1511                     used_sl, cfqq->slice_dispatch, charge,
1512                     iops_mode(cfqd), cfqq->nr_sectors);
1513        cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1514        cfqg_stats_set_start_empty_time(cfqg);
1515}
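/*
 * To summarize the charging policy above (a reading of the code, not
 * new behaviour): the charge is the wall-clock slice by default, the
 * number of dispatched requests in iops mode, and the full allocated
 * slice when an async queue expires with no sync queues left, so a
 * group cannot dodge accounting by trickling out async I/O.
 */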
1516
1517/**
1518 * cfq_init_cfqg_base - initialize base part of a cfq_group
1519 * @cfqg: cfq_group to initialize
1520 *
1521 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1522 * is enabled or not.
1523 */
1524static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1525{
1526        struct cfq_rb_root *st;
1527        int i, j;
1528
1529        for_each_cfqg_st(cfqg, i, j, st)
1530                *st = CFQ_RB_ROOT;
1531        RB_CLEAR_NODE(&cfqg->rb_node);
1532
1533        cfqg->ttime.last_end_request = jiffies;
1534}
1535
1536#ifdef CONFIG_CFQ_GROUP_IOSCHED
1537static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
1538                            bool on_dfl, bool reset_dev, bool is_leaf_weight);
1539
1540static void cfqg_stats_exit(struct cfqg_stats *stats)
1541{
1542        blkg_rwstat_exit(&stats->merged);
1543        blkg_rwstat_exit(&stats->service_time);
1544        blkg_rwstat_exit(&stats->wait_time);
1545        blkg_rwstat_exit(&stats->queued);
1546        blkg_stat_exit(&stats->time);
1547#ifdef CONFIG_DEBUG_BLK_CGROUP
1548        blkg_stat_exit(&stats->unaccounted_time);
1549        blkg_stat_exit(&stats->avg_queue_size_sum);
1550        blkg_stat_exit(&stats->avg_queue_size_samples);
1551        blkg_stat_exit(&stats->dequeue);
1552        blkg_stat_exit(&stats->group_wait_time);
1553        blkg_stat_exit(&stats->idle_time);
1554        blkg_stat_exit(&stats->empty_time);
1555#endif
1556}
1557
1558static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
1559{
1560        if (blkg_rwstat_init(&stats->merged, gfp) ||
1561            blkg_rwstat_init(&stats->service_time, gfp) ||
1562            blkg_rwstat_init(&stats->wait_time, gfp) ||
1563            blkg_rwstat_init(&stats->queued, gfp) ||
1564            blkg_stat_init(&stats->time, gfp))
1565                goto err;
1566
1567#ifdef CONFIG_DEBUG_BLK_CGROUP
1568        if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
1569            blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
1570            blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
1571            blkg_stat_init(&stats->dequeue, gfp) ||
1572            blkg_stat_init(&stats->group_wait_time, gfp) ||
1573            blkg_stat_init(&stats->idle_time, gfp) ||
1574            blkg_stat_init(&stats->empty_time, gfp))
1575                goto err;
1576#endif
1577        return 0;
1578err:
1579        cfqg_stats_exit(stats);
1580        return -ENOMEM;
1581}
1582
1583static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
1584{
1585        struct cfq_group_data *cgd;
1586
1587        cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
1588        if (!cgd)
1589                return NULL;
1590        return &cgd->cpd;
1591}
1592
1593static void cfq_cpd_init(struct blkcg_policy_data *cpd)
1594{
1595        struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
1596        unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
1597                              CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
1598
1599        if (cpd_to_blkcg(cpd) == &blkcg_root)
1600                weight *= 2;
1601
1602        cgd->weight = weight;
1603        cgd->leaf_weight = weight;
1604}
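/*
 * For illustration: on the legacy hierarchy a fresh blkcg therefore
 * starts with weight == leaf_weight == CFQ_WEIGHT_LEGACY_DFL, and the
 * root blkcg gets twice that; on the default hierarchy the same logic
 * applies with CGROUP_WEIGHT_DFL (100) as the base.
 */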
1605
1606static void cfq_cpd_free(struct blkcg_policy_data *cpd)
1607{
1608        kfree(cpd_to_cfqgd(cpd));
1609}
1610
1611static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
1612{
1613        struct blkcg *blkcg = cpd_to_blkcg(cpd);
1614        bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
1615        unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
1616
1617        if (blkcg == &blkcg_root)
1618                weight *= 2;
1619
1620        WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, false));
1621        WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, true));
1622}
1623
1624static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
1625{
1626        struct cfq_group *cfqg;
1627
1628        cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
1629        if (!cfqg)
1630                return NULL;
1631
1632        cfq_init_cfqg_base(cfqg);
1633        if (cfqg_stats_init(&cfqg->stats, gfp)) {
1634                kfree(cfqg);
1635                return NULL;
1636        }
1637
1638        return &cfqg->pd;
1639}
1640
1641static void cfq_pd_init(struct blkg_policy_data *pd)
1642{
1643        struct cfq_group *cfqg = pd_to_cfqg(pd);
1644        struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
1645
1646        cfqg->weight = cgd->weight;
1647        cfqg->leaf_weight = cgd->leaf_weight;
1648}
1649
1650static void cfq_pd_offline(struct blkg_policy_data *pd)
1651{
1652        struct cfq_group *cfqg = pd_to_cfqg(pd);
1653        int i;
1654
1655        for (i = 0; i < IOPRIO_BE_NR; i++) {
1656                if (cfqg->async_cfqq[0][i])
1657                        cfq_put_queue(cfqg->async_cfqq[0][i]);
1658                if (cfqg->async_cfqq[1][i])
1659                        cfq_put_queue(cfqg->async_cfqq[1][i]);
1660        }
1661
1662        if (cfqg->async_idle_cfqq)
1663                cfq_put_queue(cfqg->async_idle_cfqq);
1664
1665        /*
1666         * @blkg is going offline and will be ignored by
1667         * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
1668         * that they don't get lost.  If IOs complete after this point, the
1669         * stats for them will be lost.  Oh well...
1670         */
1671        cfqg_stats_xfer_dead(cfqg);
1672}
1673
1674static void cfq_pd_free(struct blkg_policy_data *pd)
1675{
1676        struct cfq_group *cfqg = pd_to_cfqg(pd);
1677
1678        cfqg_stats_exit(&cfqg->stats);
1679        kfree(cfqg);
1680}
1681
1682static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
1683{
1684        struct cfq_group *cfqg = pd_to_cfqg(pd);
1685
1686        cfqg_stats_reset(&cfqg->stats);
1687}
1688
1689static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
1690                                         struct blkcg *blkcg)
1691{
1692        struct blkcg_gq *blkg;
1693
1694        blkg = blkg_lookup(blkcg, cfqd->queue);
1695        if (likely(blkg))
1696                return blkg_to_cfqg(blkg);
1697        return NULL;
1698}
1699
1700static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1701{
1702        cfqq->cfqg = cfqg;
1703        /* cfqq reference on cfqg */
1704        cfqg_get(cfqg);
1705}
1706
1707static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1708                                     struct blkg_policy_data *pd, int off)
1709{
1710        struct cfq_group *cfqg = pd_to_cfqg(pd);
1711
1712        if (!cfqg->dev_weight)
1713                return 0;
1714        return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1715}
1716
1717static int cfqg_print_weight_device(struct seq_file *sf, void *v)
1718{
1719        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1720                          cfqg_prfill_weight_device, &blkcg_policy_cfq,
1721                          0, false);
1722        return 0;
1723}
1724
1725static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1726                                          struct blkg_policy_data *pd, int off)
1727{
1728        struct cfq_group *cfqg = pd_to_cfqg(pd);
1729
1730        if (!cfqg->dev_leaf_weight)
1731                return 0;
1732        return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1733}
1734
1735static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
1736{
1737        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1738                          cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1739                          0, false);
1740        return 0;
1741}
1742
1743static int cfq_print_weight(struct seq_file *sf, void *v)
1744{
1745        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1746        struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1747        unsigned int val = 0;
1748
1749        if (cgd)
1750                val = cgd->weight;
1751
1752        seq_printf(sf, "%u\n", val);
1753        return 0;
1754}
1755
1756static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
1757{
1758        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1759        struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1760        unsigned int val = 0;
1761
1762        if (cgd)
1763                val = cgd->leaf_weight;
1764
1765        seq_printf(sf, "%u\n", val);
1766        return 0;
1767}
1768
1769static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1770                                        char *buf, size_t nbytes, loff_t off,
1771                                        bool on_dfl, bool is_leaf_weight)
1772{
1773        unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1774        unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
1775        struct blkcg *blkcg = css_to_blkcg(of_css(of));
1776        struct blkg_conf_ctx ctx;
1777        struct cfq_group *cfqg;
1778        struct cfq_group_data *cfqgd;
1779        int ret;
1780        u64 v;
1781
1782        ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1783        if (ret)
1784                return ret;
1785
1786        if (sscanf(ctx.body, "%llu", &v) == 1) {
1787                /* require "default" on dfl */
1788                ret = -ERANGE;
1789                if (!v && on_dfl)
1790                        goto out_finish;
1791        } else if (!strcmp(strim(ctx.body), "default")) {
1792                v = 0;
1793        } else {
1794                ret = -EINVAL;
1795                goto out_finish;
1796        }
1797
1798        cfqg = blkg_to_cfqg(ctx.blkg);
1799        cfqgd = blkcg_to_cfqgd(blkcg);
1800
1801        ret = -ERANGE;
1802        if (!v || (v >= min && v <= max)) {
1803                if (!is_leaf_weight) {
1804                        cfqg->dev_weight = v;
1805                        cfqg->new_weight = v ?: cfqgd->weight;
1806                } else {
1807                        cfqg->dev_leaf_weight = v;
1808                        cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight;
1809                }
1810                ret = 0;
1811        }
1812out_finish:
1813        blkg_conf_finish(&ctx);
1814        return ret ?: nbytes;
1815}
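/*
 * Example usage (an illustrative sketch; device 8:0 and the cgroup
 * path are assumptions).  On the legacy hierarchy this handler backs
 * blkio.weight_device, which takes "MAJ:MIN WEIGHT":
 *
 *   # echo "8:0 300" > /sys/fs/cgroup/blkio/grp/blkio.weight_device
 *   # echo "8:0 0"   > /sys/fs/cgroup/blkio/grp/blkio.weight_device
 *
 * The second write clears the per-device override so the group-wide
 * weight applies again; on the default hierarchy a bare 0 is rejected
 * and "8:0 default" must be written instead.
 */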
1816
1817static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1818                                      char *buf, size_t nbytes, loff_t off)
1819{
1820        return __cfqg_set_weight_device(of, buf, nbytes, off, false, false);
1821}
1822
1823static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1824                                           char *buf, size_t nbytes, loff_t off)
1825{
1826        return __cfqg_set_weight_device(of, buf, nbytes, off, false, true);
1827}
1828
1829static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
1830                            bool on_dfl, bool reset_dev, bool is_leaf_weight)
1831{
1832        unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1833        unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
1834        struct blkcg *blkcg = css_to_blkcg(css);
1835        struct blkcg_gq *blkg;
1836        struct cfq_group_data *cfqgd;
1837        int ret = 0;
1838
1839        if (val < min || val > max)
1840                return -ERANGE;
1841
1842        spin_lock_irq(&blkcg->lock);
1843        cfqgd = blkcg_to_cfqgd(blkcg);
1844        if (!cfqgd) {
1845                ret = -EINVAL;
1846                goto out;
1847        }
1848
1849        if (!is_leaf_weight)
1850                cfqgd->weight = val;
1851        else
1852                cfqgd->leaf_weight = val;
1853
1854        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1855                struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1856
1857                if (!cfqg)
1858                        continue;
1859
1860                if (!is_leaf_weight) {
1861                        if (reset_dev)
1862                                cfqg->dev_weight = 0;
1863                        if (!cfqg->dev_weight)
1864                                cfqg->new_weight = cfqgd->weight;
1865                } else {
1866                        if (reset_dev)
1867                                cfqg->dev_leaf_weight = 0;
1868                        if (!cfqg->dev_leaf_weight)
1869                                cfqg->new_leaf_weight = cfqgd->leaf_weight;
1870                }
1871        }
1872
1873out:
1874        spin_unlock_irq(&blkcg->lock);
1875        return ret;
1876}
1877
1878static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1879                          u64 val)
1880{
1881        return __cfq_set_weight(css, val, false, false, false);
1882}
1883
1884static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1885                               struct cftype *cft, u64 val)
1886{
1887        return __cfq_set_weight(css, val, false, false, true);
1888}
1889
1890static int cfqg_print_stat(struct seq_file *sf, void *v)
1891{
1892        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1893                          &blkcg_policy_cfq, seq_cft(sf)->private, false);
1894        return 0;
1895}
1896
1897static int cfqg_print_rwstat(struct seq_file *sf, void *v)
1898{
1899        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1900                          &blkcg_policy_cfq, seq_cft(sf)->private, true);
1901        return 0;
1902}
1903
1904static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1905                                      struct blkg_policy_data *pd, int off)
1906{
1907        u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
1908                                          &blkcg_policy_cfq, off);
1909        return __blkg_prfill_u64(sf, pd, sum);
1910}
1911
1912static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1913                                        struct blkg_policy_data *pd, int off)
1914{
1915        struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
1916                                                        &blkcg_policy_cfq, off);
1917        return __blkg_prfill_rwstat(sf, pd, &sum);
1918}
1919
1920static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
1921{
1922        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1923                          cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1924                          seq_cft(sf)->private, false);
1925        return 0;
1926}
1927
1928static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1929{
1930        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1931                          cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1932                          seq_cft(sf)->private, true);
1933        return 0;
1934}
1935
1936static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1937                               int off)
1938{
1939        u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
1940
1941        return __blkg_prfill_u64(sf, pd, sum >> 9);
1942}
1943
1944static int cfqg_print_stat_sectors(struct seq_file *sf, void *v)
1945{
1946        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1947                          cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false);
1948        return 0;
1949}
1950
1951static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
1952                                         struct blkg_policy_data *pd, int off)
1953{
1954        struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
1955                                        offsetof(struct blkcg_gq, stat_bytes));
1956        u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
1957                atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
1958
1959        return __blkg_prfill_u64(sf, pd, sum >> 9);
1960}
1961
1962static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1963{
1964        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1965                          cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0,
1966                          false);
1967        return 0;
1968}
1969
1970#ifdef CONFIG_DEBUG_BLK_CGROUP
1971static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1972                                      struct blkg_policy_data *pd, int off)
1973{
1974        struct cfq_group *cfqg = pd_to_cfqg(pd);
1975        u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1976        u64 v = 0;
1977
1978        if (samples) {
1979                v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1980                v = div64_u64(v, samples);
1981        }
1982        __blkg_prfill_u64(sf, pd, v);
1983        return 0;
1984}
1985
1986/* print avg_queue_size */
1987static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1988{
1989        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1990                          cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
1991                          0, false);
1992        return 0;
1993}
1994#endif  /* CONFIG_DEBUG_BLK_CGROUP */
1995
1996static struct cftype cfq_blkcg_legacy_files[] = {
1997        /* on root, weight is mapped to leaf_weight */
1998        {
1999                .name = "weight_device",
2000                .flags = CFTYPE_ONLY_ON_ROOT,
2001                .seq_show = cfqg_print_leaf_weight_device,
2002                .write = cfqg_set_leaf_weight_device,
2003        },
2004        {
2005                .name = "weight",
2006                .flags = CFTYPE_ONLY_ON_ROOT,
2007                .seq_show = cfq_print_leaf_weight,
2008                .write_u64 = cfq_set_leaf_weight,
2009        },
2010
2011        /* no such mapping necessary for !roots */
2012        {
2013                .name = "weight_device",
2014                .flags = CFTYPE_NOT_ON_ROOT,
2015                .seq_show = cfqg_print_weight_device,
2016                .write = cfqg_set_weight_device,
2017        },
2018        {
2019                .name = "weight",
2020                .flags = CFTYPE_NOT_ON_ROOT,
2021                .seq_show = cfq_print_weight,
2022                .write_u64 = cfq_set_weight,
2023        },
2024
2025        {
2026                .name = "leaf_weight_device",
2027                .seq_show = cfqg_print_leaf_weight_device,
2028                .write = cfqg_set_leaf_weight_device,
2029        },
2030        {
2031                .name = "leaf_weight",
2032                .seq_show = cfq_print_leaf_weight,
2033                .write_u64 = cfq_set_leaf_weight,
2034        },
2035
2036        /* statistics, covering only the tasks in the cfqg */
2037        {
2038                .name = "time",
2039                .private = offsetof(struct cfq_group, stats.time),
2040                .seq_show = cfqg_print_stat,
2041        },
2042        {
2043                .name = "sectors",
2044                .seq_show = cfqg_print_stat_sectors,
2045        },
2046        {
2047                .name = "io_service_bytes",
2048                .private = (unsigned long)&blkcg_policy_cfq,
2049                .seq_show = blkg_print_stat_bytes,
2050        },
2051        {
2052                .name = "io_serviced",
2053                .private = (unsigned long)&blkcg_policy_cfq,
2054                .seq_show = blkg_print_stat_ios,
2055        },
2056        {
2057                .name = "io_service_time",
2058                .private = offsetof(struct cfq_group, stats.service_time),
2059                .seq_show = cfqg_print_rwstat,
2060        },
2061        {
2062                .name = "io_wait_time",
2063                .private = offsetof(struct cfq_group, stats.wait_time),
2064                .seq_show = cfqg_print_rwstat,
2065        },
2066        {
2067                .name = "io_merged",
2068                .private = offsetof(struct cfq_group, stats.merged),
2069                .seq_show = cfqg_print_rwstat,
2070        },
2071        {
2072                .name = "io_queued",
2073                .private = offsetof(struct cfq_group, stats.queued),
2074                .seq_show = cfqg_print_rwstat,
2075        },
2076
2077        /* the same statistics, covering the cfqg and its descendants */
2078        {
2079                .name = "time_recursive",
2080                .private = offsetof(struct cfq_group, stats.time),
2081                .seq_show = cfqg_print_stat_recursive,
2082        },
2083        {
2084                .name = "sectors_recursive",
2085                .seq_show = cfqg_print_stat_sectors_recursive,
2086        },
2087        {
2088                .name = "io_service_bytes_recursive",
2089                .private = (unsigned long)&blkcg_policy_cfq,
2090                .seq_show = blkg_print_stat_bytes_recursive,
2091        },
2092        {
2093                .name = "io_serviced_recursive",
2094                .private = (unsigned long)&blkcg_policy_cfq,
2095                .seq_show = blkg_print_stat_ios_recursive,
2096        },
2097        {
2098                .name = "io_service_time_recursive",
2099                .private = offsetof(struct cfq_group, stats.service_time),
2100                .seq_show = cfqg_print_rwstat_recursive,
2101        },
2102        {
2103                .name = "io_wait_time_recursive",
2104                .private = offsetof(struct cfq_group, stats.wait_time),
2105                .seq_show = cfqg_print_rwstat_recursive,
2106        },
2107        {
2108                .name = "io_merged_recursive",
2109                .private = offsetof(struct cfq_group, stats.merged),
2110                .seq_show = cfqg_print_rwstat_recursive,
2111        },
2112        {
2113                .name = "io_queued_recursive",
2114                .private = offsetof(struct cfq_group, stats.queued),
2115                .seq_show = cfqg_print_rwstat_recursive,
2116        },
2117#ifdef CONFIG_DEBUG_BLK_CGROUP
2118        {
2119                .name = "avg_queue_size",
2120                .seq_show = cfqg_print_avg_queue_size,
2121        },
2122        {
2123                .name = "group_wait_time",
2124                .private = offsetof(struct cfq_group, stats.group_wait_time),
2125                .seq_show = cfqg_print_stat,
2126        },
2127        {
2128                .name = "idle_time",
2129                .private = offsetof(struct cfq_group, stats.idle_time),
2130                .seq_show = cfqg_print_stat,
2131        },
2132        {
2133                .name = "empty_time",
2134                .private = offsetof(struct cfq_group, stats.empty_time),
2135                .seq_show = cfqg_print_stat,
2136        },
2137        {
2138                .name = "dequeue",
2139                .private = offsetof(struct cfq_group, stats.dequeue),
2140                .seq_show = cfqg_print_stat,
2141        },
2142        {
2143                .name = "unaccounted_time",
2144                .private = offsetof(struct cfq_group, stats.unaccounted_time),
2145                .seq_show = cfqg_print_stat,
2146        },
2147#endif  /* CONFIG_DEBUG_BLK_CGROUP */
2148        { }     /* terminate */
2149};
2150
2151static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v)
2152{
2153        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2154        struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
2155
2156        seq_printf(sf, "default %u\n", cgd->weight);
2157        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device,
2158                          &blkcg_policy_cfq, 0, false);
2159        return 0;
2160}
2161
2162static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of,
2163                                     char *buf, size_t nbytes, loff_t off)
2164{
2165        char *endp;
2166        int ret;
2167        u64 v;
2168
2169        buf = strim(buf);
2170
2171        /* "WEIGHT" or "default WEIGHT" sets the default weight */
2172        v = simple_strtoull(buf, &endp, 0);
2173        if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
2174                ret = __cfq_set_weight(of_css(of), v, true, false, false);
2175                return ret ?: nbytes;
2176        }
2177
2178        /* "MAJ:MIN WEIGHT" */
2179        return __cfqg_set_weight_device(of, buf, nbytes, off, true, false);
2180}
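/*
 * Example usage on the default hierarchy (an illustrative sketch; the
 * device number is an assumption).  All three forms are accepted by
 * the parsing above:
 *
 *   # echo 200           > io.weight    (set the default weight)
 *   # echo "default 200" > io.weight    (equivalent)
 *   # echo "8:16 300"    > io.weight    (per-device override)
 */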
2181
2182static struct cftype cfq_blkcg_files[] = {
2183        {
2184                .name = "weight",
2185                .flags = CFTYPE_NOT_ON_ROOT,
2186                .seq_show = cfq_print_weight_on_dfl,
2187                .write = cfq_set_weight_on_dfl,
2188        },
2189        { }     /* terminate */
2190};
2191
2192#else /* GROUP_IOSCHED */
2193static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
2194                                         struct blkcg *blkcg)
2195{
2196        return cfqd->root_group;
2197}
2198
2199static inline void
2200cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
2201        cfqq->cfqg = cfqg;
2202}
2203
2204#endif /* GROUP_IOSCHED */
2205
2206/*
2207 * cfqd->service_trees holds all pending cfq_queues that have
2208 * requests waiting to be processed, sorted in the order in which
2209 * we will service the queues.
2210 */
2211static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2212                                 bool add_front)
2213{
2214        struct rb_node **p, *parent;
2215        struct cfq_queue *__cfqq;
2216        unsigned long rb_key;
2217        struct cfq_rb_root *st;
2218        int left;
2219        int new_cfqq = 1;
2220
2221        st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
2222        if (cfq_class_idle(cfqq)) {
2223                rb_key = CFQ_IDLE_DELAY;
2224                parent = rb_last(&st->rb);
2225                if (parent && parent != &cfqq->rb_node) {
2226                        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2227                        rb_key += __cfqq->rb_key;
2228                } else
2229                        rb_key += jiffies;
2230        } else if (!add_front) {
2231                /*
2232                 * Get our rb key offset. Subtract any residual slice
2233                 * value carried from last service. A negative resid
2234                 * count indicates slice overrun, and this should position
2235                 * the next service time further away in the tree.
2236                 */
2237                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
2238                rb_key -= cfqq->slice_resid;
2239                cfqq->slice_resid = 0;
2240        } else {
2241                rb_key = -HZ;
2242                __cfqq = cfq_rb_first(st);
2243                rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2244        }
2245
2246        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2247                new_cfqq = 0;
2248                /*
2249                 * same position, nothing more to do
2250                 */
2251                if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
2252                        return;
2253
2254                cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2255                cfqq->service_tree = NULL;
2256        }
2257
2258        left = 1;
2259        parent = NULL;
2260        cfqq->service_tree = st;
2261        p = &st->rb.rb_node;
2262        while (*p) {
2263                parent = *p;
2264                __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2265
2266                /*
2267                 * sort by key, which represents service time.
2268                 */
2269                if (time_before(rb_key, __cfqq->rb_key))
2270                        p = &parent->rb_left;
2271                else {
2272                        p = &parent->rb_right;
2273                        left = 0;
2274                }
2275        }
2276
2277        if (left)
2278                st->left = &cfqq->rb_node;
2279
2280        cfqq->rb_key = rb_key;
2281        rb_link_node(&cfqq->rb_node, parent, p);
2282        rb_insert_color(&cfqq->rb_node, &st->rb);
2283        st->count++;
2284        if (add_front || !new_cfqq)
2285                return;
2286        cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
2287}
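/*
 * To make the rb_key arithmetic concrete (an illustrative sketch): at
 * jiffies=5000, a re-sorted queue whose last slice overran by 20
 * jiffies (slice_resid == -20) gets
 * rb_key = cfq_slice_offset(...) + 5000 + 20, pushing its next service
 * further out, while add_front uses first->rb_key - HZ so the queue
 * sorts ahead of everything currently on the tree.
 */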
2288
2289static struct cfq_queue *
2290cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2291                     sector_t sector, struct rb_node **ret_parent,
2292                     struct rb_node ***rb_link)
2293{
2294        struct rb_node **p, *parent;
2295        struct cfq_queue *cfqq = NULL;
2296
2297        parent = NULL;
2298        p = &root->rb_node;
2299        while (*p) {
2300                struct rb_node **n;
2301
2302                parent = *p;
2303                cfqq = rb_entry(parent, struct cfq_queue, p_node);
2304
2305                /*
2306                 * Sort strictly based on sector.  Smallest to the left,
2307                 * largest to the right.
2308                 */
2309                if (sector > blk_rq_pos(cfqq->next_rq))
2310                        n = &(*p)->rb_right;
2311                else if (sector < blk_rq_pos(cfqq->next_rq))
2312                        n = &(*p)->rb_left;
2313                else
2314                        break;
2315                p = n;
2316                cfqq = NULL;
2317        }
2318
2319        *ret_parent = parent;
2320        if (rb_link)
2321                *rb_link = p;
2322        return cfqq;
2323}
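/*
 * For example (hypothetical sectors): with queues whose next_rq
 * positions are 512 and 4096 in the tree, a lookup for sector 512
 * returns that queue directly, while a lookup for sector 1000 returns
 * NULL and leaves *ret_parent at the last node visited, which
 * cfqq_close() below uses to find the closest neighbour.
 */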
2324
2325static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2326{
2327        struct rb_node **p, *parent;
2328        struct cfq_queue *__cfqq;
2329
2330        if (cfqq->p_root) {
2331                rb_erase(&cfqq->p_node, cfqq->p_root);
2332                cfqq->p_root = NULL;
2333        }
2334
2335        if (cfq_class_idle(cfqq))
2336                return;
2337        if (!cfqq->next_rq)
2338                return;
2339
2340        cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2341        __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2342                                      blk_rq_pos(cfqq->next_rq), &parent, &p);
2343        if (!__cfqq) {
2344                rb_link_node(&cfqq->p_node, parent, p);
2345                rb_insert_color(&cfqq->p_node, cfqq->p_root);
2346        } else
2347                cfqq->p_root = NULL;
2348}
2349
2350/*
2351 * Update cfqq's position in the service tree.
2352 */
2353static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2354{
2355        /*
2356         * Resorting requires the cfqq to be on the RR list already.
2357         */
2358        if (cfq_cfqq_on_rr(cfqq)) {
2359                cfq_service_tree_add(cfqd, cfqq, false);
2360                cfq_prio_tree_add(cfqd, cfqq);
2361        }
2362}
2363
2364/*
2365 * add to busy list of queues for service, trying to be fair in ordering
2366 * the pending list according to last request service
2367 */
2368static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2369{
2370        cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
2371        BUG_ON(cfq_cfqq_on_rr(cfqq));
2372        cfq_mark_cfqq_on_rr(cfqq);
2373        cfqd->busy_queues++;
2374        if (cfq_cfqq_sync(cfqq))
2375                cfqd->busy_sync_queues++;
2376
2377        cfq_resort_rr_list(cfqd, cfqq);
2378}
2379
2380/*
2381 * Called when the cfqq no longer has requests pending, remove it from
2382 * the service tree.
2383 */
2384static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2385{
2386        cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
2387        BUG_ON(!cfq_cfqq_on_rr(cfqq));
2388        cfq_clear_cfqq_on_rr(cfqq);
2389
2390        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2391                cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2392                cfqq->service_tree = NULL;
2393        }
2394        if (cfqq->p_root) {
2395                rb_erase(&cfqq->p_node, cfqq->p_root);
2396                cfqq->p_root = NULL;
2397        }
2398
2399        cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
2400        BUG_ON(!cfqd->busy_queues);
2401        cfqd->busy_queues--;
2402        if (cfq_cfqq_sync(cfqq))
2403                cfqd->busy_sync_queues--;
2404}
2405
2406/*
2407 * rb tree support functions
2408 */
2409static void cfq_del_rq_rb(struct request *rq)
2410{
2411        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2412        const int sync = rq_is_sync(rq);
2413
2414        BUG_ON(!cfqq->queued[sync]);
2415        cfqq->queued[sync]--;
2416
2417        elv_rb_del(&cfqq->sort_list, rq);
2418
2419        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2420                /*
2421                 * Queue will be deleted from service tree when we actually
2422                 * expire it later. Right now just remove it from prio tree
2423                 * as it is empty.
2424                 */
2425                if (cfqq->p_root) {
2426                        rb_erase(&cfqq->p_node, cfqq->p_root);
2427                        cfqq->p_root = NULL;
2428                }
2429        }
2430}
2431
2432static void cfq_add_rq_rb(struct request *rq)
2433{
2434        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2435        struct cfq_data *cfqd = cfqq->cfqd;
2436        struct request *prev;
2437
2438        cfqq->queued[rq_is_sync(rq)]++;
2439
2440        elv_rb_add(&cfqq->sort_list, rq);
2441
2442        if (!cfq_cfqq_on_rr(cfqq))
2443                cfq_add_cfqq_rr(cfqd, cfqq);
2444
2445        /*
2446         * check if this request is a better next-serve candidate
2447         */
2448        prev = cfqq->next_rq;
2449        cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
2450
2451        /*
2452         * adjust priority tree position, if ->next_rq changes
2453         */
2454        if (prev != cfqq->next_rq)
2455                cfq_prio_tree_add(cfqd, cfqq);
2456
2457        BUG_ON(!cfqq->next_rq);
2458}
2459
2460static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
2461{
2462        elv_rb_del(&cfqq->sort_list, rq);
2463        cfqq->queued[rq_is_sync(rq)]--;
2464        cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2465        cfq_add_rq_rb(rq);
2466        cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2467                                 rq->cmd_flags);
2468}
2469
2470static struct request *
2471cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
2472{
2473        struct task_struct *tsk = current;
2474        struct cfq_io_cq *cic;
2475        struct cfq_queue *cfqq;
2476
2477        cic = cfq_cic_lookup(cfqd, tsk->io_context);
2478        if (!cic)
2479                return NULL;
2480
2481        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2482        if (cfqq)
2483                return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
2484
2485        return NULL;
2486}
2487
2488static void cfq_activate_request(struct request_queue *q, struct request *rq)
2489{
2490        struct cfq_data *cfqd = q->elevator->elevator_data;
2491
2492        cfqd->rq_in_driver++;
2493        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
2494                                                cfqd->rq_in_driver);
2495
2496        cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2497}
2498
2499static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
2500{
2501        struct cfq_data *cfqd = q->elevator->elevator_data;
2502
2503        WARN_ON(!cfqd->rq_in_driver);
2504        cfqd->rq_in_driver--;
2505        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
2506                                                cfqd->rq_in_driver);
2507}
2508
2509static void cfq_remove_request(struct request *rq)
2510{
2511        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2512
2513        if (cfqq->next_rq == rq)
2514                cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
2515
2516        list_del_init(&rq->queuelist);
2517        cfq_del_rq_rb(rq);
2518
2519        cfqq->cfqd->rq_queued--;
2520        cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2521        if (rq->cmd_flags & REQ_PRIO) {
2522                WARN_ON(!cfqq->prio_pending);
2523                cfqq->prio_pending--;
2524        }
2525}
2526
2527static int cfq_merge(struct request_queue *q, struct request **req,
2528                     struct bio *bio)
2529{
2530        struct cfq_data *cfqd = q->elevator->elevator_data;
2531        struct request *__rq;
2532
2533        __rq = cfq_find_rq_fmerge(cfqd, bio);
2534        if (__rq && elv_rq_merge_ok(__rq, bio)) {
2535                *req = __rq;
2536                return ELEVATOR_FRONT_MERGE;
2537        }
2538
2539        return ELEVATOR_NO_MERGE;
2540}
2541
2542static void cfq_merged_request(struct request_queue *q, struct request *req,
2543                               int type)
2544{
2545        if (type == ELEVATOR_FRONT_MERGE) {
2546                struct cfq_queue *cfqq = RQ_CFQQ(req);
2547
2548                cfq_reposition_rq_rb(cfqq, req);
2549        }
2550}
2551
2552static void cfq_bio_merged(struct request_queue *q, struct request *req,
2553                                struct bio *bio)
2554{
2555        cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
2556}
2557
2558static void
2559cfq_merged_requests(struct request_queue *q, struct request *rq,
2560                    struct request *next)
2561{
2562        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2563        struct cfq_data *cfqd = q->elevator->elevator_data;
2564
2565        /*
2566         * reposition in fifo if next is older than rq
2567         */
2568        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2569            time_before(next->fifo_time, rq->fifo_time) &&
2570            cfqq == RQ_CFQQ(next)) {
2571                list_move(&rq->queuelist, &next->queuelist);
2572                rq->fifo_time = next->fifo_time;
2573        }
2574
2575        if (cfqq->next_rq == next)
2576                cfqq->next_rq = rq;
2577        cfq_remove_request(next);
2578        cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
2579
2580        cfqq = RQ_CFQQ(next);
2581        /*
2582         * All requests of this queue were merged into other queues, so
2583         * delete it from the service tree. If it's the active_queue,
2584         * cfq_dispatch_requests() will choose whether to expire it or idle.
2585         */
2586        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2587            cfqq != cfqd->active_queue)
2588                cfq_del_cfqq_rr(cfqd, cfqq);
2589}
2590
2591static int cfq_allow_merge(struct request_queue *q, struct request *rq,
2592                           struct bio *bio)
2593{
2594        struct cfq_data *cfqd = q->elevator->elevator_data;
2595        struct cfq_io_cq *cic;
2596        struct cfq_queue *cfqq;
2597
2598        /*
2599         * Disallow merge of a sync bio into an async request.
2600         */
2601        if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2602                return false;
2603
2604        /*
2605         * Lookup the cfqq that this bio will be queued with and allow
2606         * merge only if rq is queued there.
2607         */
2608        cic = cfq_cic_lookup(cfqd, current->io_context);
2609        if (!cic)
2610                return false;
2611
2612        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2613        return cfqq == RQ_CFQQ(rq);
2614}
2615
2616static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2617{
2618        del_timer(&cfqd->idle_slice_timer);
2619        cfqg_stats_update_idle_time(cfqq->cfqg);
2620}
2621
2622static void __cfq_set_active_queue(struct cfq_data *cfqd,
2623                                   struct cfq_queue *cfqq)
2624{
2625        if (cfqq) {
2626                cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2627                                cfqd->serving_wl_class, cfqd->serving_wl_type);
2628                cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2629                cfqq->slice_start = 0;
2630                cfqq->dispatch_start = jiffies;
2631                cfqq->allocated_slice = 0;
2632                cfqq->slice_end = 0;
2633                cfqq->slice_dispatch = 0;
2634                cfqq->nr_sectors = 0;
2635
2636                cfq_clear_cfqq_wait_request(cfqq);
2637                cfq_clear_cfqq_must_dispatch(cfqq);
2638                cfq_clear_cfqq_must_alloc_slice(cfqq);
2639                cfq_clear_cfqq_fifo_expire(cfqq);
2640                cfq_mark_cfqq_slice_new(cfqq);
2641
2642                cfq_del_timer(cfqd, cfqq);
2643        }
2644
2645        cfqd->active_queue = cfqq;
2646}
2647
2648/*
2649 * current cfqq expired its slice (or was too idle), select new one
2650 */
2651static void
2652__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2653                    bool timed_out)
2654{
2655        cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2656
2657        if (cfq_cfqq_wait_request(cfqq))
2658                cfq_del_timer(cfqd, cfqq);
2659
2660        cfq_clear_cfqq_wait_request(cfqq);
2661        cfq_clear_cfqq_wait_busy(cfqq);
2662
2663        /*
2664         * If this cfqq is shared between multiple processes, check to
2665         * make sure that those processes are still issuing I/Os within
2666         * the mean seek distance.  If not, it may be time to break the
2667         * queues apart again.
2668         */
2669        if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2670                cfq_mark_cfqq_split_coop(cfqq);
2671
2672        /*
2673         * store what was left of this slice, if the queue idled/timed out
2674         */
2675        if (timed_out) {
2676                if (cfq_cfqq_slice_new(cfqq))
2677                        cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2678                else
2679                        cfqq->slice_resid = cfqq->slice_end - jiffies;
2680                cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2681        }
2682
2683        cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2684
2685        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2686                cfq_del_cfqq_rr(cfqd, cfqq);
2687
2688        cfq_resort_rr_list(cfqd, cfqq);
2689
2690        if (cfqq == cfqd->active_queue)
2691                cfqd->active_queue = NULL;
2692
2693        if (cfqd->active_cic) {
2694                put_io_context(cfqd->active_cic->icq.ioc);
2695                cfqd->active_cic = NULL;
2696        }
2697}
2698
2699static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2700{
2701        struct cfq_queue *cfqq = cfqd->active_queue;
2702
2703        if (cfqq)
2704                __cfq_slice_expired(cfqd, cfqq, timed_out);
2705}
2706
2707/*
2708 * Get next queue for service. Unless we have a queue preemption,
2709 * we'll simply select the first cfqq in the service tree.
2710 */
2711static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2712{
2713        struct cfq_rb_root *st = st_for(cfqd->serving_group,
2714                        cfqd->serving_wl_class, cfqd->serving_wl_type);
2715
2716        if (!cfqd->rq_queued)
2717                return NULL;
2718
2719        /* There is nothing to dispatch */
2720        if (!st)
2721                return NULL;
2722        if (RB_EMPTY_ROOT(&st->rb))
2723                return NULL;
2724        return cfq_rb_first(st);
2725}
2726
2727static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2728{
2729        struct cfq_group *cfqg;
2730        struct cfq_queue *cfqq;
2731        int i, j;
2732        struct cfq_rb_root *st;
2733
2734        if (!cfqd->rq_queued)
2735                return NULL;
2736
2737        cfqg = cfq_get_next_cfqg(cfqd);
2738        if (!cfqg)
2739                return NULL;
2740
2741        for_each_cfqg_st(cfqg, i, j, st)
2742                if ((cfqq = cfq_rb_first(st)) != NULL)
2743                        return cfqq;
2744        return NULL;
2745}
2746
2747/*
2748 * Get and set a new active queue for service.
2749 */
2750static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2751                                              struct cfq_queue *cfqq)
2752{
2753        if (!cfqq)
2754                cfqq = cfq_get_next_queue(cfqd);
2755
2756        __cfq_set_active_queue(cfqd, cfqq);
2757        return cfqq;
2758}
2759
2760static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2761                                          struct request *rq)
2762{
2763        if (blk_rq_pos(rq) >= cfqd->last_position)
2764                return blk_rq_pos(rq) - cfqd->last_position;
2765        else
2766                return cfqd->last_position - blk_rq_pos(rq);
2767}
2768
2769static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2770                               struct request *rq)
2771{
2772        return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2773}
2774
2775static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2776                                    struct cfq_queue *cur_cfqq)
2777{
2778        struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2779        struct rb_node *parent, *node;
2780        struct cfq_queue *__cfqq;
2781        sector_t sector = cfqd->last_position;
2782
2783        if (RB_EMPTY_ROOT(root))
2784                return NULL;
2785
2786        /*
2787         * First, if we find a request starting at the end of the last
2788         * request, choose it.
2789         */
2790        __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2791        if (__cfqq)
2792                return __cfqq;
2793
2794        /*
2795         * If the exact sector wasn't found, the parent of the NULL leaf
2796         * will contain the closest sector.
2797         */
2798        __cfqq = rb_entry(parent, struct cfq_queue, p_node);
2799        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2800                return __cfqq;
2801
2802        if (blk_rq_pos(__cfqq->next_rq) < sector)
2803                node = rb_next(&__cfqq->p_node);
2804        else
2805                node = rb_prev(&__cfqq->p_node);
2806        if (!node)
2807                return NULL;
2808
2809        __cfqq = rb_entry(node, struct cfq_queue, p_node);
2810        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2811                return __cfqq;
2812
2813        return NULL;
2814}
2815
2816/*
2817 * cfqd - obvious
2818 * cur_cfqq - passed in so that we don't decide that the current queue is
2819 *            closely cooperating with itself.
2820 *
2821 * So, basically we're assuming that cur_cfqq has dispatched at least
2822 * one request, and that cfqd->last_position reflects a position on the disk
2823 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2824 * assumption.
2825 */
2826static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2827                                              struct cfq_queue *cur_cfqq)
2828{
2829        struct cfq_queue *cfqq;
2830
2831        if (cfq_class_idle(cur_cfqq))
2832                return NULL;
2833        if (!cfq_cfqq_sync(cur_cfqq))
2834                return NULL;
2835        if (CFQQ_SEEKY(cur_cfqq))
2836                return NULL;
2837
2838        /*
2839         * Don't search priority tree if it's the only queue in the group.
2840         */
2841        if (cur_cfqq->cfqg->nr_cfqq == 1)
2842                return NULL;
2843
2844        /*
2845         * We should notice if some of the queues are cooperating, e.g.
2846         * working closely on the same area of the disk. In that case,
2847         * we can group them together and avoid wasting time idling.
2848         */
2849        cfqq = cfqq_close(cfqd, cur_cfqq);
2850        if (!cfqq)
2851                return NULL;
2852
2853        /* If new queue belongs to different cfq_group, don't choose it */
2854        if (cur_cfqq->cfqg != cfqq->cfqg)
2855                return NULL;
2856
2857        /*
2858         * It only makes sense to merge sync queues.
2859         */
2860        if (!cfq_cfqq_sync(cfqq))
2861                return NULL;
2862        if (CFQQ_SEEKY(cfqq))
2863                return NULL;
2864
2865        /*
2866         * Do not merge queues of different priority classes
2867         */
2868        if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2869                return NULL;
2870
2871        return cfqq;
2872}
2873
2874/*
2875 * Determine whether we should enforce the idle window for this queue.
2876 */
2878static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2879{
2880        enum wl_class_t wl_class = cfqq_class(cfqq);
2881        struct cfq_rb_root *st = cfqq->service_tree;
2882
2883        BUG_ON(!st);
2884        BUG_ON(!st->count);
2885
2886        if (!cfqd->cfq_slice_idle)
2887                return false;
2888
2889        /* We never idle for idle-class queues. */
2890        if (wl_class == IDLE_WORKLOAD)
2891                return false;
2892
2893        /* We do for queues marked with the idle window flag. */
2894        if (cfq_cfqq_idle_window(cfqq) &&
2895           !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2896                return true;
2897
2898        /*
2899         * Otherwise, we idle only if the queue is the last one
2900         * in its service tree.
2901         */
2902        if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2903           !cfq_io_thinktime_big(cfqd, &st->ttime, false))
2904                return true;
2905        cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
2906        return false;
2907}
2908
2909static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2910{
2911        struct cfq_queue *cfqq = cfqd->active_queue;
2912        struct cfq_rb_root *st = cfqq->service_tree;
2913        struct cfq_io_cq *cic;
2914        unsigned long sl, group_idle = 0;
2915
2916        /*
2917         * On an SSD without seek penalty, disable idling. But only do so
2918         * for devices that support queueing, otherwise we still have a
2919         * problem with sync vs async workloads.
2920         */
2921        if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2922                return;
2923
2924        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2925        WARN_ON(cfq_cfqq_slice_new(cfqq));
2926
2927        /*
2928         * idle is disabled, either manually or by past process history
2929         */
2930        if (!cfq_should_idle(cfqd, cfqq)) {
2931                /* no queue idling. Check for group idling */
2932                if (cfqd->cfq_group_idle)
2933                        group_idle = cfqd->cfq_group_idle;
2934                else
2935                        return;
2936        }
2937
2938        /*
2939         * still active requests from this queue, don't idle
2940         */
2941        if (cfqq->dispatched)
2942                return;
2943
2944        /*
2945         * task has exited, don't wait
2946         */
2947        cic = cfqd->active_cic;
2948        if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2949                return;
2950
2951        /*
2952         * If our average think time is larger than the remaining time
2953         * slice, then don't idle. This avoids overrunning the allotted
2954         * time slice.
2955         */
2956        if (sample_valid(cic->ttime.ttime_samples) &&
2957            (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2958                cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2959                             cic->ttime.ttime_mean);
2960                return;
2961        }
2962
2963        /*
2964         * If there are other queues in the group, or this is the only
2965         * group and its think time is too big, don't do group idle.
2966         */
2967        if (group_idle &&
2968            (cfqq->cfqg->nr_cfqq > 1 ||
2969             cfq_io_thinktime_big(cfqd, &st->ttime, true)))
2970                return;
2971
2972        cfq_mark_cfqq_wait_request(cfqq);
2973
2974        if (group_idle)
2975                sl = cfqd->cfq_group_idle;
2976        else
2977                sl = cfqd->cfq_slice_idle;
2978
2979        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2980        cfqg_stats_set_start_idle_time(cfqq->cfqg);
2981        cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2982                        group_idle ? 1 : 0);
2983}
2984
2985/*
2986 * Move request from internal lists to the request queue dispatch list.
2987 */
2988static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2989{
2990        struct cfq_data *cfqd = q->elevator->elevator_data;
2991        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2992
2993        cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2994
2995        cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2996        cfq_remove_request(rq);
2997        cfqq->dispatched++;
2998        (RQ_CFQG(rq))->dispatched++;
2999        elv_dispatch_sort(q, rq);
3000
3001        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
3002        cfqq->nr_sectors += blk_rq_sectors(rq);
3003}
3004
3005/*
3006 * return the expired entry, or NULL to just start from scratch in the rbtree
3007 */
3008static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
3009{
3010        struct request *rq = NULL;
3011
3012        if (cfq_cfqq_fifo_expire(cfqq))
3013                return NULL;
3014
3015        cfq_mark_cfqq_fifo_expire(cfqq);
3016
3017        if (list_empty(&cfqq->fifo))
3018                return NULL;
3019
3020        rq = rq_entry_fifo(cfqq->fifo.next);
3021        if (time_before(jiffies, rq->fifo_time))
3022                rq = NULL;
3023
3024        cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
3025        return rq;
3026}
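/*
 * In other words: a request whose FIFO deadline (rq->fifo_time, set at
 * insertion) is still in the future is not returned here, and the
 * fifo_expire flag above limits us to at most one FIFO expiry check
 * per slice.
 */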
3027
3028static inline int
3029cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3030{
3031        const int base_rq = cfqd->cfq_slice_async_rq;
3032
3033        WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
3034
3035        return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
3036}
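/*
 * Worked example, assuming the default cfq_slice_async_rq of 2: a
 * best-effort ioprio 0 queue may grow its dispatch allowance up to
 * 2 * 2 * (8 - 0) = 32 requests, while the lowest priority 7 is
 * capped at 2 * 2 * (8 - 7) = 4 (IOPRIO_BE_NR being 8).
 */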
3037
3038/*
3039 * Must be called with the queue_lock held.
3040 */
3041static int cfqq_process_refs(struct cfq_queue *cfqq)
3042{
3043        int process_refs, io_refs;
3044
3045        io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
3046        process_refs = cfqq->ref - io_refs;
3047        BUG_ON(process_refs < 0);
3048        return process_refs;
3049}
3050
3051static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
3052{
3053        int process_refs, new_process_refs;
3054        struct cfq_queue *__cfqq;
3055
3056        /*
3057         * If there are no process references on the new_cfqq, then it is
3058         * unsafe to follow the ->new_cfqq chain as other cfqqs in the
3059         * chain may have dropped their last reference (not just their
3060         * last process reference).
3061         */
3062        if (!cfqq_process_refs(new_cfqq))
3063                return;
3064
3065        /* Avoid a circular list and skip interim queue merges */
3066        while ((__cfqq = new_cfqq->new_cfqq)) {
3067                if (__cfqq == cfqq)
3068                        return;
3069                new_cfqq = __cfqq;
3070        }
3071
3072        process_refs = cfqq_process_refs(cfqq);
3073        new_process_refs = cfqq_process_refs(new_cfqq);
3074        /*
3075         * If the process for the cfqq has gone away, there is no
3076         * sense in merging the queues.
3077         */
3078        if (process_refs == 0 || new_process_refs == 0)
3079                return;
3080
3081        /*
3082         * Merge in the direction of the lesser amount of work.
3083         */
3084        if (new_process_refs >= process_refs) {
3085                cfqq->new_cfqq = new_cfqq;
3086                new_cfqq->ref += process_refs;
3087        } else {
3088                new_cfqq->new_cfqq = cfqq;
3089                cfqq->ref += new_process_refs;
3090        }
3091}
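
/*
 * Example of the merge direction above: if cfqq has 3 process
 * references and new_cfqq has 5, we set cfqq->new_cfqq = new_cfqq and
 * add 3 to new_cfqq->ref, so the queue referenced by fewer processes
 * is the one folded into the busier queue.
 */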
3092
3093static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
3094                        struct cfq_group *cfqg, enum wl_class_t wl_class)
3095{
3096        struct cfq_queue *queue;
3097        int i;
3098        bool key_valid = false;
3099        unsigned long lowest_key = 0;
3100        enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
3101
3102        for (i = 0; i <= SYNC_WORKLOAD; ++i) {
3103                /* select the one with lowest rb_key */
3104                queue = cfq_rb_first(st_for(cfqg, wl_class, i));
3105                if (queue &&
3106                    (!key_valid || time_before(queue->rb_key, lowest_key))) {
3107                        lowest_key = queue->rb_key;
3108                        cur_best = i;
3109                        key_valid = true;
3110                }
3111        }
3112
3113        return cur_best;
3114}
3115
3116static void
3117choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
3118{
3119        unsigned slice;
3120        unsigned count;
3121        struct cfq_rb_root *st;
3122        unsigned group_slice;
3123        enum wl_class_t original_class = cfqd->serving_wl_class;
3124
3125        /* Choose next priority. RT > BE > IDLE */
3126        if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
3127                cfqd->serving_wl_class = RT_WORKLOAD;
3128        else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
3129                cfqd->serving_wl_class = BE_WORKLOAD;
3130        else {
3131                cfqd->serving_wl_class = IDLE_WORKLOAD;
3132                cfqd->workload_expires = jiffies + 1;
3133                return;
3134        }
3135
3136        if (original_class != cfqd->serving_wl_class)
3137                goto new_workload;
3138
3139        /*
3140         * For RT and BE, we also have to choose the type
3141         * (SYNC, SYNC_NOIDLE, ASYNC) and compute a workload
3142         * expiration time.
3143         */
3144        st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3145        count = st->count;
3146
3147        /*
3148         * check workload expiration, and that we still have other queues ready
3149         */
3150        if (count && !time_after(jiffies, cfqd->workload_expires))
3151                return;
3152
3153new_workload:
3154        /* otherwise select new workload type */
3155        cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
3156                                        cfqd->serving_wl_class);
3157        st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3158        count = st->count;
3159
3160        /*
3161         * the workload slice is computed as a fraction of target latency
3162         * proportional to the number of queues in that workload, over
3163         * all the queues in the same priority class
3164         */
3165        group_slice = cfq_group_slice(cfqd, cfqg);
3166
3167        slice = group_slice * count /
3168                max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
3169                      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
3170                                        cfqg));
3171
3172        if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
3173                unsigned int tmp;
3174
3175                /*
3176                 * Async queues are currently system wide. Just taking the
3177                 * proportion of queues within the same group will lead to a
3178                 * higher async ratio system wide, as the root group is
3179                 * generally going to have a higher weight. A more accurate
3180                 * approach would be to calculate the system wide async/sync ratio.
3181                 */
3182                tmp = cfqd->cfq_target_latency *
3183                        cfqg_busy_async_queues(cfqd, cfqg);
3184                tmp = tmp/cfqd->busy_queues;
3185                slice = min_t(unsigned, slice, tmp);
3186
3187                /* async workload slice is scaled down according to
3188                 * the sync/async slice ratio. */
3189                slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
3190        } else
3191                /* sync workload slice is at least 2 * cfq_slice_idle */
3192                slice = max(slice, 2 * cfqd->cfq_slice_idle);
3193
3194        slice = max_t(unsigned, slice, CFQ_MIN_TT);
3195        cfq_log(cfqd, "workload slice:%d", slice);
3196        cfqd->workload_expires = jiffies + slice;
3197}
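
/*
 * Worked example of the slice computation above, with illustrative
 * numbers: given a group slice of 300ms (the default target latency),
 * 2 queues on the chosen service tree and 6 busy queues of that class
 * in the group, slice = 300 * 2 / 6 = 100ms.  A sync workload is then
 * raised to at least 2 * cfq_slice_idle, and everything to CFQ_MIN_TT.
 */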
3198
3199static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
3200{
3201        struct cfq_rb_root *st = &cfqd->grp_service_tree;
3202        struct cfq_group *cfqg;
3203
3204        if (RB_EMPTY_ROOT(&st->rb))
3205                return NULL;
3206        cfqg = cfq_rb_first_group(st);
3207        update_min_vdisktime(st);
3208        return cfqg;
3209}
3210
3211static void cfq_choose_cfqg(struct cfq_data *cfqd)
3212{
3213        struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3214
3215        cfqd->serving_group = cfqg;
3216
3217        /* Restore the workload type data */
3218        if (cfqg->saved_wl_slice) {
3219                cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
3220                cfqd->serving_wl_type = cfqg->saved_wl_type;
3221                cfqd->serving_wl_class = cfqg->saved_wl_class;
3222        } else
3223                cfqd->workload_expires = jiffies - 1;
3224
3225        choose_wl_class_and_type(cfqd, cfqg);
3226}
3227
3228/*
3229 * Select a queue for service. If we have a current active queue,
3230 * check whether to continue servicing it, or retrieve and set a new one.
3231 */
3232static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
3233{
3234        struct cfq_queue *cfqq, *new_cfqq = NULL;
3235
3236        cfqq = cfqd->active_queue;
3237        if (!cfqq)
3238                goto new_queue;
3239
3240        if (!cfqd->rq_queued)
3241                return NULL;
3242
3243        /*
3244         * We were waiting for group to get backlogged. Expire the queue
3245         */
3246        if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3247                goto expire;
3248
3249        /*
3250         * The active queue has run out of time, expire it and select new.
3251         */
3252        if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3253                /*
3254                 * If slice had not expired at the completion of last request
3255                 * we might not have turned on wait_busy flag. Don't expire
3256                 * the queue yet. Allow the group to get backlogged.
3257                 *
3258                 * The very fact that we have used up the slice means we
3259                 * have been idling all along on this queue, so it should be
3260                 * ok to wait for this request to complete.
3261                 */
3262                if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3263                    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3264                        cfqq = NULL;
3265                        goto keep_queue;
3266                } else
3267                        goto check_group_idle;
3268        }
3269
3270        /*
3271         * The active queue has requests and isn't expired, allow it to
3272         * dispatch.
3273         */
3274        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3275                goto keep_queue;
3276
3277        /*
3278         * If another queue has a request waiting within our mean seek
3279         * distance, let it run.  The expire code will check for close
3280         * cooperators and put the close queue at the front of the service
3281         * tree.  If possible, merge the expiring queue with the new cfqq.
3282         */
3283        new_cfqq = cfq_close_cooperator(cfqd, cfqq);
3284        if (new_cfqq) {
3285                if (!cfqq->new_cfqq)
3286                        cfq_setup_merge(cfqq, new_cfqq);
3287                goto expire;
3288        }
3289
3290        /*
3291         * No requests pending. If the active queue still has requests in
3292         * flight or is idling for a new request, allow either of these
3293         * conditions to happen (or time out) before selecting a new queue.
3294         */
3295        if (timer_pending(&cfqd->idle_slice_timer)) {
3296                cfqq = NULL;
3297                goto keep_queue;
3298        }
3299
3300        /*
3301         * This is a deep seek queue, but the device is much faster than
3302         * the queue can deliver requests, so don't idle.
3303         */
3304        if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3305            (cfq_cfqq_slice_new(cfqq) ||
3306            (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
3307                cfq_clear_cfqq_deep(cfqq);
3308                cfq_clear_cfqq_idle_window(cfqq);
3309        }
3310
3311        if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3312                cfqq = NULL;
3313                goto keep_queue;
3314        }
3315
3316        /*
3317         * If group idle is enabled and there are requests dispatched from
3318         * this group, wait for requests to complete.
3319         */
3320check_group_idle:
3321        if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3322            cfqq->cfqg->dispatched &&
3323            !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
3324                cfqq = NULL;
3325                goto keep_queue;
3326        }
3327
3328expire:
3329        cfq_slice_expired(cfqd, 0);
3330new_queue:
3331        /*
3332         * Current queue expired. Check if we have to switch to a new
3333         * service tree
3334         */
3335        if (!new_cfqq)
3336                cfq_choose_cfqg(cfqd);
3337
3338        cfqq = cfq_set_active_queue(cfqd, new_cfqq);
3339keep_queue:
3340        return cfqq;
3341}
3342
3343static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
3344{
3345        int dispatched = 0;
3346
3347        while (cfqq->next_rq) {
3348                cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3349                dispatched++;
3350        }
3351
3352        BUG_ON(!list_empty(&cfqq->fifo));
3353
3354        /* By default cfqq is not expired if it is empty. Do it explicitly */
3355        __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
3356        return dispatched;
3357}
3358
3359/*
3360 * Drain our current requests. Used for barriers and when switching
3361 * io schedulers on-the-fly.
3362 */
3363static int cfq_forced_dispatch(struct cfq_data *cfqd)
3364{
3365        struct cfq_queue *cfqq;
3366        int dispatched = 0;
3367
3368        /* Expire the timeslice of the current active queue first */
3369        cfq_slice_expired(cfqd, 0);
3370        while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3371                __cfq_set_active_queue(cfqd, cfqq);
3372                dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3373        }
3374
3375        BUG_ON(cfqd->busy_queues);
3376
3377        cfq_log(cfqd, "forced_dispatch=%d", dispatched);
3378        return dispatched;
3379}
3380
3381static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3382        struct cfq_queue *cfqq)
3383{
3384        /* the queue hasn't finished any request, can't estimate */
3385        if (cfq_cfqq_slice_new(cfqq))
3386                return true;
3387        if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3388                cfqq->slice_end))
3389                return true;
3390
3391        return false;
3392}
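
/*
 * Example of the estimate above, assuming the default 8ms
 * cfq_slice_idle: with 4 requests dispatched, the slice counts as
 * "used soon" when jiffies + 4 * 8ms reaches past slice_end, i.e.
 * when the requests already in flight are expected to eat the rest
 * of the slice on their own.
 */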
3393
3394static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3395{
3396        unsigned int max_dispatch;
3397
3398        /*
3399         * Drain async requests before we start sync IO
3400         */
3401        if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
3402                return false;
3403
3404        /*
3405         * If this is an async queue and we have sync IO in flight, let it wait
3406         */
3407        if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
3408                return false;
3409
3410        max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
3411        if (cfq_class_idle(cfqq))
3412                max_dispatch = 1;
3413
3414        /*
3415         * Does this cfqq already have too much IO in flight?
3416         */
3417        if (cfqq->dispatched >= max_dispatch) {
3418                bool promote_sync = false;
3419                /*
3420                 * idle queue must always only have a single IO in flight
3421                 */
3422                if (cfq_class_idle(cfqq))
3423                        return false;
3424
3425                /*
3426                 * If there is only one sync queue,
3427                 * we can ignore the async queues here and give the sync
3428                 * queue no dispatch limit. The reason is that a sync queue
3429                 * can preempt an async queue, so limiting the sync queue
3430                 * doesn't make sense. This is useful for aiostress-style tests.
3431                 */
3432                if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3433                        promote_sync = true;
3434
3435                /*
3436                 * We have other queues, don't allow more IO from this one
3437                 */
3438                if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3439                                !promote_sync)
3440                        return false;
3441
3442                /*
3443                 * Sole queue user, no limit
3444                 */
3445                if (cfqd->busy_queues == 1 || promote_sync)
3446                        max_dispatch = -1;
3447                else
3448                        /*
3449                         * Normally we start throttling cfqq when cfq_quantum/2
3450                         * requests have been dispatched. But we can drive
3451                         * deeper queue depths at the beginning of the slice,
3452                         * subject to the upper limit of cfq_quantum.
3453                         */
3454                        max_dispatch = cfqd->cfq_quantum;
3455        }
3456
3457        /*
3458         * Async queues must wait a bit before being allowed dispatch.
3459         * We also ramp up the dispatch depth gradually for async IO,
3460         * based on the last sync IO we serviced
3461         */
3462        if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
3463                unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
3464                unsigned int depth;
3465
3466                depth = last_sync / cfqd->cfq_slice[1];
3467                if (!depth && !cfqq->dispatched)
3468                        depth = 1;
3469                if (depth < max_dispatch)
3470                        max_dispatch = depth;
3471        }
3472
3473        /*
3474         * If we're below the current max, allow a dispatch
3475         */
3476        return cfqq->dispatched < max_dispatch;
3477}
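
/*
 * Example of the async ramp-up above, assuming default tunables: if
 * the last delayed sync completion was 300ms ago and cfq_slice[1] is
 * 100ms, then depth = 300 / 100 = 3, capping an async queue at 3
 * requests in flight even when max_dispatch is larger.  Right after a
 * delayed sync IO the computed depth is 0, so at most one request is
 * let through via the !cfqq->dispatched special case.
 */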
3478
3479/*
3480 * Dispatch a request from cfqq, moving it to the request queue
3481 * dispatch list.
3482 */
3483static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3484{
3485        struct request *rq;
3486
3487        BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3488
3489        if (!cfq_may_dispatch(cfqd, cfqq))
3490                return false;
3491
3492        /*
3493         * follow the expired path, else take the next available request
3494         */
3495        rq = cfq_check_fifo(cfqq);
3496        if (!rq)
3497                rq = cfqq->next_rq;
3498
3499        /*
3500         * insert request into driver dispatch list
3501         */
3502        cfq_dispatch_insert(cfqd->queue, rq);
3503
3504        if (!cfqd->active_cic) {
3505                struct cfq_io_cq *cic = RQ_CIC(rq);
3506
3507                atomic_long_inc(&cic->icq.ioc->refcount);
3508                cfqd->active_cic = cic;
3509        }
3510
3511        return true;
3512}
3513
3514/*
3515 * Find the cfqq that we need to service and move a request from that to the
3516 * dispatch list
3517 */
3518static int cfq_dispatch_requests(struct request_queue *q, int force)
3519{
3520        struct cfq_data *cfqd = q->elevator->elevator_data;
3521        struct cfq_queue *cfqq;
3522
3523        if (!cfqd->busy_queues)
3524                return 0;
3525
3526        if (unlikely(force))
3527                return cfq_forced_dispatch(cfqd);
3528
3529        cfqq = cfq_select_queue(cfqd);
3530        if (!cfqq)
3531                return 0;
3532
3533        /*
3534         * Dispatch a request from this cfqq, if it is allowed
3535         */
3536        if (!cfq_dispatch_request(cfqd, cfqq))
3537                return 0;
3538
3539        cfqq->slice_dispatch++;
3540        cfq_clear_cfqq_must_dispatch(cfqq);
3541
3542        /*
3543         * expire an async queue immediately if it has used up its slice. an
3544         * idle queue always expires after 1 dispatch round.
3545         */
3546        if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3547            cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3548            cfq_class_idle(cfqq))) {
3549                cfqq->slice_end = jiffies + 1;
3550                cfq_slice_expired(cfqd, 0);
3551        }
3552
3553        cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
3554        return 1;
3555}
3556
3557/*
3558 * task holds one reference to the queue, dropped when task exits. each rq
3559 * in-flight on this queue also holds a reference, dropped when rq is freed.
3560 *
3561 * Each cfq queue took a reference on the parent group. Drop it now.
3562 * queue lock must be held here.
3563 */
3564static void cfq_put_queue(struct cfq_queue *cfqq)
3565{
3566        struct cfq_data *cfqd = cfqq->cfqd;
3567        struct cfq_group *cfqg;
3568
3569        BUG_ON(cfqq->ref <= 0);
3570
3571        cfqq->ref--;
3572        if (cfqq->ref)
3573                return;
3574
3575        cfq_log_cfqq(cfqd, cfqq, "put_queue");
3576        BUG_ON(rb_first(&cfqq->sort_list));
3577        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3578        cfqg = cfqq->cfqg;
3579
3580        if (unlikely(cfqd->active_queue == cfqq)) {
3581                __cfq_slice_expired(cfqd, cfqq, 0);
3582                cfq_schedule_dispatch(cfqd);
3583        }
3584
3585        BUG_ON(cfq_cfqq_on_rr(cfqq));
3586        kmem_cache_free(cfq_pool, cfqq);
3587        cfqg_put(cfqg);
3588}
3589
3590static void cfq_put_cooperator(struct cfq_queue *cfqq)
3591{
3592        struct cfq_queue *__cfqq, *next;
3593
3594        /*
3595         * If this queue was scheduled to merge with another queue, be
3596         * sure to drop the reference taken on that queue (and others in
3597         * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
3598         */
3599        __cfqq = cfqq->new_cfqq;
3600        while (__cfqq) {
3601                if (__cfqq == cfqq) {
3602                        WARN(1, "cfqq->new_cfqq loop detected\n");
3603                        break;
3604                }
3605                next = __cfqq->new_cfqq;
3606                cfq_put_queue(__cfqq);
3607                __cfqq = next;
3608        }
3609}
3610
3611static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3612{
3613        if (unlikely(cfqq == cfqd->active_queue)) {
3614                __cfq_slice_expired(cfqd, cfqq, 0);
3615                cfq_schedule_dispatch(cfqd);
3616        }
3617
3618        cfq_put_cooperator(cfqq);
3619
3620        cfq_put_queue(cfqq);
3621}
3622
3623static void cfq_init_icq(struct io_cq *icq)
3624{
3625        struct cfq_io_cq *cic = icq_to_cic(icq);
3626
3627        cic->ttime.last_end_request = jiffies;
3628}
3629
3630static void cfq_exit_icq(struct io_cq *icq)
3631{
3632        struct cfq_io_cq *cic = icq_to_cic(icq);
3633        struct cfq_data *cfqd = cic_to_cfqd(cic);
3634
3635        if (cic_to_cfqq(cic, false)) {
3636                cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
3637                cic_set_cfqq(cic, NULL, false);
3638        }
3639
3640        if (cic_to_cfqq(cic, true)) {
3641                cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
3642                cic_set_cfqq(cic, NULL, true);
3643        }
3644}
3645
3646static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3647{
3648        struct task_struct *tsk = current;
3649        int ioprio_class;
3650
3651        if (!cfq_cfqq_prio_changed(cfqq))
3652                return;
3653
3654        ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3655        switch (ioprio_class) {
3656        default:
3657                printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
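                /* fall through */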
3658        case IOPRIO_CLASS_NONE:
3659                /*
3660                 * no prio set, inherit CPU scheduling settings
3661                 */
3662                cfqq->ioprio = task_nice_ioprio(tsk);
3663                cfqq->ioprio_class = task_nice_ioclass(tsk);
3664                break;
3665        case IOPRIO_CLASS_RT:
3666                cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3667                cfqq->ioprio_class = IOPRIO_CLASS_RT;
3668                break;
3669        case IOPRIO_CLASS_BE:
3670                cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3671                cfqq->ioprio_class = IOPRIO_CLASS_BE;
3672                break;
3673        case IOPRIO_CLASS_IDLE:
3674                cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3675                cfqq->ioprio = 7;
3676                cfq_clear_cfqq_idle_window(cfqq);
3677                break;
3678        }
3679
3680        /*
3681         * keep track of original prio settings in case we have to temporarily
3682         * elevate the priority of this queue
3683         */
3684        cfqq->org_ioprio = cfqq->ioprio;
3685        cfq_clear_cfqq_prio_changed(cfqq);
3686}
3687
3688static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3689{
3690        int ioprio = cic->icq.ioc->ioprio;
3691        struct cfq_data *cfqd = cic_to_cfqd(cic);
3692        struct cfq_queue *cfqq;
3693
3694        /*
3695         * Check whether ioprio has changed.  The condition may trigger
3696         * spuriously on a newly created cic but there's no harm.
3697         */
3698        if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3699                return;
3700
3701        cfqq = cic_to_cfqq(cic, false);
3702        if (cfqq) {
3703                cfq_put_queue(cfqq);
3704                cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
3705                cic_set_cfqq(cic, cfqq, false);
3706        }
3707
3708        cfqq = cic_to_cfqq(cic, true);
3709        if (cfqq)
3710                cfq_mark_cfqq_prio_changed(cfqq);
3711
3712        cic->ioprio = ioprio;
3713}
3714
3715static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3716                          pid_t pid, bool is_sync)
3717{
3718        RB_CLEAR_NODE(&cfqq->rb_node);
3719        RB_CLEAR_NODE(&cfqq->p_node);
3720        INIT_LIST_HEAD(&cfqq->fifo);
3721
3722        cfqq->ref = 0;
3723        cfqq->cfqd = cfqd;
3724
3725        cfq_mark_cfqq_prio_changed(cfqq);
3726
3727        if (is_sync) {
3728                if (!cfq_class_idle(cfqq))
3729                        cfq_mark_cfqq_idle_window(cfqq);
3730                cfq_mark_cfqq_sync(cfqq);
3731        }
3732        cfqq->pid = pid;
3733}
3734
3735#ifdef CONFIG_CFQ_GROUP_IOSCHED
3736static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3737{
3738        struct cfq_data *cfqd = cic_to_cfqd(cic);
3739        struct cfq_queue *cfqq;
3740        uint64_t serial_nr;
3741
3742        rcu_read_lock();
3743        serial_nr = bio_blkcg(bio)->css.serial_nr;
3744        rcu_read_unlock();
3745
3746        /*
3747         * Check whether blkcg has changed.  The condition may trigger
3748         * spuriously on a newly created cic but there's no harm.
3749         */
3750        if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
3751                return;
3752
3753        /*
3754         * Drop reference to queues.  New queues will be assigned in new
3755         * group upon arrival of fresh requests.
3756         */
3757        cfqq = cic_to_cfqq(cic, false);
3758        if (cfqq) {
3759                cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3760                cic_set_cfqq(cic, NULL, false);
3761                cfq_put_queue(cfqq);
3762        }
3763
3764        cfqq = cic_to_cfqq(cic, true);
3765        if (cfqq) {
3766                cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3767                cic_set_cfqq(cic, NULL, true);
3768                cfq_put_queue(cfqq);
3769        }
3770
3771        cic->blkcg_serial_nr = serial_nr;
3772}
3773#else
3774static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3775#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3776
3777static struct cfq_queue **
3778cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
3779{
3780        switch (ioprio_class) {
3781        case IOPRIO_CLASS_RT:
3782                return &cfqg->async_cfqq[0][ioprio];
3783        case IOPRIO_CLASS_NONE:
3784                ioprio = IOPRIO_NORM;
3785                /* fall through */
3786        case IOPRIO_CLASS_BE:
3787                return &cfqg->async_cfqq[1][ioprio];
3788        case IOPRIO_CLASS_IDLE:
3789                return &cfqg->async_idle_cfqq;
3790        default:
3791                BUG();
3792        }
3793}
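
/*
 * Example of the mapping above: a best-effort task at ioprio 4 is
 * served from &cfqg->async_cfqq[1][4], an RT task at ioprio 0 from
 * &cfqg->async_cfqq[0][0], and a task without a valid ioprio falls
 * through from IOPRIO_CLASS_NONE to the best-effort slot at
 * IOPRIO_NORM.
 */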
3794
3795static struct cfq_queue *
3796cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3797              struct bio *bio)
3798{
3799        int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3800        int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3801        struct cfq_queue **async_cfqq = NULL;
3802        struct cfq_queue *cfqq;
3803        struct cfq_group *cfqg;
3804
3805        rcu_read_lock();
3806        cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
3807        if (!cfqg) {
3808                cfqq = &cfqd->oom_cfqq;
3809                goto out;
3810        }
3811
3812        if (!is_sync) {
3813                if (!ioprio_valid(cic->ioprio)) {
3814                        struct task_struct *tsk = current;
3815                        ioprio = task_nice_ioprio(tsk);
3816                        ioprio_class = task_nice_ioclass(tsk);
3817                }
3818                async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
3819                cfqq = *async_cfqq;
3820                if (cfqq)
3821                        goto out;
3822        }
3823
3824        cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
3825                                     cfqd->queue->node);
3826        if (!cfqq) {
3827                cfqq = &cfqd->oom_cfqq;
3828                goto out;
3829        }
3830
3831        cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3832        cfq_init_prio_data(cfqq, cic);
3833        cfq_link_cfqq_cfqg(cfqq, cfqg);
3834        cfq_log_cfqq(cfqd, cfqq, "alloced");
3835
3836        if (async_cfqq) {
3837                /* a new async queue is created, pin and remember */
3838                cfqq->ref++;
3839                *async_cfqq = cfqq;
3840        }
3841out:
3842        cfqq->ref++;
3843        rcu_read_unlock();
3844        return cfqq;
3845}
3846
3847static void
3848__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3849{
3850        unsigned long elapsed = jiffies - ttime->last_end_request;
3851        elapsed = min(elapsed, 2UL * slice_idle);
3852
3853        ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3854        ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3855        ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3856}
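
/*
 * The think time is kept as a fixed-point exponentially weighted
 * average: every update decays total and sample count by 7/8 and adds
 * a new sample with weight 256.  Worked example with illustrative
 * numbers: from ttime_samples = 256 and ttime_total = 2560 (mean 10),
 * an elapsed time of 2 jiffies gives samples = (7*256 + 256)/8 = 256,
 * total = (7*2560 + 256*2)/8 = 2304 and ttime_mean = (2304 + 128)/256
 * = 9, i.e. the mean decays toward the new sample.
 */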
3857
3858static void
3859cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3860                        struct cfq_io_cq *cic)
3861{
3862        if (cfq_cfqq_sync(cfqq)) {
3863                __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3864                __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3865                        cfqd->cfq_slice_idle);
3866        }
3867#ifdef CONFIG_CFQ_GROUP_IOSCHED
3868        __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3869#endif
3870}
3871
3872static void
3873cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3874                       struct request *rq)
3875{
3876        sector_t sdist = 0;
3877        sector_t n_sec = blk_rq_sectors(rq);
3878        if (cfqq->last_request_pos) {
3879                if (cfqq->last_request_pos < blk_rq_pos(rq))
3880                        sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3881                else
3882                        sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3883        }
3884
3885        cfqq->seek_history <<= 1;
3886        if (blk_queue_nonrot(cfqd->queue))
3887                cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3888        else
3889                cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3890}
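
/*
 * seek_history acts as a 32-bit shift register over the last 32
 * requests: each bit records whether a request looked seeky (or, on
 * non-rotational devices, too small to be worth idling for).  Via
 * CFQQ_SEEKY() the queue is classified as seeky once more than
 * 32/8 = 4 of those 32 bits are set, so a single long jump inside an
 * otherwise sequential stream does not cost the queue its idle window.
 */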
3891
3892/*
3893 * Disable idle window if the process thinks too long or seeks so much that
3894 * it doesn't matter
3895 */
3896static void
3897cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3898                       struct cfq_io_cq *cic)
3899{
3900        int old_idle, enable_idle;
3901
3902        /*
3903         * Don't idle for async or idle io prio class
3904         */
3905        if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3906                return;
3907
3908        enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3909
3910        if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3911                cfq_mark_cfqq_deep(cfqq);
3912
3913        if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3914                enable_idle = 0;
3915        else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3916                 !cfqd->cfq_slice_idle ||
3917                 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3918                enable_idle = 0;
3919        else if (sample_valid(cic->ttime.ttime_samples)) {
3920                if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3921                        enable_idle = 0;
3922                else
3923                        enable_idle = 1;
3924        }
3925
3926        if (old_idle != enable_idle) {
3927                cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3928                if (enable_idle)
3929                        cfq_mark_cfqq_idle_window(cfqq);
3930                else
3931                        cfq_clear_cfqq_idle_window(cfqq);
3932        }
3933}
3934
3935/*
3936 * Check if new_cfqq should preempt the currently active queue. Return false
3937 * for no, or if we aren't sure; true will cause a preempt.
3938 */
3939static bool
3940cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3941                   struct request *rq)
3942{
3943        struct cfq_queue *cfqq;
3944
3945        cfqq = cfqd->active_queue;
3946        if (!cfqq)
3947                return false;
3948
3949        if (cfq_class_idle(new_cfqq))
3950                return false;
3951
3952        if (cfq_class_idle(cfqq))
3953                return true;
3954
3955        /*
3956         * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3957         */
3958        if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3959                return false;
3960
3961        /*
3962         * if the new request is sync, but the currently running queue is
3963         * not, let the sync request have priority.
3964         */
3965        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3966                return true;
3967
3968        /*
3969         * Treat ancestors of current cgroup the same way as current cgroup.
3970         * For anybody else we disallow preemption to guarantee service
3971         * fairness among cgroups.
3972         */
3973        if (!cfqg_is_descendant(cfqq->cfqg, new_cfqq->cfqg))
3974                return false;
3975
3976        if (cfq_slice_used(cfqq))
3977                return true;
3978
3979        /*
3980         * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3981         */
3982        if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3983                return true;
3984
3985        WARN_ON_ONCE(cfqq->ioprio_class != new_cfqq->ioprio_class);
3986        /* Allow preemption only if we are idling on sync-noidle tree */
3987        if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
3988            cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3989            RB_EMPTY_ROOT(&cfqq->sort_list))
3990                return true;
3991
3992        /*
3993         * So both queues are sync. Let the new request get disk time if
3994         * it's a metadata request and the current queue is doing regular IO.
3995         */
3996        if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3997                return true;
3998
3999        /* An idle queue should not be idle now for some reason */
4000        if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
4001                return true;
4002
4003        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
4004                return false;
4005
4006        /*
4007         * if this request is as-good as one we would expect from the
4008         * current cfqq, let it preempt
4009         */
4010        if (cfq_rq_close(cfqd, cfqq, rq))
4011                return true;
4012
4013        return false;
4014}
4015
4016/*
4017 * cfqq preempts the active queue. if we allowed preempt with no slice left,
4018 * let it have half of its nominal slice.
4019 */
4020static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4021{
4022        enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
4023
4024        cfq_log_cfqq(cfqd, cfqq, "preempt");
4025        cfq_slice_expired(cfqd, 1);
4026
4027        /*
4028         * workload type is changed, don't save slice, otherwise preempt
4029         * doesn't happen
4030         */
4031        if (old_type != cfqq_type(cfqq))
4032                cfqq->cfqg->saved_wl_slice = 0;
4033
4034        /*
4035         * Put the new queue at the front of the current list,
4036         * so we know that it will be selected next.
4037         */
4038        BUG_ON(!cfq_cfqq_on_rr(cfqq));
4039
4040        cfq_service_tree_add(cfqd, cfqq, 1);
4041
4042        cfqq->slice_end = 0;
4043        cfq_mark_cfqq_slice_new(cfqq);
4044}
4045
4046/*
4047 * Called when a new fs request (rq) is added (to cfqq). Check if there's
4048 * something we should do about it
4049 */
4050static void
4051cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
4052                struct request *rq)
4053{
4054        struct cfq_io_cq *cic = RQ_CIC(rq);
4055
4056        cfqd->rq_queued++;
4057        if (rq->cmd_flags & REQ_PRIO)
4058                cfqq->prio_pending++;
4059
4060        cfq_update_io_thinktime(cfqd, cfqq, cic);
4061        cfq_update_io_seektime(cfqd, cfqq, rq);
4062        cfq_update_idle_window(cfqd, cfqq, cic);
4063
4064        cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4065
4066        if (cfqq == cfqd->active_queue) {
4067                /*
4068                 * Remember that we saw a request from this process, but
4069                 * don't start queuing just yet. Otherwise we risk seeing lots
4070                 * of tiny requests, because we disrupt the normal plugging
4071                 * and merging. If the request is already larger than a single
4072                 * page, let it rip immediately. For that case we assume that
4073                 * merging is already done. Ditto for a busy system that
4074                 * has other work pending; don't risk delaying until the
4075                 * idle timer unplug fires to continue working.
4076                 */
4077                if (cfq_cfqq_wait_request(cfqq)) {
4078                        if (blk_rq_bytes(rq) > PAGE_SIZE ||
4079                            cfqd->busy_queues > 1) {
4080                                cfq_del_timer(cfqd, cfqq);
4081                                cfq_clear_cfqq_wait_request(cfqq);
4082                                __blk_run_queue(cfqd->queue);
4083                        } else {
4084                                cfqg_stats_update_idle_time(cfqq->cfqg);
4085                                cfq_mark_cfqq_must_dispatch(cfqq);
4086                        }
4087                }
4088        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
4089                /*
4090                 * not the active queue - expire current slice if it is
4091                 * idle and has expired its mean thinktime, or this new queue
4092                 * has some old slice time left and is of higher priority or
4093                 * this new queue is RT and the current one is BE
4094                 */
4095                cfq_preempt_queue(cfqd, cfqq);
4096                __blk_run_queue(cfqd->queue);
4097        }
4098}
4099
4100static void cfq_insert_request(struct request_queue *q, struct request *rq)
4101{
4102        struct cfq_data *cfqd = q->elevator->elevator_data;
4103        struct cfq_queue *cfqq = RQ_CFQQ(rq);
4104
4105        cfq_log_cfqq(cfqd, cfqq, "insert_request");
4106        cfq_init_prio_data(cfqq, RQ_CIC(rq));
4107
4108        rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
4109        list_add_tail(&rq->queuelist, &cfqq->fifo);
4110        cfq_add_rq_rb(rq);
4111        cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
4112                                 rq->cmd_flags);
4113        cfq_rq_enqueued(cfqd, cfqq, rq);
4114}
4115
4116/*
4117 * Update hw_tag based on peak queue depth over 50 samples under
4118 * sufficient load.
4119 */
4120static void cfq_update_hw_tag(struct cfq_data *cfqd)
4121{
4122        struct cfq_queue *cfqq = cfqd->active_queue;
4123
4124        if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
4125                cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
4126
4127        if (cfqd->hw_tag == 1)
4128                return;
4129
4130        if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
4131            cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
4132                return;
4133
4134        /*
4135         * If the active queue doesn't have enough requests and can idle,
4136         * cfq might not dispatch sufficient requests to hardware. Don't
4137         * zero hw_tag in this case.
4138         */
4139        if (cfqq && cfq_cfqq_idle_window(cfqq) &&
4140            cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
4141            CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
4142                return;
4143
4144        if (cfqd->hw_tag_samples++ < 50)
4145                return;
4146
4147        if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
4148                cfqd->hw_tag = 1;
4149        else
4150                cfqd->hw_tag = 0;
4151}
4152
4153static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4154{
4155        struct cfq_io_cq *cic = cfqd->active_cic;
4156
4157        /* If the queue already has requests, don't wait */
4158        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4159                return false;
4160
4161        /* If there are other queues in the group, don't wait */
4162        if (cfqq->cfqg->nr_cfqq > 1)
4163                return false;
4164
4165        /* the only queue in the group, but think time is big */
4166        if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
4167                return false;
4168
4169        if (cfq_slice_used(cfqq))
4170                return true;
4171
4172        /* if slice left is less than think time, wait busy */
4173        if (cic && sample_valid(cic->ttime.ttime_samples)
4174            && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
4175                return true;
4176
4177        /*
4178         * If the think time is less than a jiffy, then ttime_mean = 0 and
4179         * the check above will not be true. It might happen that the slice
4180         * has not expired yet but will expire very soon during
4181         * select_queue(). To cover the case where think time is less than a
4182         * jiffy, mark the queue wait busy if only 1 jiffy is left in the slice.
4183         */
4184        if (cfqq->slice_end - jiffies == 1)
4185                return true;
4186
4187        return false;
4188}
4189
4190static void cfq_completed_request(struct request_queue *q, struct request *rq)
4191{
4192        struct cfq_queue *cfqq = RQ_CFQQ(rq);
4193        struct cfq_data *cfqd = cfqq->cfqd;
4194        const int sync = rq_is_sync(rq);
4195        unsigned long now;
4196
4197        now = jiffies;
4198        cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4199                     !!(rq->cmd_flags & REQ_NOIDLE));
4200
4201        cfq_update_hw_tag(cfqd);
4202
4203        WARN_ON(!cfqd->rq_in_driver);
4204        WARN_ON(!cfqq->dispatched);
4205        cfqd->rq_in_driver--;
4206        cfqq->dispatched--;
4207        (RQ_CFQG(rq))->dispatched--;
4208        cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4209                                     rq_io_start_time_ns(rq), rq->cmd_flags);
4210
4211        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
4212
4213        if (sync) {
4214                struct cfq_rb_root *st;
4215
4216                RQ_CIC(rq)->ttime.last_end_request = now;
4217
4218                if (cfq_cfqq_on_rr(cfqq))
4219                        st = cfqq->service_tree;
4220                else
4221                        st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4222                                        cfqq_type(cfqq));
4223
4224                st->ttime.last_end_request = now;
4225                if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
4226                        cfqd->last_delayed_sync = now;
4227        }
4228
4229#ifdef CONFIG_CFQ_GROUP_IOSCHED
4230        cfqq->cfqg->ttime.last_end_request = now;
4231#endif
4232
4233        /*
4234         * If this is the active queue, check if it needs to be expired,
4235         * or if we want to idle in case it has no pending requests.
4236         */
4237        if (cfqd->active_queue == cfqq) {
4238                const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4239
4240                if (cfq_cfqq_slice_new(cfqq)) {
4241                        cfq_set_prio_slice(cfqd, cfqq);
4242                        cfq_clear_cfqq_slice_new(cfqq);
4243                }
4244
4245                /*
4246                 * Should we wait for the next request to come in before we
4247                 * expire the queue?
4248                 */
4249                if (cfq_should_wait_busy(cfqd, cfqq)) {
4250                        unsigned long extend_sl = cfqd->cfq_slice_idle;
4251                        if (!cfqd->cfq_slice_idle)
4252                                extend_sl = cfqd->cfq_group_idle;
4253                        cfqq->slice_end = jiffies + extend_sl;
4254                        cfq_mark_cfqq_wait_busy(cfqq);
4255                        cfq_log_cfqq(cfqd, cfqq, "will busy wait");
4256                }
4257
4258                /*
4259                 * Idling is not enabled on:
4260                 * - expired queues
4261                 * - idle-priority queues
4262                 * - async queues
4263                 * - queues with still some requests queued
4264                 * - when there is a close cooperator
4265                 */
4266                if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
4267                        cfq_slice_expired(cfqd, 1);
4268                else if (sync && cfqq_empty &&
4269                         !cfq_close_cooperator(cfqd, cfqq)) {
4270                        cfq_arm_slice_timer(cfqd);
4271                }
4272        }
4273
4274        if (!cfqd->rq_in_driver)
4275                cfq_schedule_dispatch(cfqd);
4276}
4277
4278static inline int __cfq_may_queue(struct cfq_queue *cfqq)
4279{
4280        if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
4281                cfq_mark_cfqq_must_alloc_slice(cfqq);
4282                return ELV_MQUEUE_MUST;
4283        }
4284
4285        return ELV_MQUEUE_MAY;
4286}
4287
4288static int cfq_may_queue(struct request_queue *q, int rw)
4289{
4290        struct cfq_data *cfqd = q->elevator->elevator_data;
4291        struct task_struct *tsk = current;
4292        struct cfq_io_cq *cic;
4293        struct cfq_queue *cfqq;
4294
4295        /*
4296         * don't force setup of a queue from here, as a call to may_queue
4297         * does not necessarily imply that a request actually will be queued.
4298         * so just look up a possibly existing queue, or return 'may queue'
4299         * if that fails
4300         */
4301        cic = cfq_cic_lookup(cfqd, tsk->io_context);
4302        if (!cic)
4303                return ELV_MQUEUE_MAY;
4304
4305        cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
4306        if (cfqq) {
4307                cfq_init_prio_data(cfqq, cic);
4308
4309                return __cfq_may_queue(cfqq);
4310        }
4311
4312        return ELV_MQUEUE_MAY;
4313}
4314
4315/*
4316 * queue lock held here
4317 */
4318static void cfq_put_request(struct request *rq)
4319{
4320        struct cfq_queue *cfqq = RQ_CFQQ(rq);
4321
4322        if (cfqq) {
4323                const int rw = rq_data_dir(rq);
4324
4325                BUG_ON(!cfqq->allocated[rw]);
4326                cfqq->allocated[rw]--;
4327
4328                /* Put down rq reference on cfqg */
4329                cfqg_put(RQ_CFQG(rq));
4330                rq->elv.priv[0] = NULL;
4331                rq->elv.priv[1] = NULL;
4332
4333                cfq_put_queue(cfqq);
4334        }
4335}
4336
4337static struct cfq_queue *
4338cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
4339                struct cfq_queue *cfqq)
4340{
4341        cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4342        cic_set_cfqq(cic, cfqq->new_cfqq, 1);
4343        cfq_mark_cfqq_coop(cfqq->new_cfqq);
4344        cfq_put_queue(cfqq);
4345        return cic_to_cfqq(cic, 1);
4346}
4347
4348/*
4349 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4350 * was the last process referring to said cfqq.
4351 */
4352static struct cfq_queue *
4353split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
4354{
4355        if (cfqq_process_refs(cfqq) == 1) {
4356                cfqq->pid = current->pid;
4357                cfq_clear_cfqq_coop(cfqq);
4358                cfq_clear_cfqq_split_coop(cfqq);
4359                return cfqq;
4360        }
4361
4362        cic_set_cfqq(cic, NULL, 1);
4363
4364        cfq_put_cooperator(cfqq);
4365
4366        cfq_put_queue(cfqq);
4367        return NULL;
4368}
4369/*
4370 * Allocate cfq data structures associated with this request.
4371 */
4372static int
4373cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4374                gfp_t gfp_mask)
4375{
4376        struct cfq_data *cfqd = q->elevator->elevator_data;
4377        struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
4378        const int rw = rq_data_dir(rq);
4379        const bool is_sync = rq_is_sync(rq);
4380        struct cfq_queue *cfqq;
4381
4382        spin_lock_irq(q->queue_lock);
4383
4384        check_ioprio_changed(cic, bio);
4385        check_blkcg_changed(cic, bio);
4386new_queue:
4387        cfqq = cic_to_cfqq(cic, is_sync);
4388        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
4389                if (cfqq)
4390                        cfq_put_queue(cfqq);
4391                cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
4392                cic_set_cfqq(cic, cfqq, is_sync);
4393        } else {
4394                /*
4395                 * If the queue was seeky for too long, break it apart.
4396                 */
4397                if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
4398                        cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4399                        cfqq = split_cfqq(cic, cfqq);
4400                        if (!cfqq)
4401                                goto new_queue;
4402                }
4403
4404                /*
4405                 * Check to see if this queue is scheduled to merge with
4406                 * another, closely cooperating queue.  The merging of
4407                 * queues happens here as it must be done in process context.
4408                 * The reference on new_cfqq was taken in merge_cfqqs.
4409                 */
4410                if (cfqq->new_cfqq)
4411                        cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
4412        }
4413
4414        cfqq->allocated[rw]++;
4415
4416        cfqq->ref++;
4417        cfqg_get(cfqq->cfqg);
4418        rq->elv.priv[0] = cfqq;
4419        rq->elv.priv[1] = cfqq->cfqg;
4420        spin_unlock_irq(q->queue_lock);
4421        return 0;
4422}
4423
4424static void cfq_kick_queue(struct work_struct *work)
4425{
4426        struct cfq_data *cfqd =
4427                container_of(work, struct cfq_data, unplug_work);
4428        struct request_queue *q = cfqd->queue;
4429
4430        spin_lock_irq(q->queue_lock);
4431        __blk_run_queue(cfqd->queue);
4432        spin_unlock_irq(q->queue_lock);
4433}
4434
4435/*
4436 * Timer running if the active_queue is currently idling inside its time slice
4437 */
4438static void cfq_idle_slice_timer(unsigned long data)
4439{
4440        struct cfq_data *cfqd = (struct cfq_data *) data;
4441        struct cfq_queue *cfqq;
4442        unsigned long flags;
4443        int timed_out = 1;
4444
4445        cfq_log(cfqd, "idle timer fired");
4446
4447        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4448
4449        cfqq = cfqd->active_queue;
4450        if (cfqq) {
4451                timed_out = 0;
4452
4453                /*
4454                 * We saw a request before the queue expired, let it through
4455                 */
4456                if (cfq_cfqq_must_dispatch(cfqq))
4457                        goto out_kick;
4458
4459                /*
4460                 * expired
4461                 */
4462                if (cfq_slice_used(cfqq))
4463                        goto expire;
4464
4465                /*
4466                 * only expire and reinvoke the request handler if there are
4467                 * other queues with pending requests
4468                 */
4469                if (!cfqd->busy_queues)
4470                        goto out_cont;
4471
4472                /*
4473                 * not expired and it has a request pending, let it dispatch
4474                 */
4475                if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4476                        goto out_kick;
4477
4478                /*
4479                 * The queue depth flag is reset only when idling didn't succeed
4480                 */
4481                cfq_clear_cfqq_deep(cfqq);
4482        }
4483expire:
4484        cfq_slice_expired(cfqd, timed_out);
4485out_kick:
4486        cfq_schedule_dispatch(cfqd);
4487out_cont:
4488        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4489}
4490
4491static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4492{
4493        del_timer_sync(&cfqd->idle_slice_timer);
4494        cancel_work_sync(&cfqd->unplug_work);
4495}
4496
4497static void cfq_exit_queue(struct elevator_queue *e)
4498{
4499        struct cfq_data *cfqd = e->elevator_data;
4500        struct request_queue *q = cfqd->queue;
4501
4502        cfq_shutdown_timer_wq(cfqd);
4503
4504        spin_lock_irq(q->queue_lock);
4505
4506        if (cfqd->active_queue)
4507                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
4508
4509        spin_unlock_irq(q->queue_lock);
4510
4511        cfq_shutdown_timer_wq(cfqd);
4512
4513#ifdef CONFIG_CFQ_GROUP_IOSCHED
4514        blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4515#else
4516        kfree(cfqd->root_group);
4517#endif
4518        kfree(cfqd);
4519}
4520
4521static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
4522{
4523        struct cfq_data *cfqd;
4524        struct blkcg_gq *blkg __maybe_unused;
4525        int i, ret;
4526        struct elevator_queue *eq;
4527
4528        eq = elevator_alloc(q, e);
4529        if (!eq)
4530                return -ENOMEM;
4531
4532        cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
4533        if (!cfqd) {
4534                kobject_put(&eq->kobj);
4535                return -ENOMEM;
4536        }
4537        eq->elevator_data = cfqd;
4538
4539        cfqd->queue = q;
4540        spin_lock_irq(q->queue_lock);
4541        q->elevator = eq;
4542        spin_unlock_irq(q->queue_lock);
4543
4544        /* Init root service tree */
4545        cfqd->grp_service_tree = CFQ_RB_ROOT;
4546
4547        /* Init root group and prefer root group over other groups by default */
4548#ifdef CONFIG_CFQ_GROUP_IOSCHED
4549        ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
4550        if (ret)
4551                goto out_free;
4552
4553        cfqd->root_group = blkg_to_cfqg(q->root_blkg);
4554#else
4555        ret = -ENOMEM;
4556        cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4557                                        GFP_KERNEL, cfqd->queue->node);
4558        if (!cfqd->root_group)
4559                goto out_free;
4560
4561        cfq_init_cfqg_base(cfqd->root_group);
4562        cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4563        cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4564#endif
4565
4566        /*
4567         * Not strictly needed (since RB_ROOT just clears the node and we
4568         * zeroed cfqd on alloc), but better be safe in case someone decides
4569         * to add magic to the rb code
4570         */
4571        for (i = 0; i < CFQ_PRIO_LISTS; i++)
4572                cfqd->prio_trees[i] = RB_ROOT;
4573
4574        /*
4575         * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
4576         * Grab a permanent reference to it, so that the normal code flow
4577         * will not attempt to free it.  oom_cfqq is linked to root_group
4578         * but shouldn't hold a reference as it'll never be unlinked.  Lose
4579         * the reference from linking right away.
4580         */
4581        cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4582        cfqd->oom_cfqq.ref++;
4583
4584        spin_lock_irq(q->queue_lock);
4585        cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4586        cfqg_put(cfqd->root_group);
4587        spin_unlock_irq(q->queue_lock);
4588
4589        init_timer(&cfqd->idle_slice_timer);
4590        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4591        cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4592
4593        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4594
4595        cfqd->cfq_quantum = cfq_quantum;
4596        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4597        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4598        cfqd->cfq_back_max = cfq_back_max;
4599        cfqd->cfq_back_penalty = cfq_back_penalty;
4600        cfqd->cfq_slice[0] = cfq_slice_async;
4601        cfqd->cfq_slice[1] = cfq_slice_sync;
4602        cfqd->cfq_target_latency = cfq_target_latency;
4603        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4604        cfqd->cfq_slice_idle = cfq_slice_idle;
4605        cfqd->cfq_group_idle = cfq_group_idle;
4606        cfqd->cfq_latency = 1;
4607        cfqd->hw_tag = -1;
4608        /*
4609         * we optimistically start assuming sync ops weren't delayed in the
4610         * last second, in order to have a larger depth for async operations.
4611         */
4612        cfqd->last_delayed_sync = jiffies - HZ;
4613        return 0;
4614
4615out_free:
4616        kfree(cfqd);
4617        kobject_put(&eq->kobj);
4618        return ret;
4619}
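
/*
 * For reference, the function above follows the standard elevator_init_fn
 * shape: allocate the elevator_queue, allocate the private data, publish
 * q->elevator under queue_lock, and on failure drop the kobject reference
 * so the elevator core frees eq.  A minimal sketch of that skeleton for a
 * hypothetical scheduler (illustrative only; "struct foo_data" and
 * foo_init_queue() are made up, hence the #if 0):
 */
#if 0
static int foo_init_queue(struct request_queue *q, struct elevator_type *e)
{
        struct foo_data *fd;
        struct elevator_queue *eq;

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        fd = kzalloc_node(sizeof(*fd), GFP_KERNEL, q->node);
        if (!fd) {
                kobject_put(&eq->kobj); /* drops the only reference to eq */
                return -ENOMEM;
        }
        eq->elevator_data = fd;

        spin_lock_irq(q->queue_lock); /* publish the new elevator */
        q->elevator = eq;
        spin_unlock_irq(q->queue_lock);
        return 0;
}
#endif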
4620
4621static void cfq_registered_queue(struct request_queue *q)
4622{
4623        struct elevator_queue *e = q->elevator;
4624        struct cfq_data *cfqd = e->elevator_data;
4625
4626        /*
4627         * Default to IOPS mode with no idling for non-rotational (SSD) devices
4628         */
4629        if (blk_queue_nonrot(q))
4630                cfqd->cfq_slice_idle = 0;
4631}
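
/*
 * This runs from the elevator_registered_fn hook rather than from
 * cfq_init_queue() above, by which point the driver has normally had a
 * chance to mark the queue non-rotational.  With slice_idle == 0, cfq
 * stops idling between requests and effectively schedules by IOPS rather
 * than by time slice; the underlying flag is visible in sysfs as
 * /sys/block/<dev>/queue/rotational.
 */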
4632
4633/*
4634 * sysfs parts below -->
4635 */
4636static ssize_t
4637cfq_var_show(unsigned int var, char *page)
4638{
4639        return sprintf(page, "%u\n", var);
4640}
4641
4642static ssize_t
4643cfq_var_store(unsigned int *var, const char *page, size_t count)
4644{
4645        char *p = (char *) page;
4646
4647        *var = simple_strtoul(p, &p, 10);
4648        return count;
4649}
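
/*
 * Note that simple_strtoul() happily parses a numeric prefix and ignores
 * trailing garbage, and cfq_var_store() never reports a parse failure.
 * A stricter variant (an illustrative sketch, not the code used above)
 * could propagate errors with kstrtouint():
 */
#if 0
static ssize_t cfq_var_store_strict(unsigned int *var, const char *page,
                                    size_t count)
{
        int ret = kstrtouint(page, 10, var); /* rejects malformed input */

        return ret ? ret : count;
}
#endif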
4650
4651#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
4652static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4653{                                                                       \
4654        struct cfq_data *cfqd = e->elevator_data;                       \
4655        unsigned int __data = __VAR;                                    \
4656        if (__CONV)                                                     \
4657                __data = jiffies_to_msecs(__data);                      \
4658        return cfq_var_show(__data, (page));                            \
4659}
4660SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4661SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4662SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4663SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4664SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4665SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4666SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4667SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4668SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4669SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4670SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4671SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4672#undef SHOW_FUNCTION
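
/*
 * For reference, SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1)
 * above expands to roughly the following (shown under #if 0 since the real
 * definition already exists):
 */
#if 0
static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
{
        struct cfq_data *cfqd = e->elevator_data;
        unsigned int __data = cfqd->cfq_slice_idle;
        if (1) /* __CONV: report the jiffies value in milliseconds */
                __data = jiffies_to_msecs(__data);
        return cfq_var_show(__data, (page));
}
#endif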
4673
4674#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4675static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4676{                                                                       \
4677        struct cfq_data *cfqd = e->elevator_data;                       \
4678        unsigned int __data;                                            \
4679        int ret = cfq_var_store(&__data, (page), count);                \
4680        if (__data < (MIN))                                             \
4681                __data = (MIN);                                         \
4682        else if (__data > (MAX))                                        \
4683                __data = (MAX);                                         \
4684        if (__CONV)                                                     \
4685                *(__PTR) = msecs_to_jiffies(__data);                    \
4686        else                                                            \
4687                *(__PTR) = __data;                                      \
4688        return ret;                                                     \
4689}
4690STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4691STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4692                UINT_MAX, 1);
4693STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4694                UINT_MAX, 1);
4695STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4696STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4697                UINT_MAX, 0);
4698STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4699STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4700STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4701STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4702STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4703                UINT_MAX, 0);
4704STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4705STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
4706#undef STORE_FUNCTION
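
/*
 * Two properties of STORE_FUNCTION() worth noting: the [MIN, MAX] clamp is
 * applied before the optional msecs_to_jiffies() conversion, so the limits
 * for the __CONV == 1 tunables are expressed in milliseconds; and since
 * cfq_var_store() returns count unconditionally, a write "succeeds" even
 * when the value had to be clamped.
 */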
4707
4708#define CFQ_ATTR(name) \
4709        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4710
4711static struct elv_fs_entry cfq_attrs[] = {
4712        CFQ_ATTR(quantum),
4713        CFQ_ATTR(fifo_expire_sync),
4714        CFQ_ATTR(fifo_expire_async),
4715        CFQ_ATTR(back_seek_max),
4716        CFQ_ATTR(back_seek_penalty),
4717        CFQ_ATTR(slice_sync),
4718        CFQ_ATTR(slice_async),
4719        CFQ_ATTR(slice_async_rq),
4720        CFQ_ATTR(slice_idle),
4721        CFQ_ATTR(group_idle),
4722        CFQ_ATTR(low_latency),
4723        CFQ_ATTR(target_latency),
4724        __ATTR_NULL
4725};
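
/*
 * Once cfq is the active scheduler for a device, these attributes appear
 * under /sys/block/<dev>/queue/iosched/, e.g.:
 *
 *      # cat /sys/block/sda/queue/iosched/slice_idle
 *      8
 *      # echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * (The example value assumes HZ=1000 on a rotational device, where
 * cfq_slice_idle = HZ/125 is 8 jiffies = 8 ms.)
 */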
4726
4727static struct elevator_type iosched_cfq = {
4728        .ops = {
4729                .elevator_merge_fn =            cfq_merge,
4730                .elevator_merged_fn =           cfq_merged_request,
4731                .elevator_merge_req_fn =        cfq_merged_requests,
4732                .elevator_allow_merge_fn =      cfq_allow_merge,
4733                .elevator_bio_merged_fn =       cfq_bio_merged,
4734                .elevator_dispatch_fn =         cfq_dispatch_requests,
4735                .elevator_add_req_fn =          cfq_insert_request,
4736                .elevator_activate_req_fn =     cfq_activate_request,
4737                .elevator_deactivate_req_fn =   cfq_deactivate_request,
4738                .elevator_completed_req_fn =    cfq_completed_request,
4739                .elevator_former_req_fn =       elv_rb_former_request,
4740                .elevator_latter_req_fn =       elv_rb_latter_request,
4741                .elevator_init_icq_fn =         cfq_init_icq,
4742                .elevator_exit_icq_fn =         cfq_exit_icq,
4743                .elevator_set_req_fn =          cfq_set_request,
4744                .elevator_put_req_fn =          cfq_put_request,
4745                .elevator_may_queue_fn =        cfq_may_queue,
4746                .elevator_init_fn =             cfq_init_queue,
4747                .elevator_exit_fn =             cfq_exit_queue,
4748                .elevator_registered_fn =       cfq_registered_queue,
4749        },
4750        .icq_size       =       sizeof(struct cfq_io_cq),
4751        .icq_align      =       __alignof__(struct cfq_io_cq),
4752        .elevator_attrs =       cfq_attrs,
4753        .elevator_name  =       "cfq",
4754        .elevator_owner =       THIS_MODULE,
4755};
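
/*
 * elevator_name is the token matched by the elevator core, so cfq is
 * selected per device via /sys/block/<dev>/queue/scheduler (or globally
 * at boot with elevator=cfq):
 *
 *      # cat /sys/block/sda/queue/scheduler
 *      noop deadline [cfq]
 *      # echo cfq > /sys/block/sda/queue/scheduler
 */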
4756
4757#ifdef CONFIG_CFQ_GROUP_IOSCHED
4758static struct blkcg_policy blkcg_policy_cfq = {
4759        .dfl_cftypes            = cfq_blkcg_files,
4760        .legacy_cftypes         = cfq_blkcg_legacy_files,
4761
4762        .cpd_alloc_fn           = cfq_cpd_alloc,
4763        .cpd_init_fn            = cfq_cpd_init,
4764        .cpd_free_fn            = cfq_cpd_free,
4765        .cpd_bind_fn            = cfq_cpd_bind,
4766
4767        .pd_alloc_fn            = cfq_pd_alloc,
4768        .pd_init_fn             = cfq_pd_init,
4769        .pd_offline_fn          = cfq_pd_offline,
4770        .pd_free_fn             = cfq_pd_free,
4771        .pd_reset_stats_fn      = cfq_pd_reset_stats,
4772};
4773#endif
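
/*
 * In blkcg terms, the cpd_* callbacks manage per-cgroup policy data (one
 * instance per blkcg) while the pd_* callbacks manage per (cgroup,
 * request_queue) policy data -- the cfq_groups that blkcg_activate_policy()
 * instantiates for cfq_init_queue() above.
 */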
4774
4775static int __init cfq_init(void)
4776{
4777        int ret;
4778
4779        /*
4780         * These divisions truncate to 0 on low-HZ setups (e.g. HZ=100 makes HZ/125 == 0)
4781         */
4782        if (!cfq_slice_async)
4783                cfq_slice_async = 1;
4784        if (!cfq_slice_idle)
4785                cfq_slice_idle = 1;
4786
4787#ifdef CONFIG_CFQ_GROUP_IOSCHED
4788        if (!cfq_group_idle)
4789                cfq_group_idle = 1;
4790
4791        ret = blkcg_policy_register(&blkcg_policy_cfq);
4792        if (ret)
4793                return ret;
4794#else
4795        cfq_group_idle = 0;
4796#endif
4797
4798        ret = -ENOMEM;
4799        cfq_pool = KMEM_CACHE(cfq_queue, 0);
4800        if (!cfq_pool)
4801                goto err_pol_unreg;
4802
4803        ret = elv_register(&iosched_cfq);
4804        if (ret)
4805                goto err_free_pool;
4806
4807        return 0;
4808
4809err_free_pool:
4810        kmem_cache_destroy(cfq_pool);
4811err_pol_unreg:
4812#ifdef CONFIG_CFQ_GROUP_IOSCHED
4813        blkcg_policy_unregister(&blkcg_policy_cfq);
4814#endif
4815        return ret;
4816}
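
/*
 * Note that the error unwind above tears down in reverse order of setup:
 * an elv_register() failure first destroys the cfq_queue slab cache, and
 * both error paths fall through to unregistering the blkcg policy that
 * was registered first.
 */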
4817
4818static void __exit cfq_exit(void)
4819{
4820#ifdef CONFIG_CFQ_GROUP_IOSCHED
4821        blkcg_policy_unregister(&blkcg_policy_cfq);
4822#endif
4823        elv_unregister(&iosched_cfq);
4824        kmem_cache_destroy(cfq_pool);
4825}
4826
4827module_init(cfq_init);
4828module_exit(cfq_exit);
4829
4830MODULE_AUTHOR("Jens Axboe");
4831MODULE_LICENSE("GPL");
4832MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
4833