linux/block/cfq-iosched.c
   1/*
   2 *  CFQ, or complete fairness queueing, disk scheduler.
   3 *
   4 *  Based on ideas from a previously unfinished io
   5 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
   6 *
   7 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   8 */
   9#include <linux/module.h>
  10#include <linux/blkdev.h>
  11#include <linux/elevator.h>
  12#include <linux/rbtree.h>
  13#include <linux/ioprio.h>
  14#include <linux/blktrace_api.h>
  15
  16/*
  17 * tunables
  18 */
   19/* max requests dispatched from a queue in one round of service */
  20static const int cfq_quantum = 4;
  21static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  22/* maximum backwards seek, in KiB */
  23static const int cfq_back_max = 16 * 1024;
  24/* penalty of a backwards seek */
  25static const int cfq_back_penalty = 2;
  26static const int cfq_slice_sync = HZ / 10;
  27static int cfq_slice_async = HZ / 25;
  28static const int cfq_slice_async_rq = 2;
  29static int cfq_slice_idle = HZ / 125;
  30
  31/*
  32 * offset from end of service tree
  33 */
  34#define CFQ_IDLE_DELAY          (HZ / 5)
  35
  36/*
  37 * below this threshold, we consider thinktime immediate
  38 */
  39#define CFQ_MIN_TT              (2)
  40
  41#define CFQ_SLICE_SCALE         (5)
  42#define CFQ_HW_QUEUE_MIN        (5)
  43
  44#define RQ_CIC(rq)              \
  45        ((struct cfq_io_context *) (rq)->elevator_private)
  46#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
  47
  48static struct kmem_cache *cfq_pool;
  49static struct kmem_cache *cfq_ioc_pool;
  50
  51static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
  52static struct completion *ioc_gone;
  53static DEFINE_SPINLOCK(ioc_gone_lock);
  54
  55#define CFQ_PRIO_LISTS          IOPRIO_BE_NR
  56#define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  57#define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  58
  59#define sample_valid(samples)   ((samples) > 80)
  60
  61/*
  62 * Most of our rbtree usage is for sorting with min extraction, so
  63 * if we cache the leftmost node we don't have to walk down the tree
   64 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
  65 * move this into the elevator for the rq sorting as well.
  66 */
  67struct cfq_rb_root {
  68        struct rb_root rb;
  69        struct rb_node *left;
  70};
  71#define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, }
  72
  73/*
  74 * Per process-grouping structure
  75 */
  76struct cfq_queue {
  77        /* reference count */
  78        atomic_t ref;
  79        /* various state flags, see below */
  80        unsigned int flags;
  81        /* parent cfq_data */
  82        struct cfq_data *cfqd;
  83        /* service_tree member */
  84        struct rb_node rb_node;
  85        /* service_tree key */
  86        unsigned long rb_key;
  87        /* prio tree member */
  88        struct rb_node p_node;
  89        /* prio tree root we belong to, if any */
  90        struct rb_root *p_root;
  91        /* sorted list of pending requests */
  92        struct rb_root sort_list;
  93        /* if fifo isn't expired, next request to serve */
  94        struct request *next_rq;
  95        /* requests queued in sort_list */
  96        int queued[2];
  97        /* currently allocated requests */
  98        int allocated[2];
  99        /* fifo list of requests in sort_list */
 100        struct list_head fifo;
 101
 102        unsigned long slice_end;
 103        long slice_resid;
 104        unsigned int slice_dispatch;
 105
 106        /* pending metadata requests */
 107        int meta_pending;
 108        /* number of requests that are on the dispatch list or inside driver */
 109        int dispatched;
 110
 111        /* io prio of this group */
 112        unsigned short ioprio, org_ioprio;
 113        unsigned short ioprio_class, org_ioprio_class;
 114
 115        pid_t pid;
 116};
 117
 118/*
 119 * Per block device queue structure
 120 */
 121struct cfq_data {
 122        struct request_queue *queue;
 123
 124        /*
 125         * rr list of queues with requests and the count of them
 126         */
 127        struct cfq_rb_root service_tree;
 128
 129        /*
 130         * Each priority tree is sorted by next_request position.  These
 131         * trees are used when determining if two or more queues are
 132         * interleaving requests (see cfq_close_cooperator).
 133         */
 134        struct rb_root prio_trees[CFQ_PRIO_LISTS];
 135
 136        unsigned int busy_queues;
 137
 138        int rq_in_driver[2];
 139        int sync_flight;
 140
 141        /*
 142         * queue-depth detection
 143         */
 144        int rq_queued;
 145        int hw_tag;
 146        int hw_tag_samples;
 147        int rq_in_driver_peak;
 148
 149        /*
 150         * idle window management
 151         */
 152        struct timer_list idle_slice_timer;
 153        struct work_struct unplug_work;
 154
 155        struct cfq_queue *active_queue;
 156        struct cfq_io_context *active_cic;
 157
 158        /*
 159         * async queue for each priority case
 160         */
 161        struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
 162        struct cfq_queue *async_idle_cfqq;
 163
 164        sector_t last_position;
 165
 166        /*
 167         * tunables, see top of file
 168         */
 169        unsigned int cfq_quantum;
 170        unsigned int cfq_fifo_expire[2];
 171        unsigned int cfq_back_penalty;
 172        unsigned int cfq_back_max;
 173        unsigned int cfq_slice[2];
 174        unsigned int cfq_slice_async_rq;
 175        unsigned int cfq_slice_idle;
 176        unsigned int cfq_latency;
 177
 178        struct list_head cic_list;
 179
 180        /*
 181         * Fallback dummy cfqq for extreme OOM conditions
 182         */
 183        struct cfq_queue oom_cfqq;
 184
 185        unsigned long last_end_sync_rq;
 186};
 187
 188enum cfqq_state_flags {
 189        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
 190        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
 191        CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
 192        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
 193        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
 194        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
 195        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
 196        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
 197        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
  198        CFQ_CFQQ_FLAG_coop,             /* has jumped the queue as a close cooperator */
 199        CFQ_CFQQ_FLAG_coop_preempt,     /* coop preempt */
 200};
 201
 202#define CFQ_CFQQ_FNS(name)                                              \
 203static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
 204{                                                                       \
 205        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
 206}                                                                       \
 207static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
 208{                                                                       \
 209        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
 210}                                                                       \
 211static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
 212{                                                                       \
 213        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
 214}
 215
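     /*
      * For example, the CFQ_CFQQ_FNS(on_rr) invocation below expands to
      * cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(),
      * which set, clear and test the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
      */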
 216CFQ_CFQQ_FNS(on_rr);
 217CFQ_CFQQ_FNS(wait_request);
 218CFQ_CFQQ_FNS(must_dispatch);
 219CFQ_CFQQ_FNS(must_alloc_slice);
 220CFQ_CFQQ_FNS(fifo_expire);
 221CFQ_CFQQ_FNS(idle_window);
 222CFQ_CFQQ_FNS(prio_changed);
 223CFQ_CFQQ_FNS(slice_new);
 224CFQ_CFQQ_FNS(sync);
 225CFQ_CFQQ_FNS(coop);
 226CFQ_CFQQ_FNS(coop_preempt);
 227#undef CFQ_CFQQ_FNS
 228
 229#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
 230        blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
 231#define cfq_log(cfqd, fmt, args...)     \
 232        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 233
 234static void cfq_dispatch_insert(struct request_queue *, struct request *);
 235static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 236                                       struct io_context *, gfp_t);
 237static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 238                                                struct io_context *);
 239
 240static inline int rq_in_driver(struct cfq_data *cfqd)
 241{
 242        return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
 243}
 244
 245static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
 246                                            bool is_sync)
 247{
 248        return cic->cfqq[is_sync];
 249}
 250
 251static inline void cic_set_cfqq(struct cfq_io_context *cic,
 252                                struct cfq_queue *cfqq, bool is_sync)
 253{
 254        cic->cfqq[is_sync] = cfqq;
 255}
 256
 257/*
  258 * We regard a request as SYNC if it's either a read or has the SYNC bit
  259 * set (in which case it could also be a direct WRITE).
 260 */
 261static inline bool cfq_bio_sync(struct bio *bio)
 262{
 263        return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 264}
 265
 266/*
 267 * scheduler run of queue, if there are requests pending and no one in the
 268 * driver that will restart queueing
 269 */
 270static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 271{
 272        if (cfqd->busy_queues) {
 273                cfq_log(cfqd, "schedule dispatch");
 274                kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 275        }
 276}
 277
 278static int cfq_queue_empty(struct request_queue *q)
 279{
 280        struct cfq_data *cfqd = q->elevator->elevator_data;
 281
 282        return !cfqd->busy_queues;
 283}
 284
 285/*
  286 * Scale the schedule slice based on io priority. Use the sync time slice only
  287 * if a queue is marked sync and has sync io queued. A sync queue with async
  288 * io only should not get the full sync slice length.
 289 */
 290static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 291                                 unsigned short prio)
 292{
 293        const int base_slice = cfqd->cfq_slice[sync];
 294
 295        WARN_ON(prio >= IOPRIO_BE_NR);
 296
 297        return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
 298}
 299
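     /*
      * Worked example for cfq_prio_slice() above (assuming the default
      * cfq_slice_sync of HZ/10, i.e. 100ms): a sync queue at ioprio 4 gets
      * the base slice of 100ms, ioprio 0 gets 100 + 100/5 * 4 = 180ms, and
      * ioprio 7 gets 100 + 100/5 * (4 - 7) = 40ms.
      */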
 300static inline int
 301cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 302{
 303        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 304}
 305
 306static inline void
 307cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 308{
 309        cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
 310        cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
 311}
 312
 313/*
 314 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 315 * isn't valid until the first request from the dispatch is activated
 316 * and the slice time set.
 317 */
 318static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 319{
 320        if (cfq_cfqq_slice_new(cfqq))
 321                return 0;
 322        if (time_before(jiffies, cfqq->slice_end))
 323                return 0;
 324
 325        return 1;
 326}
 327
 328/*
  329 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 330 * We choose the request that is closest to the head right now. Distance
 331 * behind the head is penalized and only allowed to a certain extent.
 332 */
 333static struct request *
 334cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
 335{
 336        sector_t last, s1, s2, d1 = 0, d2 = 0;
 337        unsigned long back_max;
 338#define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
 339#define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
 340        unsigned wrap = 0; /* bit mask: requests behind the disk head? */
 341
 342        if (rq1 == NULL || rq1 == rq2)
 343                return rq2;
 344        if (rq2 == NULL)
 345                return rq1;
 346
 347        if (rq_is_sync(rq1) && !rq_is_sync(rq2))
 348                return rq1;
 349        else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
 350                return rq2;
 351        if (rq_is_meta(rq1) && !rq_is_meta(rq2))
 352                return rq1;
 353        else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
 354                return rq2;
 355
 356        s1 = blk_rq_pos(rq1);
 357        s2 = blk_rq_pos(rq2);
 358
 359        last = cfqd->last_position;
 360
 361        /*
 362         * by definition, 1KiB is 2 sectors
 363         */
 364        back_max = cfqd->cfq_back_max * 2;
 365
 366        /*
 367         * Strict one way elevator _except_ in the case where we allow
 368         * short backward seeks which are biased as twice the cost of a
 369         * similar forward seek.
 370         */
 371        if (s1 >= last)
 372                d1 = s1 - last;
 373        else if (s1 + back_max >= last)
 374                d1 = (last - s1) * cfqd->cfq_back_penalty;
 375        else
 376                wrap |= CFQ_RQ1_WRAP;
 377
 378        if (s2 >= last)
 379                d2 = s2 - last;
 380        else if (s2 + back_max >= last)
 381                d2 = (last - s2) * cfqd->cfq_back_penalty;
 382        else
 383                wrap |= CFQ_RQ2_WRAP;
 384
 385        /* Found required data */
 386
 387        /*
 388         * By doing switch() on the bit mask "wrap" we avoid having to
 389         * check two variables for all permutations: --> faster!
 390         */
 391        switch (wrap) {
 392        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
 393                if (d1 < d2)
 394                        return rq1;
 395                else if (d2 < d1)
 396                        return rq2;
 397                else {
 398                        if (s1 >= s2)
 399                                return rq1;
 400                        else
 401                                return rq2;
 402                }
 403
 404        case CFQ_RQ2_WRAP:
 405                return rq1;
 406        case CFQ_RQ1_WRAP:
 407                return rq2;
 408        case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
 409        default:
 410                /*
 411                 * Since both rqs are wrapped,
 412                 * start with the one that's further behind head
 413                 * (--> only *one* back seek required),
 414                 * since back seek takes more time than forward.
 415                 */
 416                if (s1 <= s2)
 417                        return rq1;
 418                else
 419                        return rq2;
 420        }
 421}
 422
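     /*
      * Worked example for cfq_choose_req() above (illustrative numbers):
      * with last_position = 1000, cfq_back_penalty = 2 and both requests
      * within back_max, a request at sector 1200 has d1 = 200 while one at
      * sector 900 has d2 = (1000 - 900) * 2 = 200; the tie goes to the
      * higher sector, so the forward request at 1200 is chosen.
      */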
 423/*
  424 * The below is the leftmost-cache rbtree addon
 425 */
 426static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
 427{
 428        if (!root->left)
 429                root->left = rb_first(&root->rb);
 430
 431        if (root->left)
 432                return rb_entry(root->left, struct cfq_queue, rb_node);
 433
 434        return NULL;
 435}
 436
 437static void rb_erase_init(struct rb_node *n, struct rb_root *root)
 438{
 439        rb_erase(n, root);
 440        RB_CLEAR_NODE(n);
 441}
 442
 443static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
 444{
 445        if (root->left == n)
 446                root->left = NULL;
 447        rb_erase_init(n, &root->rb);
 448}
 449
 450/*
 451 * would be nice to take fifo expire time into account as well
 452 */
 453static struct request *
 454cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 455                  struct request *last)
 456{
 457        struct rb_node *rbnext = rb_next(&last->rb_node);
 458        struct rb_node *rbprev = rb_prev(&last->rb_node);
 459        struct request *next = NULL, *prev = NULL;
 460
 461        BUG_ON(RB_EMPTY_NODE(&last->rb_node));
 462
 463        if (rbprev)
 464                prev = rb_entry_rq(rbprev);
 465
 466        if (rbnext)
 467                next = rb_entry_rq(rbnext);
 468        else {
 469                rbnext = rb_first(&cfqq->sort_list);
 470                if (rbnext && rbnext != &last->rb_node)
 471                        next = rb_entry_rq(rbnext);
 472        }
 473
 474        return cfq_choose_req(cfqd, next, prev);
 475}
 476
 477static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
 478                                      struct cfq_queue *cfqq)
 479{
 480        /*
 481         * just an approximation, should be ok.
 482         */
 483        return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
 484                       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
 485}
 486
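     /*
      * Worked example for cfq_slice_offset() above (assuming the default
      * tunables, in ms): for a sync ioprio-4 queue with three busy queues,
      * the offset is (3 - 1) * (180 - 100) = 160ms, so queues entitled to
      * smaller slices are keyed further out in the service tree.
      */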
 487/*
 488 * The cfqd->service_tree holds all pending cfq_queue's that have
 489 * requests waiting to be processed. It is sorted in the order that
 490 * we will service the queues.
 491 */
 492static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 493                                 bool add_front)
 494{
 495        struct rb_node **p, *parent;
 496        struct cfq_queue *__cfqq;
 497        unsigned long rb_key;
 498        int left;
 499
 500        if (cfq_class_idle(cfqq)) {
 501                rb_key = CFQ_IDLE_DELAY;
 502                parent = rb_last(&cfqd->service_tree.rb);
 503                if (parent && parent != &cfqq->rb_node) {
 504                        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 505                        rb_key += __cfqq->rb_key;
 506                } else
 507                        rb_key += jiffies;
 508        } else if (!add_front) {
 509                /*
 510                 * Get our rb key offset. Subtract any residual slice
 511                 * value carried from last service. A negative resid
 512                 * count indicates slice overrun, and this should position
 513                 * the next service time further away in the tree.
 514                 */
 515                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
 516                rb_key -= cfqq->slice_resid;
 517                cfqq->slice_resid = 0;
 518        } else {
 519                rb_key = -HZ;
 520                __cfqq = cfq_rb_first(&cfqd->service_tree);
 521                rb_key += __cfqq ? __cfqq->rb_key : jiffies;
 522        }
 523
 524        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 525                /*
 526                 * same position, nothing more to do
 527                 */
 528                if (rb_key == cfqq->rb_key)
 529                        return;
 530
 531                cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
 532        }
 533
 534        left = 1;
 535        parent = NULL;
 536        p = &cfqd->service_tree.rb.rb_node;
 537        while (*p) {
 538                struct rb_node **n;
 539
 540                parent = *p;
 541                __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 542
 543                /*
  544                 * sort RT queues first, we always want to give
  545                 * preference to them. IDLE queues go to the back.
  546                 * After that, sort on the next service time.
 547                 */
 548                if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
 549                        n = &(*p)->rb_left;
 550                else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
 551                        n = &(*p)->rb_right;
 552                else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
 553                        n = &(*p)->rb_left;
 554                else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
 555                        n = &(*p)->rb_right;
 556                else if (time_before(rb_key, __cfqq->rb_key))
 557                        n = &(*p)->rb_left;
 558                else
 559                        n = &(*p)->rb_right;
 560
 561                if (n == &(*p)->rb_right)
 562                        left = 0;
 563
 564                p = n;
 565        }
 566
 567        if (left)
 568                cfqd->service_tree.left = &cfqq->rb_node;
 569
 570        cfqq->rb_key = rb_key;
 571        rb_link_node(&cfqq->rb_node, parent, p);
 572        rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
 573}
 574
 575static struct cfq_queue *
 576cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
 577                     sector_t sector, struct rb_node **ret_parent,
 578                     struct rb_node ***rb_link)
 579{
 580        struct rb_node **p, *parent;
 581        struct cfq_queue *cfqq = NULL;
 582
 583        parent = NULL;
 584        p = &root->rb_node;
 585        while (*p) {
 586                struct rb_node **n;
 587
 588                parent = *p;
 589                cfqq = rb_entry(parent, struct cfq_queue, p_node);
 590
 591                /*
 592                 * Sort strictly based on sector.  Smallest to the left,
 593                 * largest to the right.
 594                 */
 595                if (sector > blk_rq_pos(cfqq->next_rq))
 596                        n = &(*p)->rb_right;
 597                else if (sector < blk_rq_pos(cfqq->next_rq))
 598                        n = &(*p)->rb_left;
 599                else
 600                        break;
 601                p = n;
 602                cfqq = NULL;
 603        }
 604
 605        *ret_parent = parent;
 606        if (rb_link)
 607                *rb_link = p;
 608        return cfqq;
 609}
 610
 611static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 612{
 613        struct rb_node **p, *parent;
 614        struct cfq_queue *__cfqq;
 615
 616        if (cfqq->p_root) {
 617                rb_erase(&cfqq->p_node, cfqq->p_root);
 618                cfqq->p_root = NULL;
 619        }
 620
 621        if (cfq_class_idle(cfqq))
 622                return;
 623        if (!cfqq->next_rq)
 624                return;
 625
 626        cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
 627        __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
 628                                      blk_rq_pos(cfqq->next_rq), &parent, &p);
 629        if (!__cfqq) {
 630                rb_link_node(&cfqq->p_node, parent, p);
 631                rb_insert_color(&cfqq->p_node, cfqq->p_root);
 632        } else
 633                cfqq->p_root = NULL;
 634}
 635
 636/*
 637 * Update cfqq's position in the service tree.
 638 */
 639static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 640{
 641        /*
 642         * Resorting requires the cfqq to be on the RR list already.
 643         */
 644        if (cfq_cfqq_on_rr(cfqq)) {
 645                cfq_service_tree_add(cfqd, cfqq, 0);
 646                cfq_prio_tree_add(cfqd, cfqq);
 647        }
 648}
 649
 650/*
 651 * add to busy list of queues for service, trying to be fair in ordering
 652 * the pending list according to last request service
 653 */
 654static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 655{
 656        cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
 657        BUG_ON(cfq_cfqq_on_rr(cfqq));
 658        cfq_mark_cfqq_on_rr(cfqq);
 659        cfqd->busy_queues++;
 660
 661        cfq_resort_rr_list(cfqd, cfqq);
 662}
 663
 664/*
 665 * Called when the cfqq no longer has requests pending, remove it from
 666 * the service tree.
 667 */
 668static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 669{
 670        cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
 671        BUG_ON(!cfq_cfqq_on_rr(cfqq));
 672        cfq_clear_cfqq_on_rr(cfqq);
 673
 674        if (!RB_EMPTY_NODE(&cfqq->rb_node))
 675                cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
 676        if (cfqq->p_root) {
 677                rb_erase(&cfqq->p_node, cfqq->p_root);
 678                cfqq->p_root = NULL;
 679        }
 680
 681        BUG_ON(!cfqd->busy_queues);
 682        cfqd->busy_queues--;
 683}
 684
 685/*
 686 * rb tree support functions
 687 */
 688static void cfq_del_rq_rb(struct request *rq)
 689{
 690        struct cfq_queue *cfqq = RQ_CFQQ(rq);
 691        struct cfq_data *cfqd = cfqq->cfqd;
 692        const int sync = rq_is_sync(rq);
 693
 694        BUG_ON(!cfqq->queued[sync]);
 695        cfqq->queued[sync]--;
 696
 697        elv_rb_del(&cfqq->sort_list, rq);
 698
 699        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
 700                cfq_del_cfqq_rr(cfqd, cfqq);
 701}
 702
 703static void cfq_add_rq_rb(struct request *rq)
 704{
 705        struct cfq_queue *cfqq = RQ_CFQQ(rq);
 706        struct cfq_data *cfqd = cfqq->cfqd;
 707        struct request *__alias, *prev;
 708
 709        cfqq->queued[rq_is_sync(rq)]++;
 710
 711        /*
 712         * looks a little odd, but the first insert might return an alias.
 713         * if that happens, put the alias on the dispatch list
 714         */
 715        while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
 716                cfq_dispatch_insert(cfqd->queue, __alias);
 717
 718        if (!cfq_cfqq_on_rr(cfqq))
 719                cfq_add_cfqq_rr(cfqd, cfqq);
 720
 721        /*
 722         * check if this request is a better next-serve candidate
 723         */
 724        prev = cfqq->next_rq;
 725        cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
 726
 727        /*
 728         * adjust priority tree position, if ->next_rq changes
 729         */
 730        if (prev != cfqq->next_rq)
 731                cfq_prio_tree_add(cfqd, cfqq);
 732
 733        BUG_ON(!cfqq->next_rq);
 734}
 735
 736static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 737{
 738        elv_rb_del(&cfqq->sort_list, rq);
 739        cfqq->queued[rq_is_sync(rq)]--;
 740        cfq_add_rq_rb(rq);
 741}
 742
 743static struct request *
 744cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 745{
 746        struct task_struct *tsk = current;
 747        struct cfq_io_context *cic;
 748        struct cfq_queue *cfqq;
 749
 750        cic = cfq_cic_lookup(cfqd, tsk->io_context);
 751        if (!cic)
 752                return NULL;
 753
 754        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 755        if (cfqq) {
 756                sector_t sector = bio->bi_sector + bio_sectors(bio);
 757
 758                return elv_rb_find(&cfqq->sort_list, sector);
 759        }
 760
 761        return NULL;
 762}
 763
 764static void cfq_activate_request(struct request_queue *q, struct request *rq)
 765{
 766        struct cfq_data *cfqd = q->elevator->elevator_data;
 767
 768        cfqd->rq_in_driver[rq_is_sync(rq)]++;
 769        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
 770                                                rq_in_driver(cfqd));
 771
 772        cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 773}
 774
 775static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
 776{
 777        struct cfq_data *cfqd = q->elevator->elevator_data;
 778        const int sync = rq_is_sync(rq);
 779
 780        WARN_ON(!cfqd->rq_in_driver[sync]);
 781        cfqd->rq_in_driver[sync]--;
 782        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
 783                                                rq_in_driver(cfqd));
 784}
 785
 786static void cfq_remove_request(struct request *rq)
 787{
 788        struct cfq_queue *cfqq = RQ_CFQQ(rq);
 789
 790        if (cfqq->next_rq == rq)
 791                cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
 792
 793        list_del_init(&rq->queuelist);
 794        cfq_del_rq_rb(rq);
 795
 796        cfqq->cfqd->rq_queued--;
 797        if (rq_is_meta(rq)) {
 798                WARN_ON(!cfqq->meta_pending);
 799                cfqq->meta_pending--;
 800        }
 801}
 802
 803static int cfq_merge(struct request_queue *q, struct request **req,
 804                     struct bio *bio)
 805{
 806        struct cfq_data *cfqd = q->elevator->elevator_data;
 807        struct request *__rq;
 808
 809        __rq = cfq_find_rq_fmerge(cfqd, bio);
 810        if (__rq && elv_rq_merge_ok(__rq, bio)) {
 811                *req = __rq;
 812                return ELEVATOR_FRONT_MERGE;
 813        }
 814
 815        return ELEVATOR_NO_MERGE;
 816}
 817
 818static void cfq_merged_request(struct request_queue *q, struct request *req,
 819                               int type)
 820{
 821        if (type == ELEVATOR_FRONT_MERGE) {
 822                struct cfq_queue *cfqq = RQ_CFQQ(req);
 823
 824                cfq_reposition_rq_rb(cfqq, req);
 825        }
 826}
 827
 828static void
 829cfq_merged_requests(struct request_queue *q, struct request *rq,
 830                    struct request *next)
 831{
 832        /*
 833         * reposition in fifo if next is older than rq
 834         */
 835        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
 836            time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
 837                list_move(&rq->queuelist, &next->queuelist);
 838                rq_set_fifo_time(rq, rq_fifo_time(next));
 839        }
 840
 841        cfq_remove_request(next);
 842}
 843
 844static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 845                           struct bio *bio)
 846{
 847        struct cfq_data *cfqd = q->elevator->elevator_data;
 848        struct cfq_io_context *cic;
 849        struct cfq_queue *cfqq;
 850
 851        /*
 852         * Disallow merge of a sync bio into an async request.
 853         */
 854        if (cfq_bio_sync(bio) && !rq_is_sync(rq))
 855                return false;
 856
 857        /*
  858         * Look up the cfqq that this bio will be queued with. Allow
 859         * merge only if rq is queued there.
 860         */
 861        cic = cfq_cic_lookup(cfqd, current->io_context);
 862        if (!cic)
 863                return false;
 864
 865        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 866        return cfqq == RQ_CFQQ(rq);
 867}
 868
 869static void __cfq_set_active_queue(struct cfq_data *cfqd,
 870                                   struct cfq_queue *cfqq)
 871{
 872        if (cfqq) {
 873                cfq_log_cfqq(cfqd, cfqq, "set_active");
 874                cfqq->slice_end = 0;
 875                cfqq->slice_dispatch = 0;
 876
 877                cfq_clear_cfqq_wait_request(cfqq);
 878                cfq_clear_cfqq_must_dispatch(cfqq);
 879                cfq_clear_cfqq_must_alloc_slice(cfqq);
 880                cfq_clear_cfqq_fifo_expire(cfqq);
 881                cfq_mark_cfqq_slice_new(cfqq);
 882
 883                del_timer(&cfqd->idle_slice_timer);
 884        }
 885
 886        cfqd->active_queue = cfqq;
 887}
 888
 889/*
 890 * current cfqq expired its slice (or was too idle), select new one
 891 */
 892static void
 893__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 894                    bool timed_out)
 895{
 896        cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 897
 898        if (cfq_cfqq_wait_request(cfqq))
 899                del_timer(&cfqd->idle_slice_timer);
 900
 901        cfq_clear_cfqq_wait_request(cfqq);
 902
 903        /*
 904         * store what was left of this slice, if the queue idled/timed out
 905         */
 906        if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
 907                cfqq->slice_resid = cfqq->slice_end - jiffies;
 908                cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
 909        }
 910
 911        cfq_resort_rr_list(cfqd, cfqq);
 912
 913        if (cfqq == cfqd->active_queue)
 914                cfqd->active_queue = NULL;
 915
 916        if (cfqd->active_cic) {
 917                put_io_context(cfqd->active_cic->ioc);
 918                cfqd->active_cic = NULL;
 919        }
 920}
 921
 922static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 923{
 924        struct cfq_queue *cfqq = cfqd->active_queue;
 925
 926        if (cfqq)
 927                __cfq_slice_expired(cfqd, cfqq, timed_out);
 928}
 929
 930/*
 931 * Get next queue for service. Unless we have a queue preemption,
 932 * we'll simply select the first cfqq in the service tree.
 933 */
 934static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 935{
 936        if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
 937                return NULL;
 938
 939        return cfq_rb_first(&cfqd->service_tree);
 940}
 941
 942/*
 943 * Get and set a new active queue for service.
 944 */
 945static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 946                                              struct cfq_queue *cfqq)
 947{
 948        if (!cfqq) {
 949                cfqq = cfq_get_next_queue(cfqd);
 950                if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
 951                        cfq_clear_cfqq_coop(cfqq);
 952        }
 953
 954        if (cfqq)
 955                cfq_clear_cfqq_coop_preempt(cfqq);
 956
 957        __cfq_set_active_queue(cfqd, cfqq);
 958        return cfqq;
 959}
 960
 961static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 962                                          struct request *rq)
 963{
 964        if (blk_rq_pos(rq) >= cfqd->last_position)
 965                return blk_rq_pos(rq) - cfqd->last_position;
 966        else
 967                return cfqd->last_position - blk_rq_pos(rq);
 968}
 969
 970#define CIC_SEEK_THR    8 * 1024
 971#define CIC_SEEKY(cic)  ((cic)->seek_mean > CIC_SEEK_THR)
 972
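     /*
      * CIC_SEEK_THR is in sectors: 8 * 1024 sectors is a 4MiB mean seek
      * distance, beyond which a context is considered seeky.
      */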
 973static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
 974{
 975        struct cfq_io_context *cic = cfqd->active_cic;
 976        sector_t sdist = cic->seek_mean;
 977
 978        if (!sample_valid(cic->seek_samples))
 979                sdist = CIC_SEEK_THR;
 980
 981        return cfq_dist_from_last(cfqd, rq) <= sdist;
 982}
 983
 984static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 985                                    struct cfq_queue *cur_cfqq)
 986{
 987        struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
 988        struct rb_node *parent, *node;
 989        struct cfq_queue *__cfqq;
 990        sector_t sector = cfqd->last_position;
 991
 992        if (RB_EMPTY_ROOT(root))
 993                return NULL;
 994
 995        /*
 996         * First, if we find a request starting at the end of the last
 997         * request, choose it.
 998         */
 999        __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1000        if (__cfqq)
1001                return __cfqq;
1002
1003        /*
1004         * If the exact sector wasn't found, the parent of the NULL leaf
1005         * will contain the closest sector.
1006         */
1007        __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1008        if (cfq_rq_close(cfqd, __cfqq->next_rq))
1009                return __cfqq;
1010
1011        if (blk_rq_pos(__cfqq->next_rq) < sector)
1012                node = rb_next(&__cfqq->p_node);
1013        else
1014                node = rb_prev(&__cfqq->p_node);
1015        if (!node)
1016                return NULL;
1017
1018        __cfqq = rb_entry(node, struct cfq_queue, p_node);
1019        if (cfq_rq_close(cfqd, __cfqq->next_rq))
1020                return __cfqq;
1021
1022        return NULL;
1023}
1024
1025/*
1026 * cfqd - obvious
1027 * cur_cfqq - passed in so that we don't decide that the current queue is
1028 *            closely cooperating with itself.
1029 *
 1030 * So, basically we're assuming that cur_cfqq has dispatched at least
1031 * one request, and that cfqd->last_position reflects a position on the disk
1032 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1033 * assumption.
1034 */
1035static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1036                                              struct cfq_queue *cur_cfqq,
1037                                              bool probe)
1038{
1039        struct cfq_queue *cfqq;
1040
1041        /*
1042         * A valid cfq_io_context is necessary to compare requests against
1043         * the seek_mean of the current cfqq.
1044         */
1045        if (!cfqd->active_cic)
1046                return NULL;
1047
1048        /*
 1049         * We should notice if some of the queues are cooperating, e.g.
 1050         * working closely on the same area of the disk. In that case,
 1051         * we can group them together and not waste time idling.
1052         */
1053        cfqq = cfqq_close(cfqd, cur_cfqq);
1054        if (!cfqq)
1055                return NULL;
1056
1057        if (cfq_cfqq_coop(cfqq))
1058                return NULL;
1059
1060        if (!probe)
1061                cfq_mark_cfqq_coop(cfqq);
1062        return cfqq;
1063}
1064
1065static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1066{
1067        struct cfq_queue *cfqq = cfqd->active_queue;
1068        struct cfq_io_context *cic;
1069        unsigned long sl;
1070
1071        /*
1072         * SSD device without seek penalty, disable idling. But only do so
1073         * for devices that support queuing, otherwise we still have a problem
1074         * with sync vs async workloads.
1075         */
1076        if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1077                return;
1078
1079        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1080        WARN_ON(cfq_cfqq_slice_new(cfqq));
1081
1082        /*
1083         * idle is disabled, either manually or by past process history
1084         */
1085        if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
1086                return;
1087
1088        /*
1089         * still requests with the driver, don't idle
1090         */
1091        if (rq_in_driver(cfqd))
1092                return;
1093
1094        /*
1095         * task has exited, don't wait
1096         */
1097        cic = cfqd->active_cic;
1098        if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1099                return;
1100
1101        /*
1102         * If our average think time is larger than the remaining time
1103         * slice, then don't idle. This avoids overrunning the allotted
1104         * time slice.
1105         */
1106        if (sample_valid(cic->ttime_samples) &&
1107            (cfqq->slice_end - jiffies < cic->ttime_mean))
1108                return;
1109
1110        cfq_mark_cfqq_wait_request(cfqq);
1111
1112        /*
1113         * we don't want to idle for seeks, but we do want to allow
1114         * fair distribution of slice time for a process doing back-to-back
 1115         * seeks. So allow a little bit of time for it to submit a new rq
1116         */
1117        sl = cfqd->cfq_slice_idle;
1118        if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
1119                sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
1120
1121        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1122        cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
1123}
1124
1125/*
1126 * Move request from internal lists to the request queue dispatch list.
1127 */
1128static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1129{
1130        struct cfq_data *cfqd = q->elevator->elevator_data;
1131        struct cfq_queue *cfqq = RQ_CFQQ(rq);
1132
1133        cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1134
1135        cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1136        cfq_remove_request(rq);
1137        cfqq->dispatched++;
1138        elv_dispatch_sort(q, rq);
1139
1140        if (cfq_cfqq_sync(cfqq))
1141                cfqd->sync_flight++;
1142}
1143
1144/*
1145 * return expired entry, or NULL to just start from scratch in rbtree
1146 */
1147static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1148{
1149        struct request *rq = NULL;
1150
1151        if (cfq_cfqq_fifo_expire(cfqq))
1152                return NULL;
1153
1154        cfq_mark_cfqq_fifo_expire(cfqq);
1155
1156        if (list_empty(&cfqq->fifo))
1157                return NULL;
1158
1159        rq = rq_entry_fifo(cfqq->fifo.next);
1160        if (time_before(jiffies, rq_fifo_time(rq)))
1161                rq = NULL;
1162
1163        cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
1164        return rq;
1165}
1166
1167static inline int
1168cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1169{
1170        const int base_rq = cfqd->cfq_slice_async_rq;
1171
1172        WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1173
1174        return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1175}
1176
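     /*
      * Worked example for cfq_prio_to_maxrq() above (assuming the default
      * cfq_slice_async_rq of 2 and CFQ_PRIO_LISTS = 8): ioprio 0 allows up
      * to 2 * (2 + 2 * 7) = 32 requests per slice, ioprio 4 up to 16, and
      * ioprio 7 only 4.
      */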
1177/*
1178 * Select a queue for service. If we have a current active queue,
1179 * check whether to continue servicing it, or retrieve and set a new one.
1180 */
1181static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1182{
1183        struct cfq_queue *cfqq, *new_cfqq = NULL;
1184
1185        cfqq = cfqd->active_queue;
1186        if (!cfqq)
1187                goto new_queue;
1188
1189        /*
1190         * The active queue has run out of time, expire it and select new.
1191         */
1192        if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
1193                goto expire;
1194
1195        /*
1196         * The active queue has requests and isn't expired, allow it to
1197         * dispatch.
1198         */
1199        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
1200                goto keep_queue;
1201
1202        /*
1203         * If another queue has a request waiting within our mean seek
1204         * distance, let it run.  The expire code will check for close
1205         * cooperators and put the close queue at the front of the service
1206         * tree.
1207         */
1208        new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
1209        if (new_cfqq)
1210                goto expire;
1211
1212        /*
1213         * No requests pending. If the active queue still has requests in
1214         * flight or is idling for a new request, allow either of these
1215         * conditions to happen (or time out) before selecting a new queue.
1216         */
1217        if (timer_pending(&cfqd->idle_slice_timer) ||
1218            (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
1219                cfqq = NULL;
1220                goto keep_queue;
1221        }
1222
1223expire:
1224        cfq_slice_expired(cfqd, 0);
1225new_queue:
1226        cfqq = cfq_set_active_queue(cfqd, new_cfqq);
1227keep_queue:
1228        return cfqq;
1229}
1230
1231static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
1232{
1233        int dispatched = 0;
1234
1235        while (cfqq->next_rq) {
1236                cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1237                dispatched++;
1238        }
1239
1240        BUG_ON(!list_empty(&cfqq->fifo));
1241        return dispatched;
1242}
1243
1244/*
1245 * Drain our current requests. Used for barriers and when switching
1246 * io schedulers on-the-fly.
1247 */
1248static int cfq_forced_dispatch(struct cfq_data *cfqd)
1249{
1250        struct cfq_queue *cfqq;
1251        int dispatched = 0;
1252
1253        while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
1254                dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1255
1256        cfq_slice_expired(cfqd, 0);
1257
1258        BUG_ON(cfqd->busy_queues);
1259
1260        cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1261        return dispatched;
1262}
1263
1264static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1265{
1266        unsigned int max_dispatch;
1267
1268        /*
1269         * Drain async requests before we start sync IO
1270         */
1271        if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
1272                return false;
1273
1274        /*
1275         * If this is an async queue and we have sync IO in flight, let it wait
1276         */
1277        if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
1278                return false;
1279
1280        max_dispatch = cfqd->cfq_quantum;
1281        if (cfq_class_idle(cfqq))
1282                max_dispatch = 1;
1283
1284        /*
1285         * Does this cfqq already have too much IO in flight?
1286         */
1287        if (cfqq->dispatched >= max_dispatch) {
1288                /*
1289                 * idle queue must always only have a single IO in flight
1290                 */
1291                if (cfq_class_idle(cfqq))
1292                        return false;
1293
1294                /*
1295                 * We have other queues, don't allow more IO from this one
1296                 */
1297                if (cfqd->busy_queues > 1)
1298                        return false;
1299
1300                /*
1301                 * Sole queue user, allow bigger slice
1302                 */
1303                max_dispatch *= 4;
1304        }
1305
1306        /*
 1307         * Async queues must wait a bit before being allowed to dispatch.
1308         * We also ramp up the dispatch depth gradually for async IO,
1309         * based on the last sync IO we serviced
1310         */
1311        if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
1312                unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
1313                unsigned int depth;
1314
1315                depth = last_sync / cfqd->cfq_slice[1];
1316                if (!depth && !cfqq->dispatched)
1317                        depth = 1;
1318                if (depth < max_dispatch)
1319                        max_dispatch = depth;
1320        }
1321
1322        /*
1323         * If we're below the current max, allow a dispatch
1324         */
1325        return cfqq->dispatched < max_dispatch;
1326}
1327
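     /*
      * Worked example for the async ramp-up in cfq_may_dispatch() above
      * (assuming the default sync slice of 100ms): if the last sync request
      * completed 250ms ago, depth = 250 / 100 = 2 and at most two async
      * requests may be in flight; right after a sync completion depth is 0,
      * and a single request is only let through if nothing is in flight.
      */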
1328/*
 1329 * Dispatch a request from cfqq, moving it to the request queue
1330 * dispatch list.
1331 */
1332static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1333{
1334        struct request *rq;
1335
1336        BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
1337
1338        if (!cfq_may_dispatch(cfqd, cfqq))
1339                return false;
1340
1341        /*
1342         * follow expired path, else get first next available
1343         */
1344        rq = cfq_check_fifo(cfqq);
1345        if (!rq)
1346                rq = cfqq->next_rq;
1347
1348        /*
1349         * insert request into driver dispatch list
1350         */
1351        cfq_dispatch_insert(cfqd->queue, rq);
1352
1353        if (!cfqd->active_cic) {
1354                struct cfq_io_context *cic = RQ_CIC(rq);
1355
1356                atomic_long_inc(&cic->ioc->refcount);
1357                cfqd->active_cic = cic;
1358        }
1359
1360        return true;
1361}
1362
1363/*
1364 * Find the cfqq that we need to service and move a request from that to the
1365 * dispatch list
1366 */
1367static int cfq_dispatch_requests(struct request_queue *q, int force)
1368{
1369        struct cfq_data *cfqd = q->elevator->elevator_data;
1370        struct cfq_queue *cfqq;
1371
1372        if (!cfqd->busy_queues)
1373                return 0;
1374
1375        if (unlikely(force))
1376                return cfq_forced_dispatch(cfqd);
1377
1378        cfqq = cfq_select_queue(cfqd);
1379        if (!cfqq)
1380                return 0;
1381
1382        /*
1383         * Dispatch a request from this cfqq, if it is allowed
1384         */
1385        if (!cfq_dispatch_request(cfqd, cfqq))
1386                return 0;
1387
1388        cfqq->slice_dispatch++;
1389        cfq_clear_cfqq_must_dispatch(cfqq);
1390
1391        /*
1392         * expire an async queue immediately if it has used up its slice. idle
 1393         * queues always expire after 1 dispatch round.
1394         */
1395        if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
1396            cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1397            cfq_class_idle(cfqq))) {
1398                cfqq->slice_end = jiffies + 1;
1399                cfq_slice_expired(cfqd, 0);
1400        }
1401
1402        cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
1403        return 1;
1404}
1405
1406/*
1407 * task holds one reference to the queue, dropped when task exits. each rq
1408 * in-flight on this queue also holds a reference, dropped when rq is freed.
1409 *
1410 * queue lock must be held here.
1411 */
1412static void cfq_put_queue(struct cfq_queue *cfqq)
1413{
1414        struct cfq_data *cfqd = cfqq->cfqd;
1415
1416        BUG_ON(atomic_read(&cfqq->ref) <= 0);
1417
1418        if (!atomic_dec_and_test(&cfqq->ref))
1419                return;
1420
1421        cfq_log_cfqq(cfqd, cfqq, "put_queue");
1422        BUG_ON(rb_first(&cfqq->sort_list));
1423        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1424        BUG_ON(cfq_cfqq_on_rr(cfqq));
1425
1426        if (unlikely(cfqd->active_queue == cfqq)) {
1427                __cfq_slice_expired(cfqd, cfqq, 0);
1428                cfq_schedule_dispatch(cfqd);
1429        }
1430
1431        kmem_cache_free(cfq_pool, cfqq);
1432}
1433
1434/*
1435 * Must always be called with the rcu_read_lock() held
1436 */
1437static void
1438__call_for_each_cic(struct io_context *ioc,
1439                    void (*func)(struct io_context *, struct cfq_io_context *))
1440{
1441        struct cfq_io_context *cic;
1442        struct hlist_node *n;
1443
1444        hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
1445                func(ioc, cic);
1446}
1447
1448/*
1449 * Call func for each cic attached to this ioc.
1450 */
1451static void
1452call_for_each_cic(struct io_context *ioc,
1453                  void (*func)(struct io_context *, struct cfq_io_context *))
1454{
1455        rcu_read_lock();
1456        __call_for_each_cic(ioc, func);
1457        rcu_read_unlock();
1458}
1459
1460static void cfq_cic_free_rcu(struct rcu_head *head)
1461{
1462        struct cfq_io_context *cic;
1463
1464        cic = container_of(head, struct cfq_io_context, rcu_head);
1465
1466        kmem_cache_free(cfq_ioc_pool, cic);
1467        elv_ioc_count_dec(cfq_ioc_count);
1468
1469        if (ioc_gone) {
1470                /*
1471                 * CFQ scheduler is exiting, grab exit lock and check
1472                 * the pending io context count. If it hits zero,
1473                 * complete ioc_gone and set it back to NULL
1474                 */
1475                spin_lock(&ioc_gone_lock);
1476                if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
1477                        complete(ioc_gone);
1478                        ioc_gone = NULL;
1479                }
1480                spin_unlock(&ioc_gone_lock);
1481        }
1482}
1483
1484static void cfq_cic_free(struct cfq_io_context *cic)
1485{
1486        call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
1487}
1488
1489static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
1490{
1491        unsigned long flags;
1492
1493        BUG_ON(!cic->dead_key);
1494
1495        spin_lock_irqsave(&ioc->lock, flags);
1496        radix_tree_delete(&ioc->radix_root, cic->dead_key);
1497        hlist_del_rcu(&cic->cic_list);
1498        spin_unlock_irqrestore(&ioc->lock, flags);
1499
1500        cfq_cic_free(cic);
1501}
1502
1503/*
1504 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
 1505 * There are only two callers of this: ->dtor(), which is called with the
 1506 * rcu_read_lock() held, and ->trim(), which is called with the task lock held.
1507 */
1508static void cfq_free_io_context(struct io_context *ioc)
1509{
1510        /*
1511         * ioc->refcount is zero here, or we are called from elv_unregister(),
1512         * so no more cic's are allowed to be linked into this ioc.  So it
1513         * should be ok to iterate over the known list, we will see all cic's
1514         * since no new ones are added.
1515         */
1516        __call_for_each_cic(ioc, cic_free_func);
1517}
1518
1519static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1520{
1521        if (unlikely(cfqq == cfqd->active_queue)) {
1522                __cfq_slice_expired(cfqd, cfqq, 0);
1523                cfq_schedule_dispatch(cfqd);
1524        }
1525
1526        cfq_put_queue(cfqq);
1527}
1528
1529static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1530                                         struct cfq_io_context *cic)
1531{
1532        struct io_context *ioc = cic->ioc;
1533
1534        list_del_init(&cic->queue_list);
1535
1536        /*
1537         * Make sure key == NULL is seen for dead queues
1538         */
1539        smp_wmb();
1540        cic->dead_key = (unsigned long) cic->key;
1541        cic->key = NULL;
1542
1543        if (ioc->ioc_data == cic)
1544                rcu_assign_pointer(ioc->ioc_data, NULL);
1545
1546        if (cic->cfqq[BLK_RW_ASYNC]) {
1547                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
1548                cic->cfqq[BLK_RW_ASYNC] = NULL;
1549        }
1550
1551        if (cic->cfqq[BLK_RW_SYNC]) {
1552                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
1553                cic->cfqq[BLK_RW_SYNC] = NULL;
1554        }
1555}
1556
1557static void cfq_exit_single_io_context(struct io_context *ioc,
1558                                       struct cfq_io_context *cic)
1559{
1560        struct cfq_data *cfqd = cic->key;
1561
1562        if (cfqd) {
1563                struct request_queue *q = cfqd->queue;
1564                unsigned long flags;
1565
1566                spin_lock_irqsave(q->queue_lock, flags);
1567
1568                /*
1569                 * Ensure we get a fresh copy of the ->key to prevent
1570                 * race between exiting task and queue
1571                 */
1572                smp_read_barrier_depends();
1573                if (cic->key)
1574                        __cfq_exit_single_io_context(cfqd, cic);
1575
1576                spin_unlock_irqrestore(q->queue_lock, flags);
1577        }
1578}
1579
1580/*
 1581 * The process that ioc belongs to has exited; we need to clean up
 1582 * and put the internal structures we have that belong to that process.
1583 */
1584static void cfq_exit_io_context(struct io_context *ioc)
1585{
1586        call_for_each_cic(ioc, cfq_exit_single_io_context);
1587}
1588
1589static struct cfq_io_context *
1590cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1591{
1592        struct cfq_io_context *cic;
1593
1594        cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
1595                                                        cfqd->queue->node);
1596        if (cic) {
1597                cic->last_end_request = jiffies;
1598                INIT_LIST_HEAD(&cic->queue_list);
1599                INIT_HLIST_NODE(&cic->cic_list);
1600                cic->dtor = cfq_free_io_context;
1601                cic->exit = cfq_exit_io_context;
1602                elv_ioc_count_inc(cfq_ioc_count);
1603        }
1604
1605        return cic;
1606}
1607
1608static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
1609{
1610        struct task_struct *tsk = current;
1611        int ioprio_class;
1612
1613        if (!cfq_cfqq_prio_changed(cfqq))
1614                return;
1615
1616        ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
1617        switch (ioprio_class) {
1618        default:
1619                printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1620        case IOPRIO_CLASS_NONE:
1621                /*
1622                 * no prio set, inherit CPU scheduling settings
1623                 */
1624                cfqq->ioprio = task_nice_ioprio(tsk);
1625                cfqq->ioprio_class = task_nice_ioclass(tsk);
1626                break;
1627        case IOPRIO_CLASS_RT:
1628                cfqq->ioprio = task_ioprio(ioc);
1629                cfqq->ioprio_class = IOPRIO_CLASS_RT;
1630                break;
1631        case IOPRIO_CLASS_BE:
1632                cfqq->ioprio = task_ioprio(ioc);
1633                cfqq->ioprio_class = IOPRIO_CLASS_BE;
1634                break;
1635        case IOPRIO_CLASS_IDLE:
1636                cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1637                cfqq->ioprio = 7;
1638                cfq_clear_cfqq_idle_window(cfqq);
1639                break;
1640        }
1641
1642        /*
1643         * keep track of original prio settings in case we have to temporarily
1644         * elevate the priority of this queue
1645         */
1646        cfqq->org_ioprio = cfqq->ioprio;
1647        cfqq->org_ioprio_class = cfqq->ioprio_class;
1648        cfq_clear_cfqq_prio_changed(cfqq);
1649}
1650
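    /*
     * Worked example of the mapping above: a task with no explicit ioprio
     * running at nice 0 inherits IOPRIO_CLASS_BE at level 4 via
     * task_nice_ioclass()/task_nice_ioprio(), while a task that picked the
     * idle class (e.g. through the ioprio_set() syscall) is pinned to
     * level 7 and loses its idle window, so CFQ never waits on its behalf.
     */
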
1651static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
1652{
1653        struct cfq_data *cfqd = cic->key;
1654        struct cfq_queue *cfqq;
1655        unsigned long flags;
1656
1657        if (unlikely(!cfqd))
1658                return;
1659
1660        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1661
1662        cfqq = cic->cfqq[BLK_RW_ASYNC];
1663        if (cfqq) {
1664                struct cfq_queue *new_cfqq;
1665                new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
1666                                                GFP_ATOMIC);
1667                if (new_cfqq) {
1668                        cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
1669                        cfq_put_queue(cfqq);
1670                }
1671        }
1672
1673        cfqq = cic->cfqq[BLK_RW_SYNC];
1674        if (cfqq)
1675                cfq_mark_cfqq_prio_changed(cfqq);
1676
1677        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1678}
1679
1680static void cfq_ioc_set_ioprio(struct io_context *ioc)
1681{
1682        call_for_each_cic(ioc, changed_ioprio);
1683        ioc->ioprio_changed = 0;
1684}
1685
1686static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1687                          pid_t pid, bool is_sync)
1688{
1689        RB_CLEAR_NODE(&cfqq->rb_node);
1690        RB_CLEAR_NODE(&cfqq->p_node);
1691        INIT_LIST_HEAD(&cfqq->fifo);
1692
1693        atomic_set(&cfqq->ref, 0);
1694        cfqq->cfqd = cfqd;
1695
1696        cfq_mark_cfqq_prio_changed(cfqq);
1697
1698        if (is_sync) {
1699                if (!cfq_class_idle(cfqq))
1700                        cfq_mark_cfqq_idle_window(cfqq);
1701                cfq_mark_cfqq_sync(cfqq);
1702        }
1703        cfqq->pid = pid;
1704}
1705
1706static struct cfq_queue *
1707cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
1708                     struct io_context *ioc, gfp_t gfp_mask)
1709{
1710        struct cfq_queue *cfqq, *new_cfqq = NULL;
1711        struct cfq_io_context *cic;
1712
1713retry:
1714        cic = cfq_cic_lookup(cfqd, ioc);
1715        /* cic always exists here */
1716        cfqq = cic_to_cfqq(cic, is_sync);
1717
1718        /*
1719         * Always try a new alloc if we fell back to the OOM cfqq
1720         * originally, since it should just be a temporary situation.
1721         */
1722        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
1723                cfqq = NULL;
1724                if (new_cfqq) {
1725                        cfqq = new_cfqq;
1726                        new_cfqq = NULL;
1727                } else if (gfp_mask & __GFP_WAIT) {
1728                        spin_unlock_irq(cfqd->queue->queue_lock);
1729                        new_cfqq = kmem_cache_alloc_node(cfq_pool,
1730                                        gfp_mask | __GFP_ZERO,
1731                                        cfqd->queue->node);
1732                        spin_lock_irq(cfqd->queue->queue_lock);
1733                        if (new_cfqq)
1734                                goto retry;
1735                } else {
1736                        cfqq = kmem_cache_alloc_node(cfq_pool,
1737                                        gfp_mask | __GFP_ZERO,
1738                                        cfqd->queue->node);
1739                }
1740
1741                if (cfqq) {
1742                        cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
1743                        cfq_init_prio_data(cfqq, ioc);
1744                        cfq_log_cfqq(cfqd, cfqq, "alloced");
1745                } else
1746                        cfqq = &cfqd->oom_cfqq;
1747        }
1748
1749        if (new_cfqq)
1750                kmem_cache_free(cfq_pool, new_cfqq);
1751
1752        return cfqq;
1753}
1754
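    /*
     * A note on the retry dance in cfq_find_alloc_queue(): when the caller
     * may sleep, the queue_lock is dropped, a cfq_queue is allocated with
     * __GFP_WAIT and we jump back to "retry", since another task may have
     * installed a queue for this cic while the lock was released; any spare
     * allocation is freed at the end.  If allocation still fails, the
     * embedded oom_cfqq is handed out so the request path never fails here.
     */
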
1755static struct cfq_queue **
1756cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
1757{
1758        switch (ioprio_class) {
1759        case IOPRIO_CLASS_RT:
1760                return &cfqd->async_cfqq[0][ioprio];
1761        case IOPRIO_CLASS_BE:
1762                return &cfqd->async_cfqq[1][ioprio];
1763        case IOPRIO_CLASS_IDLE:
1764                return &cfqd->async_idle_cfqq;
1765        default:
1766                BUG();
1767        }
1768}
1769
1770static struct cfq_queue *
1771cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
1772              gfp_t gfp_mask)
1773{
1774        const int ioprio = task_ioprio(ioc);
1775        const int ioprio_class = task_ioprio_class(ioc);
1776        struct cfq_queue **async_cfqq = NULL;
1777        struct cfq_queue *cfqq = NULL;
1778
1779        if (!is_sync) {
1780                async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
1781                cfqq = *async_cfqq;
1782        }
1783
1784        if (!cfqq)
1785                cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
1786
1787        /*
1788         * pin the queue now that it's allocated, scheduler exit will prune it
1789         */
1790        if (!is_sync && !(*async_cfqq)) {
1791                atomic_inc(&cfqq->ref);
1792                *async_cfqq = cfqq;
1793        }
1794
1795        atomic_inc(&cfqq->ref);
1796        return cfqq;
1797}
1798
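    /*
     * Example of how the pinning above plays out: two tasks both submitting
     * best-effort, priority 4 async writes share the single
     * cfqd->async_cfqq[1][4] queue (async queues are per priority class and
     * level, not per process), while their sync reads each get a private
     * queue from cfq_find_alloc_queue().
     */
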
1799/*
1800 * We drop cfq io contexts lazily, so we may find a dead one.
1801 */
1802static void
1803cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
1804                  struct cfq_io_context *cic)
1805{
1806        unsigned long flags;
1807
1808        WARN_ON(!list_empty(&cic->queue_list));
1809
1810        spin_lock_irqsave(&ioc->lock, flags);
1811
1812        BUG_ON(ioc->ioc_data == cic);
1813
1814        radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
1815        hlist_del_rcu(&cic->cic_list);
1816        spin_unlock_irqrestore(&ioc->lock, flags);
1817
1818        cfq_cic_free(cic);
1819}
1820
1821static struct cfq_io_context *
1822cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1823{
1824        struct cfq_io_context *cic;
1825        unsigned long flags;
1826        void *k;
1827
1828        if (unlikely(!ioc))
1829                return NULL;
1830
1831        rcu_read_lock();
1832
1833        /*
1834         * we maintain a last-hit cache, to avoid browsing over the tree
1835         */
1836        cic = rcu_dereference(ioc->ioc_data);
1837        if (cic && cic->key == cfqd) {
1838                rcu_read_unlock();
1839                return cic;
1840        }
1841
1842        do {
1843                cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
1844                rcu_read_unlock();
1845                if (!cic)
1846                        break;
1847                /* ->key must be copied to avoid race with cfq_exit_queue() */
1848                k = cic->key;
1849                if (unlikely(!k)) {
1850                        cfq_drop_dead_cic(cfqd, ioc, cic);
1851                        rcu_read_lock();
1852                        continue;
1853                }
1854
1855                spin_lock_irqsave(&ioc->lock, flags);
1856                rcu_assign_pointer(ioc->ioc_data, cic);
1857                spin_unlock_irqrestore(&ioc->lock, flags);
1858                break;
1859        } while (1);
1860
1861        return cic;
1862}
1863
1864/*
1865 * Add cic into ioc, using cfqd as the search key. This enables us to look up
1866 * the process specific cfq io context when entered from the block layer.
1867 * Also adds the cic to a per-cfqd list, used when this queue is removed.
1868 */
1869static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1870                        struct cfq_io_context *cic, gfp_t gfp_mask)
1871{
1872        unsigned long flags;
1873        int ret;
1874
1875        ret = radix_tree_preload(gfp_mask);
1876        if (!ret) {
1877                cic->ioc = ioc;
1878                cic->key = cfqd;
1879
1880                spin_lock_irqsave(&ioc->lock, flags);
1881                ret = radix_tree_insert(&ioc->radix_root,
1882                                                (unsigned long) cfqd, cic);
1883                if (!ret)
1884                        hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
1885                spin_unlock_irqrestore(&ioc->lock, flags);
1886
1887                radix_tree_preload_end();
1888
1889                if (!ret) {
1890                        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1891                        list_add(&cic->queue_list, &cfqd->cic_list);
1892                        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1893                }
1894        }
1895
1896        if (ret)
1897                printk(KERN_ERR "cfq: cic link failed!\n");
1898
1899        return ret;
1900}
1901
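    /*
     * cfq_cic_link() is the usual radix tree preload pattern, roughly:
     *
     *        radix_tree_preload(gfp_mask);        <- may sleep, no lock held
     *        spin_lock_irqsave(&ioc->lock, flags);
     *        radix_tree_insert(&ioc->radix_root, key, cic);
     *        spin_unlock_irqrestore(&ioc->lock, flags);
     *        radix_tree_preload_end();
     *
     * so the node allocation happens before the spinlock is taken and the
     * insert itself cannot fail for lack of memory (it can still return
     * -EEXIST if the key is already present).
     */
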
1902/*
1903 * Setup general io context and cfq io context. There can be several cfq
1904 * io contexts per general io context, if this process is doing io to more
1905 * than one device managed by cfq.
1906 */
1907static struct cfq_io_context *
1908cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1909{
1910        struct io_context *ioc = NULL;
1911        struct cfq_io_context *cic;
1912
1913        might_sleep_if(gfp_mask & __GFP_WAIT);
1914
1915        ioc = get_io_context(gfp_mask, cfqd->queue->node);
1916        if (!ioc)
1917                return NULL;
1918
1919        cic = cfq_cic_lookup(cfqd, ioc);
1920        if (cic)
1921                goto out;
1922
1923        cic = cfq_alloc_io_context(cfqd, gfp_mask);
1924        if (cic == NULL)
1925                goto err;
1926
1927        if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
1928                goto err_free;
1929
1930out:
1931        smp_read_barrier_depends();
1932        if (unlikely(ioc->ioprio_changed))
1933                cfq_ioc_set_ioprio(ioc);
1934
1935        return cic;
1936err_free:
1937        cfq_cic_free(cic);
1938err:
1939        put_io_context(ioc);
1940        return NULL;
1941}
1942
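    /*
     * For example, a task doing reads on two cfq-managed disks ends up with
     * one io_context but two cfq_io_contexts, one per cfq_data; the lookup
     * and link helpers above keep them apart by keying the per-ioc radix
     * tree with the cfqd pointer.
     */
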
1943static void
1944cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1945{
1946        unsigned long elapsed = jiffies - cic->last_end_request;
1947        unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1948
1949        cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1950        cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1951        cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1952}
1953
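    /*
     * The think time stats above are a fixed-point EWMA with 7/8 decay and
     * a scale of 256: ttime_samples converges towards 256 (the fixed point
     * of s = (7*s + 256) / 8), and ttime_mean then tracks roughly the
     * average number of jiffies between a sync completion and the next
     * request from this context, each sample being clamped to twice
     * cfq_slice_idle.
     */
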
1954static void
1955cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1956                       struct request *rq)
1957{
1958        sector_t sdist;
1959        u64 total;
1960
1961        if (!cic->last_request_pos)
1962                sdist = 0;
1963        else if (cic->last_request_pos < blk_rq_pos(rq))
1964                sdist = blk_rq_pos(rq) - cic->last_request_pos;
1965        else
1966                sdist = cic->last_request_pos - blk_rq_pos(rq);
1967
1968        /*
1969         * Don't allow the seek distance to get too large from the
1970         * odd fragment, pagein, etc
1971         */
1972        if (cic->seek_samples <= 60) /* second&third seek */
1973                sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1974        else
1975                sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1976
1977        cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1978        cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1979        total = cic->seek_total + (cic->seek_samples/2);
1980        do_div(total, cic->seek_samples);
1981        cic->seek_mean = (sector_t)total;
1982}
1983
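    /*
     * The seek stats use the same 7/8-decay, 256-scale fixed point scheme,
     * but measured in sectors.  Clamping each sample to roughly four times
     * the current mean keeps one pathological seek (an odd page-in at the
     * far end of the disk, say) from blowing up seek_mean, and do_div() is
     * needed because the running total is a 64-bit quantity even on 32-bit
     * builds.
     */
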
1984/*
1985 * Disable idle window if the process thinks too long or seeks so much that
1986 * it doesn't matter
1987 */
1988static void
1989cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1990                       struct cfq_io_context *cic)
1991{
1992        int old_idle, enable_idle;
1993
1994        /*
1995         * Don't idle for async or idle io prio class
1996         */
1997        if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1998                return;
1999
2000        enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
2001
2002        if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
2003            (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
2004                enable_idle = 0;
2005        else if (sample_valid(cic->ttime_samples)) {
2006                unsigned int slice_idle = cfqd->cfq_slice_idle;
2007                if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
2008                        slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
2009                if (cic->ttime_mean > slice_idle)
2010                        enable_idle = 0;
2011                else
2012                        enable_idle = 1;
2013        }
2014
2015        if (old_idle != enable_idle) {
2016                cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
2017                if (enable_idle)
2018                        cfq_mark_cfqq_idle_window(cfqq);
2019                else
2020                        cfq_clear_cfqq_idle_window(cfqq);
2021        }
2022}
2023
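    /*
     * In practice this means a queue whose owner, on average, takes longer
     * than cfq_slice_idle to send its next request has its idle window
     * cleared, so the disk is not held idle waiting on it; seeky processes
     * on hardware that queues internally get the same treatment unless the
     * low_latency tunable is set.
     */
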
2024/*
2025 * Check if new_cfqq should preempt the currently active queue. Return false
2026 * for no, or if we aren't sure; returning true will cause a preempt.
2027 */
2028static bool
2029cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
2030                   struct request *rq)
2031{
2032        struct cfq_queue *cfqq;
2033
2034        cfqq = cfqd->active_queue;
2035        if (!cfqq)
2036                return false;
2037
2038        if (cfq_slice_used(cfqq))
2039                return true;
2040
2041        if (cfq_class_idle(new_cfqq))
2042                return false;
2043
2044        if (cfq_class_idle(cfqq))
2045                return true;
2046
2047        /*
2048         * if the new request is sync, but the currently running queue is
2049         * not, let the sync request have priority.
2050         */
2051        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
2052                return true;
2053
2054        /*
2055         * So both queues are sync. Let the new request get disk time if
2056         * it's a metadata request and the current queue is doing regular IO.
2057         */
2058        if (rq_is_meta(rq) && !cfqq->meta_pending)
2059                return true;
2060
2061        /*
2062         * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
2063         */
2064        if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
2065                return true;
2066
2067        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
2068                return false;
2069
2070        /*
2071         * if this request is as good as one we would expect from the
2072         * current cfqq, let it preempt
2073         */
2074        if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
2075            cfqd->busy_queues == 1)) {
2076                /*
2077                 * Mark the new queue coop_preempt, so its coop flag will not be
2078                 * cleared when the new queue gets scheduled for the first time
2079                 */
2080                cfq_mark_cfqq_coop_preempt(new_cfqq);
2081                cfq_mark_cfqq_coop(new_cfqq);
2082                return true;
2083        }
2084
2085        return false;
2086}
2087
2088/*
2089 * cfqq preempts the active queue. if we allowed preempt with no slice left,
2090 * let it have half of its nominal slice.
2091 */
2092static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2093{
2094        cfq_log_cfqq(cfqd, cfqq, "preempt");
2095        cfq_slice_expired(cfqd, 1);
2096
2097        /*
2098         * Put the new queue at the front of the current list,
2099         * so we know that it will be selected next.
2100         */
2101        BUG_ON(!cfq_cfqq_on_rr(cfqq));
2102
2103        cfq_service_tree_add(cfqd, cfqq, 1);
2104
2105        cfqq->slice_end = 0;
2106        cfq_mark_cfqq_slice_new(cfqq);
2107}
2108
2109/*
2110 * Called when a new fs request (rq) is added (to cfqq). Check if there's
2111 * something we should do about it
2112 */
2113static void
2114cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2115                struct request *rq)
2116{
2117        struct cfq_io_context *cic = RQ_CIC(rq);
2118
2119        cfqd->rq_queued++;
2120        if (rq_is_meta(rq))
2121                cfqq->meta_pending++;
2122
2123        cfq_update_io_thinktime(cfqd, cic);
2124        cfq_update_io_seektime(cfqd, cic, rq);
2125        cfq_update_idle_window(cfqd, cfqq, cic);
2126
2127        cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
2128
2129        if (cfqq == cfqd->active_queue) {
2130                /*
2131                 * Remember that we saw a request from this process, but
2132                 * don't start queuing just yet. Otherwise we risk seeing lots
2133                 * of tiny requests, because we disrupt the normal plugging
2134                 * and merging. If the request is already larger than a single
2135                 * page, let it rip immediately. For that case we assume that
2136                 * merging is already done. Ditto for a busy system that
2137                 * has other work pending, don't risk delaying until the
2138                 * idle timer unplug to continue working.
2139                 */
2140                if (cfq_cfqq_wait_request(cfqq)) {
2141                        if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
2142                            cfqd->busy_queues > 1) {
2143                                del_timer(&cfqd->idle_slice_timer);
2144                                __blk_run_queue(cfqd->queue);
2145                        }
2146                        cfq_mark_cfqq_must_dispatch(cfqq);
2147                }
2148        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
2149                /*
2150                 * not the active queue - expire current slice if it is
2151         * idle and has expired its mean thinktime or this new queue
2152                 * has some old slice time left and is of higher priority or
2153                 * this new queue is RT and the current one is BE
2154                 */
2155                cfq_preempt_queue(cfqd, cfqq);
2156                __blk_run_queue(cfqd->queue);
2157        }
2158}
2159
2160static void cfq_insert_request(struct request_queue *q, struct request *rq)
2161{
2162        struct cfq_data *cfqd = q->elevator->elevator_data;
2163        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2164
2165        cfq_log_cfqq(cfqd, cfqq, "insert_request");
2166        cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
2167
2168        cfq_add_rq_rb(rq);
2169
2170        rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
2171        list_add_tail(&rq->queuelist, &cfqq->fifo);
2172
2173        cfq_rq_enqueued(cfqd, cfqq, rq);
2174}
2175
2176/*
2177 * Update hw_tag based on peak queue depth over 50 samples under
2178 * sufficient load.
2179 */
2180static void cfq_update_hw_tag(struct cfq_data *cfqd)
2181{
2182        if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
2183                cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
2184
2185        if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
2186            rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
2187                return;
2188
2189        if (cfqd->hw_tag_samples++ < 50)
2190                return;
2191
2192        if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
2193                cfqd->hw_tag = 1;
2194        else
2195                cfqd->hw_tag = 0;
2196
2197        cfqd->hw_tag_samples = 0;
2198        cfqd->rq_in_driver_peak = 0;
2199}
2200
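    /*
     * Put differently: hw_tag_samples only advances while either the
     * scheduler or the driver holds more than CFQ_HW_QUEUE_MIN requests.
     * Once 50 such samples accumulate, a peak driver depth of at least
     * CFQ_HW_QUEUE_MIN is taken to mean the device queues internally
     * (hw_tag = 1), otherwise hw_tag is cleared, and the counters reset
     * for the next window.
     */
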
2201static void cfq_completed_request(struct request_queue *q, struct request *rq)
2202{
2203        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2204        struct cfq_data *cfqd = cfqq->cfqd;
2205        const int sync = rq_is_sync(rq);
2206        unsigned long now;
2207
2208        now = jiffies;
2209        cfq_log_cfqq(cfqd, cfqq, "complete");
2210
2211        cfq_update_hw_tag(cfqd);
2212
2213        WARN_ON(!cfqd->rq_in_driver[sync]);
2214        WARN_ON(!cfqq->dispatched);
2215        cfqd->rq_in_driver[sync]--;
2216        cfqq->dispatched--;
2217
2218        if (cfq_cfqq_sync(cfqq))
2219                cfqd->sync_flight--;
2220
2221        if (sync) {
2222                RQ_CIC(rq)->last_end_request = now;
2223                cfqd->last_end_sync_rq = now;
2224        }
2225
2226        /*
2227         * If this is the active queue, check if it needs to be expired,
2228         * or if we want to idle in case it has no pending requests.
2229         */
2230        if (cfqd->active_queue == cfqq) {
2231                const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
2232
2233                if (cfq_cfqq_slice_new(cfqq)) {
2234                        cfq_set_prio_slice(cfqd, cfqq);
2235                        cfq_clear_cfqq_slice_new(cfqq);
2236                }
2237                /*
2238                 * If there are no requests waiting in this queue, and
2239                 * there are other queues ready to issue requests, AND
2240                 * those other queues are issuing requests within our
2241                 * mean seek distance, give them a chance to run instead
2242                 * of idling.
2243                 */
2244                if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
2245                        cfq_slice_expired(cfqd, 1);
2246                else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
2247                         sync && !rq_noidle(rq))
2248                        cfq_arm_slice_timer(cfqd);
2249        }
2250
2251        if (!rq_in_driver(cfqd))
2252                cfq_schedule_dispatch(cfqd);
2253}
2254
2255/*
2256 * we temporarily boost lower priority queues if they are holding fs exclusive
2257 * resources. they are boosted to normal prio (CLASS_BE/4)
2258 */
2259static void cfq_prio_boost(struct cfq_queue *cfqq)
2260{
2261        if (has_fs_excl()) {
2262                /*
2263                 * boost idle prio on transactions that would lock out other
2264                 * users of the filesystem
2265                 */
2266                if (cfq_class_idle(cfqq))
2267                        cfqq->ioprio_class = IOPRIO_CLASS_BE;
2268                if (cfqq->ioprio > IOPRIO_NORM)
2269                        cfqq->ioprio = IOPRIO_NORM;
2270        } else {
2271                /*
2272                 * check if we need to unboost the queue
2273                 */
2274                if (cfqq->ioprio_class != cfqq->org_ioprio_class)
2275                        cfqq->ioprio_class = cfqq->org_ioprio_class;
2276                if (cfqq->ioprio != cfqq->org_ioprio)
2277                        cfqq->ioprio = cfqq->org_ioprio;
2278        }
2279}
2280
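    /*
     * Example: an idle-class queue whose task currently holds a
     * filesystem-exclusive resource (has_fs_excl()) is treated as
     * best-effort at IOPRIO_NORM so it cannot lock out other filesystem
     * users; once the resource is dropped, the original class and level
     * are restored from org_ioprio_class/org_ioprio.
     */
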
2281static inline int __cfq_may_queue(struct cfq_queue *cfqq)
2282{
2283        if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
2284                cfq_mark_cfqq_must_alloc_slice(cfqq);
2285                return ELV_MQUEUE_MUST;
2286        }
2287
2288        return ELV_MQUEUE_MAY;
2289}
2290
2291static int cfq_may_queue(struct request_queue *q, int rw)
2292{
2293        struct cfq_data *cfqd = q->elevator->elevator_data;
2294        struct task_struct *tsk = current;
2295        struct cfq_io_context *cic;
2296        struct cfq_queue *cfqq;
2297
2298        /*
2299         * don't force setup of a queue from here, as a call to may_queue
2300         * does not necessarily imply that a request actually will be queued.
2301         * so just look up a possibly existing queue, or return 'may queue'
2302         * if that fails
2303         */
2304        cic = cfq_cic_lookup(cfqd, tsk->io_context);
2305        if (!cic)
2306                return ELV_MQUEUE_MAY;
2307
2308        cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
2309        if (cfqq) {
2310                cfq_init_prio_data(cfqq, cic->ioc);
2311                cfq_prio_boost(cfqq);
2312
2313                return __cfq_may_queue(cfqq);
2314        }
2315
2316        return ELV_MQUEUE_MAY;
2317}
2318
2319/*
2320 * queue lock held here
2321 */
2322static void cfq_put_request(struct request *rq)
2323{
2324        struct cfq_queue *cfqq = RQ_CFQQ(rq);
2325
2326        if (cfqq) {
2327                const int rw = rq_data_dir(rq);
2328
2329                BUG_ON(!cfqq->allocated[rw]);
2330                cfqq->allocated[rw]--;
2331
2332                put_io_context(RQ_CIC(rq)->ioc);
2333
2334                rq->elevator_private = NULL;
2335                rq->elevator_private2 = NULL;
2336
2337                cfq_put_queue(cfqq);
2338        }
2339}
2340
2341/*
2342 * Allocate cfq data structures associated with this request.
2343 */
2344static int
2345cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
2346{
2347        struct cfq_data *cfqd = q->elevator->elevator_data;
2348        struct cfq_io_context *cic;
2349        const int rw = rq_data_dir(rq);
2350        const bool is_sync = rq_is_sync(rq);
2351        struct cfq_queue *cfqq;
2352        unsigned long flags;
2353
2354        might_sleep_if(gfp_mask & __GFP_WAIT);
2355
2356        cic = cfq_get_io_context(cfqd, gfp_mask);
2357
2358        spin_lock_irqsave(q->queue_lock, flags);
2359
2360        if (!cic)
2361                goto queue_fail;
2362
2363        cfqq = cic_to_cfqq(cic, is_sync);
2364        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2365                cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
2366                cic_set_cfqq(cic, cfqq, is_sync);
2367        }
2368
2369        cfqq->allocated[rw]++;
2370        atomic_inc(&cfqq->ref);
2371
2372        spin_unlock_irqrestore(q->queue_lock, flags);
2373
2374        rq->elevator_private = cic;
2375        rq->elevator_private2 = cfqq;
2376        return 0;
2377
2378queue_fail:
2379        if (cic)
2380                put_io_context(cic->ioc);
2381
2382        cfq_schedule_dispatch(cfqd);
2383        spin_unlock_irqrestore(q->queue_lock, flags);
2384        cfq_log(cfqd, "set_request fail");
2385        return 1;
2386}
2387
2388static void cfq_kick_queue(struct work_struct *work)
2389{
2390        struct cfq_data *cfqd =
2391                container_of(work, struct cfq_data, unplug_work);
2392        struct request_queue *q = cfqd->queue;
2393
2394        spin_lock_irq(q->queue_lock);
2395        __blk_run_queue(cfqd->queue);
2396        spin_unlock_irq(q->queue_lock);
2397}
2398
2399/*
2400 * Timer running if the active_queue is currently idling inside its time slice
2401 */
2402static void cfq_idle_slice_timer(unsigned long data)
2403{
2404        struct cfq_data *cfqd = (struct cfq_data *) data;
2405        struct cfq_queue *cfqq;
2406        unsigned long flags;
2407        int timed_out = 1;
2408
2409        cfq_log(cfqd, "idle timer fired");
2410
2411        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2412
2413        cfqq = cfqd->active_queue;
2414        if (cfqq) {
2415                timed_out = 0;
2416
2417                /*
2418                 * We saw a request before the queue expired, let it through
2419                 */
2420                if (cfq_cfqq_must_dispatch(cfqq))
2421                        goto out_kick;
2422
2423                /*
2424                 * expired
2425                 */
2426                if (cfq_slice_used(cfqq))
2427                        goto expire;
2428
2429                /*
2430                 * only expire and reinvoke the request handler if there are
2431                 * other queues with pending requests
2432                 */
2433                if (!cfqd->busy_queues)
2434                        goto out_cont;
2435
2436                /*
2437                 * not expired and it has a request pending, let it dispatch
2438                 */
2439                if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2440                        goto out_kick;
2441        }
2442expire:
2443        cfq_slice_expired(cfqd, timed_out);
2444out_kick:
2445        cfq_schedule_dispatch(cfqd);
2446out_cont:
2447        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2448}
2449
2450static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2451{
2452        del_timer_sync(&cfqd->idle_slice_timer);
2453        cancel_work_sync(&cfqd->unplug_work);
2454}
2455
2456static void cfq_put_async_queues(struct cfq_data *cfqd)
2457{
2458        int i;
2459
2460        for (i = 0; i < IOPRIO_BE_NR; i++) {
2461                if (cfqd->async_cfqq[0][i])
2462                        cfq_put_queue(cfqd->async_cfqq[0][i]);
2463                if (cfqd->async_cfqq[1][i])
2464                        cfq_put_queue(cfqd->async_cfqq[1][i]);
2465        }
2466
2467        if (cfqd->async_idle_cfqq)
2468                cfq_put_queue(cfqd->async_idle_cfqq);
2469}
2470
2471static void cfq_exit_queue(struct elevator_queue *e)
2472{
2473        struct cfq_data *cfqd = e->elevator_data;
2474        struct request_queue *q = cfqd->queue;
2475
2476        cfq_shutdown_timer_wq(cfqd);
2477
2478        spin_lock_irq(q->queue_lock);
2479
2480        if (cfqd->active_queue)
2481                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2482
2483        while (!list_empty(&cfqd->cic_list)) {
2484                struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2485                                                        struct cfq_io_context,
2486                                                        queue_list);
2487
2488                __cfq_exit_single_io_context(cfqd, cic);
2489        }
2490
2491        cfq_put_async_queues(cfqd);
2492
2493        spin_unlock_irq(q->queue_lock);
2494
2495        cfq_shutdown_timer_wq(cfqd);
2496
2497        kfree(cfqd);
2498}
2499
2500static void *cfq_init_queue(struct request_queue *q)
2501{
2502        struct cfq_data *cfqd;
2503        int i;
2504
2505        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
2506        if (!cfqd)
2507                return NULL;
2508
2509        cfqd->service_tree = CFQ_RB_ROOT;
2510
2511        /*
2512         * Not strictly needed (since RB_ROOT just clears the node and we
2513         * zeroed cfqd on alloc), but better be safe in case someone decides
2514         * to add magic to the rb code
2515         */
2516        for (i = 0; i < CFQ_PRIO_LISTS; i++)
2517                cfqd->prio_trees[i] = RB_ROOT;
2518
2519        /*
2520         * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
2521         * Grab a permanent reference to it, so that the normal code flow
2522         * will not attempt to free it.
2523         */
2524        cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
2525        atomic_inc(&cfqd->oom_cfqq.ref);
2526
2527        INIT_LIST_HEAD(&cfqd->cic_list);
2528
2529        cfqd->queue = q;
2530
2531        init_timer(&cfqd->idle_slice_timer);
2532        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2533        cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2534
2535        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2536
2537        cfqd->cfq_quantum = cfq_quantum;
2538        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2539        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2540        cfqd->cfq_back_max = cfq_back_max;
2541        cfqd->cfq_back_penalty = cfq_back_penalty;
2542        cfqd->cfq_slice[0] = cfq_slice_async;
2543        cfqd->cfq_slice[1] = cfq_slice_sync;
2544        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2545        cfqd->cfq_slice_idle = cfq_slice_idle;
2546        cfqd->cfq_latency = 1;
2547        cfqd->hw_tag = 1;
2548        cfqd->last_end_sync_rq = jiffies;
2549        return cfqd;
2550}
2551
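    /*
     * Because oom_cfqq is embedded in cfqd and holds a reference of its
     * own, the usual cfq_put_queue() on request teardown can never drop
     * its count to zero; it simply lives, and is reused, for as long as
     * this elevator instance does.
     */
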
2552static void cfq_slab_kill(void)
2553{
2554        /*
2555         * Caller already ensured that pending RCU callbacks are completed,
2556         * so we should have no busy allocations at this point.
2557         */
2558        if (cfq_pool)
2559                kmem_cache_destroy(cfq_pool);
2560        if (cfq_ioc_pool)
2561                kmem_cache_destroy(cfq_ioc_pool);
2562}
2563
2564static int __init cfq_slab_setup(void)
2565{
2566        cfq_pool = KMEM_CACHE(cfq_queue, 0);
2567        if (!cfq_pool)
2568                goto fail;
2569
2570        cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
2571        if (!cfq_ioc_pool)
2572                goto fail;
2573
2574        return 0;
2575fail:
2576        cfq_slab_kill();
2577        return -ENOMEM;
2578}
2579
2580/*
2581 * sysfs parts below -->
2582 */
2583static ssize_t
2584cfq_var_show(unsigned int var, char *page)
2585{
2586        return sprintf(page, "%d\n", var);
2587}
2588
2589static ssize_t
2590cfq_var_store(unsigned int *var, const char *page, size_t count)
2591{
2592        char *p = (char *) page;
2593
2594        *var = simple_strtoul(p, &p, 10);
2595        return count;
2596}
2597
2598#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2599static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
2600{                                                                       \
2601        struct cfq_data *cfqd = e->elevator_data;                       \
2602        unsigned int __data = __VAR;                                    \
2603        if (__CONV)                                                     \
2604                __data = jiffies_to_msecs(__data);                      \
2605        return cfq_var_show(__data, (page));                            \
2606}
2607SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2608SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2609SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2610SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2611SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2612SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2613SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2614SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2615SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2616SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
2617#undef SHOW_FUNCTION
2618
2619#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2620static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
2621{                                                                       \
2622        struct cfq_data *cfqd = e->elevator_data;                       \
2623        unsigned int __data;                                            \
2624        int ret = cfq_var_store(&__data, (page), count);                \
2625        if (__data < (MIN))                                             \
2626                __data = (MIN);                                         \
2627        else if (__data > (MAX))                                        \
2628                __data = (MAX);                                         \
2629        if (__CONV)                                                     \
2630                *(__PTR) = msecs_to_jiffies(__data);                    \
2631        else                                                            \
2632                *(__PTR) = __data;                                      \
2633        return ret;                                                     \
2634}
2635STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2636STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
2637                UINT_MAX, 1);
2638STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
2639                UINT_MAX, 1);
2640STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2641STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
2642                UINT_MAX, 0);
2643STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2644STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2645STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2646STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
2647                UINT_MAX, 0);
2648STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
2649#undef STORE_FUNCTION
2650
2651#define CFQ_ATTR(name) \
2652        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2653
2654static struct elv_fs_entry cfq_attrs[] = {
2655        CFQ_ATTR(quantum),
2656        CFQ_ATTR(fifo_expire_sync),
2657        CFQ_ATTR(fifo_expire_async),
2658        CFQ_ATTR(back_seek_max),
2659        CFQ_ATTR(back_seek_penalty),
2660        CFQ_ATTR(slice_sync),
2661        CFQ_ATTR(slice_async),
2662        CFQ_ATTR(slice_async_rq),
2663        CFQ_ATTR(slice_idle),
2664        CFQ_ATTR(low_latency),
2665        __ATTR_NULL
2666};
2667
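    /*
     * Each CFQ_ATTR() entry above pairs the cfq_<name>_show() and
     * cfq_<name>_store() functions generated by the macros earlier in this
     * file; the __CONV flag decides whether a value is exposed in
     * milliseconds or raw.  The resulting files normally appear under
     * /sys/block/<dev>/queue/iosched/, so a tunable such as quantum can be
     * inspected or changed there (e.g. "echo 8 > .../iosched/quantum").
     */
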
2668static struct elevator_type iosched_cfq = {
2669        .ops = {
2670                .elevator_merge_fn =            cfq_merge,
2671                .elevator_merged_fn =           cfq_merged_request,
2672                .elevator_merge_req_fn =        cfq_merged_requests,
2673                .elevator_allow_merge_fn =      cfq_allow_merge,
2674                .elevator_dispatch_fn =         cfq_dispatch_requests,
2675                .elevator_add_req_fn =          cfq_insert_request,
2676                .elevator_activate_req_fn =     cfq_activate_request,
2677                .elevator_deactivate_req_fn =   cfq_deactivate_request,
2678                .elevator_queue_empty_fn =      cfq_queue_empty,
2679                .elevator_completed_req_fn =    cfq_completed_request,
2680                .elevator_former_req_fn =       elv_rb_former_request,
2681                .elevator_latter_req_fn =       elv_rb_latter_request,
2682                .elevator_set_req_fn =          cfq_set_request,
2683                .elevator_put_req_fn =          cfq_put_request,
2684                .elevator_may_queue_fn =        cfq_may_queue,
2685                .elevator_init_fn =             cfq_init_queue,
2686                .elevator_exit_fn =             cfq_exit_queue,
2687                .trim =                         cfq_free_io_context,
2688        },
2689        .elevator_attrs =       cfq_attrs,
2690        .elevator_name =        "cfq",
2691        .elevator_owner =       THIS_MODULE,
2692};
2693
2694static int __init cfq_init(void)
2695{
2696        /*
2697         * could be 0 on HZ < 1000 setups
2698         */
2699        if (!cfq_slice_async)
2700                cfq_slice_async = 1;
2701        if (!cfq_slice_idle)
2702                cfq_slice_idle = 1;
2703
2704        if (cfq_slab_setup())
2705                return -ENOMEM;
2706
2707        elv_register(&iosched_cfq);
2708
2709        return 0;
2710}
2711
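    /*
     * The fixups in cfq_init() are needed because cfq_slice_async and
     * cfq_slice_idle default to integer fractions of HZ, which can truncate
     * to zero jiffies on low-HZ configurations; cfq_slice_idle == 0 would
     * otherwise be read as "never idle" (see cfq_update_idle_window()).
     */
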
2712static void __exit cfq_exit(void)
2713{
2714        DECLARE_COMPLETION_ONSTACK(all_gone);
2715        elv_unregister(&iosched_cfq);
2716        ioc_gone = &all_gone;
2717        /* ioc_gone's update must be visible before reading ioc_count */
2718        smp_wmb();
2719
2720        /*
2721         * this also protects us from entering cfq_slab_kill() with
2722         * pending RCU callbacks
2723         */
2724        if (elv_ioc_count_read(cfq_ioc_count))
2725                wait_for_completion(&all_gone);
2726        cfq_slab_kill();
2727}
2728
2729module_init(cfq_init);
2730module_exit(cfq_exit);
2731
2732MODULE_AUTHOR("Jens Axboe");
2733MODULE_LICENSE("GPL");
2734MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
2735