linux/block/mq-deadline.c
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */
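
/*
 * With HZ jiffies per second, these defaults aim to dispatch a read within
 * 500 ms and a write within 5 s of it entering its FIFO, while up to
 * fifo_batch contiguous requests may be issued from the sort list before
 * the expiry times are considered again.
 */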

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (struct request) are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next in sort order; read, write or both may be NULL
         */
        struct request *next_rq[2];
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        spinlock_t lock;
        spinlock_t zone_lock;
        struct list_head dispatch;
};

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
        return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(dd, rq);

        elv_rb_add(root, rq);
}

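/*
 * Remove rq from the sort tree. If rq happens to be the cached next
 * request for its direction, advance that cache to the following request
 * in sector order before unlinking it.
 */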
static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        if (dd->next_rq[data_dir] == rq)
                dd->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&rq->queuelist);

        /*
         * We might not be on the rbtree, if we are doing an insert merge
         */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                deadline_del_rq_rb(dd, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
                              enum elv_merge type)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(dd, req), req);
                deadline_add_rq_rb(dd, req);
        }
}

static void dd_merged_requests(struct request_queue *q, struct request *req,
                               struct request *next)
{
        /*
         * if next expires before rq, assign its expire time to rq
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before((unsigned long)next->fifo_time,
                                (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * rq is about to be dispatched: cache its successor as the next request
 * in sort order for this direction and take rq off the scheduler's lists.
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        dd->next_rq[READ] = NULL;
        dd->next_rq[WRITE] = NULL;
        dd->next_rq[data_dir] = deadline_latter_request(rq);

        /*
         * take it off the sort and fifo list
         */
        deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * rq is expired!
         */
        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;

        return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
                return NULL;

        if (list_empty(&dd->fifo_list[data_dir]))
                return NULL;

        rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
        if (data_dir == READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
                if (blk_req_can_dispatch_to_zone(rq))
                        goto out;
        }
        rq = NULL;
out:
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
                return NULL;

        rq = dd->next_rq[data_dir];
        if (!rq)
                return NULL;

        if (data_dir == READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        while (rq) {
                if (blk_req_can_dispatch_to_zone(rq))
                        break;
                rq = deadline_latter_request(rq);
        }
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * __dd_dispatch_request selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
        struct request *rq, *next_rq;
        bool reads, writes;
        int data_dir;

        if (!list_empty(&dd->dispatch)) {
                rq = list_first_entry(&dd->dispatch, struct request, queuelist);
                list_del_init(&rq->queuelist);
                goto done;
        }

        reads = !list_empty(&dd->fifo_list[READ]);
        writes = !list_empty(&dd->fifo_list[WRITE]);

        /*
         * batches are currently reads XOR writes
         */
        rq = deadline_next_request(dd, WRITE);
        if (!rq)
                rq = deadline_next_request(dd, READ);

        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request and are still entitled to batch */
                goto dispatch_request;

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

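                /*
                 * Reads are preferred, but a queued write may only be
                 * passed over dd->writes_starved times in a row before
                 * we switch direction and service the write FIFO.
                 */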
                if (deadline_fifo_request(dd, WRITE) &&
                    (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        next_rq = deadline_next_request(dd, data_dir);
        if (deadline_check_fifo(dd, data_dir) || !next_rq) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = deadline_fifo_request(dd, data_dir);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = next_rq;
        }

        /*
         * For a zoned block device, if we only have writes queued and none of
         * them can be dispatched, rq will be NULL.
         */
        if (!rq)
                return NULL;

        dd->batching = 0;

dispatch_request:
        /*
         * rq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, rq);
done:
        /*
         * If the request needs its target zone locked, do it.
         */
        blk_req_zone_write_lock(rq);
        rq->rq_flags |= RQF_STARTED;
        return rq;
}

/*
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        struct request *rq;

        spin_lock(&dd->lock);
        rq = __dd_dispatch_request(dd);
        spin_unlock(&dd->lock);

        return rq;
}

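/*
 * Release the scheduler private data when the elevator is torn down.
 * Both FIFOs must be empty at this point; a request left on either list
 * would be leaked, hence the BUG_ON checks.
 */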
static void dd_exit_queue(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }
        eq->elevator_data = dd;

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);
        INIT_LIST_HEAD(&dd->dispatch);

        q->elevator = eq;
        return 0;
}

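/*
 * Check whether bio can be merged in front of an existing request, i.e.
 * whether some request in the sort tree starts exactly where bio ends.
 */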
static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}

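/*
 * Called from the bio submission path to try merging bio into a request
 * that is already queued in the scheduler, before a new request is
 * allocated for it.
 */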
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const int data_dir = rq_data_dir(rq);

        /*
         * This may be a requeue of a write request that has locked its
         * target zone. If it is the case, this releases the zone lock.
         */
        blk_req_zone_write_unlock(rq);

        if (blk_mq_sched_try_insert_merge(q, rq))
                return;

        blk_mq_sched_request_inserted(rq);

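        /*
         * Requests inserted at the head and passthrough requests bypass
         * the sort and fifo lists and go straight onto the private
         * dispatch list, from which they are issued first.
         */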
        if (at_head || blk_rq_is_passthrough(rq)) {
                if (at_head)
                        list_add(&rq->queuelist, &dd->dispatch);
                else
                        list_add_tail(&rq->queuelist, &dd->dispatch);
        } else {
                deadline_add_rq_rb(dd, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
        }
}

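/*
 * Called by blk-mq to hand a batch of requests to the scheduler.
 * dd->lock serializes insertion against merging and dispatch.
 */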
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
        }
        spin_unlock(&dd->lock);
}

/*
 * Nothing to do here. This is defined only to ensure that the
 * ->finish_request hook is called upon request completion.
 */
static void dd_prepare_request(struct request *rq, struct bio *bio)
{
}

/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 */
static void dd_finish_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (blk_queue_is_zoned(q)) {
                struct deadline_data *dd = q->elevator->elevator_data;
                unsigned long flags;

                spin_lock_irqsave(&dd->zone_lock, flags);
                blk_req_zone_write_unlock(rq);
                spin_unlock_irqrestore(&dd->zone_lock, flags);
        }
}

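/*
 * Tell blk-mq whether the scheduler has queued work: any request on the
 * private dispatch list or on either FIFO means the hardware queues
 * should be run.
 */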
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;

        return !list_empty_careful(&dd->dispatch) ||
                !list_empty_careful(&dd->fifo_list[0]) ||
                !list_empty_careful(&dd->fifo_list[1]);
}

/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        deadline_var_store(&__data, (page));                            \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return count;                                                   \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};
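
/*
 * With mq-deadline active, the attributes above appear under
 * /sys/block/<dev>/queue/iosched/, e.g.:
 *
 *   echo 100 > /sys/block/<dev>/queue/iosched/read_expire
 *
 * read_expire and write_expire are read and written in milliseconds;
 * the show/store macros convert to and from jiffies.
 */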

#ifdef CONFIG_BLK_DEBUG_FS
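/*
 * debugfs attributes, exposed in the per-device blk-mq debugfs directory
 * when CONFIG_BLK_DEBUG_FS is enabled, that dump the FIFO lists, the
 * cached next requests, and the dispatch state.
 */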
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)                         \
static void *deadline_##name##_fifo_start(struct seq_file *m,           \
                                          loff_t *pos)                  \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&dd->fifo_list[ddir], *pos);              \
}                                                                       \
                                                                        \
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,   \
                                         loff_t *pos)                   \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        return seq_list_next(v, &dd->fifo_list[ddir], pos);             \
}                                                                       \
                                                                        \
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)    \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_##name##_fifo_seq_ops = {   \
        .start  = deadline_##name##_fifo_start,                         \
        .next   = deadline_##name##_fifo_next,                          \
        .stop   = deadline_##name##_fifo_stop,                          \
        .show   = blk_mq_debugfs_rq_show,                               \
};                                                                      \
                                                                        \
static int deadline_##name##_next_rq_show(void *data,                   \
                                          struct seq_file *m)           \
{                                                                       \
        struct request_queue *q = data;                                 \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct request *rq = dd->next_rq[ddir];                         \
                                                                        \
        if (rq)                                                         \
                __blk_mq_debugfs_rq_show(m, rq);                        \
        return 0;                                                       \
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->batching);
        return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->starved);
        return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
        __releases(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
        .start  = deadline_dispatch_start,
        .next   = deadline_dispatch_next,
        .stop   = deadline_dispatch_stop,
        .show   = blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                         \
        {#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
        {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
        DEADLINE_QUEUE_DDIR_ATTRS(read),
        DEADLINE_QUEUE_DDIR_ATTRS(write),
        {"batching", 0400, deadline_batching_show},
        {"starved", 0400, deadline_starved_show},
        {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
        {},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

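/*
 * The scheduler hooks registered with the block layer. All mq-deadline
 * state is protected by dd->lock and, for zoned devices, dd->zone_lock.
 * The scheduler is selected per device, e.g.:
 *
 *   echo mq-deadline > /sys/block/<dev>/queue/scheduler
 */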
static struct elevator_type mq_deadline = {
        .ops.mq = {
                .insert_requests        = dd_insert_requests,
                .dispatch_request       = dd_dispatch_request,
                .prepare_request        = dd_prepare_request,
                .finish_request         = dd_finish_request,
                .next_request           = elv_rb_latter_request,
                .former_request         = elv_rb_former_request,
                .bio_merge              = dd_bio_merge,
                .request_merge          = dd_request_merge,
                .requests_merged        = dd_merged_requests,
                .request_merged         = dd_request_merged,
                .has_work               = dd_has_work,
                .init_sched             = dd_init_queue,
                .exit_sched             = dd_exit_queue,
        },

        .uses_mq        = true,
#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
        .elevator_attrs = deadline_attrs,
        .elevator_name = "mq-deadline",
        .elevator_alias = "deadline",
        .elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
        return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
        elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");