linux/block/mq-deadline.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */
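
/*
 * These are only the built-in defaults: dd_init_queue() copies them into
 * struct deadline_data, and the per-queue copies can be changed at runtime
 * through the sysfs attributes defined at the bottom of this file.
 */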

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests are present on both the sector-sorted sort_list (rbtree)
         * and the expiry-ordered fifo_list, one of each per data direction
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next request in sector-sorted order per direction; either or both
         * may be NULL
         */
        struct request *next_rq[2];
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        spinlock_t lock;
        spinlock_t zone_lock;
        struct list_head dispatch;
};

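/*
 * Return the sector-sorted rbtree for the data direction of @rq.
 */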
static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
        return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(dd, rq);

        elv_rb_add(root, rq);
}

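/*
 * Remove @rq from the sort rbtree. If the cached next_rq pointer for this
 * direction refers to @rq, advance it to the following request in sector
 * order first.
 */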
static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        if (dd->next_rq[data_dir] == rq)
                dd->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&rq->queuelist);

        /*
         * We might not be on the rbtree, if we are doing an insert merge
         */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                deadline_del_rq_rb(dd, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;
}

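/*
 * Called after a bio has been merged into @req. A front merge changes the
 * request's start sector, so @req has to be repositioned in the sort rbtree.
 */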
static void dd_request_merged(struct request_queue *q, struct request *req,
                              enum elv_merge type)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(dd, req), req);
                deadline_add_rq_rb(dd, req);
        }
}

static void dd_merged_requests(struct request_queue *q, struct request *req,
                               struct request *next)
{
        /*
         * if next expires before req, assign its expire time to req
         * and move req into next's position in the fifo (next will be deleted)
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before((unsigned long)next->fifo_time,
                                (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * rq has been selected for dispatch: remove it from the scheduler's internal
 * lists and advance the cached next_rq pointer for its direction.
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        dd->next_rq[READ] = NULL;
        dd->next_rq[WRITE] = NULL;
        dd->next_rq[data_dir] = deadline_latter_request(rq);

        /*
         * take it off the sort and fifo list
         */
        deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * rq is expired!
         */
        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;

        return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
                return NULL;

        if (list_empty(&dd->fifo_list[data_dir]))
                return NULL;

        rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
        if (data_dir == READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
                if (blk_req_can_dispatch_to_zone(rq))
                        goto out;
        }
        rq = NULL;
out:
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
                return NULL;

        rq = dd->next_rq[data_dir];
        if (!rq)
                return NULL;

        if (data_dir == READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        while (rq) {
                if (blk_req_can_dispatch_to_zone(rq))
                        break;
                rq = deadline_latter_request(rq);
        }
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
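 *
 * Selection order, as a sketch of the logic below:
 *  1. Anything force-fed to the dd->dispatch list (at-head or passthrough
 *     inserts) goes out first.
 *  2. Otherwise, keep extending the current batch in sector order until
 *     fifo_batch requests have been issued.
 *  3. When starting a new batch, prefer reads unless writes have already
 *     been starved for writes_starved batches.
 *  4. Within the chosen direction, restart from the FIFO head if a deadline
 *     has expired (or there is no cached next request), else continue from
 *     next_rq in sector order.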
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
        struct request *rq, *next_rq;
        bool reads, writes;
        int data_dir;

        if (!list_empty(&dd->dispatch)) {
                rq = list_first_entry(&dd->dispatch, struct request, queuelist);
                list_del_init(&rq->queuelist);
                goto done;
        }

        reads = !list_empty(&dd->fifo_list[READ]);
        writes = !list_empty(&dd->fifo_list[WRITE]);

        /*
         * batches are currently reads XOR writes
         */
        rq = deadline_next_request(dd, WRITE);
        if (!rq)
                rq = deadline_next_request(dd, READ);

        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request and are still entitled to batch */
                goto dispatch_request;

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

                if (deadline_fifo_request(dd, WRITE) &&
                    (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        next_rq = deadline_next_request(dd, data_dir);
        if (deadline_check_fifo(dd, data_dir) || !next_rq) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = deadline_fifo_request(dd, data_dir);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = next_rq;
        }

        /*
         * For a zoned block device, if we only have writes queued and none of
         * them can be dispatched, rq will be NULL.
         */
        if (!rq)
                return NULL;

        dd->batching = 0;

dispatch_request:
        /*
         * rq is the selected request: account for it in the current batch
         */
        dd->batching++;
        deadline_move_request(dd, rq);
done:
        /*
         * If the request needs its target zone locked, do it.
         */
        blk_req_zone_write_lock(rq);
        rq->rq_flags |= RQF_STARTED;
        return rq;
}

/*
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        struct request *rq;

        spin_lock(&dd->lock);
        rq = __dd_dispatch_request(dd);
        spin_unlock(&dd->lock);

        return rq;
}

static void dd_exit_queue(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }
        eq->elevator_data = dd;

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);
        INIT_LIST_HEAD(&dd->dispatch);

        q->elevator = eq;
        return 0;
}

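/*
 * Check whether @bio can be merged in front of a request that is already
 * queued: look up the sort rbtree for a request starting exactly at
 * bio_end_sector(bio).
 */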
static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}

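/*
 * Try to merge @bio into a request already owned by the scheduler, under
 * dd->lock. A request made redundant by a resulting request-to-request
 * merge is freed once the lock has been dropped.
 */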
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
                unsigned int nr_segs)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const int data_dir = rq_data_dir(rq);

        /*
         * This may be a requeue of a write request that has locked its
         * target zone. If it is the case, this releases the zone lock.
         */
        blk_req_zone_write_unlock(rq);

        if (blk_mq_sched_try_insert_merge(q, rq))
                return;

        blk_mq_sched_request_inserted(rq);

        if (at_head || blk_rq_is_passthrough(rq)) {
                if (at_head)
                        list_add(&rq->queuelist, &dd->dispatch);
                else
                        list_add_tail(&rq->queuelist, &dd->dispatch);
        } else {
                deadline_add_rq_rb(dd, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
        }
}

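/*
 * Insert a list of requests, taking dd->lock once for the whole batch
 * rather than once per request.
 */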
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
        }
        spin_unlock(&dd->lock);
}

/*
 * Nothing to do here. This is defined only to ensure that the
 * .finish_request method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq, struct bio *bio)
{
}

/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (blk_queue_is_zoned(q)) {
                struct deadline_data *dd = q->elevator->elevator_data;
                unsigned long flags;

                spin_lock_irqsave(&dd->zone_lock, flags);
                blk_req_zone_write_unlock(rq);
                if (!list_empty(&dd->fifo_list[WRITE]))
                        blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
                spin_unlock_irqrestore(&dd->zone_lock, flags);
        }
}

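/*
 * Called by the blk-mq core without dd->lock held, hence the
 * list_empty_careful() checks.
 */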
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;

        return !list_empty_careful(&dd->dispatch) ||
                !list_empty_careful(&dd->fifo_list[0]) ||
                !list_empty_careful(&dd->fifo_list[1]);
}

/*
 * sysfs parts below
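 *
 * The attributes below appear under /sys/block/<dev>/queue/iosched/ while
 * mq-deadline is the active scheduler. read_expire and write_expire are
 * reported and set in milliseconds; the remaining tunables are plain
 * integers.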
 */
static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        deadline_var_store(&__data, (page));                            \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return count;                                                   \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
        __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)                         \
static void *deadline_##name##_fifo_start(struct seq_file *m,           \
                                          loff_t *pos)                  \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&dd->fifo_list[ddir], *pos);              \
}                                                                       \
                                                                        \
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,   \
                                         loff_t *pos)                   \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        return seq_list_next(v, &dd->fifo_list[ddir], pos);             \
}                                                                       \
                                                                        \
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)    \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_##name##_fifo_seq_ops = {   \
        .start  = deadline_##name##_fifo_start,                         \
        .next   = deadline_##name##_fifo_next,                          \
        .stop   = deadline_##name##_fifo_stop,                          \
        .show   = blk_mq_debugfs_rq_show,                               \
};                                                                      \
                                                                        \
static int deadline_##name##_next_rq_show(void *data,                   \
                                          struct seq_file *m)           \
{                                                                       \
        struct request_queue *q = data;                                 \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct request *rq = dd->next_rq[ddir];                         \
                                                                        \
        if (rq)                                                         \
                __blk_mq_debugfs_rq_show(m, rq);                        \
        return 0;                                                       \
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->batching);
        return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->starved);
        return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
        __releases(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
        .start  = deadline_dispatch_start,
        .next   = deadline_dispatch_next,
        .stop   = deadline_dispatch_stop,
        .show   = blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                         \
        {#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
        {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
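/*
 * Entries created under the queue's debugfs directory (typically
 * /sys/kernel/debug/block/<dev>/sched/) when CONFIG_BLK_DEBUG_FS is enabled.
 */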
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
        DEADLINE_QUEUE_DDIR_ATTRS(read),
        DEADLINE_QUEUE_DDIR_ATTRS(write),
        {"batching", 0400, deadline_batching_show},
        {"starved", 0400, deadline_starved_show},
        {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
        {},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
        .ops = {
                .insert_requests        = dd_insert_requests,
                .dispatch_request       = dd_dispatch_request,
                .prepare_request        = dd_prepare_request,
                .finish_request         = dd_finish_request,
                .next_request           = elv_rb_latter_request,
                .former_request         = elv_rb_former_request,
                .bio_merge              = dd_bio_merge,
                .request_merge          = dd_request_merge,
                .requests_merged        = dd_merged_requests,
                .request_merged         = dd_request_merged,
                .has_work               = dd_has_work,
                .init_sched             = dd_init_queue,
                .exit_sched             = dd_exit_queue,
        },

#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
        .elevator_attrs = deadline_attrs,
        .elevator_name = "mq-deadline",
        .elevator_alias = "deadline",
        .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
        .elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
        return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
        elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");
