linux/block/mq-deadline.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
   4 *  for the blk-mq scheduling framework
   5 *
   6 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
   7 */
   8#include <linux/kernel.h>
   9#include <linux/fs.h>
  10#include <linux/blkdev.h>
  11#include <linux/blk-mq.h>
  12#include <linux/elevator.h>
  13#include <linux/bio.h>
  14#include <linux/module.h>
  15#include <linux/slab.h>
  16#include <linux/init.h>
  17#include <linux/compiler.h>
  18#include <linux/rbtree.h>
  19#include <linux/sbitmap.h>
  20
  21#include <trace/events/block.h>
  22
  23#include "blk.h"
  24#include "blk-mq.h"
  25#include "blk-mq-debugfs.h"
  26#include "blk-mq-tag.h"
  27#include "blk-mq-sched.h"
  28
  29/*
  30 * See Documentation/block/deadline-iosched.rst
  31 */
  32static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
  33static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
  34static const int writes_starved = 2;    /* max times reads can starve a write */
  35static const int fifo_batch = 16;       /* # of sequential requests treated as one
  36                                     by the above parameters. For throughput. */
  37
  38enum dd_data_dir {
  39        DD_READ         = READ,
  40        DD_WRITE        = WRITE,
  41};
  42
  43enum { DD_DIR_COUNT = 2 };
  44
  45enum dd_prio {
  46        DD_RT_PRIO      = 0,
  47        DD_BE_PRIO      = 1,
  48        DD_IDLE_PRIO    = 2,
  49        DD_PRIO_MAX     = 2,
  50};
  51
  52enum { DD_PRIO_COUNT = 3 };
  53
  54/* I/O statistics per I/O priority. */
  55struct io_stats_per_prio {
  56        local_t inserted;
  57        local_t merged;
  58        local_t dispatched;
  59        local_t completed;
  60};
  61
  62/* I/O statistics for all I/O priorities (enum dd_prio). */
  63struct io_stats {
  64        struct io_stats_per_prio stats[DD_PRIO_COUNT];
  65};
  66
  67/*
  68 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
  69 * present on both sort_list[] and fifo_list[].
  70 */
  71struct dd_per_prio {
  72        struct list_head dispatch;
  73        struct rb_root sort_list[DD_DIR_COUNT];
  74        struct list_head fifo_list[DD_DIR_COUNT];
  75        /* Next request in FIFO order. Read, write or both are NULL. */
  76        struct request *next_rq[DD_DIR_COUNT];
  77};
  78
  79struct deadline_data {
  80        /*
  81         * run time data
  82         */
  83
  84        struct dd_per_prio per_prio[DD_PRIO_COUNT];
  85
  86        /* Data direction of latest dispatched request. */
  87        enum dd_data_dir last_dir;
  88        unsigned int batching;          /* number of sequential requests made */
  89        unsigned int starved;           /* times reads have starved writes */
  90
  91        struct io_stats __percpu *stats;
  92
  93        /*
  94         * settings that change how the i/o scheduler behaves
  95         */
  96        int fifo_expire[DD_DIR_COUNT];
  97        int fifo_batch;
  98        int writes_starved;
  99        int front_merges;
 100        u32 async_depth;
 101
 102        spinlock_t lock;
 103        spinlock_t zone_lock;
 104};
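/*
 * Locking overview (derived from the code in this file): dd->lock serializes
 * insertion, merging, dispatching and the debugfs iterators, and thereby
 * protects the per-priority FIFO lists, sort rbtrees and dispatch lists.
 * dd->zone_lock is taken with interrupts disabled so that zone write
 * unlocking in dd_finish_request() cannot race with the zone checks done
 * while selecting a write request to dispatch.
 */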
 105
 106/* Count one event of type 'event_type' and with I/O priority 'prio' */
 107#define dd_count(dd, event_type, prio) do {                             \
 108        struct io_stats *io_stats = get_cpu_ptr((dd)->stats);           \
 109                                                                        \
 110        BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
 111        BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
 112        local_inc(&io_stats->stats[(prio)].event_type);                 \
 113        put_cpu_ptr(io_stats);                                          \
 114} while (0)
 115
 116/*
 117 * Returns the total number of dd_count(dd, event_type, prio) calls across all
 118 * CPUs. No locking or barriers since it is fine if the returned sum is slightly
 119 * outdated.
 120 */
 121#define dd_sum(dd, event_type, prio) ({                                 \
 122        unsigned int cpu;                                               \
 123        u32 sum = 0;                                                    \
 124                                                                        \
 125        BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
 126        BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
 127        for_each_present_cpu(cpu)                                       \
 128                sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->      \
 129                                  stats[(prio)].event_type);            \
 130        sum;                                                            \
 131})
 132
 133/* Maps an I/O priority class to a deadline scheduler priority. */
 134static const enum dd_prio ioprio_class_to_prio[] = {
 135        [IOPRIO_CLASS_NONE]     = DD_BE_PRIO,
 136        [IOPRIO_CLASS_RT]       = DD_RT_PRIO,
 137        [IOPRIO_CLASS_BE]       = DD_BE_PRIO,
 138        [IOPRIO_CLASS_IDLE]     = DD_IDLE_PRIO,
 139};
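/*
 * Example of the mapping above: a request submitted with I/O priority class
 * IOPRIO_CLASS_RT ends up in per_prio[DD_RT_PRIO]. Since dd_dispatch_request()
 * walks the priorities in ascending order, such a request is considered for
 * dispatch before any best-effort (DD_BE_PRIO) or idle (DD_IDLE_PRIO) request.
 */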
 140
 141static inline struct rb_root *
 142deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
 143{
 144        return &per_prio->sort_list[rq_data_dir(rq)];
 145}
 146
 147/*
 148 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 149 * request.
 150 */
 151static u8 dd_rq_ioclass(struct request *rq)
 152{
 153        return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
 154}
 155
 156/*
 157 * get the request after `rq' in sector-sorted order
 158 */
 159static inline struct request *
 160deadline_latter_request(struct request *rq)
 161{
 162        struct rb_node *node = rb_next(&rq->rb_node);
 163
 164        if (node)
 165                return rb_entry_rq(node);
 166
 167        return NULL;
 168}
 169
 170static void
 171deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
 172{
 173        struct rb_root *root = deadline_rb_root(per_prio, rq);
 174
 175        elv_rb_add(root, rq);
 176}
 177
 178static inline void
 179deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
 180{
 181        const enum dd_data_dir data_dir = rq_data_dir(rq);
 182
 183        if (per_prio->next_rq[data_dir] == rq)
 184                per_prio->next_rq[data_dir] = deadline_latter_request(rq);
 185
 186        elv_rb_del(deadline_rb_root(per_prio, rq), rq);
 187}
 188
 189/*
 190 * remove rq from rbtree and fifo.
 191 */
 192static void deadline_remove_request(struct request_queue *q,
 193                                    struct dd_per_prio *per_prio,
 194                                    struct request *rq)
 195{
 196        list_del_init(&rq->queuelist);
 197
 198        /*
 199         * We might not be on the rbtree, if we are doing an insert merge
 200         */
 201        if (!RB_EMPTY_NODE(&rq->rb_node))
 202                deadline_del_rq_rb(per_prio, rq);
 203
 204        elv_rqhash_del(q, rq);
 205        if (q->last_merge == rq)
 206                q->last_merge = NULL;
 207}
 208
 209static void dd_request_merged(struct request_queue *q, struct request *req,
 210                              enum elv_merge type)
 211{
 212        struct deadline_data *dd = q->elevator->elevator_data;
 213        const u8 ioprio_class = dd_rq_ioclass(req);
 214        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
 215        struct dd_per_prio *per_prio = &dd->per_prio[prio];
 216
 217        /*
 218         * if the merge was a front merge, we need to reposition request
 219         */
 220        if (type == ELEVATOR_FRONT_MERGE) {
 221                elv_rb_del(deadline_rb_root(per_prio, req), req);
 222                deadline_add_rq_rb(per_prio, req);
 223        }
 224}
 225
 226/*
 227 * Callback function that is invoked after @next has been merged into @req.
 228 */
 229static void dd_merged_requests(struct request_queue *q, struct request *req,
 230                               struct request *next)
 231{
 232        struct deadline_data *dd = q->elevator->elevator_data;
 233        const u8 ioprio_class = dd_rq_ioclass(next);
 234        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
 235
 236        dd_count(dd, merged, prio);
 237
 238        /*
 239         * if next expires before rq, assign its expire time to rq
 240         * and move into next position (next will be deleted) in fifo
 241         */
 242        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
 243                if (time_before((unsigned long)next->fifo_time,
 244                                (unsigned long)req->fifo_time)) {
 245                        list_move(&req->queuelist, &next->queuelist);
 246                        req->fifo_time = next->fifo_time;
 247                }
 248        }
 249
 250        /*
 251         * kill knowledge of next, this one is a goner
 252         */
 253        deadline_remove_request(q, &dd->per_prio[prio], next);
 254}
 255
 256/*
 257 * move an entry to dispatch queue
 258 */
 259static void
 260deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 261                      struct request *rq)
 262{
 263        const enum dd_data_dir data_dir = rq_data_dir(rq);
 264
 265        per_prio->next_rq[data_dir] = deadline_latter_request(rq);
 266
 267        /*
 268         * take it off the sort and fifo list
 269         */
 270        deadline_remove_request(rq->q, per_prio, rq);
 271}
 272
 273/*
 274 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
  275 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
 276 */
 277static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
 278                                      enum dd_data_dir data_dir)
 279{
 280        struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
 281
 282        /*
 283         * rq is expired!
 284         */
 285        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
 286                return 1;
 287
 288        return 0;
 289}
 290
 291/*
 292 * For the specified data direction, return the next request to
 293 * dispatch using arrival ordered lists.
 294 */
 295static struct request *
 296deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 297                      enum dd_data_dir data_dir)
 298{
 299        struct request *rq;
 300        unsigned long flags;
 301
 302        if (list_empty(&per_prio->fifo_list[data_dir]))
 303                return NULL;
 304
 305        rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
 306        if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
 307                return rq;
 308
 309        /*
 310         * Look for a write request that can be dispatched, that is one with
 311         * an unlocked target zone.
 312         */
 313        spin_lock_irqsave(&dd->zone_lock, flags);
 314        list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
 315                if (blk_req_can_dispatch_to_zone(rq))
 316                        goto out;
 317        }
 318        rq = NULL;
 319out:
 320        spin_unlock_irqrestore(&dd->zone_lock, flags);
 321
 322        return rq;
 323}
 324
 325/*
 326 * For the specified data direction, return the next request to
 327 * dispatch using sector position sorted lists.
 328 */
 329static struct request *
 330deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 331                      enum dd_data_dir data_dir)
 332{
 333        struct request *rq;
 334        unsigned long flags;
 335
 336        rq = per_prio->next_rq[data_dir];
 337        if (!rq)
 338                return NULL;
 339
 340        if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
 341                return rq;
 342
 343        /*
 344         * Look for a write request that can be dispatched, that is one with
 345         * an unlocked target zone.
 346         */
 347        spin_lock_irqsave(&dd->zone_lock, flags);
 348        while (rq) {
 349                if (blk_req_can_dispatch_to_zone(rq))
 350                        break;
 351                rq = deadline_latter_request(rq);
 352        }
 353        spin_unlock_irqrestore(&dd->zone_lock, flags);
 354
 355        return rq;
 356}
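/*
 * Note on the zone handling in deadline_fifo_request() and
 * deadline_next_request(): zone write locking only applies to write requests,
 * so reads (and any request on a non-zoned device) are returned without
 * consulting blk_req_can_dispatch_to_zone(). For writes, both helpers skip
 * requests whose target zone is locked: the former scans the FIFO in arrival
 * order, the latter follows the sort tree in sector order.
 */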
 357
 358/*
  359 * __dd_dispatch_request() selects the best request according to
  360 * read/write expire, fifo_batch, etc.
 361 */
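/*
 * Selection order implemented below: requests on the per-priority dispatch
 * list are handed out first. Otherwise, if a batch is in progress and has not
 * yet reached fifo_batch requests, it is continued in sector order. When a new
 * batch is started, reads are preferred unless writes have been starved
 * writes_starved times, and the batch starts from the oldest request if a FIFO
 * deadline has expired or if there is no next request in sector order.
 */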
 362static struct request *__dd_dispatch_request(struct deadline_data *dd,
 363                                             struct dd_per_prio *per_prio)
 364{
 365        struct request *rq, *next_rq;
 366        enum dd_data_dir data_dir;
 367        enum dd_prio prio;
 368        u8 ioprio_class;
 369
 370        lockdep_assert_held(&dd->lock);
 371
 372        if (!list_empty(&per_prio->dispatch)) {
 373                rq = list_first_entry(&per_prio->dispatch, struct request,
 374                                      queuelist);
 375                list_del_init(&rq->queuelist);
 376                goto done;
 377        }
 378
 379        /*
 380         * batches are currently reads XOR writes
 381         */
 382        rq = deadline_next_request(dd, per_prio, dd->last_dir);
 383        if (rq && dd->batching < dd->fifo_batch)
  384                /* we have a next request and are still entitled to batch */
 385                goto dispatch_request;
 386
 387        /*
 388         * at this point we are not running a batch. select the appropriate
 389         * data direction (read / write)
 390         */
 391
 392        if (!list_empty(&per_prio->fifo_list[DD_READ])) {
 393                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
 394
 395                if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
 396                    (dd->starved++ >= dd->writes_starved))
 397                        goto dispatch_writes;
 398
 399                data_dir = DD_READ;
 400
 401                goto dispatch_find_request;
 402        }
 403
 404        /*
  405         * either there are no reads, or writes have been starved
 406         */
 407
 408        if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
 409dispatch_writes:
 410                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
 411
 412                dd->starved = 0;
 413
 414                data_dir = DD_WRITE;
 415
 416                goto dispatch_find_request;
 417        }
 418
 419        return NULL;
 420
 421dispatch_find_request:
 422        /*
 423         * we are not running a batch, find best request for selected data_dir
 424         */
 425        next_rq = deadline_next_request(dd, per_prio, data_dir);
 426        if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
 427                /*
 428                 * A deadline has expired, the last request was in the other
 429                 * direction, or we have run out of higher-sectored requests.
 430                 * Start again from the request with the earliest expiry time.
 431                 */
 432                rq = deadline_fifo_request(dd, per_prio, data_dir);
 433        } else {
 434                /*
 435                 * The last req was the same dir and we have a next request in
 436                 * sort order. No expired requests so continue on from here.
 437                 */
 438                rq = next_rq;
 439        }
 440
 441        /*
 442         * For a zoned block device, if we only have writes queued and none of
 443         * them can be dispatched, rq will be NULL.
 444         */
 445        if (!rq)
 446                return NULL;
 447
 448        dd->last_dir = data_dir;
 449        dd->batching = 0;
 450
 451dispatch_request:
 452        /*
  453         * rq is the request selected for dispatch.
 454         */
 455        dd->batching++;
 456        deadline_move_request(dd, per_prio, rq);
 457done:
 458        ioprio_class = dd_rq_ioclass(rq);
 459        prio = ioprio_class_to_prio[ioprio_class];
 460        dd_count(dd, dispatched, prio);
 461        /*
 462         * If the request needs its target zone locked, do it.
 463         */
 464        blk_req_zone_write_lock(rq);
 465        rq->rq_flags |= RQF_STARTED;
 466        return rq;
 467}
 468
 469/*
 470 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 471 *
 472 * One confusing aspect here is that we get called for a specific
 473 * hardware queue, but we may return a request that is for a
 474 * different hardware queue. This is because mq-deadline has shared
 475 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 476 */
 477static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 478{
 479        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
 480        struct request *rq;
 481        enum dd_prio prio;
 482
 483        spin_lock(&dd->lock);
 484        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
 485                rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
 486                if (rq)
 487                        break;
 488        }
 489        spin_unlock(&dd->lock);
 490
 491        return rq;
 492}
 493
 494/*
 495 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 496 * function is used by __blk_mq_get_tag().
 497 */
 498static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 499{
 500        struct deadline_data *dd = data->q->elevator->elevator_data;
 501
 502        /* Do not throttle synchronous reads. */
 503        if (op_is_sync(op) && !op_is_write(op))
 504                return;
 505
 506        /*
 507         * Throttle asynchronous requests and writes such that these requests
 508         * do not block the allocation of synchronous requests.
 509         */
 510        data->shallow_depth = dd->async_depth;
 511}
 512
 513/* Called by blk_mq_update_nr_requests(). */
 514static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
 515{
 516        struct request_queue *q = hctx->queue;
 517        struct deadline_data *dd = q->elevator->elevator_data;
 518        struct blk_mq_tags *tags = hctx->sched_tags;
 519
 520        dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
 521
 522        sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
 523}
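/*
 * Worked example for the calculation above (the value 64 is only illustrative,
 * not a claim about the default queue depth): with q->nr_requests == 64,
 * async_depth becomes 3 * 64 / 4 = 48, so asynchronous requests and writes are
 * limited to a shallow depth of 48 tags and cannot exhaust the tag space
 * needed by synchronous reads (see dd_limit_depth()).
 */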
 524
 525/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
 526static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 527{
 528        dd_depth_updated(hctx);
 529        return 0;
 530}
 531
 532static void dd_exit_sched(struct elevator_queue *e)
 533{
 534        struct deadline_data *dd = e->elevator_data;
 535        enum dd_prio prio;
 536
 537        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
 538                struct dd_per_prio *per_prio = &dd->per_prio[prio];
 539
 540                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
 541                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
 542        }
 543
 544        free_percpu(dd->stats);
 545
 546        kfree(dd);
 547}
 548
 549/*
 550 * initialize elevator private data (deadline_data).
 551 */
 552static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
 553{
 554        struct deadline_data *dd;
 555        struct elevator_queue *eq;
 556        enum dd_prio prio;
 557        int ret = -ENOMEM;
 558
 559        eq = elevator_alloc(q, e);
 560        if (!eq)
 561                return ret;
 562
 563        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
 564        if (!dd)
 565                goto put_eq;
 566
 567        eq->elevator_data = dd;
 568
 569        dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
 570                                     GFP_KERNEL | __GFP_ZERO);
 571        if (!dd->stats)
 572                goto free_dd;
 573
 574        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
 575                struct dd_per_prio *per_prio = &dd->per_prio[prio];
 576
 577                INIT_LIST_HEAD(&per_prio->dispatch);
 578                INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
 579                INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
 580                per_prio->sort_list[DD_READ] = RB_ROOT;
 581                per_prio->sort_list[DD_WRITE] = RB_ROOT;
 582        }
 583        dd->fifo_expire[DD_READ] = read_expire;
 584        dd->fifo_expire[DD_WRITE] = write_expire;
 585        dd->writes_starved = writes_starved;
 586        dd->front_merges = 1;
 587        dd->last_dir = DD_WRITE;
 588        dd->fifo_batch = fifo_batch;
 589        spin_lock_init(&dd->lock);
 590        spin_lock_init(&dd->zone_lock);
 591
 592        q->elevator = eq;
 593        return 0;
 594
 595free_dd:
 596        kfree(dd);
 597
 598put_eq:
 599        kobject_put(&eq->kobj);
 600        return ret;
 601}
 602
 603/*
 604 * Try to merge @bio into an existing request. If @bio has been merged into
 605 * an existing request, store the pointer to that request into *@rq.
 606 */
 607static int dd_request_merge(struct request_queue *q, struct request **rq,
 608                            struct bio *bio)
 609{
 610        struct deadline_data *dd = q->elevator->elevator_data;
 611        const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
 612        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
 613        struct dd_per_prio *per_prio = &dd->per_prio[prio];
 614        sector_t sector = bio_end_sector(bio);
 615        struct request *__rq;
 616
 617        if (!dd->front_merges)
 618                return ELEVATOR_NO_MERGE;
 619
 620        __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
 621        if (__rq) {
 622                BUG_ON(sector != blk_rq_pos(__rq));
 623
 624                if (elv_bio_merge_ok(__rq, bio)) {
 625                        *rq = __rq;
 626                        if (blk_discard_mergable(__rq))
 627                                return ELEVATOR_DISCARD_MERGE;
 628                        return ELEVATOR_FRONT_MERGE;
 629                }
 630        }
 631
 632        return ELEVATOR_NO_MERGE;
 633}
 634
 635/*
 636 * Attempt to merge a bio into an existing request. This function is called
 637 * before @bio is associated with a request.
 638 */
 639static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
 640                unsigned int nr_segs)
 641{
 642        struct deadline_data *dd = q->elevator->elevator_data;
 643        struct request *free = NULL;
 644        bool ret;
 645
 646        spin_lock(&dd->lock);
 647        ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
 648        spin_unlock(&dd->lock);
 649
 650        if (free)
 651                blk_mq_free_request(free);
 652
 653        return ret;
 654}
 655
 656/*
 657 * add rq to rbtree and fifo
 658 */
 659static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 660                              bool at_head)
 661{
 662        struct request_queue *q = hctx->queue;
 663        struct deadline_data *dd = q->elevator->elevator_data;
 664        const enum dd_data_dir data_dir = rq_data_dir(rq);
 665        u16 ioprio = req_get_ioprio(rq);
 666        u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
 667        struct dd_per_prio *per_prio;
 668        enum dd_prio prio;
 669        LIST_HEAD(free);
 670
 671        lockdep_assert_held(&dd->lock);
 672
 673        /*
 674         * This may be a requeue of a write request that has locked its
  675         * target zone. If that is the case, this releases the zone write lock.
 676         */
 677        blk_req_zone_write_unlock(rq);
 678
 679        prio = ioprio_class_to_prio[ioprio_class];
 680        dd_count(dd, inserted, prio);
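        /*
         * Mark rq as having been inserted by this scheduler;
         * dd_finish_request() only updates the completion statistics for
         * requests carrying this mark (dd_prepare_request() clears it).
         */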
 681        rq->elv.priv[0] = (void *)(uintptr_t)1;
 682
 683        if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
 684                blk_mq_free_requests(&free);
 685                return;
 686        }
 687
 688        trace_block_rq_insert(rq);
 689
 690        per_prio = &dd->per_prio[prio];
 691        if (at_head) {
 692                list_add(&rq->queuelist, &per_prio->dispatch);
 693        } else {
 694                deadline_add_rq_rb(per_prio, rq);
 695
 696                if (rq_mergeable(rq)) {
 697                        elv_rqhash_add(q, rq);
 698                        if (!q->last_merge)
 699                                q->last_merge = rq;
 700                }
 701
 702                /*
 703                 * set expire time and add to fifo list
 704                 */
 705                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
 706                list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
 707        }
 708}
 709
 710/*
 711 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 712 */
 713static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
 714                               struct list_head *list, bool at_head)
 715{
 716        struct request_queue *q = hctx->queue;
 717        struct deadline_data *dd = q->elevator->elevator_data;
 718
 719        spin_lock(&dd->lock);
 720        while (!list_empty(list)) {
 721                struct request *rq;
 722
 723                rq = list_first_entry(list, struct request, queuelist);
 724                list_del_init(&rq->queuelist);
 725                dd_insert_request(hctx, rq, at_head);
 726        }
 727        spin_unlock(&dd->lock);
 728}
 729
 730/* Callback from inside blk_mq_rq_ctx_init(). */
 731static void dd_prepare_request(struct request *rq)
 732{
 733        rq->elv.priv[0] = NULL;
 734}
 735
 736/*
 737 * Callback from inside blk_mq_free_request().
 738 *
 739 * For zoned block devices, write unlock the target zone of
 740 * completed write requests. Do this while holding the zone lock
 741 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 742 * or deadline_next_request() are executing. This function is called for
 743 * all requests, whether or not these requests complete successfully.
 744 *
 745 * For a zoned block device, __dd_dispatch_request() may have stopped
 746 * dispatching requests if all the queued requests are write requests directed
 747 * at zones that are already locked due to on-going write requests. To ensure
 748 * write request dispatch progress in this case, mark the queue as needing a
 749 * restart to ensure that the queue is run again after completion of the
 750 * request and zones being unlocked.
 751 */
 752static void dd_finish_request(struct request *rq)
 753{
 754        struct request_queue *q = rq->q;
 755        struct deadline_data *dd = q->elevator->elevator_data;
 756        const u8 ioprio_class = dd_rq_ioclass(rq);
 757        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
 758        struct dd_per_prio *per_prio = &dd->per_prio[prio];
 759
 760        /*
 761         * The block layer core may call dd_finish_request() without having
 762         * called dd_insert_requests(). Hence only update statistics for
 763         * requests for which dd_insert_requests() has been called. See also
 764         * blk_mq_request_bypass_insert().
 765         */
 766        if (rq->elv.priv[0])
 767                dd_count(dd, completed, prio);
 768
 769        if (blk_queue_is_zoned(q)) {
 770                unsigned long flags;
 771
 772                spin_lock_irqsave(&dd->zone_lock, flags);
 773                blk_req_zone_write_unlock(rq);
 774                if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
 775                        blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
 776                spin_unlock_irqrestore(&dd->zone_lock, flags);
 777        }
 778}
 779
 780static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
 781{
 782        return !list_empty_careful(&per_prio->dispatch) ||
 783                !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
 784                !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
 785}
 786
 787static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
 788{
 789        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
 790        enum dd_prio prio;
 791
 792        for (prio = 0; prio <= DD_PRIO_MAX; prio++)
 793                if (dd_has_work_for_prio(&dd->per_prio[prio]))
 794                        return true;
 795
 796        return false;
 797}
 798
 799/*
 800 * sysfs parts below
 801 */
 802#define SHOW_INT(__FUNC, __VAR)                                         \
 803static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
 804{                                                                       \
 805        struct deadline_data *dd = e->elevator_data;                    \
 806                                                                        \
 807        return sysfs_emit(page, "%d\n", __VAR);                         \
 808}
 809#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
 810SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
 811SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
 812SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
 813SHOW_INT(deadline_front_merges_show, dd->front_merges);
  814SHOW_INT(deadline_async_depth_show, dd->async_depth);
 815SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
 816#undef SHOW_INT
 817#undef SHOW_JIFFIES
 818
 819#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
 820static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
 821{                                                                       \
 822        struct deadline_data *dd = e->elevator_data;                    \
 823        int __data, __ret;                                              \
 824                                                                        \
 825        __ret = kstrtoint(page, 0, &__data);                            \
 826        if (__ret < 0)                                                  \
 827                return __ret;                                           \
 828        if (__data < (MIN))                                             \
 829                __data = (MIN);                                         \
 830        else if (__data > (MAX))                                        \
 831                __data = (MAX);                                         \
 832        *(__PTR) = __CONV(__data);                                      \
 833        return count;                                                   \
 834}
 835#define STORE_INT(__FUNC, __PTR, MIN, MAX)                              \
 836        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
 837#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)                          \
 838        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
 839STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
 840STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
 841STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 842STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
  843STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
 844STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
 845#undef STORE_FUNCTION
 846#undef STORE_INT
 847#undef STORE_JIFFIES
 848
 849#define DD_ATTR(name) \
 850        __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
 851
 852static struct elv_fs_entry deadline_attrs[] = {
 853        DD_ATTR(read_expire),
 854        DD_ATTR(write_expire),
 855        DD_ATTR(writes_starved),
 856        DD_ATTR(front_merges),
 857        DD_ATTR(async_depth),
 858        DD_ATTR(fifo_batch),
 859        __ATTR_NULL
 860};
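/*
 * These attributes are exposed through sysfs once the scheduler is active,
 * typically under /sys/block/<disk>/queue/iosched/. For example (illustrative
 * shell usage, not part of this file):
 *   cat /sys/block/sda/queue/iosched/read_expire    -> 500 (milliseconds)
 *   echo 250 > /sys/block/sda/queue/iosched/read_expire
 * The *_expire values are converted between milliseconds and jiffies by the
 * SHOW_JIFFIES()/STORE_JIFFIES() helpers above.
 */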
 861
 862#ifdef CONFIG_BLK_DEBUG_FS
 863#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)               \
 864static void *deadline_##name##_fifo_start(struct seq_file *m,           \
 865                                          loff_t *pos)                  \
 866        __acquires(&dd->lock)                                           \
 867{                                                                       \
 868        struct request_queue *q = m->private;                           \
 869        struct deadline_data *dd = q->elevator->elevator_data;          \
 870        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
 871                                                                        \
 872        spin_lock(&dd->lock);                                           \
 873        return seq_list_start(&per_prio->fifo_list[data_dir], *pos);    \
 874}                                                                       \
 875                                                                        \
 876static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,   \
 877                                         loff_t *pos)                   \
 878{                                                                       \
 879        struct request_queue *q = m->private;                           \
 880        struct deadline_data *dd = q->elevator->elevator_data;          \
 881        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
 882                                                                        \
 883        return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);   \
 884}                                                                       \
 885                                                                        \
 886static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)    \
 887        __releases(&dd->lock)                                           \
 888{                                                                       \
 889        struct request_queue *q = m->private;                           \
 890        struct deadline_data *dd = q->elevator->elevator_data;          \
 891                                                                        \
 892        spin_unlock(&dd->lock);                                         \
 893}                                                                       \
 894                                                                        \
 895static const struct seq_operations deadline_##name##_fifo_seq_ops = {   \
 896        .start  = deadline_##name##_fifo_start,                         \
 897        .next   = deadline_##name##_fifo_next,                          \
 898        .stop   = deadline_##name##_fifo_stop,                          \
 899        .show   = blk_mq_debugfs_rq_show,                               \
 900};                                                                      \
 901                                                                        \
 902static int deadline_##name##_next_rq_show(void *data,                   \
 903                                          struct seq_file *m)           \
 904{                                                                       \
 905        struct request_queue *q = data;                                 \
 906        struct deadline_data *dd = q->elevator->elevator_data;          \
 907        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
 908        struct request *rq = per_prio->next_rq[data_dir];               \
 909                                                                        \
 910        if (rq)                                                         \
 911                __blk_mq_debugfs_rq_show(m, rq);                        \
 912        return 0;                                                       \
 913}
 914
 915DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
 916DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
 917DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
 918DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
 919DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
 920DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
 921#undef DEADLINE_DEBUGFS_DDIR_ATTRS
 922
 923static int deadline_batching_show(void *data, struct seq_file *m)
 924{
 925        struct request_queue *q = data;
 926        struct deadline_data *dd = q->elevator->elevator_data;
 927
 928        seq_printf(m, "%u\n", dd->batching);
 929        return 0;
 930}
 931
 932static int deadline_starved_show(void *data, struct seq_file *m)
 933{
 934        struct request_queue *q = data;
 935        struct deadline_data *dd = q->elevator->elevator_data;
 936
 937        seq_printf(m, "%u\n", dd->starved);
 938        return 0;
 939}
 940
 941static int dd_async_depth_show(void *data, struct seq_file *m)
 942{
 943        struct request_queue *q = data;
 944        struct deadline_data *dd = q->elevator->elevator_data;
 945
 946        seq_printf(m, "%u\n", dd->async_depth);
 947        return 0;
 948}
 949
 950/* Number of requests queued for a given priority level. */
 951static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
 952{
 953        return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
 954}
 955
 956static int dd_queued_show(void *data, struct seq_file *m)
 957{
 958        struct request_queue *q = data;
 959        struct deadline_data *dd = q->elevator->elevator_data;
 960
 961        seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
 962                   dd_queued(dd, DD_BE_PRIO),
 963                   dd_queued(dd, DD_IDLE_PRIO));
 964        return 0;
 965}
 966
 967/* Number of requests owned by the block driver for a given priority. */
 968static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
 969{
 970        return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
 971                - dd_sum(dd, completed, prio);
 972}
 973
 974static int dd_owned_by_driver_show(void *data, struct seq_file *m)
 975{
 976        struct request_queue *q = data;
 977        struct deadline_data *dd = q->elevator->elevator_data;
 978
 979        seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
 980                   dd_owned_by_driver(dd, DD_BE_PRIO),
 981                   dd_owned_by_driver(dd, DD_IDLE_PRIO));
 982        return 0;
 983}
 984
 985#define DEADLINE_DISPATCH_ATTR(prio)                                    \
 986static void *deadline_dispatch##prio##_start(struct seq_file *m,        \
 987                                             loff_t *pos)               \
 988        __acquires(&dd->lock)                                           \
 989{                                                                       \
 990        struct request_queue *q = m->private;                           \
 991        struct deadline_data *dd = q->elevator->elevator_data;          \
 992        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
 993                                                                        \
 994        spin_lock(&dd->lock);                                           \
 995        return seq_list_start(&per_prio->dispatch, *pos);               \
 996}                                                                       \
 997                                                                        \
 998static void *deadline_dispatch##prio##_next(struct seq_file *m,         \
 999                                            void *v, loff_t *pos)       \
1000{                                                                       \
1001        struct request_queue *q = m->private;                           \
1002        struct deadline_data *dd = q->elevator->elevator_data;          \
1003        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
1004                                                                        \
1005        return seq_list_next(v, &per_prio->dispatch, pos);              \
1006}                                                                       \
1007                                                                        \
1008static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
1009        __releases(&dd->lock)                                           \
1010{                                                                       \
1011        struct request_queue *q = m->private;                           \
1012        struct deadline_data *dd = q->elevator->elevator_data;          \
1013                                                                        \
1014        spin_unlock(&dd->lock);                                         \
1015}                                                                       \
1016                                                                        \
1017static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
1018        .start  = deadline_dispatch##prio##_start,                      \
1019        .next   = deadline_dispatch##prio##_next,                       \
1020        .stop   = deadline_dispatch##prio##_stop,                       \
1021        .show   = blk_mq_debugfs_rq_show,                               \
1022}
1023
1024DEADLINE_DISPATCH_ATTR(0);
1025DEADLINE_DISPATCH_ATTR(1);
1026DEADLINE_DISPATCH_ATTR(2);
1027#undef DEADLINE_DISPATCH_ATTR
1028
1029#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                 \
1030        {#name "_fifo_list", 0400,                                      \
1031                        .seq_ops = &deadline_##name##_fifo_seq_ops}
1032#define DEADLINE_NEXT_RQ_ATTR(name)                                     \
1033        {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
1034static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
1035        DEADLINE_QUEUE_DDIR_ATTRS(read0),
1036        DEADLINE_QUEUE_DDIR_ATTRS(write0),
1037        DEADLINE_QUEUE_DDIR_ATTRS(read1),
1038        DEADLINE_QUEUE_DDIR_ATTRS(write1),
1039        DEADLINE_QUEUE_DDIR_ATTRS(read2),
1040        DEADLINE_QUEUE_DDIR_ATTRS(write2),
1041        DEADLINE_NEXT_RQ_ATTR(read0),
1042        DEADLINE_NEXT_RQ_ATTR(write0),
1043        DEADLINE_NEXT_RQ_ATTR(read1),
1044        DEADLINE_NEXT_RQ_ATTR(write1),
1045        DEADLINE_NEXT_RQ_ATTR(read2),
1046        DEADLINE_NEXT_RQ_ATTR(write2),
1047        {"batching", 0400, deadline_batching_show},
1048        {"starved", 0400, deadline_starved_show},
1049        {"async_depth", 0400, dd_async_depth_show},
1050        {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
1051        {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
1052        {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
1053        {"owned_by_driver", 0400, dd_owned_by_driver_show},
1054        {"queued", 0400, dd_queued_show},
1055        {},
1056};
1057#undef DEADLINE_QUEUE_DDIR_ATTRS
1058#endif
1059
1060static struct elevator_type mq_deadline = {
1061        .ops = {
1062                .depth_updated          = dd_depth_updated,
1063                .limit_depth            = dd_limit_depth,
1064                .insert_requests        = dd_insert_requests,
1065                .dispatch_request       = dd_dispatch_request,
1066                .prepare_request        = dd_prepare_request,
1067                .finish_request         = dd_finish_request,
1068                .next_request           = elv_rb_latter_request,
1069                .former_request         = elv_rb_former_request,
1070                .bio_merge              = dd_bio_merge,
1071                .request_merge          = dd_request_merge,
1072                .requests_merged        = dd_merged_requests,
1073                .request_merged         = dd_request_merged,
1074                .has_work               = dd_has_work,
1075                .init_sched             = dd_init_sched,
1076                .exit_sched             = dd_exit_sched,
1077                .init_hctx              = dd_init_hctx,
1078        },
1079
1080#ifdef CONFIG_BLK_DEBUG_FS
1081        .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
1082#endif
1083        .elevator_attrs = deadline_attrs,
1084        .elevator_name = "mq-deadline",
1085        .elevator_alias = "deadline",
1086        .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
1087        .elevator_owner = THIS_MODULE,
1088};
1089MODULE_ALIAS("mq-deadline-iosched");
1090
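/*
 * elv_register() below makes the scheduler available under the names
 * "mq-deadline" and "deadline"; it can then be selected at runtime, e.g. with
 * "echo mq-deadline > /sys/block/<disk>/queue/scheduler" (illustrative path).
 */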
1091static int __init deadline_init(void)
1092{
1093        return elv_register(&mq_deadline);
1094}
1095
1096static void __exit deadline_exit(void)
1097{
1098        elv_unregister(&mq_deadline);
1099}
1100
1101module_init(deadline_init);
1102module_exit(deadline_exit);
1103
1104MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
1105MODULE_LICENSE("GPL");
1106MODULE_DESCRIPTION("MQ deadline IO scheduler");
1107