linux/block/elevator.c
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))

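/*
 * Illustrative note (not in the original source): the hash key is the
 * request's *end* sector, so a bio that starts where an existing request
 * ends hashes to the same bucket.  E.g. a request covering sectors
 * 2048..2055 (blk_rq_pos() == 2048, blk_rq_sectors() == 8) has
 * rq_hash_key() == 2056; elv_rqhash_find(q, 2056) then returns it as a
 * back-merge candidate for a bio whose bi_iter.bi_sector is 2056.
 */
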
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.allow_merge)
                return e->type->ops.mq.allow_merge(q, rq, bio);
        else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
                return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return false;

        if (!elv_iosched_allow_bio_merge(rq, bio))
                return false;

        return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static bool elevator_match(const struct elevator_type *e, const char *name)
{
        if (!strcmp(e->elevator_name, name))
                return true;
        if (e->elevator_alias && !strcmp(e->elevator_alias, name))
                return true;

        return false;
}

/*
 * Return the scheduler with name 'name' and with matching 'mq' capability.
 */
static struct elevator_type *elevator_find(const char *name, bool mq)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (elevator_match(e, name) && (mq == e->uses_mq))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(struct request_queue *q,
                                          const char *name, bool try_loading)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name, q->mq_ops != NULL);
        if (!e && try_loading) {
                spin_unlock(&elv_list_lock);
                request_module("%s-iosched", name);
                spin_lock(&elv_list_lock);
                e = elevator_find(name, q->mq_ops != NULL);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);
        return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);
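
/*
 * Usage note (illustrative, not in the original source): the legacy boot
 * parameter selects the default scheduler for single-queue devices at
 * boot, e.g. booting with
 *
 *      elevator=deadline
 *
 * stores "deadline" in chosen_elevator, which elevator_init() consults
 * below.  blk-mq devices ignore this parameter; their scheduler is picked
 * per-queue (see elevator_init() and the sysfs interface further down).
 */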

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
        struct elevator_type *e;

        if (!chosen_elevator[0])
                return;

        /*
         * The boot parameter is deprecated and was never supported for MQ.
         * Only look for non-mq schedulers from here.
         */
        spin_lock(&elv_list_lock);
        e = elevator_find(chosen_elevator, false);
        spin_unlock(&elv_list_lock);

        if (!e)
                request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        struct elevator_queue *eq;

        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                return NULL;

        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);
        hash_init(eq->hash);
        eq->uses_mq = e->uses_mq;

        return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
        struct elevator_type *e = NULL;
        int err;

        /*
         * q->sysfs_lock must be held to provide mutual exclusion between
         * elevator_switch() and here.
         */
        lockdep_assert_held(&q->sysfs_lock);

        if (unlikely(q->elevator))
                return 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name) {
                e = elevator_get(q, name, true);
                if (!e)
                        return -EINVAL;
        }

        /*
         * Use the default elevator specified by the boot parameter for
         * non-mq devices, or by the config option.  Don't try to load
         * modules here, since we may be running from an async context
         * where request_module() is not allowed.
         */
        if (!e && !q->mq_ops && *chosen_elevator) {
                e = elevator_get(q, chosen_elevator, false);
                if (!e)
                        printk(KERN_ERR "I/O scheduler %s not found\n",
                                                        chosen_elevator);
        }

        if (!e) {
                /*
                 * For blk-mq devices, we default to using mq-deadline,
                 * if available, for single queue devices. If deadline
                 * isn't available OR we have multiple queues, default
                 * to "none".
                 */
                if (q->mq_ops) {
                        if (q->nr_hw_queues == 1)
                                e = elevator_get(q, "mq-deadline", false);
                        if (!e)
                                return 0;
                } else
                        e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);

                if (!e) {
                        printk(KERN_ERR
                                "Default I/O scheduler not found. " \
                                "Using noop.\n");
                        e = elevator_get(q, "noop", false);
                }
        }

        if (e->uses_mq)
                err = blk_mq_init_sched(q, e);
        else
                err = e->ops.sq.elevator_init_fn(q, e);
        if (err)
                elevator_put(e);
        return err;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->uses_mq && e->type->ops.mq.exit_sched)
                blk_mq_exit_sched(q, e);
        else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
                e->type->ops.sq.elevator_exit_fn(e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
        hash_del(&rq->hash);
        rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
        rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_node *next;
        struct request *rq;

        hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

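/*
 * Illustrative sketch (not part of this file): a single-queue elevator
 * typically keeps its pending requests sorted by sector with these
 * helpers, roughly:
 *
 *      struct rb_root sort_list = RB_ROOT;
 *
 *      elv_rb_add(&sort_list, rq);             // insert by blk_rq_pos()
 *      __rq = elv_rb_find(&sort_list, 2048);   // exact-sector lookup,
 *                                              // used for front merges
 *      elv_rb_del(&sort_list, rq);             // on dispatch/removal
 *
 * deadline-iosched, for instance, keeps one such tree per data direction.
 */
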
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (req_op(rq) != req_op(pos))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
                        break;
                if (blk_rq_pos(rq) >= boundary) {
                        if (blk_rq_pos(pos) < boundary)
                                continue;
                } else {
                        if (blk_rq_pos(pos) >= boundary)
                                break;
                }
                if (blk_rq_pos(rq) >= blk_rq_pos(pos))
                        break;
        }

        list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
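
/*
 * Worked example (illustrative, not in the original source): with
 * q->end_sector == 1000, the scan treats the dispatch list as one
 * elevator sweep starting at the boundary.  Requests at or past sector
 * 1000 sort before those that wrapped below it, so for a queued order of
 * 1200, 1500, 300, 600 a new request at sector 1400 is placed between
 * 1200 and 1500, while one at sector 400 lands between 300 and 600.
 */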

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
                struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;

        /*
         * Levels of merges:
         *      nomerges:  No merges at all attempted
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                enum elv_merge ret = blk_try_merge(q->last_merge, bio);

                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->uses_mq && e->type->ops.mq.request_merge)
                return e->type->ops.mq.request_merge(q, req, bio);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
                return e->type->ops.sq.elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}
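
/*
 * Usage note (illustrative, not in the original source): the merge level
 * is controlled from userspace via /sys/block/<dev>/queue/nomerges:
 *
 *      echo 0 > /sys/block/sda/queue/nomerges   # all merge tries ("merges")
 *      echo 1 > /sys/block/sda/queue/nomerges   # one-hit cache only ("noxmerges")
 *      echo 2 > /sys/block/sda/queue/nomerges   # no merging ("nomerges")
 */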

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
        struct request *__rq;
        bool ret;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
                return true;

        if (blk_queue_noxmerges(q))
                return false;

        ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
        while (1) {
                __rq = elv_rqhash_find(q, blk_rq_pos(rq));
                if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                        break;

                /* The merged request could be merged with others, try again */
                ret = true;
                rq = __rq;
        }

        return ret;
}
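
/*
 * Worked example (illustrative, not in the original source): suppose the
 * hash holds A covering sectors 0..7 and B covering 8..15, and C covering
 * 16..23 is being inserted.  The first lookup with blk_rq_pos(C) == 16
 * finds B (rq_hash_key(B) == 16) and appends C to B.  The loop then
 * retries with rq = B: blk_rq_pos(B) == 8 finds A (rq_hash_key(A) == 8),
 * so B is appended to A as well, collapsing all three into one request.
 */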

void elv_merged_request(struct request_queue *q, struct request *rq,
                enum elv_merge type)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.request_merged)
                e->type->ops.mq.request_merged(q, rq, type);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
                e->type->ops.sq.elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        struct elevator_queue *e = q->elevator;
        bool next_sorted = false;

        if (e->uses_mq && e->type->ops.mq.requests_merged)
                e->type->ops.mq.requests_merged(q, rq, next);
        else if (e->type->ops.sq.elevator_merge_req_fn) {
                next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
                if (next_sorted)
                        e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
        }

        elv_rqhash_reposition(q, rq);

        if (next_sorted) {
                elv_rqhash_del(q, next);
                q->nr_sorted--;
        }

        q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
                        struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        if (e->type->ops.sq.elevator_bio_merged_fn)
                e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}

#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
        if (rq->q->dev && !(rq->rq_flags & RQF_PM))
                rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
        if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
            (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
                pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
                                      struct request *rq)
{
}
#endif

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if (rq->rq_flags & RQF_SORTED)
                        elv_deactivate_rq(q, rq);
        }

        rq->rq_flags &= ~RQF_STARTED;

        blk_pm_requeue_request(rq);

        __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        static int printed;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        lockdep_assert_held(q->queue_lock);

        while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted && printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
        }
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        trace_block_rq_insert(q, rq);

        blk_pm_add_request(q, rq);

        rq->q = q;

        if (rq->rq_flags & RQF_SOFTBARRIER) {
                /* barriers are scheduling boundary, update end_sector */
                if (!blk_rq_is_passthrough(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->rq_flags & RQF_ELVPRIV) &&
                    (where == ELEVATOR_INSERT_SORT ||
                     where == ELEVATOR_INSERT_SORT_MERGE))
                where = ELEVATOR_INSERT_BACK;

        switch (where) {
        case ELEVATOR_INSERT_REQUEUE:
        case ELEVATOR_INSERT_FRONT:
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->rq_flags |= RQF_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in a hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                __blk_run_queue(q);
                break;

        case ELEVATOR_INSERT_SORT_MERGE:
                /*
                 * If we succeed in merging this request with one in the
                 * queue already, we are done - rq has now been freed,
                 * so no need to do anything further.
                 */
                if (elv_attempt_insert_merge(q, rq))
                        break;
                /* fall through */
        case ELEVATOR_INSERT_SORT:
                BUG_ON(blk_rq_is_passthrough(rq));
                rq->rq_flags |= RQF_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_FLUSH:
                rq->rq_flags |= RQF_SOFTBARRIER;
                blk_insert_flush(rq);
                break;
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __func__, where);
                BUG();
        }
}
EXPORT_SYMBOL(__elv_add_request);
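
/*
 * Summary (illustrative, not in the original source) of how callers tend
 * to use the insertion points above:
 *
 *      ELEVATOR_INSERT_FRONT/REQUEUE  requeues and barrier-like requests;
 *                                     bypass the scheduler entirely
 *      ELEVATOR_INSERT_BACK           drains the scheduler first, then
 *                                     appends (e.g. passthrough requests)
 *      ELEVATOR_INSERT_SORT(_MERGE)   normal fs requests handed to the
 *                                     scheduler, optionally merge-first
 *      ELEVATOR_INSERT_FLUSH          flush/FUA requests, routed through
 *                                     the flush state machine
 */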

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.next_request)
                return e->type->ops.mq.next_request(q, rq);
        else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
                return e->type->ops.sq.elevator_latter_req_fn(q, rq);

        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.former_request)
                return e->type->ops.mq.former_request(q, rq);
        if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
                return e->type->ops.sq.elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
                    struct bio *bio, gfp_t gfp_mask)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return 0;

        if (e->type->ops.sq.elevator_set_req_fn)
                return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
        return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        if (e->type->ops.sq.elevator_put_req_fn)
                e->type->ops.sq.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, unsigned int op)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return 0;

        if (e->type->ops.sq.elevator_may_queue_fn)
                return e->type->ops.sq.elevator_may_queue_fn(q, op);

        return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        /*
         * The driver has released the request, so its io must be done.
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if ((rq->rq_flags & RQF_SORTED) &&
                    e->type->ops.sq.elevator_completed_req_fn)
                        e->type->ops.sq.elevator_completed_req_fn(q, rq);
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        int error;

        lockdep_assert_held(&q->sysfs_lock);

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
                e->registered = 1;
                if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
                        e->type->ops.sq.elevator_registered_fn(q);
        }
        return error;
}
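
/*
 * Illustrative note (not in the original source): the kobject added above
 * shows up as /sys/block/<disk>/queue/iosched/, with one file per entry
 * in the scheduler's elevator_attrs table.  For mq-deadline, for example:
 *
 *      $ ls /sys/block/sda/queue/iosched/
 *      fifo_batch  front_merges  read_expire  write_expire  writes_starved
 */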

void elv_unregister_queue(struct request_queue *q)
{
        lockdep_assert_held(&q->sysfs_lock);

        if (q) {
                struct elevator_queue *e = q->elevator;

                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
                e->registered = 0;
                /* Re-enable throttling in case elevator disabled it */
                wbt_enable_default(q);
        }
}

int elv_register(struct elevator_type *e)
{
        char *def = "";

        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (elevator_find(e->elevator_name, e->uses_mq)) {
                spin_unlock(&elv_list_lock);
                if (e->icq_cache)
                        kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        /* print pretty message */
        if (elevator_match(e, chosen_elevator) ||
                        (!*chosen_elevator &&
                         elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
                                def = " (default)";

        printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
                                                                def);
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
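
/*
 * Illustrative sketch (not part of this file, names hypothetical): a
 * scheduler module registers itself with elv_register()/elv_unregister()
 * from its module init/exit hooks, roughly:
 *
 *      static struct elevator_type example_mq_sched = {
 *              .ops.mq = {
 *                      .insert_requests       = example_insert_requests,
 *                      .dispatch_request      = example_dispatch_request,
 *              },
 *              .uses_mq        = true,
 *              .elevator_name  = "example",
 *              .elevator_owner = THIS_MODULE,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return elv_register(&example_mq_sched);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              elv_unregister(&example_mq_sched);
 *      }
 *
 *      module_init(example_init);
 *      module_exit(example_exit);
 *
 * Building the module as "example-iosched" lets elevator_get() autoload
 * it via request_module("%s-iosched", name).
 */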

void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);

static int elevator_switch_mq(struct request_queue *q,
                              struct elevator_type *new_e)
{
        int ret;

        lockdep_assert_held(&q->sysfs_lock);

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        if (q->elevator) {
                if (q->elevator->registered)
                        elv_unregister_queue(q);
                ioc_clear_queue(q);
                elevator_exit(q, q->elevator);
        }

        ret = blk_mq_init_sched(q, new_e);
        if (ret)
                goto out;

        if (new_e) {
                ret = elv_register_queue(q);
                if (ret) {
                        elevator_exit(q, q->elevator);
                        goto out;
                }
        }

        if (new_e)
                blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
        else
                blk_add_trace_msg(q, "elv switch: none");

out:
        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);
        return ret;
}

/*
 * Switch to the new_e io scheduler.  Be careful not to introduce
 * deadlocks - we don't free the old io scheduler before we have
 * allocated what we need for the new one.  This way we have a chance of
 * going back to the old one if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        struct elevator_queue *old = q->elevator;
        bool old_registered = false;
        int err;

        lockdep_assert_held(&q->sysfs_lock);

        if (q->mq_ops)
                return elevator_switch_mq(q, new_e);

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data.
         * Block layer doesn't call into a quiesced elevator - all requests
         * are directly put on the dispatch list without elevator data
         * using INSERT_BACK.  All requests have SOFTBARRIER set and no
         * merge happens either.
         */
        if (old) {
                old_registered = old->registered;

                blk_queue_bypass_start(q);

                /* unregister and clear all auxiliary data of the old elevator */
                if (old_registered)
                        elv_unregister_queue(q);

                ioc_clear_queue(q);
        }

        /* allocate, init and register new elevator */
        err = new_e->ops.sq.elevator_init_fn(q, new_e);
        if (err)
                goto fail_init;

        err = elv_register_queue(q);
        if (err)
                goto fail_register;

        /* done, kill the old one and finish */
        if (old) {
                elevator_exit(q, old);
                blk_queue_bypass_end(q);
        }

        blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

        return 0;

fail_register:
        elevator_exit(q, q->elevator);
fail_init:
        /* switch failed, restore and re-register old elevator */
        if (old) {
                q->elevator = old;
                elv_register_queue(q);
                blk_queue_bypass_end(q);
        }

        return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        /* Make sure queue is not in the middle of being removed */
        if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
                return -ENOENT;

        /*
         * Special case for mq, turn off scheduling
         */
        if (q->mq_ops && !strncmp(name, "none", 4))
                return elevator_switch(q, NULL);

        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(q, strstrip(elevator_name), true);
        if (!e)
                return -EINVAL;

        if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
                elevator_put(e);
                return 0;
        }

        return elevator_switch(q, e);
}

static inline bool elv_support_iosched(struct request_queue *q)
{
        if (q->mq_ops && q->tag_set && (q->tag_set->flags &
                                BLK_MQ_F_NO_SCHED))
                return false;
        return true;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        int ret;

        if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
                return count;

        ret = __elevator_change(q, name);
        if (!ret)
                return count;

        return ret;
}
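
/*
 * Usage note (illustrative, not in the original source): the store/show
 * pair here backs /sys/block/<dev>/queue/scheduler.  Reading it lists
 * the schedulers valid for the queue with the active one in brackets;
 * writing a name switches at runtime:
 *
 *      $ cat /sys/block/sda/queue/scheduler
 *      [mq-deadline] kyber none
 *      $ echo kyber > /sys/block/sda/queue/scheduler
 */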

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *e = q->elevator;
        struct elevator_type *elv = NULL;
        struct elevator_type *__e;
        bool uses_mq = q->mq_ops != NULL;
        int len = 0;

        if (!queue_is_rq_based(q))
                return sprintf(name, "none\n");

        if (!q->elevator)
                len += sprintf(name+len, "[none] ");
        else
                elv = e->type;

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (elv && elevator_match(elv, __e->elevator_name) &&
                    (__e->uses_mq == uses_mq)) {
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                        continue;
                }
                if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
                        len += sprintf(name+len, "%s ", __e->elevator_name);
                else if (!__e->uses_mq && !q->mq_ops)
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        if (q->mq_ops && q->elevator)
                len += sprintf(name+len, "none");

        len += sprintf(len+name, "\n");
        return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);