linux/block/blk-mq.c
   1/*
   2 * Block multiqueue core code
   3 *
   4 * Copyright (C) 2013-2014 Jens Axboe
   5 * Copyright (C) 2013-2014 Christoph Hellwig
   6 */
   7#include <linux/kernel.h>
   8#include <linux/module.h>
   9#include <linux/backing-dev.h>
  10#include <linux/bio.h>
  11#include <linux/blkdev.h>
  12#include <linux/kmemleak.h>
  13#include <linux/mm.h>
  14#include <linux/init.h>
  15#include <linux/slab.h>
  16#include <linux/workqueue.h>
  17#include <linux/smp.h>
  18#include <linux/llist.h>
  19#include <linux/list_sort.h>
  20#include <linux/cpu.h>
  21#include <linux/cache.h>
  22#include <linux/sched/sysctl.h>
  23#include <linux/delay.h>
  24#include <linux/crash_dump.h>
  25
  26#include <trace/events/block.h>
  27
  28#include <linux/blk-mq.h>
  29#include "blk.h"
  30#include "blk-mq.h"
  31#include "blk-mq-tag.h"
  32
  33static DEFINE_MUTEX(all_q_mutex);
  34static LIST_HEAD(all_q_list);
  35
  36static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
  37
  38/*
  39 * Check if any of the ctx's have pending work in this hardware queue
  40 */
  41static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
  42{
  43        unsigned int i;
  44
  45        for (i = 0; i < hctx->ctx_map.size; i++)
  46                if (hctx->ctx_map.map[i].word)
  47                        return true;
  48
  49        return false;
  50}
  51
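/*
 * hctx->ctx_map is an array of cacheline-aligned bitmap words; each software
 * queue (ctx) owns one bit. For example, with bits_per_word == 8, a ctx with
 * index_hw == 11 maps to map[1], bit 3.
 */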
  52static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
  53                                              struct blk_mq_ctx *ctx)
  54{
  55        return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
  56}
  57
  58#define CTX_TO_BIT(hctx, ctx)   \
  59        ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
  60
  61/*
  62 * Mark this ctx as having pending work in this hardware queue
  63 */
  64static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
  65                                     struct blk_mq_ctx *ctx)
  66{
  67        struct blk_align_bitmap *bm = get_bm(hctx, ctx);
  68
  69        if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
  70                set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
  71}
  72
  73static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
  74                                      struct blk_mq_ctx *ctx)
  75{
  76        struct blk_align_bitmap *bm = get_bm(hctx, ctx);
  77
  78        clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
  79}
  80
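/*
 * Start freezing a queue: bump the freeze depth and, on the first freeze,
 * kill q_usage_counter and kick the hardware queues so in-flight requests
 * can drain. Use blk_mq_freeze_queue_wait() to wait for the counter to
 * actually reach zero.
 */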
  81void blk_mq_freeze_queue_start(struct request_queue *q)
  82{
  83        int freeze_depth;
  84
  85        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
  86        if (freeze_depth == 1) {
  87                percpu_ref_kill(&q->q_usage_counter);
  88                blk_mq_run_hw_queues(q, false);
  89        }
  90}
  91EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
  92
  93static void blk_mq_freeze_queue_wait(struct request_queue *q)
  94{
  95        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
  96}
  97
  98/*
  99 * Guarantee no request is in use, so we can change any data structure of
 100 * the queue afterward.
 101 */
 102void blk_freeze_queue(struct request_queue *q)
 103{
 104        /*
 105         * In the !blk_mq case we are only calling this to kill the
 106         * q_usage_counter, otherwise this increases the freeze depth
 107         * and waits for it to return to zero.  For this reason there is
 108         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
 109         * exported to drivers as the only user for unfreeze is blk_mq.
 110         */
 111        blk_mq_freeze_queue_start(q);
 112        blk_mq_freeze_queue_wait(q);
 113}
 114
 115void blk_mq_freeze_queue(struct request_queue *q)
 116{
 117        /*
 118         * ...just an alias to keep freeze and unfreeze actions balanced
 119         * in the blk_mq_* namespace
 120         */
 121        blk_freeze_queue(q);
 122}
 123EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 124
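/*
 * Drop one freeze reference; once the freeze depth reaches zero, re-enable
 * q_usage_counter and wake up anyone blocked waiting to enter the queue.
 */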
 125void blk_mq_unfreeze_queue(struct request_queue *q)
 126{
 127        int freeze_depth;
 128
 129        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
 130        WARN_ON_ONCE(freeze_depth < 0);
 131        if (!freeze_depth) {
 132                percpu_ref_reinit(&q->q_usage_counter);
 133                wake_up_all(&q->mq_freeze_wq);
 134        }
 135}
 136EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 137
 138void blk_mq_wake_waiters(struct request_queue *q)
 139{
 140        struct blk_mq_hw_ctx *hctx;
 141        unsigned int i;
 142
 143        queue_for_each_hw_ctx(q, hctx, i)
 144                if (blk_mq_hw_queue_mapped(hctx))
 145                        blk_mq_tag_wakeup_all(hctx->tags, true);
 146
 147        /*
 148         * If we are called because the queue has now been marked as
 149         * dying, we need to ensure that processes currently waiting on
 150         * the queue are notified as well.
 151         */
 152        wake_up_all(&q->mq_freeze_wq);
 153}
 154
 155bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 156{
 157        return blk_mq_has_free_tags(hctx->tags);
 158}
 159EXPORT_SYMBOL(blk_mq_can_queue);
 160
 161static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 162                               struct request *rq, unsigned int rw_flags)
 163{
 164        if (blk_queue_io_stat(q))
 165                rw_flags |= REQ_IO_STAT;
 166
 167        INIT_LIST_HEAD(&rq->queuelist);
 168        /* csd/requeue_work/fifo_time is initialized before use */
 169        rq->q = q;
 170        rq->mq_ctx = ctx;
 171        rq->cmd_flags |= rw_flags;
 172        /* do not touch atomic flags, it needs atomic ops against the timer */
 173        rq->cpu = -1;
 174        INIT_HLIST_NODE(&rq->hash);
 175        RB_CLEAR_NODE(&rq->rb_node);
 176        rq->rq_disk = NULL;
 177        rq->part = NULL;
 178        rq->start_time = jiffies;
 179#ifdef CONFIG_BLK_CGROUP
 180        rq->rl = NULL;
 181        set_start_time_ns(rq);
 182        rq->io_start_time_ns = 0;
 183#endif
 184        rq->nr_phys_segments = 0;
 185#if defined(CONFIG_BLK_DEV_INTEGRITY)
 186        rq->nr_integrity_segments = 0;
 187#endif
 188        rq->special = NULL;
 189        /* tag was already set */
 190        rq->errors = 0;
 191
 192        rq->cmd = rq->__cmd;
 193
 194        rq->extra_len = 0;
 195        rq->sense_len = 0;
 196        rq->resid_len = 0;
 197        rq->sense = NULL;
 198
 199        INIT_LIST_HEAD(&rq->timeout_list);
 200        rq->timeout = 0;
 201
 202        rq->end_io = NULL;
 203        rq->end_io_data = NULL;
 204        rq->next_rq = NULL;
 205
 206        ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 207}
 208
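/*
 * Try to grab a tag from the hardware context in @data and initialize the
 * request preallocated for that tag. Returns NULL if no tag is available
 * under the given allocation flags.
 */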
 209static struct request *
 210__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 211{
 212        struct request *rq;
 213        unsigned int tag;
 214
 215        tag = blk_mq_get_tag(data);
 216        if (tag != BLK_MQ_TAG_FAIL) {
 217                rq = data->hctx->tags->rqs[tag];
 218
 219                if (blk_mq_tag_busy(data->hctx)) {
 220                        rq->cmd_flags = REQ_MQ_INFLIGHT;
 221                        atomic_inc(&data->hctx->nr_active);
 222                }
 223
 224                rq->tag = tag;
 225                blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
 226                return rq;
 227        }
 228
 229        return NULL;
 230}
 231
 232struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 233                unsigned int flags)
 234{
 235        struct blk_mq_ctx *ctx;
 236        struct blk_mq_hw_ctx *hctx;
 237        struct request *rq;
 238        struct blk_mq_alloc_data alloc_data;
 239        int ret;
 240
 241        ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
 242        if (ret)
 243                return ERR_PTR(ret);
 244
 245        ctx = blk_mq_get_ctx(q);
 246        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 247        blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 248
 249        rq = __blk_mq_alloc_request(&alloc_data, rw);
 250        if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) {
 251                __blk_mq_run_hw_queue(hctx);
 252                blk_mq_put_ctx(ctx);
 253
 254                ctx = blk_mq_get_ctx(q);
 255                hctx = q->mq_ops->map_queue(q, ctx->cpu);
 256                blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
  257                rq = __blk_mq_alloc_request(&alloc_data, rw);
 258                ctx = alloc_data.ctx;
 259        }
 260        blk_mq_put_ctx(ctx);
 261        if (!rq) {
 262                blk_queue_exit(q);
 263                return ERR_PTR(-EWOULDBLOCK);
 264        }
 265        return rq;
 266}
 267EXPORT_SYMBOL(blk_mq_alloc_request);
 268
 269static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 270                                  struct blk_mq_ctx *ctx, struct request *rq)
 271{
 272        const int tag = rq->tag;
 273        struct request_queue *q = rq->q;
 274
 275        if (rq->cmd_flags & REQ_MQ_INFLIGHT)
 276                atomic_dec(&hctx->nr_active);
 277        rq->cmd_flags = 0;
 278
 279        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 280        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
 281        blk_queue_exit(q);
 282}
 283
 284void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
 285{
 286        struct blk_mq_ctx *ctx = rq->mq_ctx;
 287
 288        ctx->rq_completed[rq_is_sync(rq)]++;
 289        __blk_mq_free_request(hctx, ctx, rq);
 290
 291}
 292EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
 293
 294void blk_mq_free_request(struct request *rq)
 295{
 296        struct blk_mq_hw_ctx *hctx;
 297        struct request_queue *q = rq->q;
 298
 299        hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
 300        blk_mq_free_hctx_request(hctx, rq);
 301}
 302EXPORT_SYMBOL_GPL(blk_mq_free_request);
 303
 304inline void __blk_mq_end_request(struct request *rq, int error)
 305{
 306        blk_account_io_done(rq);
 307
 308        if (rq->end_io) {
 309                rq->end_io(rq, error);
 310        } else {
 311                if (unlikely(blk_bidi_rq(rq)))
 312                        blk_mq_free_request(rq->next_rq);
 313                blk_mq_free_request(rq);
 314        }
 315}
 316EXPORT_SYMBOL(__blk_mq_end_request);
 317
 318void blk_mq_end_request(struct request *rq, int error)
 319{
 320        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
 321                BUG();
 322        __blk_mq_end_request(rq, error);
 323}
 324EXPORT_SYMBOL(blk_mq_end_request);
 325
 326static void __blk_mq_complete_request_remote(void *data)
 327{
 328        struct request *rq = data;
 329
 330        rq->q->softirq_done_fn(rq);
 331}
 332
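/*
 * Complete the request on the CPU that submitted it (ctx->cpu), using an
 * IPI when that CPU is remote and does not share a cache with the current
 * one (or when QUEUE_FLAG_SAME_FORCE insists on it); otherwise complete
 * locally via softirq_done_fn.
 */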
 333static void blk_mq_ipi_complete_request(struct request *rq)
 334{
 335        struct blk_mq_ctx *ctx = rq->mq_ctx;
 336        bool shared = false;
 337        int cpu;
 338
 339        if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
 340                rq->q->softirq_done_fn(rq);
 341                return;
 342        }
 343
 344        cpu = get_cpu();
 345        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
 346                shared = cpus_share_cache(cpu, ctx->cpu);
 347
 348        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
 349                rq->csd.func = __blk_mq_complete_request_remote;
 350                rq->csd.info = rq;
 351                rq->csd.flags = 0;
 352                smp_call_function_single_async(ctx->cpu, &rq->csd);
 353        } else {
 354                rq->q->softirq_done_fn(rq);
 355        }
 356        put_cpu();
 357}
 358
 359static void __blk_mq_complete_request(struct request *rq)
 360{
 361        struct request_queue *q = rq->q;
 362
 363        if (!q->softirq_done_fn)
 364                blk_mq_end_request(rq, rq->errors);
 365        else
 366                blk_mq_ipi_complete_request(rq);
 367}
 368
 369/**
 370 * blk_mq_complete_request - end I/O on a request
 371 * @rq:         the request being processed
 372 *
 373 * Description:
 374 *      Ends all I/O on a request. It does not handle partial completions.
  375 *      The actual completion happens out-of-order, through an IPI handler.
 376 **/
 377void blk_mq_complete_request(struct request *rq, int error)
 378{
 379        struct request_queue *q = rq->q;
 380
 381        if (unlikely(blk_should_fake_timeout(q)))
 382                return;
 383        if (!blk_mark_rq_complete(rq)) {
 384                rq->errors = error;
 385                __blk_mq_complete_request(rq);
 386        }
 387}
 388EXPORT_SYMBOL(blk_mq_complete_request);
 389
 390int blk_mq_request_started(struct request *rq)
 391{
 392        return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 393}
 394EXPORT_SYMBOL_GPL(blk_mq_request_started);
 395
 396void blk_mq_start_request(struct request *rq)
 397{
 398        struct request_queue *q = rq->q;
 399
 400        trace_block_rq_issue(q, rq);
 401
 402        rq->resid_len = blk_rq_bytes(rq);
 403        if (unlikely(blk_bidi_rq(rq)))
 404                rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
 405
 406        blk_add_timer(rq);
 407
 408        /*
  409         * Ensure that ->deadline is visible before we set the started
 410         * flag and clear the completed flag.
 411         */
 412        smp_mb__before_atomic();
 413
 414        /*
 415         * Mark us as started and clear complete. Complete might have been
 416         * set if requeue raced with timeout, which then marked it as
 417         * complete. So be sure to clear complete again when we start
 418         * the request, otherwise we'll ignore the completion event.
 419         */
 420        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 421                set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 422        if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
 423                clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 424
 425        if (q->dma_drain_size && blk_rq_bytes(rq)) {
 426                /*
 427                 * Make sure space for the drain appears.  We know we can do
 428                 * this because max_hw_segments has been adjusted to be one
 429                 * fewer than the device can handle.
 430                 */
 431                rq->nr_phys_segments++;
 432        }
 433}
 434EXPORT_SYMBOL(blk_mq_start_request);
 435
 436static void __blk_mq_requeue_request(struct request *rq)
 437{
 438        struct request_queue *q = rq->q;
 439
 440        trace_block_rq_requeue(q, rq);
 441
 442        if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
 443                if (q->dma_drain_size && blk_rq_bytes(rq))
 444                        rq->nr_phys_segments--;
 445        }
 446}
 447
 448void blk_mq_requeue_request(struct request *rq)
 449{
 450        __blk_mq_requeue_request(rq);
 451
 452        BUG_ON(blk_queued_rq(rq));
 453        blk_mq_add_to_requeue_list(rq, true);
 454}
 455EXPORT_SYMBOL(blk_mq_requeue_request);
 456
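/*
 * Requeue worker: splice q->requeue_list locally, insert requests marked
 * REQ_SOFTBARRIER at the head and the rest at the tail, then restart any
 * stopped hardware queues so the requeued work gets dispatched.
 */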
 457static void blk_mq_requeue_work(struct work_struct *work)
 458{
 459        struct request_queue *q =
 460                container_of(work, struct request_queue, requeue_work);
 461        LIST_HEAD(rq_list);
 462        struct request *rq, *next;
 463        unsigned long flags;
 464
 465        spin_lock_irqsave(&q->requeue_lock, flags);
 466        list_splice_init(&q->requeue_list, &rq_list);
 467        spin_unlock_irqrestore(&q->requeue_lock, flags);
 468
 469        list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
 470                if (!(rq->cmd_flags & REQ_SOFTBARRIER))
 471                        continue;
 472
 473                rq->cmd_flags &= ~REQ_SOFTBARRIER;
 474                list_del_init(&rq->queuelist);
 475                blk_mq_insert_request(rq, true, false, false);
 476        }
 477
 478        while (!list_empty(&rq_list)) {
 479                rq = list_entry(rq_list.next, struct request, queuelist);
 480                list_del_init(&rq->queuelist);
 481                blk_mq_insert_request(rq, false, false, false);
 482        }
 483
 484        /*
 485         * Use the start variant of queue running here, so that running
 486         * the requeue work will kick stopped queues.
 487         */
 488        blk_mq_start_hw_queues(q);
 489}
 490
 491void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 492{
 493        struct request_queue *q = rq->q;
 494        unsigned long flags;
 495
 496        /*
 497         * We abuse this flag that is otherwise used by the I/O scheduler to
  498         * request head insertion from the workqueue.
 499         */
 500        BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
 501
 502        spin_lock_irqsave(&q->requeue_lock, flags);
 503        if (at_head) {
 504                rq->cmd_flags |= REQ_SOFTBARRIER;
 505                list_add(&rq->queuelist, &q->requeue_list);
 506        } else {
 507                list_add_tail(&rq->queuelist, &q->requeue_list);
 508        }
 509        spin_unlock_irqrestore(&q->requeue_lock, flags);
 510}
 511EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 512
 513void blk_mq_cancel_requeue_work(struct request_queue *q)
 514{
 515        cancel_work_sync(&q->requeue_work);
 516}
 517EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
 518
 519void blk_mq_kick_requeue_list(struct request_queue *q)
 520{
 521        kblockd_schedule_work(&q->requeue_work);
 522}
 523EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 524
 525void blk_mq_abort_requeue_list(struct request_queue *q)
 526{
 527        unsigned long flags;
 528        LIST_HEAD(rq_list);
 529
 530        spin_lock_irqsave(&q->requeue_lock, flags);
 531        list_splice_init(&q->requeue_list, &rq_list);
 532        spin_unlock_irqrestore(&q->requeue_lock, flags);
 533
 534        while (!list_empty(&rq_list)) {
 535                struct request *rq;
 536
 537                rq = list_first_entry(&rq_list, struct request, queuelist);
 538                list_del_init(&rq->queuelist);
 539                rq->errors = -EIO;
 540                blk_mq_end_request(rq, rq->errors);
 541        }
 542}
 543EXPORT_SYMBOL(blk_mq_abort_requeue_list);
 544
 545struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 546{
 547        if (tag < tags->nr_tags)
 548                return tags->rqs[tag];
 549
 550        return NULL;
 551}
 552EXPORT_SYMBOL(blk_mq_tag_to_rq);
 553
 554struct blk_mq_timeout_data {
 555        unsigned long next;
 556        unsigned int next_set;
 557};
 558
 559void blk_mq_rq_timed_out(struct request *req, bool reserved)
 560{
 561        struct blk_mq_ops *ops = req->q->mq_ops;
 562        enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
 563
 564        /*
 565         * We know that complete is set at this point. If STARTED isn't set
 566         * anymore, then the request isn't active and the "timeout" should
 567         * just be ignored. This can happen due to the bitflag ordering.
 568         * Timeout first checks if STARTED is set, and if it is, assumes
 569         * the request is active. But if we race with completion, then
  570         * both flags will get cleared. So check here again, and ignore
  571         * a timeout event on a request that isn't active.
 572         */
 573        if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
 574                return;
 575
 576        if (ops->timeout)
 577                ret = ops->timeout(req, reserved);
 578
 579        switch (ret) {
 580        case BLK_EH_HANDLED:
 581                __blk_mq_complete_request(req);
 582                break;
 583        case BLK_EH_RESET_TIMER:
 584                blk_add_timer(req);
 585                blk_clear_rq_complete(req);
 586                break;
 587        case BLK_EH_NOT_HANDLED:
 588                break;
 589        default:
 590                printk(KERN_ERR "block: bad eh return: %d\n", ret);
 591                break;
 592        }
 593}
 594
 595static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 596                struct request *rq, void *priv, bool reserved)
 597{
 598        struct blk_mq_timeout_data *data = priv;
 599
 600        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
 601                /*
 602                 * If a request wasn't started before the queue was
 603                 * marked dying, kill it here or it'll go unnoticed.
 604                 */
 605                if (unlikely(blk_queue_dying(rq->q))) {
 606                        rq->errors = -EIO;
 607                        blk_mq_end_request(rq, rq->errors);
 608                }
 609                return;
 610        }
 611
 612        if (time_after_eq(jiffies, rq->deadline)) {
 613                if (!blk_mark_rq_complete(rq))
 614                        blk_mq_rq_timed_out(rq, reserved);
 615        } else if (!data->next_set || time_after(data->next, rq->deadline)) {
 616                data->next = rq->deadline;
 617                data->next_set = 1;
 618        }
 619}
 620
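/*
 * Timeout worker: scan all busy tags for expired requests, re-arm the queue
 * timer for the earliest pending deadline, or idle the tags of every mapped
 * hardware queue if nothing is in flight.
 */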
 621static void blk_mq_timeout_work(struct work_struct *work)
 622{
 623        struct request_queue *q =
 624                container_of(work, struct request_queue, timeout_work);
 625        struct blk_mq_timeout_data data = {
 626                .next           = 0,
 627                .next_set       = 0,
 628        };
 629        int i;
 630
 631        if (blk_queue_enter(q, true))
 632                return;
 633
 634        blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
 635
 636        if (data.next_set) {
 637                data.next = blk_rq_timeout(round_jiffies_up(data.next));
 638                mod_timer(&q->timeout, data.next);
 639        } else {
 640                struct blk_mq_hw_ctx *hctx;
 641
 642                queue_for_each_hw_ctx(q, hctx, i) {
 643                        /* the hctx may be unmapped, so check it here */
 644                        if (blk_mq_hw_queue_mapped(hctx))
 645                                blk_mq_tag_idle(hctx);
 646                }
 647        }
 648        blk_queue_exit(q);
 649}
 650
 651/*
 652 * Reverse check our software queue for entries that we could potentially
 653 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 654 * too much time checking for merges.
 655 */
 656static bool blk_mq_attempt_merge(struct request_queue *q,
 657                                 struct blk_mq_ctx *ctx, struct bio *bio)
 658{
 659        struct request *rq;
 660        int checked = 8;
 661
 662        list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
 663                int el_ret;
 664
 665                if (!checked--)
 666                        break;
 667
 668                if (!blk_rq_merge_ok(rq, bio))
 669                        continue;
 670
 671                el_ret = blk_try_merge(rq, bio);
 672                if (el_ret == ELEVATOR_BACK_MERGE) {
 673                        if (bio_attempt_back_merge(q, rq, bio)) {
 674                                ctx->rq_merged++;
 675                                return true;
 676                        }
 677                        break;
 678                } else if (el_ret == ELEVATOR_FRONT_MERGE) {
 679                        if (bio_attempt_front_merge(q, rq, bio)) {
 680                                ctx->rq_merged++;
 681                                return true;
 682                        }
 683                        break;
 684                }
 685        }
 686
 687        return false;
 688}
 689
 690/*
 691 * Process software queues that have been marked busy, splicing them
  692 * to the for-dispatch list.
 693 */
 694static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 695{
 696        struct blk_mq_ctx *ctx;
 697        int i;
 698
 699        for (i = 0; i < hctx->ctx_map.size; i++) {
 700                struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
 701                unsigned int off, bit;
 702
 703                if (!bm->word)
 704                        continue;
 705
 706                bit = 0;
 707                off = i * hctx->ctx_map.bits_per_word;
 708                do {
 709                        bit = find_next_bit(&bm->word, bm->depth, bit);
 710                        if (bit >= bm->depth)
 711                                break;
 712
 713                        ctx = hctx->ctxs[bit + off];
 714                        clear_bit(bit, &bm->word);
 715                        spin_lock(&ctx->lock);
 716                        list_splice_tail_init(&ctx->rq_list, list);
 717                        spin_unlock(&ctx->lock);
 718
 719                        bit++;
 720                } while (1);
 721        }
 722}
 723
 724/*
 725 * Run this hardware queue, pulling any software queues mapped to it in.
 726 * Note that this function currently has various problems around ordering
 727 * of IO. In particular, we'd like FIFO behaviour on handling existing
 728 * items on the hctx->dispatch list. Ignore that for now.
 729 */
 730static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 731{
 732        struct request_queue *q = hctx->queue;
 733        struct request *rq;
 734        LIST_HEAD(rq_list);
 735        LIST_HEAD(driver_list);
 736        struct list_head *dptr;
 737        int queued;
 738
 739        WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
 740
 741        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 742                return;
 743
 744        hctx->run++;
 745
 746        /*
 747         * Touch any software queue that has pending entries.
 748         */
 749        flush_busy_ctxs(hctx, &rq_list);
 750
 751        /*
 752         * If we have previous entries on our dispatch list, grab them
 753         * and stuff them at the front for more fair dispatch.
 754         */
 755        if (!list_empty_careful(&hctx->dispatch)) {
 756                spin_lock(&hctx->lock);
 757                if (!list_empty(&hctx->dispatch))
 758                        list_splice_init(&hctx->dispatch, &rq_list);
 759                spin_unlock(&hctx->lock);
 760        }
 761
 762        /*
 763         * Start off with dptr being NULL, so we start the first request
 764         * immediately, even if we have more pending.
 765         */
 766        dptr = NULL;
 767
 768        /*
 769         * Now process all the entries, sending them to the driver.
 770         */
 771        queued = 0;
 772        while (!list_empty(&rq_list)) {
 773                struct blk_mq_queue_data bd;
 774                int ret;
 775
 776                rq = list_first_entry(&rq_list, struct request, queuelist);
 777                list_del_init(&rq->queuelist);
 778
 779                bd.rq = rq;
 780                bd.list = dptr;
 781                bd.last = list_empty(&rq_list);
 782
 783                ret = q->mq_ops->queue_rq(hctx, &bd);
 784                switch (ret) {
 785                case BLK_MQ_RQ_QUEUE_OK:
 786                        queued++;
 787                        continue;
 788                case BLK_MQ_RQ_QUEUE_BUSY:
 789                        list_add(&rq->queuelist, &rq_list);
 790                        __blk_mq_requeue_request(rq);
 791                        break;
 792                default:
 793                        pr_err("blk-mq: bad return on queue: %d\n", ret);
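                        /* fall through: treat unknown return values as errors */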
 794                case BLK_MQ_RQ_QUEUE_ERROR:
 795                        rq->errors = -EIO;
 796                        blk_mq_end_request(rq, rq->errors);
 797                        break;
 798                }
 799
 800                if (ret == BLK_MQ_RQ_QUEUE_BUSY)
 801                        break;
 802
 803                /*
 804                 * We've done the first request. If we have more than 1
 805                 * left in the list, set dptr to defer issue.
 806                 */
 807                if (!dptr && rq_list.next != rq_list.prev)
 808                        dptr = &driver_list;
 809        }
 810
 811        if (!queued)
 812                hctx->dispatched[0]++;
 813        else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
 814                hctx->dispatched[ilog2(queued) + 1]++;
 815
 816        /*
 817         * Any items that need requeuing? Stuff them into hctx->dispatch,
 818         * that is where we will continue on next queue run.
 819         */
 820        if (!list_empty(&rq_list)) {
 821                spin_lock(&hctx->lock);
 822                list_splice(&rq_list, &hctx->dispatch);
 823                spin_unlock(&hctx->lock);
 824                /*
  825                 * The queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY,
  826                 * but it's possible the queue is stopped and restarted again
  827                 * before this. Queue restart will dispatch requests. And since
  828                 * requests in rq_list aren't added into hctx->dispatch yet,
  829                 * the requests in rq_list might get lost.
  830                 *
  831                 * blk_mq_run_hw_queue() already checks the STOPPED bit.
  832                 */
 833                blk_mq_run_hw_queue(hctx, true);
 834        }
 835}
 836
 837/*
 838 * It'd be great if the workqueue API had a way to pass
 839 * in a mask and had some smarts for more clever placement.
 840 * For now we just round-robin here, switching for every
 841 * BLK_MQ_CPU_WORK_BATCH queued items.
 842 */
 843static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 844{
 845        if (hctx->queue->nr_hw_queues == 1)
 846                return WORK_CPU_UNBOUND;
 847
 848        if (--hctx->next_cpu_batch <= 0) {
 849                int cpu = hctx->next_cpu, next_cpu;
 850
 851                next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
 852                if (next_cpu >= nr_cpu_ids)
 853                        next_cpu = cpumask_first(hctx->cpumask);
 854
 855                hctx->next_cpu = next_cpu;
 856                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 857
 858                return cpu;
 859        }
 860
 861        return hctx->next_cpu;
 862}
 863
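/*
 * Run a hardware queue: inline on the current CPU when it belongs to the
 * hctx cpumask and @async is false, otherwise by scheduling the run work
 * on one of the mapped CPUs.
 */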
 864void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 865{
 866        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
 867            !blk_mq_hw_queue_mapped(hctx)))
 868                return;
 869
 870        if (!async) {
 871                int cpu = get_cpu();
 872                if (cpumask_test_cpu(cpu, hctx->cpumask)) {
 873                        __blk_mq_run_hw_queue(hctx);
 874                        put_cpu();
 875                        return;
 876                }
 877
 878                put_cpu();
 879        }
 880
 881        kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
 882                        &hctx->run_work, 0);
 883}
 884
 885void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 886{
 887        struct blk_mq_hw_ctx *hctx;
 888        int i;
 889
 890        queue_for_each_hw_ctx(q, hctx, i) {
 891                if ((!blk_mq_hctx_has_pending(hctx) &&
 892                    list_empty_careful(&hctx->dispatch)) ||
 893                    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 894                        continue;
 895
 896                blk_mq_run_hw_queue(hctx, async);
 897        }
 898}
 899EXPORT_SYMBOL(blk_mq_run_hw_queues);
 900
 901void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 902{
 903        cancel_delayed_work(&hctx->run_work);
 904        cancel_delayed_work(&hctx->delay_work);
 905        set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 906}
 907EXPORT_SYMBOL(blk_mq_stop_hw_queue);
 908
 909void blk_mq_stop_hw_queues(struct request_queue *q)
 910{
 911        struct blk_mq_hw_ctx *hctx;
 912        int i;
 913
 914        queue_for_each_hw_ctx(q, hctx, i)
 915                blk_mq_stop_hw_queue(hctx);
 916}
 917EXPORT_SYMBOL(blk_mq_stop_hw_queues);
 918
 919void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 920{
 921        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 922
 923        blk_mq_run_hw_queue(hctx, false);
 924}
 925EXPORT_SYMBOL(blk_mq_start_hw_queue);
 926
 927void blk_mq_start_hw_queues(struct request_queue *q)
 928{
 929        struct blk_mq_hw_ctx *hctx;
 930        int i;
 931
 932        queue_for_each_hw_ctx(q, hctx, i)
 933                blk_mq_start_hw_queue(hctx);
 934}
 935EXPORT_SYMBOL(blk_mq_start_hw_queues);
 936
 937void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 938{
 939        struct blk_mq_hw_ctx *hctx;
 940        int i;
 941
 942        queue_for_each_hw_ctx(q, hctx, i) {
 943                if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 944                        continue;
 945
 946                clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 947                blk_mq_run_hw_queue(hctx, async);
 948        }
 949}
 950EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
 951
 952static void blk_mq_run_work_fn(struct work_struct *work)
 953{
 954        struct blk_mq_hw_ctx *hctx;
 955
 956        hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
 957
 958        __blk_mq_run_hw_queue(hctx);
 959}
 960
 961static void blk_mq_delay_work_fn(struct work_struct *work)
 962{
 963        struct blk_mq_hw_ctx *hctx;
 964
 965        hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
 966
 967        if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
 968                __blk_mq_run_hw_queue(hctx);
 969}
 970
 971void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 972{
 973        if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
 974                return;
 975
 976        kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
 977                        &hctx->delay_work, msecs_to_jiffies(msecs));
 978}
 979EXPORT_SYMBOL(blk_mq_delay_queue);
 980
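/*
 * Add a request to a software queue's rq_list. The caller must hold
 * ctx->lock and is responsible for marking the ctx pending in the hctx.
 */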
 981static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
 982                                            struct blk_mq_ctx *ctx,
 983                                            struct request *rq,
 984                                            bool at_head)
 985{
 986        trace_block_rq_insert(hctx->queue, rq);
 987
 988        if (at_head)
 989                list_add(&rq->queuelist, &ctx->rq_list);
 990        else
 991                list_add_tail(&rq->queuelist, &ctx->rq_list);
 992}
 993
 994static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 995                                    struct request *rq, bool at_head)
 996{
 997        struct blk_mq_ctx *ctx = rq->mq_ctx;
 998
 999        __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
1000        blk_mq_hctx_mark_pending(hctx, ctx);
1001}
1002
1003void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
1004                bool async)
1005{
1006        struct request_queue *q = rq->q;
1007        struct blk_mq_hw_ctx *hctx;
1008        struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
1009
1010        current_ctx = blk_mq_get_ctx(q);
1011        if (!cpu_online(ctx->cpu))
1012                rq->mq_ctx = ctx = current_ctx;
1013
1014        hctx = q->mq_ops->map_queue(q, ctx->cpu);
1015
1016        spin_lock(&ctx->lock);
1017        __blk_mq_insert_request(hctx, rq, at_head);
1018        spin_unlock(&ctx->lock);
1019
1020        if (run_queue)
1021                blk_mq_run_hw_queue(hctx, async);
1022
1023        blk_mq_put_ctx(current_ctx);
1024}
1025
1026static void blk_mq_insert_requests(struct request_queue *q,
1027                                     struct blk_mq_ctx *ctx,
1028                                     struct list_head *list,
1029                                     int depth,
1030                                     bool from_schedule)
1031
1032{
1033        struct blk_mq_hw_ctx *hctx;
1034        struct blk_mq_ctx *current_ctx;
1035
1036        trace_block_unplug(q, depth, !from_schedule);
1037
1038        current_ctx = blk_mq_get_ctx(q);
1039
1040        if (!cpu_online(ctx->cpu))
1041                ctx = current_ctx;
1042        hctx = q->mq_ops->map_queue(q, ctx->cpu);
1043
1044        /*
1045         * Preemption doesn't flush the plug list, so it's possible ctx->cpu
1046         * is offline by now.
1047         */
1048        spin_lock(&ctx->lock);
1049        while (!list_empty(list)) {
1050                struct request *rq;
1051
1052                rq = list_first_entry(list, struct request, queuelist);
1053                list_del_init(&rq->queuelist);
1054                rq->mq_ctx = ctx;
1055                __blk_mq_insert_req_list(hctx, ctx, rq, false);
1056        }
1057        blk_mq_hctx_mark_pending(hctx, ctx);
1058        spin_unlock(&ctx->lock);
1059
1060        blk_mq_run_hw_queue(hctx, from_schedule);
1061        blk_mq_put_ctx(current_ctx);
1062}
1063
1064static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1065{
1066        struct request *rqa = container_of(a, struct request, queuelist);
1067        struct request *rqb = container_of(b, struct request, queuelist);
1068
1069        return !(rqa->mq_ctx < rqb->mq_ctx ||
1070                 (rqa->mq_ctx == rqb->mq_ctx &&
1071                  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1072}
1073
1074void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1075{
1076        struct blk_mq_ctx *this_ctx;
1077        struct request_queue *this_q;
1078        struct request *rq;
1079        LIST_HEAD(list);
1080        LIST_HEAD(ctx_list);
1081        unsigned int depth;
1082
1083        list_splice_init(&plug->mq_list, &list);
1084
1085        list_sort(NULL, &list, plug_ctx_cmp);
1086
1087        this_q = NULL;
1088        this_ctx = NULL;
1089        depth = 0;
1090
1091        while (!list_empty(&list)) {
1092                rq = list_entry_rq(list.next);
1093                list_del_init(&rq->queuelist);
1094                BUG_ON(!rq->q);
1095                if (rq->mq_ctx != this_ctx) {
1096                        if (this_ctx) {
1097                                blk_mq_insert_requests(this_q, this_ctx,
1098                                                        &ctx_list, depth,
1099                                                        from_schedule);
1100                        }
1101
1102                        this_ctx = rq->mq_ctx;
1103                        this_q = rq->q;
1104                        depth = 0;
1105                }
1106
1107                depth++;
1108                list_add_tail(&rq->queuelist, &ctx_list);
1109        }
1110
1111        /*
1112         * If 'this_ctx' is set, we know we have entries to complete
1113         * on 'ctx_list'. Do those.
1114         */
1115        if (this_ctx) {
1116                blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1117                                       from_schedule);
1118        }
1119}
1120
1121static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1122{
1123        init_request_from_bio(rq, bio);
1124
1125        if (blk_do_io_stat(rq))
1126                blk_account_io_start(rq, 1);
1127}
1128
1129static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1130{
1131        return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1132                !blk_queue_nomerges(hctx->queue);
1133}
1134
1135static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1136                                         struct blk_mq_ctx *ctx,
1137                                         struct request *rq, struct bio *bio)
1138{
1139        if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
1140                blk_mq_bio_to_request(rq, bio);
1141                spin_lock(&ctx->lock);
1142insert_rq:
1143                __blk_mq_insert_request(hctx, rq, false);
1144                spin_unlock(&ctx->lock);
1145                return false;
1146        } else {
1147                struct request_queue *q = hctx->queue;
1148
1149                spin_lock(&ctx->lock);
1150                if (!blk_mq_attempt_merge(q, ctx, bio)) {
1151                        blk_mq_bio_to_request(rq, bio);
1152                        goto insert_rq;
1153                }
1154
1155                spin_unlock(&ctx->lock);
1156                __blk_mq_free_request(hctx, ctx, rq);
1157                return true;
1158        }
1159}
1160
1161struct blk_map_ctx {
1162        struct blk_mq_hw_ctx *hctx;
1163        struct blk_mq_ctx *ctx;
1164};
1165
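/*
 * Allocate a request for @bio, first with BLK_MQ_REQ_NOWAIT and, if that
 * fails, again in blocking mode after kicking the hardware queue. On
 * return, @data carries the ctx and hctx the request was allocated from.
 */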
1166static struct request *blk_mq_map_request(struct request_queue *q,
1167                                          struct bio *bio,
1168                                          struct blk_map_ctx *data)
1169{
1170        struct blk_mq_hw_ctx *hctx;
1171        struct blk_mq_ctx *ctx;
1172        struct request *rq;
1173        int rw = bio_data_dir(bio);
1174        struct blk_mq_alloc_data alloc_data;
1175
1176        blk_queue_enter_live(q);
1177        ctx = blk_mq_get_ctx(q);
1178        hctx = q->mq_ops->map_queue(q, ctx->cpu);
1179
1180        if (rw_is_sync(bio->bi_rw))
1181                rw |= REQ_SYNC;
1182
1183        trace_block_getrq(q, bio, rw);
1184        blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
1185        rq = __blk_mq_alloc_request(&alloc_data, rw);
1186        if (unlikely(!rq)) {
1187                __blk_mq_run_hw_queue(hctx);
1188                blk_mq_put_ctx(ctx);
1189                trace_block_sleeprq(q, bio, rw);
1190
1191                ctx = blk_mq_get_ctx(q);
1192                hctx = q->mq_ops->map_queue(q, ctx->cpu);
1193                blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
1194                rq = __blk_mq_alloc_request(&alloc_data, rw);
1195                ctx = alloc_data.ctx;
1196                hctx = alloc_data.hctx;
1197        }
1198
1199        hctx->queued++;
1200        data->hctx = hctx;
1201        data->ctx = ctx;
1202        return rq;
1203}
1204
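/*
 * Issue one request straight to the driver, bypassing the software queues.
 * Returns 0 if the request was queued or ended with an error, and -1 if the
 * driver is busy and the caller should fall back to normal insertion.
 */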
1205static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
1206{
1207        int ret;
1208        struct request_queue *q = rq->q;
1209        struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
1210                        rq->mq_ctx->cpu);
1211        struct blk_mq_queue_data bd = {
1212                .rq = rq,
1213                .list = NULL,
1214                .last = 1
1215        };
1216        blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
1217
1218        /*
1219         * If the driver queued the request, we are done. If it returned a
1220         * hard error, kill the request. For any other result (busy), just
1221         * add it to our list as we previously would have done.
1222         */
1223        ret = q->mq_ops->queue_rq(hctx, &bd);
1224        if (ret == BLK_MQ_RQ_QUEUE_OK) {
1225                *cookie = new_cookie;
1226                return 0;
1227        }
1228
1229        __blk_mq_requeue_request(rq);
1230
1231        if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1232                *cookie = BLK_QC_T_NONE;
1233                rq->errors = -EIO;
1234                blk_mq_end_request(rq, rq->errors);
1235                return 0;
1236        }
1237
1238        return -1;
1239}
1240
1241/*
1242 * Multiple hardware queue variant. This will not use per-process plugs,
1243 * but will attempt to bypass the hctx queueing if we can go straight to
1244 * hardware for SYNC IO.
1245 */
1246static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1247{
1248        const int is_sync = rw_is_sync(bio->bi_rw);
1249        const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1250        struct blk_map_ctx data;
1251        struct request *rq;
1252        unsigned int request_count = 0;
1253        struct blk_plug *plug;
1254        struct request *same_queue_rq = NULL;
1255        blk_qc_t cookie;
1256
1257        blk_queue_bounce(q, &bio);
1258
1259        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1260                bio_io_error(bio);
1261                return BLK_QC_T_NONE;
1262        }
1263
1264        blk_queue_split(q, &bio, q->bio_split);
1265
1266        if (!is_flush_fua && !blk_queue_nomerges(q)) {
1267                if (blk_attempt_plug_merge(q, bio, &request_count,
1268                                           &same_queue_rq))
1269                        return BLK_QC_T_NONE;
1270        } else
1271                request_count = blk_plug_queued_count(q);
1272
1273        rq = blk_mq_map_request(q, bio, &data);
1274        if (unlikely(!rq))
1275                return BLK_QC_T_NONE;
1276
1277        cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
1278
1279        if (unlikely(is_flush_fua)) {
1280                blk_mq_bio_to_request(rq, bio);
1281                blk_insert_flush(rq);
1282                goto run_queue;
1283        }
1284
1285        plug = current->plug;
1286        /*
1287         * If the driver supports deferred issue based on 'last', then
1288         * queue it up like normal since we can potentially save some
1289         * CPU this way.
1290         */
1291        if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1292            !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1293                struct request *old_rq = NULL;
1294
1295                blk_mq_bio_to_request(rq, bio);
1296
1297                /*
1298                 * We do limited plugging. If the bio can be merged, do that.
1299                 * Otherwise the existing request in the plug list will be
1300                 * issued. So the plug list will have one request at most.
1301                 */
1302                if (plug) {
1303                        /*
1304                         * The plug list might get flushed before this. If that
1305                         * happens, same_queue_rq is invalid and the plug list
1306                         * is empty.
1307                         */
1308                        if (same_queue_rq && !list_empty(&plug->mq_list)) {
1309                                old_rq = same_queue_rq;
1310                                list_del_init(&old_rq->queuelist);
1311                        }
1312                        list_add_tail(&rq->queuelist, &plug->mq_list);
1313                } else /* is_sync */
1314                        old_rq = rq;
1315                blk_mq_put_ctx(data.ctx);
1316                if (!old_rq)
1317                        goto done;
1318                if (!blk_mq_direct_issue_request(old_rq, &cookie))
1319                        goto done;
1320                blk_mq_insert_request(old_rq, false, true, true);
1321                goto done;
1322        }
1323
1324        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1325                /*
1326                 * For a SYNC request, send it to the hardware immediately. For
1327                 * an ASYNC request, just ensure that we run it later on. The
1328                 * latter allows for merging opportunities and more efficient
1329                 * dispatching.
1330                 */
1331run_queue:
1332                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1333        }
1334        blk_mq_put_ctx(data.ctx);
1335done:
1336        return cookie;
1337}
1338
1339/*
1340 * Single hardware queue variant. This will attempt to use any per-process
1341 * plug for merging and IO deferral.
1342 */
1343static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
1344{
1345        const int is_sync = rw_is_sync(bio->bi_rw);
1346        const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1347        struct blk_plug *plug;
1348        unsigned int request_count = 0;
1349        struct blk_map_ctx data;
1350        struct request *rq;
1351        blk_qc_t cookie;
1352
1353        blk_queue_bounce(q, &bio);
1354
1355        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1356                bio_io_error(bio);
1357                return BLK_QC_T_NONE;
1358        }
1359
1360        blk_queue_split(q, &bio, q->bio_split);
1361
1362        if (!is_flush_fua && !blk_queue_nomerges(q) &&
1363            blk_attempt_plug_merge(q, bio, &request_count, NULL))
1364                return BLK_QC_T_NONE;
1365
1366        rq = blk_mq_map_request(q, bio, &data);
1367        if (unlikely(!rq))
1368                return BLK_QC_T_NONE;
1369
1370        cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
1371
1372        if (unlikely(is_flush_fua)) {
1373                blk_mq_bio_to_request(rq, bio);
1374                blk_insert_flush(rq);
1375                goto run_queue;
1376        }
1377
1378        /*
1379         * If a task plug exists, use it: since access to it is completely
1380         * lockless, we can use the plug to temporarily store requests until
1381         * the task is either done or scheduled away.
1382         */
1383        plug = current->plug;
1384        if (plug) {
1385                blk_mq_bio_to_request(rq, bio);
1386                if (!request_count)
1387                        trace_block_plug(q);
1388
1389                blk_mq_put_ctx(data.ctx);
1390
1391                if (request_count >= BLK_MAX_REQUEST_COUNT) {
1392                        blk_flush_plug_list(plug, false);
1393                        trace_block_plug(q);
1394                }
1395
1396                list_add_tail(&rq->queuelist, &plug->mq_list);
1397                return cookie;
1398        }
1399
1400        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1401                /*
1402                 * For a SYNC request, send it to the hardware immediately. For
1403                 * an ASYNC request, just ensure that we run it later on. The
1404                 * latter allows for merging opportunities and more efficient
1405                 * dispatching.
1406                 */
1407run_queue:
1408                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1409        }
1410
1411        blk_mq_put_ctx(data.ctx);
1412        return cookie;
1413}
1414
1415/*
1416 * Default mapping to a software queue, since we use one per CPU.
1417 */
1418struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1419{
1420        return q->queue_hw_ctx[q->mq_map[cpu]];
1421}
1422EXPORT_SYMBOL(blk_mq_map_queue);
1423
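/*
 * Tear down the preallocated requests for one hardware queue: run the
 * driver's exit_request() hook on each request, free the backing pages and
 * release the tag map itself.
 */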
1424static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1425                struct blk_mq_tags *tags, unsigned int hctx_idx)
1426{
1427        struct page *page;
1428
1429        if (tags->rqs && set->ops->exit_request) {
1430                int i;
1431
1432                for (i = 0; i < tags->nr_tags; i++) {
1433                        if (!tags->rqs[i])
1434                                continue;
1435                        set->ops->exit_request(set->driver_data, tags->rqs[i],
1436                                                hctx_idx, i);
1437                        tags->rqs[i] = NULL;
1438                }
1439        }
1440
1441        while (!list_empty(&tags->page_list)) {
1442                page = list_first_entry(&tags->page_list, struct page, lru);
1443                list_del_init(&page->lru);
1444                /*
1445                 * Remove kmemleak object previously allocated in
1446                 * blk_mq_init_rq_map().
1447                 */
1448                kmemleak_free(page_address(page));
1449                __free_pages(page, page->private);
1450        }
1451
1452        kfree(tags->rqs);
1453
1454        blk_mq_free_tags(tags);
1455}
1456
1457static size_t order_to_size(unsigned int order)
1458{
1459        return (size_t)PAGE_SIZE << order;
1460}
1461
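/*
 * Allocate the tag map and preallocated requests for one hardware queue.
 * Requests are carved out of page allocations of up to order 4, falling
 * back to smaller orders under memory pressure, and each request is handed
 * to the driver's init_request() hook.
 */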
1462static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1463                unsigned int hctx_idx)
1464{
1465        struct blk_mq_tags *tags;
1466        unsigned int i, j, entries_per_page, max_order = 4;
1467        size_t rq_size, left;
1468
1469        tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1470                                set->numa_node,
1471                                BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1472        if (!tags)
1473                return NULL;
1474
1475        INIT_LIST_HEAD(&tags->page_list);
1476
1477        tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1478                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1479                                 set->numa_node);
1480        if (!tags->rqs) {
1481                blk_mq_free_tags(tags);
1482                return NULL;
1483        }
1484
1485        /*
1486         * rq_size is the size of the request plus driver payload, rounded
1487         * to the cacheline size
1488         */
1489        rq_size = round_up(sizeof(struct request) + set->cmd_size,
1490                                cache_line_size());
1491        left = rq_size * set->queue_depth;
1492
1493        for (i = 0; i < set->queue_depth; ) {
1494                int this_order = max_order;
1495                struct page *page;
1496                int to_do;
1497                void *p;
1498
1499                while (left < order_to_size(this_order - 1) && this_order)
1500                        this_order--;
1501
1502                do {
1503                        page = alloc_pages_node(set->numa_node,
1504                                GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1505                                this_order);
1506                        if (page)
1507                                break;
1508                        if (!this_order--)
1509                                break;
1510                        if (order_to_size(this_order) < rq_size)
1511                                break;
1512                } while (1);
1513
1514                if (!page)
1515                        goto fail;
1516
1517                page->private = this_order;
1518                list_add_tail(&page->lru, &tags->page_list);
1519
1520                p = page_address(page);
1521                /*
1522                 * Allow kmemleak to scan these pages as they contain pointers
1523                 * to additional allocations made via ops->init_request().
1524                 */
1525                kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
1526                entries_per_page = order_to_size(this_order) / rq_size;
1527                to_do = min(entries_per_page, set->queue_depth - i);
1528                left -= to_do * rq_size;
1529                for (j = 0; j < to_do; j++) {
1530                        tags->rqs[i] = p;
1531                        if (set->ops->init_request) {
1532                                if (set->ops->init_request(set->driver_data,
1533                                                tags->rqs[i], hctx_idx, i,
1534                                                set->numa_node)) {
1535                                        tags->rqs[i] = NULL;
1536                                        goto fail;
1537                                }
1538                        }
1539
1540                        p += rq_size;
1541                        i++;
1542                }
1543        }
1544        return tags;
1545
1546fail:
1547        blk_mq_free_rq_map(set, tags, hctx_idx);
1548        return NULL;
1549}
1550
1551static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1552{
1553        kfree(bitmap->map);
1554}
1555
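/*
 * Allocate the ctx pending bitmap: one blk_align_bitmap per group of
 * bits_per_word software queues, with each word's depth trimmed so the
 * total covers exactly nr_cpu_ids bits.
 */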
1556static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1557{
1558        unsigned int bpw = 8, total, num_maps, i;
1559
1560        bitmap->bits_per_word = bpw;
1561
1562        num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1563        bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1564                                        GFP_KERNEL, node);
1565        if (!bitmap->map)
1566                return -ENOMEM;
1567
1568        total = nr_cpu_ids;
1569        for (i = 0; i < num_maps; i++) {
1570                bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1571                total -= bitmap->map[i].depth;
1572        }
1573
1574        return 0;
1575}
1576
1577static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1578{
1579        struct request_queue *q = hctx->queue;
1580        struct blk_mq_ctx *ctx;
1581        LIST_HEAD(tmp);
1582
1583        /*
1584         * Move ctx entries to new CPU, if this one is going away.
1585         */
1586        ctx = __blk_mq_get_ctx(q, cpu);
1587
1588        spin_lock(&ctx->lock);
1589        if (!list_empty(&ctx->rq_list)) {
1590                list_splice_init(&ctx->rq_list, &tmp);
1591                blk_mq_hctx_clear_pending(hctx, ctx);
1592        }
1593        spin_unlock(&ctx->lock);
1594
1595        if (list_empty(&tmp))
1596                return NOTIFY_OK;
1597
1598        ctx = blk_mq_get_ctx(q);
1599        spin_lock(&ctx->lock);
1600
1601        while (!list_empty(&tmp)) {
1602                struct request *rq;
1603
1604                rq = list_first_entry(&tmp, struct request, queuelist);
1605                rq->mq_ctx = ctx;
1606                list_move_tail(&rq->queuelist, &ctx->rq_list);
1607        }
1608
1609        hctx = q->mq_ops->map_queue(q, ctx->cpu);
1610        blk_mq_hctx_mark_pending(hctx, ctx);
1611
1612        spin_unlock(&ctx->lock);
1613
1614        blk_mq_run_hw_queue(hctx, true);
1615        blk_mq_put_ctx(ctx);
1616        return NOTIFY_OK;
1617}
1618
1619static int blk_mq_hctx_notify(void *data, unsigned long action,
1620                              unsigned int cpu)
1621{
1622        struct blk_mq_hw_ctx *hctx = data;
1623
1624        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1625                return blk_mq_hctx_cpu_offline(hctx, cpu);
1626
1627        /*
1628         * In case of CPU online, tags may be reallocated
1629         * in blk_mq_map_swqueue() after mapping is updated.
1630         */
1631
1632        return NOTIFY_OK;
1633}
1634
1635/* hctx->ctxs will be freed in queue's release handler */
1636static void blk_mq_exit_hctx(struct request_queue *q,
1637                struct blk_mq_tag_set *set,
1638                struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1639{
1640        unsigned flush_start_tag = set->queue_depth;
1641
1642        blk_mq_tag_idle(hctx);
1643
1644        if (set->ops->exit_request)
1645                set->ops->exit_request(set->driver_data,
1646                                       hctx->fq->flush_rq, hctx_idx,
1647                                       flush_start_tag + hctx_idx);
1648
1649        if (set->ops->exit_hctx)
1650                set->ops->exit_hctx(hctx, hctx_idx);
1651
1652        blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1653        blk_free_flush_queue(hctx->fq);
1654        blk_mq_free_bitmap(&hctx->ctx_map);
1655}
1656
1657static void blk_mq_exit_hw_queues(struct request_queue *q,
1658                struct blk_mq_tag_set *set, int nr_queue)
1659{
1660        struct blk_mq_hw_ctx *hctx;
1661        unsigned int i;
1662
1663        queue_for_each_hw_ctx(q, hctx, i) {
1664                if (i == nr_queue)
1665                        break;
1666                blk_mq_exit_hctx(q, set, hctx, i);
1667        }
1668}
1669
1670static void blk_mq_free_hw_queues(struct request_queue *q,
1671                struct blk_mq_tag_set *set)
1672{
1673        struct blk_mq_hw_ctx *hctx;
1674        unsigned int i;
1675
1676        queue_for_each_hw_ctx(q, hctx, i)
1677                free_cpumask_var(hctx->cpumask);
1678}
1679
1680static int blk_mq_init_hctx(struct request_queue *q,
1681                struct blk_mq_tag_set *set,
1682                struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1683{
1684        int node;
1685        unsigned flush_start_tag = set->queue_depth;
1686
1687        node = hctx->numa_node;
1688        if (node == NUMA_NO_NODE)
1689                node = hctx->numa_node = set->numa_node;
1690
1691        INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1692        INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1693        spin_lock_init(&hctx->lock);
1694        INIT_LIST_HEAD(&hctx->dispatch);
1695        hctx->queue = q;
1696        hctx->queue_num = hctx_idx;
1697        hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1698
1699        blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1700                                        blk_mq_hctx_notify, hctx);
1701        blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1702
1703        hctx->tags = set->tags[hctx_idx];
1704
1705        /*
1706         * Allocate space for all possible cpus to avoid allocation at
1707         * runtime
1708         */
1709        hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1710                                        GFP_KERNEL, node);
1711        if (!hctx->ctxs)
1712                goto unregister_cpu_notifier;
1713
1714        if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1715                goto free_ctxs;
1716
1717        hctx->nr_ctx = 0;
1718
1719        if (set->ops->init_hctx &&
1720            set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1721                goto free_bitmap;
1722
1723        hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1724        if (!hctx->fq)
1725                goto exit_hctx;
1726
1727        if (set->ops->init_request &&
1728            set->ops->init_request(set->driver_data,
1729                                   hctx->fq->flush_rq, hctx_idx,
1730                                   flush_start_tag + hctx_idx, node))
1731                goto free_fq;
1732
1733        return 0;
1734
1735 free_fq:
1736        blk_free_flush_queue(hctx->fq);
1737 exit_hctx:
1738        if (set->ops->exit_hctx)
1739                set->ops->exit_hctx(hctx, hctx_idx);
1740 free_bitmap:
1741        blk_mq_free_bitmap(&hctx->ctx_map);
1742 free_ctxs:
1743        kfree(hctx->ctxs);
1744 unregister_cpu_notifier:
1745        blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1746
1747        return -1;
1748}
1749
1750static void blk_mq_init_cpu_queues(struct request_queue *q,
1751                                   unsigned int nr_hw_queues)
1752{
1753        unsigned int i;
1754
1755        for_each_possible_cpu(i) {
1756                struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1757                struct blk_mq_hw_ctx *hctx;
1758
1759                memset(__ctx, 0, sizeof(*__ctx));
1760                __ctx->cpu = i;
1761                spin_lock_init(&__ctx->lock);
1762                INIT_LIST_HEAD(&__ctx->rq_list);
1763                __ctx->queue = q;
1764
1765                /* If the cpu isn't online, the cpu is mapped to the first hctx */
1766                if (!cpu_online(i))
1767                        continue;
1768
1769                hctx = q->mq_ops->map_queue(q, i);
1770
1771                /*
1772                 * Set local node, IFF we have more than one hw queue. If
1773                 * not, we remain on the home node of the device
1774                 */
1775                if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1776                        hctx->numa_node = local_memory_node(cpu_to_node(i));
1777        }
1778}
1779
1780static void blk_mq_map_swqueue(struct request_queue *q,
1781                               const struct cpumask *online_mask)
1782{
1783        unsigned int i;
1784        struct blk_mq_hw_ctx *hctx;
1785        struct blk_mq_ctx *ctx;
1786        struct blk_mq_tag_set *set = q->tag_set;
1787
1788        /*
1789         * Avoid others reading an incomplete hctx->cpumask through sysfs
1790         */
1791        mutex_lock(&q->sysfs_lock);
1792
1793        queue_for_each_hw_ctx(q, hctx, i) {
1794                cpumask_clear(hctx->cpumask);
1795                hctx->nr_ctx = 0;
1796        }
1797
1798        /*
1799         * Map software to hardware queues
1800         */
1801        for_each_possible_cpu(i) {
1802                /* If the cpu isn't online, the cpu is mapped to the first hctx */
1803                if (!cpumask_test_cpu(i, online_mask))
1804                        continue;
1805
1806                ctx = per_cpu_ptr(q->queue_ctx, i);
1807                hctx = q->mq_ops->map_queue(q, i);
1808
1809                cpumask_set_cpu(i, hctx->cpumask);
1810                ctx->index_hw = hctx->nr_ctx;
1811                hctx->ctxs[hctx->nr_ctx++] = ctx;
1812        }
1813
1814        mutex_unlock(&q->sysfs_lock);
1815
1816        queue_for_each_hw_ctx(q, hctx, i) {
1817                struct blk_mq_ctxmap *map = &hctx->ctx_map;
1818
1819                /*
1820                 * If no software queues are mapped to this hardware queue,
1821                 * disable it and free the request entries.
1822                 */
1823                if (!hctx->nr_ctx) {
1824                        if (set->tags[i]) {
1825                                blk_mq_free_rq_map(set, set->tags[i], i);
1826                                set->tags[i] = NULL;
1827                        }
1828                        hctx->tags = NULL;
1829                        continue;
1830                }
1831
1832                /* unmapped hw queue can be remapped after CPU topology changes */
1833                if (!set->tags[i])
1834                        set->tags[i] = blk_mq_init_rq_map(set, i);
1835                hctx->tags = set->tags[i];
1836                WARN_ON(!hctx->tags);
1837
1838                cpumask_copy(hctx->tags->cpumask, hctx->cpumask);
1839                /*
1840                 * Size the map to cover just the mapped software queues.
1841                 * This is more accurate and more efficient than looping
1842                 * over all possibly mapped software queues.
1843                 */
1844                map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
1845
1846                /*
1847                 * Initialize batch roundrobin counts
1848                 */
1849                hctx->next_cpu = cpumask_first(hctx->cpumask);
1850                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1851        }
1852}
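
/*
 * Illustrative numbers for the map sizing above: a hardware queue with 10
 * mapped software queues and bits_per_word == 8 ends up with
 * map->size == DIV_ROUND_UP(10, 8) == 2, so the pending-bit scans only have
 * to look at two words rather than the full nr_cpu_ids-sized map.
 */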
1853
1854static void queue_set_hctx_shared(struct request_queue *q, bool shared)
1855{
1856        struct blk_mq_hw_ctx *hctx;
1857        int i;
1858
1859        queue_for_each_hw_ctx(q, hctx, i) {
1860                if (shared)
1861                        hctx->flags |= BLK_MQ_F_TAG_SHARED;
1862                else
1863                        hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1864        }
1865}
1866
1867static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
1868{
1869        struct request_queue *q;
1870
1871        list_for_each_entry(q, &set->tag_list, tag_set_list) {
1872                blk_mq_freeze_queue(q);
1873                queue_set_hctx_shared(q, shared);
1874                blk_mq_unfreeze_queue(q);
1875        }
1876}
1877
1878static void blk_mq_del_queue_tag_set(struct request_queue *q)
1879{
1880        struct blk_mq_tag_set *set = q->tag_set;
1881
1882        mutex_lock(&set->tag_list_lock);
1883        list_del_init(&q->tag_set_list);
1884        if (list_is_singular(&set->tag_list)) {
1885                /* just transitioned to unshared */
1886                set->flags &= ~BLK_MQ_F_TAG_SHARED;
1887                /* update existing queue */
1888                blk_mq_update_tag_set_depth(set, false);
1889        }
1890        mutex_unlock(&set->tag_list_lock);
1891}
1892
1893static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1894                                     struct request_queue *q)
1895{
1896        q->tag_set = set;
1897
1898        mutex_lock(&set->tag_list_lock);
1899
1900        /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
1901        if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
1902                set->flags |= BLK_MQ_F_TAG_SHARED;
1903                /* update existing queue */
1904                blk_mq_update_tag_set_depth(set, true);
1905        }
1906        if (set->flags & BLK_MQ_F_TAG_SHARED)
1907                queue_set_hctx_shared(q, true);
1908        list_add_tail(&q->tag_set_list, &set->tag_list);
1909
1910        mutex_unlock(&set->tag_list_lock);
1911}
1912
1913/*
1914 * This is the actual release handler for mq, but it is run from the
1915 * request queue's release handler to avoid use-after-free problems.
1916 * q->mq_kobj shouldn't have been introduced, but we can't group the
1917 * ctx/hctx kobjects without it.
1918 */
1919void blk_mq_release(struct request_queue *q)
1920{
1921        struct blk_mq_hw_ctx *hctx;
1922        unsigned int i;
1923
1924        /* hctx kobj stays in hctx */
1925        queue_for_each_hw_ctx(q, hctx, i) {
1926                if (!hctx)
1927                        continue;
1928                kfree(hctx->ctxs);
1929                kfree(hctx);
1930        }
1931
1932        kfree(q->mq_map);
1933        q->mq_map = NULL;
1934
1935        kfree(q->queue_hw_ctx);
1936
1937        /* ctx kobj stays in queue_ctx */
1938        free_percpu(q->queue_ctx);
1939}
1940
1941struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1942{
1943        struct request_queue *uninit_q, *q;
1944
1945        uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1946        if (!uninit_q)
1947                return ERR_PTR(-ENOMEM);
1948
1949        q = blk_mq_init_allocated_queue(set, uninit_q);
1950        if (IS_ERR(q))
1951                blk_cleanup_queue(uninit_q);
1952
1953        return q;
1954}
1955EXPORT_SYMBOL(blk_mq_init_queue);
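
/*
 * Illustrative sketch of the usual driver-side setup around
 * blk_mq_init_queue() (a minimal example, not taken from this file; my_dev,
 * my_cmd, my_queue_rq and my_mq_ops are hypothetical names):
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *			       const struct blk_mq_queue_data *bd)
 *	{
 *		// hand bd->rq to the hardware, complete it later
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 *
 *	my_dev->tag_set.ops		= &my_mq_ops;
 *	my_dev->tag_set.nr_hw_queues	= 1;
 *	my_dev->tag_set.queue_depth	= 64;
 *	my_dev->tag_set.numa_node	= NUMA_NO_NODE;
 *	my_dev->tag_set.cmd_size	= sizeof(struct my_cmd);
 *	my_dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	err = blk_mq_alloc_tag_set(&my_dev->tag_set);
 *	if (err)
 *		return err;
 *
 *	q = blk_mq_init_queue(&my_dev->tag_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&my_dev->tag_set);
 *		return PTR_ERR(q);
 *	}
 *
 * Teardown runs in the opposite order: blk_cleanup_queue(q) first, then
 * blk_mq_free_tag_set(&my_dev->tag_set).
 */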
1956
1957static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
1958                                                struct request_queue *q)
1959{
1960        int i, j;
1961        struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
1962
1963        blk_mq_sysfs_unregister(q);
1964        for (i = 0; i < set->nr_hw_queues; i++) {
1965                int node;
1966
1967                if (hctxs[i])
1968                        continue;
1969
1970                node = blk_mq_hw_queue_to_node(q->mq_map, i);
1971                hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1972                                        GFP_KERNEL, node);
1973                if (!hctxs[i])
1974                        break;
1975
1976                if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1977                                                node)) {
1978                        kfree(hctxs[i]);
1979                        hctxs[i] = NULL;
1980                        break;
1981                }
1982
1983                atomic_set(&hctxs[i]->nr_active, 0);
1984                hctxs[i]->numa_node = node;
1985                hctxs[i]->queue_num = i;
1986
1987                if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
1988                        free_cpumask_var(hctxs[i]->cpumask);
1989                        kfree(hctxs[i]);
1990                        hctxs[i] = NULL;
1991                        break;
1992                }
1993                blk_mq_hctx_kobj_init(hctxs[i]);
1994        }
1995        for (j = i; j < q->nr_hw_queues; j++) {
1996                struct blk_mq_hw_ctx *hctx = hctxs[j];
1997
1998                if (hctx) {
1999                        if (hctx->tags) {
2000                                blk_mq_free_rq_map(set, hctx->tags, j);
2001                                set->tags[j] = NULL;
2002                        }
2003                        blk_mq_exit_hctx(q, set, hctx, j);
2004                        free_cpumask_var(hctx->cpumask);
2005                        kobject_put(&hctx->kobj);
2006                        kfree(hctx->ctxs);
2007                        kfree(hctx);
2008                        hctxs[j] = NULL;
2009
2010                }
2011        }
2012        q->nr_hw_queues = i;
2013        blk_mq_sysfs_register(q);
2014}
2015
2016struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2017                                                  struct request_queue *q)
2018{
2019        /* mark the queue as mq asap */
2020        q->mq_ops = set->ops;
2021
2022        q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2023        if (!q->queue_ctx)
2024                return ERR_PTR(-ENOMEM);
2025
2026        q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2027                                                GFP_KERNEL, set->numa_node);
2028        if (!q->queue_hw_ctx)
2029                goto err_percpu;
2030
2031        q->mq_map = blk_mq_make_queue_map(set);
2032        if (!q->mq_map)
2033                goto err_map;
2034
2035        blk_mq_realloc_hw_ctxs(set, q);
2036        if (!q->nr_hw_queues)
2037                goto err_hctxs;
2038
2039        INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2040        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2041
2042        q->nr_queues = nr_cpu_ids;
2043
2044        q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2045
2046        if (!(set->flags & BLK_MQ_F_SG_MERGE))
2047                q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2048
2049        q->sg_reserved_size = INT_MAX;
2050
2051        INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
2052        INIT_LIST_HEAD(&q->requeue_list);
2053        spin_lock_init(&q->requeue_lock);
2054
2055        if (q->nr_hw_queues > 1)
2056                blk_queue_make_request(q, blk_mq_make_request);
2057        else
2058                blk_queue_make_request(q, blk_sq_make_request);
2059
2060        /*
2061         * Do this after blk_queue_make_request() overrides it...
2062         */
2063        q->nr_requests = set->queue_depth;
2064
2065        if (set->ops->complete)
2066                blk_queue_softirq_done(q, set->ops->complete);
2067
2068        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2069
2070        get_online_cpus();
2071        mutex_lock(&all_q_mutex);
2072
2073        list_add_tail(&q->all_q_node, &all_q_list);
2074        blk_mq_add_queue_tag_set(set, q);
2075        blk_mq_map_swqueue(q, cpu_online_mask);
2076
2077        mutex_unlock(&all_q_mutex);
2078        put_online_cpus();
2079
2080        return q;
2081
2082err_hctxs:
2083        kfree(q->mq_map);
2084err_map:
2085        kfree(q->queue_hw_ctx);
2086err_percpu:
2087        free_percpu(q->queue_ctx);
2088        return ERR_PTR(-ENOMEM);
2089}
2090EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2091
2092void blk_mq_free_queue(struct request_queue *q)
2093{
2094        struct blk_mq_tag_set   *set = q->tag_set;
2095
2096        mutex_lock(&all_q_mutex);
2097        list_del_init(&q->all_q_node);
2098        mutex_unlock(&all_q_mutex);
2099
2100        blk_mq_del_queue_tag_set(q);
2101
2102        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2103        blk_mq_free_hw_queues(q, set);
2104}
2105
2106/* Basically redo blk_mq_init_queue with queue frozen */
2107static void blk_mq_queue_reinit(struct request_queue *q,
2108                                const struct cpumask *online_mask)
2109{
2110        WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2111
2112        blk_mq_sysfs_unregister(q);
2113
2114        blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
2115
2116        /*
2117         * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2118         * we should change hctx->numa_node according to the new topology (this
2119         * involves freeing and re-allocating memory; is it worth doing?)
2120         */
2121
2122        blk_mq_map_swqueue(q, online_mask);
2123
2124        blk_mq_sysfs_register(q);
2125}
2126
2127static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
2128                                      unsigned long action, void *hcpu)
2129{
2130        struct request_queue *q;
2131        int cpu = (unsigned long)hcpu;
2132        /*
2133         * New online cpumask which is going to be set in this hotplug event.
2134         * Declare this cpumask as static, since cpu-hotplug operations are
2135         * invoked one by one and allocating it dynamically could fail.
2136         */
2137        static struct cpumask online_new;
2138
2139        /*
2140         * Before a hotadded cpu starts handling requests, new mappings must
2141         * be established.  Otherwise, requests inserted from that cpu might
2142         * never be dispatched.
2143         *
2144         * For example, there is a single hw queue (hctx) and two CPU queues
2145         * (ctx0 for CPU0, and ctx1 for CPU1).
2146         *
2147         * Now CPU1 is just onlined and a request is inserted into
2148         * ctx1->rq_list, and bit0 is set in the pending bitmap because
2149         * ctx1->index_hw is still zero.
2150         *
2151         * And then while running the hw queue, flush_busy_ctxs() finds bit0
2152         * set in the pending bitmap and tries to retrieve requests from
2153         * hctx->ctxs[0]->rq_list.  But hctx->ctxs[0] is a pointer to ctx0,
2154         * so the request in ctx1->rq_list is ignored.
2155         */
2156        switch (action & ~CPU_TASKS_FROZEN) {
2157        case CPU_DEAD:
2158        case CPU_UP_CANCELED:
2159                cpumask_copy(&online_new, cpu_online_mask);
2160                break;
2161        case CPU_UP_PREPARE:
2162                cpumask_copy(&online_new, cpu_online_mask);
2163                cpumask_set_cpu(cpu, &online_new);
2164                break;
2165        default:
2166                return NOTIFY_OK;
2167        }
2168
2169        mutex_lock(&all_q_mutex);
2170
2171        /*
2172         * We need to freeze and reinit all existing queues.  Freezing
2173         * involves synchronous wait for an RCU grace period and doing it
2174         * one by one may take a long time.  Start freezing all queues in
2175         * one swoop and then wait for the completions so that freezing can
2176         * take place in parallel.
2177         */
2178        list_for_each_entry(q, &all_q_list, all_q_node)
2179                blk_mq_freeze_queue_start(q);
2180        list_for_each_entry(q, &all_q_list, all_q_node) {
2181                blk_mq_freeze_queue_wait(q);
2182
2183                /*
2184                 * timeout handler can't touch hw queue during the
2185                 * reinitialization
2186                 */
2187                del_timer_sync(&q->timeout);
2188        }
2189
2190        list_for_each_entry(q, &all_q_list, all_q_node)
2191                blk_mq_queue_reinit(q, &online_new);
2192
2193        list_for_each_entry(q, &all_q_list, all_q_node)
2194                blk_mq_unfreeze_queue(q);
2195
2196        mutex_unlock(&all_q_mutex);
2197        return NOTIFY_OK;
2198}
2199
2200static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2201{
2202        int i;
2203
2204        for (i = 0; i < set->nr_hw_queues; i++) {
2205                set->tags[i] = blk_mq_init_rq_map(set, i);
2206                if (!set->tags[i])
2207                        goto out_unwind;
2208        }
2209
2210        return 0;
2211
2212out_unwind:
2213        while (--i >= 0)
2214                blk_mq_free_rq_map(set, set->tags[i], i);
2215
2216        return -ENOMEM;
2217}
2218
2219/*
2220 * Allocate the request maps associated with this tag_set. Note that this
2221 * may reduce the depth asked for, if memory is tight. set->queue_depth
2222 * will be updated to reflect the allocated depth.
2223 */
2224static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2225{
2226        unsigned int depth;
2227        int err;
2228
2229        depth = set->queue_depth;
2230        do {
2231                err = __blk_mq_alloc_rq_maps(set);
2232                if (!err)
2233                        break;
2234
2235                set->queue_depth >>= 1;
2236                if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2237                        err = -ENOMEM;
2238                        break;
2239                }
2240        } while (set->queue_depth);
2241
2242        if (!set->queue_depth || err) {
2243                pr_err("blk-mq: failed to allocate request map\n");
2244                return -ENOMEM;
2245        }
2246
2247        if (depth != set->queue_depth)
2248                pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2249                                                depth, set->queue_depth);
2250
2251        return 0;
2252}
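
/*
 * Illustrative numbers for the fallback above: a set that asks for
 * queue_depth == 256 and keeps failing retries at 128, 64, ... and gives up
 * once the depth would drop below set->reserved_tags + BLK_MQ_TAG_MIN; on
 * success with a reduced depth, the pr_info() above records the final value.
 */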
2253
2254struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
2255{
2256        return tags->cpumask;
2257}
2258EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
2259
2260/*
2261 * Alloc a tag set to be associated with one or more request queues.
2262 * May fail with EINVAL for various error conditions. May adjust the
2263 * requested depth down, if it is too large. In that case, the
2264 * adjusted value will be stored in set->queue_depth.
2265 */
2266int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2267{
2268        BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2269
2270        if (!set->nr_hw_queues)
2271                return -EINVAL;
2272        if (!set->queue_depth)
2273                return -EINVAL;
2274        if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2275                return -EINVAL;
2276
2277        if (!set->ops->queue_rq || !set->ops->map_queue)
2278                return -EINVAL;
2279
2280        if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2281                pr_info("blk-mq: reduced tag depth to %u\n",
2282                        BLK_MQ_MAX_DEPTH);
2283                set->queue_depth = BLK_MQ_MAX_DEPTH;
2284        }
2285
2286        /*
2287         * If a crashdump is active, then we are potentially in a very
2288         * memory constrained environment. Limit us to 1 queue and
2289         * 64 tags to prevent using too much memory.
2290         */
2291        if (is_kdump_kernel()) {
2292                set->nr_hw_queues = 1;
2293                set->queue_depth = min(64U, set->queue_depth);
2294        }
2295        /*
2296         * There is no use for more h/w queues than cpus.
2297         */
2298        if (set->nr_hw_queues > nr_cpu_ids)
2299                set->nr_hw_queues = nr_cpu_ids;
2300
2301        set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2302                                 GFP_KERNEL, set->numa_node);
2303        if (!set->tags)
2304                return -ENOMEM;
2305
2306        if (blk_mq_alloc_rq_maps(set))
2307                goto enomem;
2308
2309        mutex_init(&set->tag_list_lock);
2310        INIT_LIST_HEAD(&set->tag_list);
2311
2312        return 0;
2313enomem:
2314        kfree(set->tags);
2315        set->tags = NULL;
2316        return -ENOMEM;
2317}
2318EXPORT_SYMBOL(blk_mq_alloc_tag_set);
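
/*
 * Callers should treat set->queue_depth as in/out: after a successful return
 * it may be lower than what was requested (kdump, or memory pressure in
 * blk_mq_alloc_rq_maps()).  A hypothetical caller-side check, for
 * illustration only:
 *
 *	requested = set->queue_depth;
 *	err = blk_mq_alloc_tag_set(set);
 *	if (err)
 *		return err;
 *	if (set->queue_depth < requested)
 *		pr_warn("my_driver: using reduced queue depth %u\n",
 *			set->queue_depth);
 */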
2319
2320void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2321{
2322        int i;
2323
2324        for (i = 0; i < nr_cpu_ids; i++) {
2325                if (set->tags[i])
2326                        blk_mq_free_rq_map(set, set->tags[i], i);
2327        }
2328
2329        kfree(set->tags);
2330        set->tags = NULL;
2331}
2332EXPORT_SYMBOL(blk_mq_free_tag_set);
2333
2334int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2335{
2336        struct blk_mq_tag_set *set = q->tag_set;
2337        struct blk_mq_hw_ctx *hctx;
2338        int i, ret;
2339
2340        if (!set || nr > set->queue_depth)
2341                return -EINVAL;
2342
2343        ret = 0;
2344        queue_for_each_hw_ctx(q, hctx, i) {
2345                if (!hctx->tags)
2346                        continue;
2347                ret = blk_mq_tag_update_depth(hctx->tags, nr);
2348                if (ret)
2349                        break;
2350        }
2351
2352        if (!ret)
2353                q->nr_requests = nr;
2354
2355        return ret;
2356}
2357
2358void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2359{
2360        struct request_queue *q;
2361
2362        if (nr_hw_queues > nr_cpu_ids)
2363                nr_hw_queues = nr_cpu_ids;
2364        if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2365                return;
2366
2367        list_for_each_entry(q, &set->tag_list, tag_set_list)
2368                blk_mq_freeze_queue(q);
2369
2370        set->nr_hw_queues = nr_hw_queues;
2371        list_for_each_entry(q, &set->tag_list, tag_set_list) {
2372                blk_mq_realloc_hw_ctxs(set, q);
2373
2374                if (q->nr_hw_queues > 1)
2375                        blk_queue_make_request(q, blk_mq_make_request);
2376                else
2377                        blk_queue_make_request(q, blk_sq_make_request);
2378
2379                blk_mq_queue_reinit(q, cpu_online_mask);
2380        }
2381
2382        list_for_each_entry(q, &set->tag_list, tag_set_list)
2383                blk_mq_unfreeze_queue(q);
2384}
2385EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
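
/*
 * Sketch of a driver-side use of the above (hypothetical; my_dev and nr_vecs
 * are placeholder names): after discovering that more interrupt vectors are
 * available, a driver could resize its set while its queues stay registered:
 *
 *	blk_mq_update_nr_hw_queues(&my_dev->tag_set,
 *				   min_t(int, nr_vecs, num_online_cpus()));
 *
 * The function clamps the count to nr_cpu_ids and returns early when the
 * count does not change, so callers need not duplicate those checks.
 */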
2386
2387void blk_mq_disable_hotplug(void)
2388{
2389        mutex_lock(&all_q_mutex);
2390}
2391
2392void blk_mq_enable_hotplug(void)
2393{
2394        mutex_unlock(&all_q_mutex);
2395}
2396
2397static int __init blk_mq_init(void)
2398{
2399        blk_mq_cpu_init();
2400
2401        hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2402
2403        return 0;
2404}
2405subsys_initcall(blk_mq_init);
2406