linux/block/blk-core.c
   1/*
   2 * Copyright (C) 1991, 1992 Linus Torvalds
   3 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   4 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
   5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
   6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
   7 *      -  July 2000
   8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
   9 */
  10
  11/*
  12 * This handles all read/write requests to block devices
  13 */
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/backing-dev.h>
  17#include <linux/bio.h>
  18#include <linux/blkdev.h>
  19#include <linux/blk-mq.h>
  20#include <linux/highmem.h>
  21#include <linux/mm.h>
  22#include <linux/kernel_stat.h>
  23#include <linux/string.h>
  24#include <linux/init.h>
  25#include <linux/completion.h>
  26#include <linux/slab.h>
  27#include <linux/swap.h>
  28#include <linux/writeback.h>
  29#include <linux/task_io_accounting_ops.h>
  30#include <linux/fault-inject.h>
  31#include <linux/list_sort.h>
  32#include <linux/delay.h>
  33#include <linux/ratelimit.h>
  34#include <linux/pm_runtime.h>
  35#include <linux/blk-cgroup.h>
  36#include <linux/debugfs.h>
  37#include <linux/bpf.h>
  38
  39#define CREATE_TRACE_POINTS
  40#include <trace/events/block.h>
  41
  42#include "blk.h"
  43#include "blk-mq.h"
  44#include "blk-mq-sched.h"
  45#include "blk-wbt.h"
  46
  47#ifdef CONFIG_DEBUG_FS
  48struct dentry *blk_debugfs_root;
  49#endif
  50
  51EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
  52EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
  53EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
  54EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
  55EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
  56
  57DEFINE_IDA(blk_queue_ida);
  58
  59/*
  60 * For the allocated request tables
  61 */
  62struct kmem_cache *request_cachep;
  63
  64/*
  65 * For queue allocation
  66 */
  67struct kmem_cache *blk_requestq_cachep;
  68
  69/*
  70 * Controlling structure to kblockd
  71 */
  72static struct workqueue_struct *kblockd_workqueue;
  73
  74static void blk_clear_congested(struct request_list *rl, int sync)
  75{
  76#ifdef CONFIG_CGROUP_WRITEBACK
  77        clear_wb_congested(rl->blkg->wb_congested, sync);
  78#else
  79        /*
  80         * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
  81         * flip its congestion state for events on other blkcgs.
  82         */
  83        if (rl == &rl->q->root_rl)
  84                clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
  85#endif
  86}
  87
  88static void blk_set_congested(struct request_list *rl, int sync)
  89{
  90#ifdef CONFIG_CGROUP_WRITEBACK
  91        set_wb_congested(rl->blkg->wb_congested, sync);
  92#else
  93        /* see blk_clear_congested() */
  94        if (rl == &rl->q->root_rl)
  95                set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
  96#endif
  97}
  98
  99void blk_queue_congestion_threshold(struct request_queue *q)
 100{
 101        int nr;
 102
 103        nr = q->nr_requests - (q->nr_requests / 8) + 1;
 104        if (nr > q->nr_requests)
 105                nr = q->nr_requests;
 106        q->nr_congestion_on = nr;
 107
 108        nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
 109        if (nr < 1)
 110                nr = 1;
 111        q->nr_congestion_off = nr;
 112}
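
     /*
      * Worked example (illustrative, not part of the original source): with a
      * typical q->nr_requests of 128 the thresholds computed above come out as
      *
      *   nr_congestion_on  = 128 - 128/8 + 1           = 113
      *   nr_congestion_off = 128 - 128/8 - 128/16 - 1  = 103
      *
      * i.e. the queue is flagged congested once 113 requests are allocated
      * and the flag is cleared again when the count drops below 103.
      */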
 113
 114void blk_rq_init(struct request_queue *q, struct request *rq)
 115{
 116        memset(rq, 0, sizeof(*rq));
 117
 118        INIT_LIST_HEAD(&rq->queuelist);
 119        INIT_LIST_HEAD(&rq->timeout_list);
 120        rq->cpu = -1;
 121        rq->q = q;
 122        rq->__sector = (sector_t) -1;
 123        INIT_HLIST_NODE(&rq->hash);
 124        RB_CLEAR_NODE(&rq->rb_node);
 125        rq->tag = -1;
 126        rq->internal_tag = -1;
 127        rq->start_time = jiffies;
 128        set_start_time_ns(rq);
 129        rq->part = NULL;
 130        seqcount_init(&rq->gstate_seq);
 131        u64_stats_init(&rq->aborted_gstate_sync);
 132}
 133EXPORT_SYMBOL(blk_rq_init);
 134
 135static const struct {
 136        int             errno;
 137        const char      *name;
 138} blk_errors[] = {
 139        [BLK_STS_OK]            = { 0,          "" },
 140        [BLK_STS_NOTSUPP]       = { -EOPNOTSUPP, "operation not supported" },
 141        [BLK_STS_TIMEOUT]       = { -ETIMEDOUT, "timeout" },
 142        [BLK_STS_NOSPC]         = { -ENOSPC,    "critical space allocation" },
 143        [BLK_STS_TRANSPORT]     = { -ENOLINK,   "recoverable transport" },
 144        [BLK_STS_TARGET]        = { -EREMOTEIO, "critical target" },
 145        [BLK_STS_NEXUS]         = { -EBADE,     "critical nexus" },
 146        [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
 147        [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
 148        [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
 149        [BLK_STS_DEV_RESOURCE]  = { -EBUSY,     "device resource" },
 150        [BLK_STS_AGAIN]         = { -EAGAIN,    "nonblocking retry" },
 151
 152        /* device mapper special case, should not leak out: */
 153        [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },
 154
 155        /* everything else not covered above: */
 156        [BLK_STS_IOERR]         = { -EIO,       "I/O" },
 157};
 158
 159blk_status_t errno_to_blk_status(int errno)
 160{
 161        int i;
 162
 163        for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
 164                if (blk_errors[i].errno == errno)
 165                        return (__force blk_status_t)i;
 166        }
 167
 168        return BLK_STS_IOERR;
 169}
 170EXPORT_SYMBOL_GPL(errno_to_blk_status);
 171
 172int blk_status_to_errno(blk_status_t status)
 173{
 174        int idx = (__force int)status;
 175
 176        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
 177                return -EIO;
 178        return blk_errors[idx].errno;
 179}
 180EXPORT_SYMBOL_GPL(blk_status_to_errno);
 181
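     /*
      * Illustrative round trip through the table above:
      * errno_to_blk_status(-ENOSPC) yields BLK_STS_NOSPC and
      * blk_status_to_errno(BLK_STS_NOSPC) gives -ENOSPC back, while an errno
      * without an entry (e.g. -EINVAL) falls through to BLK_STS_IOERR.
      */
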
 182static void print_req_error(struct request *req, blk_status_t status)
 183{
 184        int idx = (__force int)status;
 185
 186        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
 187                return;
 188
 189        printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
 190                           __func__, blk_errors[idx].name, req->rq_disk ?
 191                           req->rq_disk->disk_name : "?",
 192                           (unsigned long long)blk_rq_pos(req));
 193}
 194
 195static void req_bio_endio(struct request *rq, struct bio *bio,
 196                          unsigned int nbytes, blk_status_t error)
 197{
 198        if (error)
 199                bio->bi_status = error;
 200
 201        if (unlikely(rq->rq_flags & RQF_QUIET))
 202                bio_set_flag(bio, BIO_QUIET);
 203
 204        bio_advance(bio, nbytes);
 205
 206        /* don't actually finish bio if it's part of flush sequence */
 207        if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
 208                bio_endio(bio);
 209}
 210
 211void blk_dump_rq_flags(struct request *rq, char *msg)
 212{
 213        printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
 214                rq->rq_disk ? rq->rq_disk->disk_name : "?",
 215                (unsigned long long) rq->cmd_flags);
 216
 217        printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
 218               (unsigned long long)blk_rq_pos(rq),
 219               blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 220        printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
 221               rq->bio, rq->biotail, blk_rq_bytes(rq));
 222}
 223EXPORT_SYMBOL(blk_dump_rq_flags);
 224
 225static void blk_delay_work(struct work_struct *work)
 226{
 227        struct request_queue *q;
 228
 229        q = container_of(work, struct request_queue, delay_work.work);
 230        spin_lock_irq(q->queue_lock);
 231        __blk_run_queue(q);
 232        spin_unlock_irq(q->queue_lock);
 233}
 234
 235/**
 236 * blk_delay_queue - restart queueing after defined interval
 237 * @q:          The &struct request_queue in question
 238 * @msecs:      Delay in msecs
 239 *
 240 * Description:
 241 *   Sometimes queueing needs to be postponed for a little while, to allow
 242 *   resources to come back. This function will make sure that queueing is
 243 *   restarted around the specified time.
 244 */
 245void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 246{
 247        lockdep_assert_held(q->queue_lock);
 248        WARN_ON_ONCE(q->mq_ops);
 249
 250        if (likely(!blk_queue_dead(q)))
 251                queue_delayed_work(kblockd_workqueue, &q->delay_work,
 252                                   msecs_to_jiffies(msecs));
 253}
 254EXPORT_SYMBOL(blk_delay_queue);
 255
 256/**
 257 * blk_start_queue_async - asynchronously restart a previously stopped queue
 258 * @q:    The &struct request_queue in question
 259 *
 260 * Description:
 261 *   blk_start_queue_async() will clear the stop flag on the queue, and
 262 *   ensure that the request_fn for the queue is run from an async
 263 *   context.
 264 **/
 265void blk_start_queue_async(struct request_queue *q)
 266{
 267        lockdep_assert_held(q->queue_lock);
 268        WARN_ON_ONCE(q->mq_ops);
 269
 270        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 271        blk_run_queue_async(q);
 272}
 273EXPORT_SYMBOL(blk_start_queue_async);
 274
 275/**
 276 * blk_start_queue - restart a previously stopped queue
 277 * @q:    The &struct request_queue in question
 278 *
 279 * Description:
 280 *   blk_start_queue() will clear the stop flag on the queue, and call
 281 *   the request_fn for the queue if it was in a stopped state when
 282 *   entered. Also see blk_stop_queue().
 283 **/
 284void blk_start_queue(struct request_queue *q)
 285{
 286        lockdep_assert_held(q->queue_lock);
 287        WARN_ON(!in_interrupt() && !irqs_disabled());
 288        WARN_ON_ONCE(q->mq_ops);
 289
 290        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 291        __blk_run_queue(q);
 292}
 293EXPORT_SYMBOL(blk_start_queue);
 294
 295/**
 296 * blk_stop_queue - stop a queue
 297 * @q:    The &struct request_queue in question
 298 *
 299 * Description:
 300 *   The Linux block layer assumes that a block driver will consume all
 301 *   entries on the request queue when the request_fn strategy is called.
 302 *   Often this will not happen, because of hardware limitations (queue
 303 *   depth settings). If a device driver gets a 'queue full' response,
 304 *   or if it simply chooses not to queue more I/O at one point, it can
 305 *   call this function to prevent the request_fn from being called until
 306 *   the driver has signalled it's ready to go again. This happens by calling
 307 *   blk_start_queue() to restart queue operations.
 308 **/
 309void blk_stop_queue(struct request_queue *q)
 310{
 311        lockdep_assert_held(q->queue_lock);
 312        WARN_ON_ONCE(q->mq_ops);
 313
 314        cancel_delayed_work(&q->delay_work);
 315        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 316}
 317EXPORT_SYMBOL(blk_stop_queue);
 318
 319/**
 320 * blk_sync_queue - cancel any pending callbacks on a queue
 321 * @q: the queue
 322 *
 323 * Description:
 324 *     The block layer may perform asynchronous callback activity
 325 *     on a queue, such as calling the unplug function after a timeout.
 326 *     A block device may call blk_sync_queue to ensure that any
 327 *     such activity is cancelled, thus allowing it to release resources
 328 *     that the callbacks might use. The caller must already have made sure
 329 *     that its ->make_request_fn will not re-add plugging prior to calling
 330 *     this function.
 331 *
 332 *     This function does not cancel any asynchronous activity arising
 333 *     out of elevator or throttling code. That would require elevator_exit()
 334 *     and blkcg_exit_queue() to be called with queue lock initialized.
 335 *
 336 */
 337void blk_sync_queue(struct request_queue *q)
 338{
 339        del_timer_sync(&q->timeout);
 340        cancel_work_sync(&q->timeout_work);
 341
 342        if (q->mq_ops) {
 343                struct blk_mq_hw_ctx *hctx;
 344                int i;
 345
 346                cancel_delayed_work_sync(&q->requeue_work);
 347                queue_for_each_hw_ctx(q, hctx, i)
 348                        cancel_delayed_work_sync(&hctx->run_work);
 349        } else {
 350                cancel_delayed_work_sync(&q->delay_work);
 351        }
 352}
 353EXPORT_SYMBOL(blk_sync_queue);
 354
 355/**
 356 * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
 357 * @q: request queue pointer
 358 *
 359 * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
 360 * set and 1 if the flag was already set.
 361 */
 362int blk_set_preempt_only(struct request_queue *q)
 363{
 364        unsigned long flags;
 365        int res;
 366
 367        spin_lock_irqsave(q->queue_lock, flags);
 368        res = queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
 369        spin_unlock_irqrestore(q->queue_lock, flags);
 370
 371        return res;
 372}
 373EXPORT_SYMBOL_GPL(blk_set_preempt_only);
 374
 375void blk_clear_preempt_only(struct request_queue *q)
 376{
 377        unsigned long flags;
 378
 379        spin_lock_irqsave(q->queue_lock, flags);
 380        queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
 381        wake_up_all(&q->mq_freeze_wq);
 382        spin_unlock_irqrestore(q->queue_lock, flags);
 383}
 384EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
 385
 386/**
 387 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 388 * @q:  The queue to run
 389 *
 390 * Description:
 391 *    Invoke request handling on a queue if there are any pending requests.
 392 *    May be used to restart request handling after a request has completed.
 393 *    This variant runs the queue whether or not the queue has been
 394 *    stopped. Must be called with the queue lock held and interrupts
 395 *    disabled. See also @blk_run_queue.
 396 */
 397inline void __blk_run_queue_uncond(struct request_queue *q)
 398{
 399        lockdep_assert_held(q->queue_lock);
 400        WARN_ON_ONCE(q->mq_ops);
 401
 402        if (unlikely(blk_queue_dead(q)))
 403                return;
 404
 405        /*
 406         * Some request_fn implementations, e.g. scsi_request_fn(), unlock
 407         * the queue lock internally. As a result multiple threads may be
 408         * running such a request function concurrently. Keep track of the
 409         * number of active request_fn invocations such that blk_drain_queue()
 410         * can wait until all these request_fn calls have finished.
 411         */
 412        q->request_fn_active++;
 413        q->request_fn(q);
 414        q->request_fn_active--;
 415}
 416EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
 417
 418/**
 419 * __blk_run_queue - run a single device queue
 420 * @q:  The queue to run
 421 *
 422 * Description:
 423 *    See @blk_run_queue.
 424 */
 425void __blk_run_queue(struct request_queue *q)
 426{
 427        lockdep_assert_held(q->queue_lock);
 428        WARN_ON_ONCE(q->mq_ops);
 429
 430        if (unlikely(blk_queue_stopped(q)))
 431                return;
 432
 433        __blk_run_queue_uncond(q);
 434}
 435EXPORT_SYMBOL(__blk_run_queue);
 436
 437/**
 438 * blk_run_queue_async - run a single device queue in workqueue context
 439 * @q:  The queue to run
 440 *
 441 * Description:
  442 *    Tells kblockd to perform the equivalent of @blk_run_queue on our
  443 *    behalf.
 444 *
 445 * Note:
 446 *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
 447 *    has canceled q->delay_work, callers must hold the queue lock to avoid
 448 *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
 449 */
 450void blk_run_queue_async(struct request_queue *q)
 451{
 452        lockdep_assert_held(q->queue_lock);
 453        WARN_ON_ONCE(q->mq_ops);
 454
 455        if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
 456                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 457}
 458EXPORT_SYMBOL(blk_run_queue_async);
 459
 460/**
 461 * blk_run_queue - run a single device queue
 462 * @q: The queue to run
 463 *
 464 * Description:
 465 *    Invoke request handling on this queue, if it has pending work to do.
 466 *    May be used to restart queueing when a request has completed.
 467 */
 468void blk_run_queue(struct request_queue *q)
 469{
 470        unsigned long flags;
 471
 472        WARN_ON_ONCE(q->mq_ops);
 473
 474        spin_lock_irqsave(q->queue_lock, flags);
 475        __blk_run_queue(q);
 476        spin_unlock_irqrestore(q->queue_lock, flags);
 477}
 478EXPORT_SYMBOL(blk_run_queue);
 479
 480void blk_put_queue(struct request_queue *q)
 481{
 482        kobject_put(&q->kobj);
 483}
 484EXPORT_SYMBOL(blk_put_queue);
 485
 486/**
 487 * __blk_drain_queue - drain requests from request_queue
 488 * @q: queue to drain
 489 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 490 *
 491 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 492 * If not, only ELVPRIV requests are drained.  The caller is responsible
 493 * for ensuring that no new requests which need to be drained are queued.
 494 */
 495static void __blk_drain_queue(struct request_queue *q, bool drain_all)
 496        __releases(q->queue_lock)
 497        __acquires(q->queue_lock)
 498{
 499        int i;
 500
 501        lockdep_assert_held(q->queue_lock);
 502        WARN_ON_ONCE(q->mq_ops);
 503
 504        while (true) {
 505                bool drain = false;
 506
 507                /*
 508                 * The caller might be trying to drain @q before its
 509                 * elevator is initialized.
 510                 */
 511                if (q->elevator)
 512                        elv_drain_elevator(q);
 513
 514                blkcg_drain_queue(q);
 515
 516                /*
 517                 * This function might be called on a queue which failed
 518                 * driver init after queue creation or is not yet fully
  519                 * active.  Some drivers (e.g. fd and loop) get unhappy
 520                 * in such cases.  Kick queue iff dispatch queue has
 521                 * something on it and @q has request_fn set.
 522                 */
 523                if (!list_empty(&q->queue_head) && q->request_fn)
 524                        __blk_run_queue(q);
 525
 526                drain |= q->nr_rqs_elvpriv;
 527                drain |= q->request_fn_active;
 528
 529                /*
 530                 * Unfortunately, requests are queued at and tracked from
 531                 * multiple places and there's no single counter which can
 532                 * be drained.  Check all the queues and counters.
 533                 */
 534                if (drain_all) {
 535                        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 536                        drain |= !list_empty(&q->queue_head);
 537                        for (i = 0; i < 2; i++) {
 538                                drain |= q->nr_rqs[i];
 539                                drain |= q->in_flight[i];
 540                                if (fq)
 541                                    drain |= !list_empty(&fq->flush_queue[i]);
 542                        }
 543                }
 544
 545                if (!drain)
 546                        break;
 547
 548                spin_unlock_irq(q->queue_lock);
 549
 550                msleep(10);
 551
 552                spin_lock_irq(q->queue_lock);
 553        }
 554
 555        /*
 556         * With queue marked dead, any woken up waiter will fail the
 557         * allocation path, so the wakeup chaining is lost and we're
 558         * left with hung waiters. We need to wake up those waiters.
 559         */
 560        if (q->request_fn) {
 561                struct request_list *rl;
 562
 563                blk_queue_for_each_rl(rl, q)
 564                        for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
 565                                wake_up_all(&rl->wait[i]);
 566        }
 567}
 568
 569void blk_drain_queue(struct request_queue *q)
 570{
 571        spin_lock_irq(q->queue_lock);
 572        __blk_drain_queue(q, true);
 573        spin_unlock_irq(q->queue_lock);
 574}
 575
 576/**
 577 * blk_queue_bypass_start - enter queue bypass mode
 578 * @q: queue of interest
 579 *
 580 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 581 * function makes @q enter bypass mode and drains all requests which were
 582 * throttled or issued before.  On return, it's guaranteed that no request
  583 * is being throttled or has ELVPRIV set and blk_queue_bypass() returns
  584 * %true inside queue or RCU read lock.
 585 */
 586void blk_queue_bypass_start(struct request_queue *q)
 587{
 588        WARN_ON_ONCE(q->mq_ops);
 589
 590        spin_lock_irq(q->queue_lock);
 591        q->bypass_depth++;
 592        queue_flag_set(QUEUE_FLAG_BYPASS, q);
 593        spin_unlock_irq(q->queue_lock);
 594
 595        /*
 596         * Queues start drained.  Skip actual draining till init is
  597         * complete.  This avoids lengthy delays during queue init which
 598         * can happen many times during boot.
 599         */
 600        if (blk_queue_init_done(q)) {
 601                spin_lock_irq(q->queue_lock);
 602                __blk_drain_queue(q, false);
 603                spin_unlock_irq(q->queue_lock);
 604
 605                /* ensure blk_queue_bypass() is %true inside RCU read lock */
 606                synchronize_rcu();
 607        }
 608}
 609EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
 610
 611/**
 612 * blk_queue_bypass_end - leave queue bypass mode
 613 * @q: queue of interest
 614 *
 615 * Leave bypass mode and restore the normal queueing behavior.
 616 *
 617 * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
 618 * this function is called for both blk-sq and blk-mq queues.
 619 */
 620void blk_queue_bypass_end(struct request_queue *q)
 621{
 622        spin_lock_irq(q->queue_lock);
 623        if (!--q->bypass_depth)
 624                queue_flag_clear(QUEUE_FLAG_BYPASS, q);
 625        WARN_ON_ONCE(q->bypass_depth < 0);
 626        spin_unlock_irq(q->queue_lock);
 627}
 628EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 629
 630void blk_set_queue_dying(struct request_queue *q)
 631{
 632        spin_lock_irq(q->queue_lock);
 633        queue_flag_set(QUEUE_FLAG_DYING, q);
 634        spin_unlock_irq(q->queue_lock);
 635
 636        /*
  637         * When the queue DYING flag is set, we need to block new requests
  638         * from entering the queue, so we call blk_freeze_queue_start() to
 639         * prevent I/O from crossing blk_queue_enter().
 640         */
 641        blk_freeze_queue_start(q);
 642
 643        if (q->mq_ops)
 644                blk_mq_wake_waiters(q);
 645        else {
 646                struct request_list *rl;
 647
 648                spin_lock_irq(q->queue_lock);
 649                blk_queue_for_each_rl(rl, q) {
 650                        if (rl->rq_pool) {
 651                                wake_up_all(&rl->wait[BLK_RW_SYNC]);
 652                                wake_up_all(&rl->wait[BLK_RW_ASYNC]);
 653                        }
 654                }
 655                spin_unlock_irq(q->queue_lock);
 656        }
 657
 658        /* Make blk_queue_enter() reexamine the DYING flag. */
 659        wake_up_all(&q->mq_freeze_wq);
 660}
 661EXPORT_SYMBOL_GPL(blk_set_queue_dying);
 662
 663/**
 664 * blk_cleanup_queue - shutdown a request queue
 665 * @q: request queue to shutdown
 666 *
 667 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 668 * put it.  All future requests will be failed immediately with -ENODEV.
 669 */
 670void blk_cleanup_queue(struct request_queue *q)
 671{
 672        spinlock_t *lock = q->queue_lock;
 673
  674        /* mark @q DYING, no new requests or merges will be allowed afterwards */
 675        mutex_lock(&q->sysfs_lock);
 676        blk_set_queue_dying(q);
 677        spin_lock_irq(lock);
 678
 679        /*
 680         * A dying queue is permanently in bypass mode till released.  Note
 681         * that, unlike blk_queue_bypass_start(), we aren't performing
 682         * synchronize_rcu() after entering bypass mode to avoid the delay
 683         * as some drivers create and destroy a lot of queues while
 684         * probing.  This is still safe because blk_release_queue() will be
 685         * called only after the queue refcnt drops to zero and nothing,
 686         * RCU or not, would be traversing the queue by then.
 687         */
 688        q->bypass_depth++;
 689        queue_flag_set(QUEUE_FLAG_BYPASS, q);
 690
 691        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 692        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 693        queue_flag_set(QUEUE_FLAG_DYING, q);
 694        spin_unlock_irq(lock);
 695        mutex_unlock(&q->sysfs_lock);
 696
 697        /*
 698         * Drain all requests queued before DYING marking. Set DEAD flag to
  699         * prevent q->request_fn() from being invoked after draining has finished.
 700         */
 701        blk_freeze_queue(q);
 702        spin_lock_irq(lock);
 703        queue_flag_set(QUEUE_FLAG_DEAD, q);
 704        spin_unlock_irq(lock);
 705
 706        /*
  707         * Make sure all in-progress dispatches are completed, because
  708         * blk_freeze_queue() can only complete all requests, and
  709         * dispatch may still be in progress since we dispatch requests
  710         * from more than one context.
 711         */
 712        if (q->mq_ops)
 713                blk_mq_quiesce_queue(q);
 714
 715        /* for synchronous bio-based driver finish in-flight integrity i/o */
 716        blk_flush_integrity();
 717
  718        /* @q won't process any more requests, flush async actions */
 719        del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
 720        blk_sync_queue(q);
 721
 722        if (q->mq_ops)
 723                blk_mq_free_queue(q);
 724        percpu_ref_exit(&q->q_usage_counter);
 725
 726        spin_lock_irq(lock);
 727        if (q->queue_lock != &q->__queue_lock)
 728                q->queue_lock = &q->__queue_lock;
 729        spin_unlock_irq(lock);
 730
 731        /* @q is and will stay empty, shutdown and put */
 732        blk_put_queue(q);
 733}
 734EXPORT_SYMBOL(blk_cleanup_queue);
 735
 736/* Allocate memory local to the request queue */
 737static void *alloc_request_simple(gfp_t gfp_mask, void *data)
 738{
 739        struct request_queue *q = data;
 740
 741        return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
 742}
 743
 744static void free_request_simple(void *element, void *data)
 745{
 746        kmem_cache_free(request_cachep, element);
 747}
 748
 749static void *alloc_request_size(gfp_t gfp_mask, void *data)
 750{
 751        struct request_queue *q = data;
 752        struct request *rq;
 753
 754        rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
 755                        q->node);
 756        if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
 757                kfree(rq);
 758                rq = NULL;
 759        }
 760        return rq;
 761}
 762
 763static void free_request_size(void *element, void *data)
 764{
 765        struct request_queue *q = data;
 766
 767        if (q->exit_rq_fn)
 768                q->exit_rq_fn(q, element);
 769        kfree(element);
 770}
 771
 772int blk_init_rl(struct request_list *rl, struct request_queue *q,
 773                gfp_t gfp_mask)
 774{
 775        if (unlikely(rl->rq_pool) || q->mq_ops)
 776                return 0;
 777
 778        rl->q = q;
 779        rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
 780        rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
 781        init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 782        init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 783
 784        if (q->cmd_size) {
 785                rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
 786                                alloc_request_size, free_request_size,
 787                                q, gfp_mask, q->node);
 788        } else {
 789                rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
 790                                alloc_request_simple, free_request_simple,
 791                                q, gfp_mask, q->node);
 792        }
 793        if (!rl->rq_pool)
 794                return -ENOMEM;
 795
 796        if (rl != &q->root_rl)
 797                WARN_ON_ONCE(!blk_get_queue(q));
 798
 799        return 0;
 800}
 801
 802void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 803{
 804        if (rl->rq_pool) {
 805                mempool_destroy(rl->rq_pool);
 806                if (rl != &q->root_rl)
 807                        blk_put_queue(q);
 808        }
 809}
 810
 811struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 812{
 813        return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
 814}
 815EXPORT_SYMBOL(blk_alloc_queue);
 816
 817/**
 818 * blk_queue_enter() - try to increase q->q_usage_counter
 819 * @q: request queue pointer
 820 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
 821 */
 822int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 823{
 824        const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
 825
 826        while (true) {
 827                bool success = false;
 828                int ret;
 829
 830                rcu_read_lock_sched();
 831                if (percpu_ref_tryget_live(&q->q_usage_counter)) {
 832                        /*
 833                         * The code that sets the PREEMPT_ONLY flag is
 834                         * responsible for ensuring that that flag is globally
 835                         * visible before the queue is unfrozen.
 836                         */
 837                        if (preempt || !blk_queue_preempt_only(q)) {
 838                                success = true;
 839                        } else {
 840                                percpu_ref_put(&q->q_usage_counter);
 841                        }
 842                }
 843                rcu_read_unlock_sched();
 844
 845                if (success)
 846                        return 0;
 847
 848                if (flags & BLK_MQ_REQ_NOWAIT)
 849                        return -EBUSY;
 850
 851                /*
  852                 * This is the read-side pairing of the barrier in
  853                 * blk_freeze_queue_start(): we need to order reading the
  854                 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
  855                 * .mq_freeze_depth or the queue dying flag, otherwise the
  856                 * following wait may never return if the two reads are reordered.
 857                 */
 858                smp_rmb();
 859
 860                ret = wait_event_interruptible(q->mq_freeze_wq,
 861                                (atomic_read(&q->mq_freeze_depth) == 0 &&
 862                                 (preempt || !blk_queue_preempt_only(q))) ||
 863                                blk_queue_dying(q));
 864                if (blk_queue_dying(q))
 865                        return -ENODEV;
 866                if (ret)
 867                        return ret;
 868        }
 869}
 870
 871void blk_queue_exit(struct request_queue *q)
 872{
 873        percpu_ref_put(&q->q_usage_counter);
 874}
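
     /*
      * Usage sketch (illustrative only): callers typically bracket work that
      * must not race with a queue freeze or cleanup in an enter/exit pair:
      *
      *   if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
      *           return;              (frozen, dying or preempt-only)
      *   ...submit work against q...
      *   blk_queue_exit(q);
      */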
 875
 876static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 877{
 878        struct request_queue *q =
 879                container_of(ref, struct request_queue, q_usage_counter);
 880
 881        wake_up_all(&q->mq_freeze_wq);
 882}
 883
 884static void blk_rq_timed_out_timer(struct timer_list *t)
 885{
 886        struct request_queue *q = from_timer(q, t, timeout);
 887
 888        kblockd_schedule_work(&q->timeout_work);
 889}
 890
 891struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 892{
 893        struct request_queue *q;
 894
 895        q = kmem_cache_alloc_node(blk_requestq_cachep,
 896                                gfp_mask | __GFP_ZERO, node_id);
 897        if (!q)
 898                return NULL;
 899
 900        q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
 901        if (q->id < 0)
 902                goto fail_q;
 903
 904        q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 905        if (!q->bio_split)
 906                goto fail_id;
 907
 908        q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
 909        if (!q->backing_dev_info)
 910                goto fail_split;
 911
 912        q->stats = blk_alloc_queue_stats();
 913        if (!q->stats)
 914                goto fail_stats;
 915
 916        q->backing_dev_info->ra_pages =
 917                        (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
 918        q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
 919        q->backing_dev_info->name = "block";
 920        q->node = node_id;
 921
 922        timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
 923                    laptop_mode_timer_fn, 0);
 924        timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
 925        INIT_WORK(&q->timeout_work, NULL);
 926        INIT_LIST_HEAD(&q->queue_head);
 927        INIT_LIST_HEAD(&q->timeout_list);
 928        INIT_LIST_HEAD(&q->icq_list);
 929#ifdef CONFIG_BLK_CGROUP
 930        INIT_LIST_HEAD(&q->blkg_list);
 931#endif
 932        INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 933
 934        kobject_init(&q->kobj, &blk_queue_ktype);
 935
 936#ifdef CONFIG_BLK_DEV_IO_TRACE
 937        mutex_init(&q->blk_trace_mutex);
 938#endif
 939        mutex_init(&q->sysfs_lock);
 940        spin_lock_init(&q->__queue_lock);
 941
 942        /*
  943         * By default, initialize queue_lock to the internal lock; drivers can
 944         * override it later if need be.
 945         */
 946        q->queue_lock = &q->__queue_lock;
 947
 948        /*
 949         * A queue starts its life with bypass turned on to avoid
 950         * unnecessary bypass on/off overhead and nasty surprises during
 951         * init.  The initial bypass will be finished when the queue is
 952         * registered by blk_register_queue().
 953         */
 954        q->bypass_depth = 1;
 955        __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
 956
 957        init_waitqueue_head(&q->mq_freeze_wq);
 958
 959        /*
 960         * Init percpu_ref in atomic mode so that it's faster to shutdown.
 961         * See blk_register_queue() for details.
 962         */
 963        if (percpu_ref_init(&q->q_usage_counter,
 964                                blk_queue_usage_counter_release,
 965                                PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
 966                goto fail_bdi;
 967
 968        if (blkcg_init_queue(q))
 969                goto fail_ref;
 970
 971        return q;
 972
 973fail_ref:
 974        percpu_ref_exit(&q->q_usage_counter);
 975fail_bdi:
 976        blk_free_queue_stats(q->stats);
 977fail_stats:
 978        bdi_put(q->backing_dev_info);
 979fail_split:
 980        bioset_free(q->bio_split);
 981fail_id:
 982        ida_simple_remove(&blk_queue_ida, q->id);
 983fail_q:
 984        kmem_cache_free(blk_requestq_cachep, q);
 985        return NULL;
 986}
 987EXPORT_SYMBOL(blk_alloc_queue_node);
 988
 989/**
 990 * blk_init_queue  - prepare a request queue for use with a block device
 991 * @rfn:  The function to be called to process requests that have been
 992 *        placed on the queue.
 993 * @lock: Request queue spin lock
 994 *
 995 * Description:
 996 *    If a block device wishes to use the standard request handling procedures,
 997 *    which sorts requests and coalesces adjacent requests, then it must
 998 *    call blk_init_queue().  The function @rfn will be called when there
 999 *    are requests on the queue that need to be processed.  If the device
1000 *    supports plugging, then @rfn may not be called immediately when requests
1001 *    are available on the queue, but may be called at some time later instead.
1002 *    Plugged queues are generally unplugged when a buffer belonging to one
1003 *    of the requests on the queue is needed, or due to memory pressure.
1004 *
1005 *    @rfn is not required, or even expected, to remove all requests off the
1006 *    queue, but only as many as it can handle at a time.  If it does leave
1007 *    requests on the queue, it is responsible for arranging that the requests
1008 *    get dealt with eventually.
1009 *
1010 *    The queue spin lock must be held while manipulating the requests on the
 1011 *    request queue; this lock will also be taken from interrupt context, so irq
1012 *    disabling is needed for it.
1013 *
1014 *    Function returns a pointer to the initialized request queue, or %NULL if
1015 *    it didn't succeed.
1016 *
1017 * Note:
1018 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
1019 *    when the block device is deactivated (such as at module unload).
1020 **/
1021
1022struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
1023{
1024        return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
1025}
1026EXPORT_SYMBOL(blk_init_queue);
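
     /*
      * Illustrative use from a legacy (single-queue) driver, assuming a
      * driver-private "mydev" with its own lock and request_fn; it must be
      * paired with blk_cleanup_queue() on teardown as noted above:
      *
      *   spin_lock_init(&mydev->lock);
      *   q = blk_init_queue(mydev_request_fn, &mydev->lock);
      *   if (!q)
      *           return -ENOMEM;
      */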
1027
1028struct request_queue *
1029blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1030{
1031        struct request_queue *q;
1032
1033        q = blk_alloc_queue_node(GFP_KERNEL, node_id);
1034        if (!q)
1035                return NULL;
1036
1037        q->request_fn = rfn;
1038        if (lock)
1039                q->queue_lock = lock;
1040        if (blk_init_allocated_queue(q) < 0) {
1041                blk_cleanup_queue(q);
1042                return NULL;
1043        }
1044
1045        return q;
1046}
1047EXPORT_SYMBOL(blk_init_queue_node);
1048
1049static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
1050
1051
1052int blk_init_allocated_queue(struct request_queue *q)
1053{
1054        WARN_ON_ONCE(q->mq_ops);
1055
1056        q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
1057        if (!q->fq)
1058                return -ENOMEM;
1059
1060        if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
1061                goto out_free_flush_queue;
1062
1063        if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
1064                goto out_exit_flush_rq;
1065
1066        INIT_WORK(&q->timeout_work, blk_timeout_work);
1067        q->queue_flags          |= QUEUE_FLAG_DEFAULT;
1068
1069        /*
1070         * This also sets hw/phys segments, boundary and size
1071         */
1072        blk_queue_make_request(q, blk_queue_bio);
1073
1074        q->sg_reserved_size = INT_MAX;
1075
1076        /* Protect q->elevator from elevator_change */
1077        mutex_lock(&q->sysfs_lock);
1078
1079        /* init elevator */
1080        if (elevator_init(q, NULL)) {
1081                mutex_unlock(&q->sysfs_lock);
1082                goto out_exit_flush_rq;
1083        }
1084
1085        mutex_unlock(&q->sysfs_lock);
1086        return 0;
1087
1088out_exit_flush_rq:
1089        if (q->exit_rq_fn)
1090                q->exit_rq_fn(q, q->fq->flush_rq);
1091out_free_flush_queue:
1092        blk_free_flush_queue(q->fq);
1093        return -ENOMEM;
1094}
1095EXPORT_SYMBOL(blk_init_allocated_queue);
1096
1097bool blk_get_queue(struct request_queue *q)
1098{
1099        if (likely(!blk_queue_dying(q))) {
1100                __blk_get_queue(q);
1101                return true;
1102        }
1103
1104        return false;
1105}
1106EXPORT_SYMBOL(blk_get_queue);
1107
1108static inline void blk_free_request(struct request_list *rl, struct request *rq)
1109{
1110        if (rq->rq_flags & RQF_ELVPRIV) {
1111                elv_put_request(rl->q, rq);
1112                if (rq->elv.icq)
1113                        put_io_context(rq->elv.icq->ioc);
1114        }
1115
1116        mempool_free(rq, rl->rq_pool);
1117}
1118
1119/*
 1120 * ioc_batching returns true if the ioc is within a valid batching window
 1121 * and should be given priority access to a request.
1122 */
1123static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
1124{
1125        if (!ioc)
1126                return 0;
1127
1128        /*
1129         * Make sure the process is able to allocate at least 1 request
1130         * even if the batch times out, otherwise we could theoretically
1131         * lose wakeups.
1132         */
1133        return ioc->nr_batch_requests == q->nr_batching ||
1134                (ioc->nr_batch_requests > 0
1135                && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
1136}
1137
1138/*
1139 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
1140 * will cause the process to be a "batcher" on all queues in the system. This
1141 * is the behaviour we want though - once it gets a wakeup it should be given
1142 * a nice run.
1143 */
1144static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
1145{
1146        if (!ioc || ioc_batching(q, ioc))
1147                return;
1148
1149        ioc->nr_batch_requests = q->nr_batching;
1150        ioc->last_waited = jiffies;
1151}
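
     /*
      * Illustrative flow: a task that had to sleep for a request is made a
      * "batcher" via ioc_set_batching().  From then on ioc_batching() keeps
      * returning true, giving it priority access, until it has either used up
      * q->nr_batching allocations or BLK_BATCH_TIME has elapsed since it last
      * waited, after which it competes normally again.
      */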
1152
1153static void __freed_request(struct request_list *rl, int sync)
1154{
1155        struct request_queue *q = rl->q;
1156
1157        if (rl->count[sync] < queue_congestion_off_threshold(q))
1158                blk_clear_congested(rl, sync);
1159
1160        if (rl->count[sync] + 1 <= q->nr_requests) {
1161                if (waitqueue_active(&rl->wait[sync]))
1162                        wake_up(&rl->wait[sync]);
1163
1164                blk_clear_rl_full(rl, sync);
1165        }
1166}
1167
1168/*
1169 * A request has just been released.  Account for it, update the full and
1170 * congestion status, wake up any waiters.   Called under q->queue_lock.
1171 */
1172static void freed_request(struct request_list *rl, bool sync,
1173                req_flags_t rq_flags)
1174{
1175        struct request_queue *q = rl->q;
1176
1177        q->nr_rqs[sync]--;
1178        rl->count[sync]--;
1179        if (rq_flags & RQF_ELVPRIV)
1180                q->nr_rqs_elvpriv--;
1181
1182        __freed_request(rl, sync);
1183
1184        if (unlikely(rl->starved[sync ^ 1]))
1185                __freed_request(rl, sync ^ 1);
1186}
1187
1188int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
1189{
1190        struct request_list *rl;
1191        int on_thresh, off_thresh;
1192
1193        WARN_ON_ONCE(q->mq_ops);
1194
1195        spin_lock_irq(q->queue_lock);
1196        q->nr_requests = nr;
1197        blk_queue_congestion_threshold(q);
1198        on_thresh = queue_congestion_on_threshold(q);
1199        off_thresh = queue_congestion_off_threshold(q);
1200
1201        blk_queue_for_each_rl(rl, q) {
1202                if (rl->count[BLK_RW_SYNC] >= on_thresh)
1203                        blk_set_congested(rl, BLK_RW_SYNC);
1204                else if (rl->count[BLK_RW_SYNC] < off_thresh)
1205                        blk_clear_congested(rl, BLK_RW_SYNC);
1206
1207                if (rl->count[BLK_RW_ASYNC] >= on_thresh)
1208                        blk_set_congested(rl, BLK_RW_ASYNC);
1209                else if (rl->count[BLK_RW_ASYNC] < off_thresh)
1210                        blk_clear_congested(rl, BLK_RW_ASYNC);
1211
1212                if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
1213                        blk_set_rl_full(rl, BLK_RW_SYNC);
1214                } else {
1215                        blk_clear_rl_full(rl, BLK_RW_SYNC);
1216                        wake_up(&rl->wait[BLK_RW_SYNC]);
1217                }
1218
1219                if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
1220                        blk_set_rl_full(rl, BLK_RW_ASYNC);
1221                } else {
1222                        blk_clear_rl_full(rl, BLK_RW_ASYNC);
1223                        wake_up(&rl->wait[BLK_RW_ASYNC]);
1224                }
1225        }
1226
1227        spin_unlock_irq(q->queue_lock);
1228        return 0;
1229}
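
     /*
      * Orientation note (assumption about the caller, not from this file): on
      * legacy queues this is what a write to /sys/block/<dev>/queue/nr_requests
      * ultimately invokes, resizing the limit and re-evaluating the congestion
      * and "full" state of each request list against it.
      */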
1230
1231/**
1232 * __get_request - get a free request
1233 * @rl: request list to allocate from
1234 * @op: operation and flags
1235 * @bio: bio to allocate request for (can be %NULL)
 1236 * @flags: BLK_MQ_REQ_* flags
1237 *
1238 * Get a free request from @q.  This function may fail under memory
1239 * pressure or if @q is dead.
1240 *
1241 * Must be called with @q->queue_lock held and,
1242 * Returns ERR_PTR on failure, with @q->queue_lock held.
1243 * Returns request pointer on success, with @q->queue_lock *not held*.
1244 */
1245static struct request *__get_request(struct request_list *rl, unsigned int op,
1246                                     struct bio *bio, blk_mq_req_flags_t flags)
1247{
1248        struct request_queue *q = rl->q;
1249        struct request *rq;
1250        struct elevator_type *et = q->elevator->type;
1251        struct io_context *ioc = rq_ioc(bio);
1252        struct io_cq *icq = NULL;
1253        const bool is_sync = op_is_sync(op);
1254        int may_queue;
1255        gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
1256                         __GFP_DIRECT_RECLAIM;
1257        req_flags_t rq_flags = RQF_ALLOCED;
1258
1259        lockdep_assert_held(q->queue_lock);
1260
1261        if (unlikely(blk_queue_dying(q)))
1262                return ERR_PTR(-ENODEV);
1263
1264        may_queue = elv_may_queue(q, op);
1265        if (may_queue == ELV_MQUEUE_NO)
1266                goto rq_starved;
1267
1268        if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
1269                if (rl->count[is_sync]+1 >= q->nr_requests) {
1270                        /*
1271                         * The queue will fill after this allocation, so set
1272                         * it as full, and mark this process as "batching".
1273                         * This process will be allowed to complete a batch of
1274                         * requests, others will be blocked.
1275                         */
1276                        if (!blk_rl_full(rl, is_sync)) {
1277                                ioc_set_batching(q, ioc);
1278                                blk_set_rl_full(rl, is_sync);
1279                        } else {
1280                                if (may_queue != ELV_MQUEUE_MUST
1281                                                && !ioc_batching(q, ioc)) {
1282                                        /*
1283                                         * The queue is full and the allocating
1284                                         * process is not a "batcher", and not
1285                                         * exempted by the IO scheduler
1286                                         */
1287                                        return ERR_PTR(-ENOMEM);
1288                                }
1289                        }
1290                }
1291                blk_set_congested(rl, is_sync);
1292        }
1293
1294        /*
1295         * Only allow batching queuers to allocate up to 50% over the defined
1296         * limit of requests, otherwise we could have thousands of requests
1297         * allocated with any setting of ->nr_requests
1298         */
1299        if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
1300                return ERR_PTR(-ENOMEM);
1301
1302        q->nr_rqs[is_sync]++;
1303        rl->count[is_sync]++;
1304        rl->starved[is_sync] = 0;
1305
1306        /*
1307         * Decide whether the new request will be managed by elevator.  If
1308         * so, mark @rq_flags and increment elvpriv.  Non-zero elvpriv will
1309         * prevent the current elevator from being destroyed until the new
1310         * request is freed.  This guarantees icq's won't be destroyed and
1311         * makes creating new ones safe.
1312         *
1313         * Flush requests do not use the elevator so skip initialization.
1314         * This allows a request to share the flush and elevator data.
1315         *
1316         * Also, lookup icq while holding queue_lock.  If it doesn't exist,
1317         * it will be created after releasing queue_lock.
1318         */
1319        if (!op_is_flush(op) && !blk_queue_bypass(q)) {
1320                rq_flags |= RQF_ELVPRIV;
1321                q->nr_rqs_elvpriv++;
1322                if (et->icq_cache && ioc)
1323                        icq = ioc_lookup_icq(ioc, q);
1324        }
1325
1326        if (blk_queue_io_stat(q))
1327                rq_flags |= RQF_IO_STAT;
1328        spin_unlock_irq(q->queue_lock);
1329
1330        /* allocate and init request */
1331        rq = mempool_alloc(rl->rq_pool, gfp_mask);
1332        if (!rq)
1333                goto fail_alloc;
1334
1335        blk_rq_init(q, rq);
1336        blk_rq_set_rl(rq, rl);
1337        rq->cmd_flags = op;
1338        rq->rq_flags = rq_flags;
1339        if (flags & BLK_MQ_REQ_PREEMPT)
1340                rq->rq_flags |= RQF_PREEMPT;
1341
1342        /* init elvpriv */
1343        if (rq_flags & RQF_ELVPRIV) {
1344                if (unlikely(et->icq_cache && !icq)) {
1345                        if (ioc)
1346                                icq = ioc_create_icq(ioc, q, gfp_mask);
1347                        if (!icq)
1348                                goto fail_elvpriv;
1349                }
1350
1351                rq->elv.icq = icq;
1352                if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
1353                        goto fail_elvpriv;
1354
1355                /* @rq->elv.icq holds io_context until @rq is freed */
1356                if (icq)
1357                        get_io_context(icq->ioc);
1358        }
1359out:
1360        /*
1361         * ioc may be NULL here, and ioc_batching will be false. That's
1362         * OK, if the queue is under the request limit then requests need
1363         * not count toward the nr_batch_requests limit. There will always
1364         * be some limit enforced by BLK_BATCH_TIME.
1365         */
1366        if (ioc_batching(q, ioc))
1367                ioc->nr_batch_requests--;
1368
1369        trace_block_getrq(q, bio, op);
1370        return rq;
1371
1372fail_elvpriv:
1373        /*
1374         * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
1375         * and may fail indefinitely under memory pressure and thus
1376         * shouldn't stall IO.  Treat this request as !elvpriv.  This will
 1377         * disturb iosched and blkcg but weird is better than dead.
1378         */
1379        printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
1380                           __func__, dev_name(q->backing_dev_info->dev));
1381
1382        rq->rq_flags &= ~RQF_ELVPRIV;
1383        rq->elv.icq = NULL;
1384
1385        spin_lock_irq(q->queue_lock);
1386        q->nr_rqs_elvpriv--;
1387        spin_unlock_irq(q->queue_lock);
1388        goto out;
1389
1390fail_alloc:
1391        /*
1392         * Allocation failed presumably due to memory. Undo anything we
1393         * might have messed up.
1394         *
1395         * Allocating task should really be put onto the front of the wait
1396         * queue, but this is pretty rare.
1397         */
1398        spin_lock_irq(q->queue_lock);
1399        freed_request(rl, is_sync, rq_flags);
1400
1401        /*
 1402         * In the very unlikely event that allocation failed and no
 1403         * requests for this direction were pending, mark us starved so that
 1404         * freeing of a request in the other direction will notice
 1405         * us.  Another possible fix would be to split the rq mempool into
 1406         * READ and WRITE.
1407         */
1408rq_starved:
1409        if (unlikely(rl->count[is_sync] == 0))
1410                rl->starved[is_sync] = 1;
1411        return ERR_PTR(-ENOMEM);
1412}
1413
1414/**
1415 * get_request - get a free request
1416 * @q: request_queue to allocate request from
1417 * @op: operation and flags
1418 * @bio: bio to allocate request for (can be %NULL)
1419 * @flags: BLK_MQ_REQ_* flags.
1420 *
 1421 * Get a free request from @q.  Unless %BLK_MQ_REQ_NOWAIT is set in @flags,
 1422 * this function keeps retrying under memory pressure and fails iff @q is dead.
1423 *
1424 * Must be called with @q->queue_lock held and,
1425 * Returns ERR_PTR on failure, with @q->queue_lock held.
1426 * Returns request pointer on success, with @q->queue_lock *not held*.
1427 */
1428static struct request *get_request(struct request_queue *q, unsigned int op,
1429                                   struct bio *bio, blk_mq_req_flags_t flags)
1430{
1431        const bool is_sync = op_is_sync(op);
1432        DEFINE_WAIT(wait);
1433        struct request_list *rl;
1434        struct request *rq;
1435
1436        lockdep_assert_held(q->queue_lock);
1437        WARN_ON_ONCE(q->mq_ops);
1438
1439        rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
1440retry:
1441        rq = __get_request(rl, op, bio, flags);
1442        if (!IS_ERR(rq))
1443                return rq;
1444
1445        if (op & REQ_NOWAIT) {
1446                blk_put_rl(rl);
1447                return ERR_PTR(-EAGAIN);
1448        }
1449
1450        if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
1451                blk_put_rl(rl);
1452                return rq;
1453        }
1454
1455        /* wait on @rl and retry */
1456        prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
1457                                  TASK_UNINTERRUPTIBLE);
1458
1459        trace_block_sleeprq(q, bio, op);
1460
1461        spin_unlock_irq(q->queue_lock);
1462        io_schedule();
1463
1464        /*
1465         * After sleeping, we become a "batching" process and will be able
1466         * to allocate at least one request, and up to a big batch of them
 1467         * for a small period of time.  See ioc_batching, ioc_set_batching
1468         */
1469        ioc_set_batching(q, current->io_context);
1470
1471        spin_lock_irq(q->queue_lock);
1472        finish_wait(&rl->wait[is_sync], &wait);
1473
1474        goto retry;
1475}
1476
1477/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
1478static struct request *blk_old_get_request(struct request_queue *q,
1479                                unsigned int op, blk_mq_req_flags_t flags)
1480{
1481        struct request *rq;
1482        gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
1483                         __GFP_DIRECT_RECLAIM;
1484        int ret = 0;
1485
1486        WARN_ON_ONCE(q->mq_ops);
1487
1488        /* create ioc upfront */
1489        create_io_context(gfp_mask, q->node);
1490
1491        ret = blk_queue_enter(q, flags);
1492        if (ret)
1493                return ERR_PTR(ret);
1494        spin_lock_irq(q->queue_lock);
1495        rq = get_request(q, op, NULL, flags);
1496        if (IS_ERR(rq)) {
1497                spin_unlock_irq(q->queue_lock);
1498                blk_queue_exit(q);
1499                return rq;
1500        }
1501
1502        /* q->queue_lock is unlocked at this point */
1503        rq->__data_len = 0;
1504        rq->__sector = (sector_t) -1;
1505        rq->bio = rq->biotail = NULL;
1506        return rq;
1507}
1508
1509/**
1510 * blk_get_request_flags - allocate a request
1511 * @q: request queue to allocate a request for
1512 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
1513 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
1514 */
1515struct request *blk_get_request_flags(struct request_queue *q, unsigned int op,
1516                                      blk_mq_req_flags_t flags)
1517{
1518        struct request *req;
1519
1520        WARN_ON_ONCE(op & REQ_NOWAIT);
1521        WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
1522
1523        if (q->mq_ops) {
1524                req = blk_mq_alloc_request(q, op, flags);
1525                if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
1526                        q->mq_ops->initialize_rq_fn(req);
1527        } else {
1528                req = blk_old_get_request(q, op, flags);
1529                if (!IS_ERR(req) && q->initialize_rq_fn)
1530                        q->initialize_rq_fn(req);
1531        }
1532
1533        return req;
1534}
1535EXPORT_SYMBOL(blk_get_request_flags);
1536
1537struct request *blk_get_request(struct request_queue *q, unsigned int op,
1538                                gfp_t gfp_mask)
1539{
1540        return blk_get_request_flags(q, op, gfp_mask & __GFP_DIRECT_RECLAIM ?
1541                                     0 : BLK_MQ_REQ_NOWAIT);
1542}
1543EXPORT_SYMBOL(blk_get_request);
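
/*
 * Hedged usage sketch (not part of blk-core.c): how a caller might allocate
 * a passthrough request with blk_get_request(), execute it synchronously and
 * release it again.  The function name, the REQ_OP_DRV_OUT choice and the
 * omitted payload setup (e.g. via blk_rq_map_kern()) are illustrative
 * assumptions, not requirements of this file.
 */
#if 0	/* example only, never compiled as part of blk-core.c */
static int example_exec_passthrough(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_OUT, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ...attach a payload here, e.g. with blk_rq_map_kern()... */

	blk_execute_rq(q, NULL, rq, 0);		/* insert and wait for completion */
	blk_put_request(rq);
	return 0;
}
#endif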
1544
1545/**
1546 * blk_requeue_request - put a request back on queue
1547 * @q:          request queue where request should be inserted
1548 * @rq:         request to be inserted
1549 *
1550 * Description:
1551 *    Drivers often keep queueing requests until the hardware cannot accept
1552 *    more. When that condition happens, we need to put the request back
1553 *    on the queue. Must be called with the queue lock held.
1554 */
1555void blk_requeue_request(struct request_queue *q, struct request *rq)
1556{
1557        lockdep_assert_held(q->queue_lock);
1558        WARN_ON_ONCE(q->mq_ops);
1559
1560        blk_delete_timer(rq);
1561        blk_clear_rq_complete(rq);
1562        trace_block_rq_requeue(q, rq);
1563        wbt_requeue(q->rq_wb, &rq->issue_stat);
1564
1565        if (rq->rq_flags & RQF_QUEUED)
1566                blk_queue_end_tag(q, rq);
1567
1568        BUG_ON(blk_queued_rq(rq));
1569
1570        elv_requeue_request(q, rq);
1571}
1572EXPORT_SYMBOL(blk_requeue_request);
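
/*
 * Hedged usage sketch (not part of blk-core.c): a legacy (non-mq) driver's
 * request_fn putting a request back with blk_requeue_request() when the
 * hardware is temporarily unable to accept it.  example_hw_busy() and the
 * 10 ms restart delay are illustrative assumptions.
 */
#if 0	/* example only */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* request_fn is invoked with q->queue_lock held */
	while ((rq = blk_fetch_request(q)) != NULL) {
		if (example_hw_busy()) {
			/* device full: put the request back and retry later */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, 10);		/* restart after ~10 ms */
			break;
		}
		/* hand rq to the hardware; complete it from the IRQ handler */
	}
}
#endif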
1573
1574static void add_acct_request(struct request_queue *q, struct request *rq,
1575                             int where)
1576{
1577        blk_account_io_start(rq, true);
1578        __elv_add_request(q, rq, where);
1579}
1580
1581static void part_round_stats_single(struct request_queue *q, int cpu,
1582                                    struct hd_struct *part, unsigned long now,
1583                                    unsigned int inflight)
1584{
1585        if (inflight) {
1586                __part_stat_add(cpu, part, time_in_queue,
1587                                inflight * (now - part->stamp));
1588                __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1589        }
1590        part->stamp = now;
1591}
1592
1593/**
1594 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1595 * @q: target block queue
1596 * @cpu: cpu number for stats access
1597 * @part: target partition
1598 *
1599 * The average IO queue length and utilisation statistics are maintained
1600 * by observing the current state of the queue length and the amount of
1601 * time it has been in this state for.
1602 *
1603 * Normally, that accounting is done on IO completion, but that can result
1604 * in more than a second's worth of IO being accounted for within any one
1605 * second, leading to >100% utilisation.  To deal with that, we call this
1606 * function to do a round-off before returning the results when reading
1607 * /proc/diskstats.  This accounts immediately for all queue usage up to
1608 * the current jiffies and restarts the counters again.
1609 */
1610void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
1611{
1612        struct hd_struct *part2 = NULL;
1613        unsigned long now = jiffies;
1614        unsigned int inflight[2];
1615        int stats = 0;
1616
1617        if (part->stamp != now)
1618                stats |= 1;
1619
1620        if (part->partno) {
1621                part2 = &part_to_disk(part)->part0;
1622                if (part2->stamp != now)
1623                        stats |= 2;
1624        }
1625
1626        if (!stats)
1627                return;
1628
1629        part_in_flight(q, part, inflight);
1630
1631        if (stats & 2)
1632                part_round_stats_single(q, cpu, part2, now, inflight[1]);
1633        if (stats & 1)
1634                part_round_stats_single(q, cpu, part, now, inflight[0]);
1635}
1636EXPORT_SYMBOL_GPL(part_round_stats);
1637
1638#ifdef CONFIG_PM
1639static void blk_pm_put_request(struct request *rq)
1640{
1641        if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
1642                pm_runtime_mark_last_busy(rq->q->dev);
1643}
1644#else
1645static inline void blk_pm_put_request(struct request *rq) {}
1646#endif
1647
1648void __blk_put_request(struct request_queue *q, struct request *req)
1649{
1650        req_flags_t rq_flags = req->rq_flags;
1651
1652        if (unlikely(!q))
1653                return;
1654
1655        if (q->mq_ops) {
1656                blk_mq_free_request(req);
1657                return;
1658        }
1659
1660        lockdep_assert_held(q->queue_lock);
1661
1662        blk_req_zone_write_unlock(req);
1663        blk_pm_put_request(req);
1664
1665        elv_completed_request(q, req);
1666
1667        /* this is a bio leak */
1668        WARN_ON(req->bio != NULL);
1669
1670        wbt_done(q->rq_wb, &req->issue_stat);
1671
1672        /*
1673         * Request may not have originated from ll_rw_blk. If not,
1674         * it didn't come out of our reserved rq pools
1675         */
1676        if (rq_flags & RQF_ALLOCED) {
1677                struct request_list *rl = blk_rq_rl(req);
1678                bool sync = op_is_sync(req->cmd_flags);
1679
1680                BUG_ON(!list_empty(&req->queuelist));
1681                BUG_ON(ELV_ON_HASH(req));
1682
1683                blk_free_request(rl, req);
1684                freed_request(rl, sync, rq_flags);
1685                blk_put_rl(rl);
1686                blk_queue_exit(q);
1687        }
1688}
1689EXPORT_SYMBOL_GPL(__blk_put_request);
1690
1691void blk_put_request(struct request *req)
1692{
1693        struct request_queue *q = req->q;
1694
1695        if (q->mq_ops)
1696                blk_mq_free_request(req);
1697        else {
1698                unsigned long flags;
1699
1700                spin_lock_irqsave(q->queue_lock, flags);
1701                __blk_put_request(q, req);
1702                spin_unlock_irqrestore(q->queue_lock, flags);
1703        }
1704}
1705EXPORT_SYMBOL(blk_put_request);
1706
1707bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1708                            struct bio *bio)
1709{
1710        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
1711
1712        if (!ll_back_merge_fn(q, req, bio))
1713                return false;
1714
1715        trace_block_bio_backmerge(q, req, bio);
1716
1717        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1718                blk_rq_set_mixed_merge(req);
1719
1720        req->biotail->bi_next = bio;
1721        req->biotail = bio;
1722        req->__data_len += bio->bi_iter.bi_size;
1723        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1724
1725        blk_account_io_start(req, false);
1726        return true;
1727}
1728
1729bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1730                             struct bio *bio)
1731{
1732        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
1733
1734        if (!ll_front_merge_fn(q, req, bio))
1735                return false;
1736
1737        trace_block_bio_frontmerge(q, req, bio);
1738
1739        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1740                blk_rq_set_mixed_merge(req);
1741
1742        bio->bi_next = req->bio;
1743        req->bio = bio;
1744
1745        req->__sector = bio->bi_iter.bi_sector;
1746        req->__data_len += bio->bi_iter.bi_size;
1747        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1748
1749        blk_account_io_start(req, false);
1750        return true;
1751}
1752
1753bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
1754                struct bio *bio)
1755{
1756        unsigned short segments = blk_rq_nr_discard_segments(req);
1757
1758        if (segments >= queue_max_discard_segments(q))
1759                goto no_merge;
1760        if (blk_rq_sectors(req) + bio_sectors(bio) >
1761            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1762                goto no_merge;
1763
1764        req->biotail->bi_next = bio;
1765        req->biotail = bio;
1766        req->__data_len += bio->bi_iter.bi_size;
1767        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1768        req->nr_phys_segments = segments + 1;
1769
1770        blk_account_io_start(req, false);
1771        return true;
1772no_merge:
1773        req_set_nomerge(q, req);
1774        return false;
1775}
1776
1777/**
1778 * blk_attempt_plug_merge - try to merge with %current's plugged list
1779 * @q: request_queue new bio is being queued at
1780 * @bio: new bio being queued
1781 * @request_count: out parameter for number of traversed plugged requests
1782 * @same_queue_rq: pointer to &struct request that gets filled in when
1783 * another request associated with @q is found on the plug list
1784 * (optional, may be %NULL)
1785 *
1786 * Determine whether @bio being queued on @q can be merged with a request
1787 * on %current's plugged list.  Returns %true if merge was successful,
1788 * otherwise %false.
1789 *
1790 * Plugging coalesces IOs from the same issuer for the same purpose without
1791 * going through @q->queue_lock.  As such it's more of an issuing mechanism
1792 * than scheduling, and the request, while it may have elvpriv data, is not
1793 * added to the elevator at this point.  In addition, we don't have
1794 * reliable access to the elevator outside queue lock.  Only check basic
1795 * merging parameters without querying the elevator.
1796 *
1797 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1798 */
1799bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1800                            unsigned int *request_count,
1801                            struct request **same_queue_rq)
1802{
1803        struct blk_plug *plug;
1804        struct request *rq;
1805        struct list_head *plug_list;
1806
1807        plug = current->plug;
1808        if (!plug)
1809                return false;
1810        *request_count = 0;
1811
1812        if (q->mq_ops)
1813                plug_list = &plug->mq_list;
1814        else
1815                plug_list = &plug->list;
1816
1817        list_for_each_entry_reverse(rq, plug_list, queuelist) {
1818                bool merged = false;
1819
1820                if (rq->q == q) {
1821                        (*request_count)++;
1822                        /*
1823                         * Only the blk-mq multiple hardware queues case checks
1824                         * for an rq on the same queue; there should be only one
1825                         * such rq in a queue.
1826                         */
1827                        if (same_queue_rq)
1828                                *same_queue_rq = rq;
1829                }
1830
1831                if (rq->q != q || !blk_rq_merge_ok(rq, bio))
1832                        continue;
1833
1834                switch (blk_try_merge(rq, bio)) {
1835                case ELEVATOR_BACK_MERGE:
1836                        merged = bio_attempt_back_merge(q, rq, bio);
1837                        break;
1838                case ELEVATOR_FRONT_MERGE:
1839                        merged = bio_attempt_front_merge(q, rq, bio);
1840                        break;
1841                case ELEVATOR_DISCARD_MERGE:
1842                        merged = bio_attempt_discard_merge(q, rq, bio);
1843                        break;
1844                default:
1845                        break;
1846                }
1847
1848                if (merged)
1849                        return true;
1850        }
1851
1852        return false;
1853}
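
/*
 * Hedged usage sketch (not part of blk-core.c): the submitter-side plugging
 * that blk_attempt_plug_merge() works against.  Several bios are issued
 * under one plug so they can be merged on the plug list and flushed to the
 * queue in one go.  example_submit_pages(), example_plug_end_io() and the
 * page array are illustrative assumptions.
 */
#if 0	/* example only */
static void example_plug_end_io(struct bio *bio)
{
	/* a real caller would inspect bio->bi_status here */
	bio_put(bio);
}

static void example_submit_pages(struct block_device *bdev, sector_t sector,
				 struct page **pages, int nr_pages)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr_pages; i++) {
		struct bio *bio = bio_alloc(GFP_NOIO, 1);

		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = sector + (i << (PAGE_SHIFT - 9));
		bio->bi_opf = REQ_OP_WRITE;
		bio->bi_end_io = example_plug_end_io;
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);
		submit_bio(bio);	/* may merge with bios already plugged */
	}
	blk_finish_plug(&plug);		/* flush plugged requests to the queue(s) */
}
#endif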
1854
1855unsigned int blk_plug_queued_count(struct request_queue *q)
1856{
1857        struct blk_plug *plug;
1858        struct request *rq;
1859        struct list_head *plug_list;
1860        unsigned int ret = 0;
1861
1862        plug = current->plug;
1863        if (!plug)
1864                goto out;
1865
1866        if (q->mq_ops)
1867                plug_list = &plug->mq_list;
1868        else
1869                plug_list = &plug->list;
1870
1871        list_for_each_entry(rq, plug_list, queuelist) {
1872                if (rq->q == q)
1873                        ret++;
1874        }
1875out:
1876        return ret;
1877}
1878
1879void blk_init_request_from_bio(struct request *req, struct bio *bio)
1880{
1881        struct io_context *ioc = rq_ioc(bio);
1882
1883        if (bio->bi_opf & REQ_RAHEAD)
1884                req->cmd_flags |= REQ_FAILFAST_MASK;
1885
1886        req->__sector = bio->bi_iter.bi_sector;
1887        if (ioprio_valid(bio_prio(bio)))
1888                req->ioprio = bio_prio(bio);
1889        else if (ioc)
1890                req->ioprio = ioc->ioprio;
1891        else
1892                req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
1893        req->write_hint = bio->bi_write_hint;
1894        blk_rq_bio_prep(req->q, req, bio);
1895}
1896EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
1897
1898static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1899{
1900        struct blk_plug *plug;
1901        int where = ELEVATOR_INSERT_SORT;
1902        struct request *req, *free;
1903        unsigned int request_count = 0;
1904        unsigned int wb_acct;
1905
1906        /*
1907         * A low level driver can indicate that it wants pages above a
1908         * certain limit bounced to low memory (i.e. for highmem, or even
1909         * ISA DMA in theory).
1910         */
1911        blk_queue_bounce(q, &bio);
1912
1913        blk_queue_split(q, &bio);
1914
1915        if (!bio_integrity_prep(bio))
1916                return BLK_QC_T_NONE;
1917
1918        if (op_is_flush(bio->bi_opf)) {
1919                spin_lock_irq(q->queue_lock);
1920                where = ELEVATOR_INSERT_FLUSH;
1921                goto get_rq;
1922        }
1923
1924        /*
1925         * Check if we can merge with the plugged list before grabbing
1926         * any locks.
1927         */
1928        if (!blk_queue_nomerges(q)) {
1929                if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1930                        return BLK_QC_T_NONE;
1931        } else
1932                request_count = blk_plug_queued_count(q);
1933
1934        spin_lock_irq(q->queue_lock);
1935
1936        switch (elv_merge(q, &req, bio)) {
1937        case ELEVATOR_BACK_MERGE:
1938                if (!bio_attempt_back_merge(q, req, bio))
1939                        break;
1940                elv_bio_merged(q, req, bio);
1941                free = attempt_back_merge(q, req);
1942                if (free)
1943                        __blk_put_request(q, free);
1944                else
1945                        elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
1946                goto out_unlock;
1947        case ELEVATOR_FRONT_MERGE:
1948                if (!bio_attempt_front_merge(q, req, bio))
1949                        break;
1950                elv_bio_merged(q, req, bio);
1951                free = attempt_front_merge(q, req);
1952                if (free)
1953                        __blk_put_request(q, free);
1954                else
1955                        elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
1956                goto out_unlock;
1957        default:
1958                break;
1959        }
1960
1961get_rq:
1962        wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
1963
1964        /*
1965         * Grab a free request. This might sleep but cannot fail.
1966         * Returns with the queue unlocked.
1967         */
1968        blk_queue_enter_live(q);
1969        req = get_request(q, bio->bi_opf, bio, 0);
1970        if (IS_ERR(req)) {
1971                blk_queue_exit(q);
1972                __wbt_done(q->rq_wb, wb_acct);
1973                if (PTR_ERR(req) == -ENOMEM)
1974                        bio->bi_status = BLK_STS_RESOURCE;
1975                else
1976                        bio->bi_status = BLK_STS_IOERR;
1977                bio_endio(bio);
1978                goto out_unlock;
1979        }
1980
1981        wbt_track(&req->issue_stat, wb_acct);
1982
1983        /*
1984         * After dropping the lock and possibly sleeping here, our request
1985         * may now be mergeable after it had proven unmergeable (above).
1986         * We don't worry about that case for efficiency. It won't happen
1987         * often, and the elevators are able to handle it.
1988         */
1989        blk_init_request_from_bio(req, bio);
1990
1991        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
1992                req->cpu = raw_smp_processor_id();
1993
1994        plug = current->plug;
1995        if (plug) {
1996                /*
1997                 * If this is the first request added after a plug, fire
1998                 * off a plug trace.
1999                 *
2000                 * @request_count may become stale because we may have been
2001                 * scheduled out, so check the plug list again.
2002                 */
2003                if (!request_count || list_empty(&plug->list))
2004                        trace_block_plug(q);
2005                else {
2006                        struct request *last = list_entry_rq(plug->list.prev);
2007                        if (request_count >= BLK_MAX_REQUEST_COUNT ||
2008                            blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
2009                                blk_flush_plug_list(plug, false);
2010                                trace_block_plug(q);
2011                        }
2012                }
2013                list_add_tail(&req->queuelist, &plug->list);
2014                blk_account_io_start(req, true);
2015        } else {
2016                spin_lock_irq(q->queue_lock);
2017                add_acct_request(q, req, where);
2018                __blk_run_queue(q);
2019out_unlock:
2020                spin_unlock_irq(q->queue_lock);
2021        }
2022
2023        return BLK_QC_T_NONE;
2024}
2025
2026static void handle_bad_sector(struct bio *bio)
2027{
2028        char b[BDEVNAME_SIZE];
2029
2030        printk(KERN_INFO "attempt to access beyond end of device\n");
2031        printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
2032                        bio_devname(bio, b), bio->bi_opf,
2033                        (unsigned long long)bio_end_sector(bio),
2034                        (long long)get_capacity(bio->bi_disk));
2035}
2036
2037#ifdef CONFIG_FAIL_MAKE_REQUEST
2038
2039static DECLARE_FAULT_ATTR(fail_make_request);
2040
2041static int __init setup_fail_make_request(char *str)
2042{
2043        return setup_fault_attr(&fail_make_request, str);
2044}
2045__setup("fail_make_request=", setup_fail_make_request);
2046
2047static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
2048{
2049        return part->make_it_fail && should_fail(&fail_make_request, bytes);
2050}
2051
2052static int __init fail_make_request_debugfs(void)
2053{
2054        struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
2055                                                NULL, &fail_make_request);
2056
2057        return PTR_ERR_OR_ZERO(dir);
2058}
2059
2060late_initcall(fail_make_request_debugfs);
2061
2062#else /* CONFIG_FAIL_MAKE_REQUEST */
2063
2064static inline bool should_fail_request(struct hd_struct *part,
2065                                        unsigned int bytes)
2066{
2067        return false;
2068}
2069
2070#endif /* CONFIG_FAIL_MAKE_REQUEST */
2071
2072static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
2073{
2074        if (part->policy && op_is_write(bio_op(bio))) {
2075                char b[BDEVNAME_SIZE];
2076
2077                printk(KERN_ERR
2078                       "generic_make_request: Trying to write "
2079                        "to read-only block-device %s (partno %d)\n",
2080                        bio_devname(bio, b), part->partno);
2081                return true;
2082        }
2083
2084        return false;
2085}
2086
2087static noinline int should_fail_bio(struct bio *bio)
2088{
2089        if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
2090                return -EIO;
2091        return 0;
2092}
2093ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
2094
2095/*
2096 * Remap block n of partition p to block n+start(p) of the disk.
2097 */
2098static inline int blk_partition_remap(struct bio *bio)
2099{
2100        struct hd_struct *p;
2101        int ret = 0;
2102
2103        rcu_read_lock();
2104        p = __disk_get_part(bio->bi_disk, bio->bi_partno);
2105        if (unlikely(!p || should_fail_request(p, bio->bi_iter.bi_size) ||
2106                     bio_check_ro(bio, p))) {
2107                ret = -EIO;
2108                goto out;
2109        }
2110
2111        /*
2112         * Zone reset does not include bi_size so bio_sectors() is always 0.
2113         * Include a test for the reset op code and perform the remap if needed.
2114         */
2115        if (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET)
2116                goto out;
2117
2118        bio->bi_iter.bi_sector += p->start_sect;
2119        bio->bi_partno = 0;
2120        trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
2121                              bio->bi_iter.bi_sector - p->start_sect);
2122
2123out:
2124        rcu_read_unlock();
2125        return ret;
2126}
2127
2128/*
2129 * Check whether this bio extends beyond the end of the device.
2130 */
2131static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
2132{
2133        sector_t maxsector;
2134
2135        if (!nr_sectors)
2136                return 0;
2137
2138        /* Test device or partition size, when known. */
2139        maxsector = get_capacity(bio->bi_disk);
2140        if (maxsector) {
2141                sector_t sector = bio->bi_iter.bi_sector;
2142
2143                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
2144                        /*
2145                         * This may well happen - the kernel calls bread()
2146                         * without checking the size of the device, e.g., when
2147                         * mounting a device.
2148                         */
2149                        handle_bad_sector(bio);
2150                        return 1;
2151                }
2152        }
2153
2154        return 0;
2155}
2156
2157static noinline_for_stack bool
2158generic_make_request_checks(struct bio *bio)
2159{
2160        struct request_queue *q;
2161        int nr_sectors = bio_sectors(bio);
2162        blk_status_t status = BLK_STS_IOERR;
2163        char b[BDEVNAME_SIZE];
2164
2165        might_sleep();
2166
2167        if (bio_check_eod(bio, nr_sectors))
2168                goto end_io;
2169
2170        q = bio->bi_disk->queue;
2171        if (unlikely(!q)) {
2172                printk(KERN_ERR
2173                       "generic_make_request: Trying to access "
2174                        "nonexistent block-device %s (%Lu)\n",
2175                        bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
2176                goto end_io;
2177        }
2178
2179        /*
2180         * For a REQ_NOWAIT based request, return -EOPNOTSUPP
2181         * if queue is not a request based queue.
2182 * if the queue is not a request-based queue.
2183        if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
2184                goto not_supported;
2185
2186        if (should_fail_bio(bio))
2187                goto end_io;
2188
2189        if (!bio->bi_partno) {
2190                if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
2191                        goto end_io;
2192        } else {
2193                if (blk_partition_remap(bio))
2194                        goto end_io;
2195        }
2196
2197        if (bio_check_eod(bio, nr_sectors))
2198                goto end_io;
2199
2200        /*
2201 * Filter flush bios early so that make_request-based
2202         * drivers without flush support don't have to worry
2203         * about them.
2204         */
2205        if (op_is_flush(bio->bi_opf) &&
2206            !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
2207                bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
2208                if (!nr_sectors) {
2209                        status = BLK_STS_OK;
2210                        goto end_io;
2211                }
2212        }
2213
2214        switch (bio_op(bio)) {
2215        case REQ_OP_DISCARD:
2216                if (!blk_queue_discard(q))
2217                        goto not_supported;
2218                break;
2219        case REQ_OP_SECURE_ERASE:
2220                if (!blk_queue_secure_erase(q))
2221                        goto not_supported;
2222                break;
2223        case REQ_OP_WRITE_SAME:
2224                if (!q->limits.max_write_same_sectors)
2225                        goto not_supported;
2226                break;
2227        case REQ_OP_ZONE_REPORT:
2228        case REQ_OP_ZONE_RESET:
2229                if (!blk_queue_is_zoned(q))
2230                        goto not_supported;
2231                break;
2232        case REQ_OP_WRITE_ZEROES:
2233                if (!q->limits.max_write_zeroes_sectors)
2234                        goto not_supported;
2235                break;
2236        default:
2237                break;
2238        }
2239
2240        /*
2241         * Various block parts want %current->io_context and lazy ioc
2242         * allocation ends up trading a lot of pain for a small amount of
2243 * memory.  Just allocate it upfront.  This may fail and the block
2244         * layer knows how to live with it.
2245         */
2246        create_io_context(GFP_ATOMIC, q->node);
2247
2248        if (!blkcg_bio_issue_check(q, bio))
2249                return false;
2250
2251        if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
2252                trace_block_bio_queue(q, bio);
2253                /* Now that enqueuing has been traced, we need to trace
2254                 * completion as well.
2255                 */
2256                bio_set_flag(bio, BIO_TRACE_COMPLETION);
2257        }
2258        return true;
2259
2260not_supported:
2261        status = BLK_STS_NOTSUPP;
2262end_io:
2263        bio->bi_status = status;
2264        bio_endio(bio);
2265        return false;
2266}
2267
2268/**
2269 * generic_make_request - hand a buffer to its device driver for I/O
2270 * @bio:  The bio describing the location in memory and on the device.
2271 *
2272 * generic_make_request() is used to make I/O requests of block
2273 * devices. It is passed a &struct bio, which describes the I/O that needs
2274 * to be done.
2275 *
2276 * generic_make_request() does not return any status.  The
2277 * success/failure status of the request, along with notification of
2278 * completion, is delivered asynchronously through the bio->bi_end_io
2279 * function described (one day) elsewhere.
2280 *
2281 * The caller of generic_make_request must make sure that bi_io_vec
2282 * is set to describe the memory buffer, and that bi_dev and bi_sector are
2283 * set to describe the device address, and the
2284 * bi_end_io and optionally bi_private are set to describe how
2285 * completion notification should be signaled.
2286 *
2287 * generic_make_request and the drivers it calls may use bi_next if this
2288 * bio happens to be merged with someone else, and may resubmit the bio to
2289 * a lower device by calling into generic_make_request recursively, which
2290 * means the bio should NOT be touched after the call to ->make_request_fn.
2291 */
2292blk_qc_t generic_make_request(struct bio *bio)
2293{
2294        /*
2295         * bio_list_on_stack[0] contains bios submitted by the current
2296         * make_request_fn.
2297         * bio_list_on_stack[1] contains bios that were submitted before
2298         * the current make_request_fn, but that haven't been processed
2299         * yet.
2300         */
2301        struct bio_list bio_list_on_stack[2];
2302        blk_qc_t ret = BLK_QC_T_NONE;
2303
2304        if (!generic_make_request_checks(bio))
2305                goto out;
2306
2307        /*
2308         * We only want one ->make_request_fn to be active at a time, else
2309         * stack usage with stacked devices could be a problem.  So use
2310 * current->bio_list to keep a list of requests submitted by a
2311         * make_request_fn function.  current->bio_list is also used as a
2312         * flag to say if generic_make_request is currently active in this
2313         * task or not.  If it is NULL, then no make_request is active.  If
2314         * it is non-NULL, then a make_request is active, and new requests
2315         * should be added at the tail
2316         */
2317        if (current->bio_list) {
2318                bio_list_add(&current->bio_list[0], bio);
2319                goto out;
2320        }
2321
2322        /* following loop may be a bit non-obvious, and so deserves some
2323         * explanation.
2324         * Before entering the loop, bio->bi_next is NULL (as all callers
2325         * ensure that) so we have a list with a single bio.
2326         * We pretend that we have just taken it off a longer list, so
2327         * we assign bio_list to a pointer to the bio_list_on_stack,
2328         * thus initialising the bio_list of new bios to be
2329         * added.  ->make_request() may indeed add some more bios
2330         * through a recursive call to generic_make_request.  If it
2331         * did, we find a non-NULL value in bio_list and re-enter the loop
2332         * from the top.  In this case we really did just take the bio
2333 * off the top of the list (no pretending) and so remove it from
2334         * bio_list, and call into ->make_request() again.
2335         */
2336        BUG_ON(bio->bi_next);
2337        bio_list_init(&bio_list_on_stack[0]);
2338        current->bio_list = bio_list_on_stack;
2339        do {
2340                struct request_queue *q = bio->bi_disk->queue;
2341                blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
2342                        BLK_MQ_REQ_NOWAIT : 0;
2343
2344                if (likely(blk_queue_enter(q, flags) == 0)) {
2345                        struct bio_list lower, same;
2346
2347                        /* Create a fresh bio_list for all subordinate requests */
2348                        bio_list_on_stack[1] = bio_list_on_stack[0];
2349                        bio_list_init(&bio_list_on_stack[0]);
2350                        ret = q->make_request_fn(q, bio);
2351
2352                        blk_queue_exit(q);
2353
2354                        /* sort new bios into those for a lower level
2355                         * and those for the same level
2356                         */
2357                        bio_list_init(&lower);
2358                        bio_list_init(&same);
2359                        while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
2360                                if (q == bio->bi_disk->queue)
2361                                        bio_list_add(&same, bio);
2362                                else
2363                                        bio_list_add(&lower, bio);
2364                        /* now assemble so we handle the lowest level first */
2365                        bio_list_merge(&bio_list_on_stack[0], &lower);
2366                        bio_list_merge(&bio_list_on_stack[0], &same);
2367                        bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
2368                } else {
2369                        if (unlikely(!blk_queue_dying(q) &&
2370                                        (bio->bi_opf & REQ_NOWAIT)))
2371                                bio_wouldblock_error(bio);
2372                        else
2373                                bio_io_error(bio);
2374                }
2375                bio = bio_list_pop(&bio_list_on_stack[0]);
2376        } while (bio);
2377        current->bio_list = NULL; /* deactivate */
2378
2379out:
2380        return ret;
2381}
2382EXPORT_SYMBOL(generic_make_request);
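
/*
 * Hedged sketch (not part of blk-core.c): the kind of recursive submission
 * the loop above is built to flatten.  A minimal stacking driver's
 * ->make_request_fn remaps the bio onto a lower device and feeds it back to
 * generic_make_request(); the remapped bio then lands on bio_list_on_stack
 * instead of growing the kernel stack.  struct example_stack_dev and its
 * fields are illustrative assumptions.
 */
#if 0	/* example only */
struct example_stack_dev {
	struct block_device *lower_bdev;
	sector_t start_sect;
};

static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
	struct example_stack_dev *dev = q->queuedata;

	/* remap onto the lower device and resubmit */
	bio_set_dev(bio, dev->lower_bdev);
	bio->bi_iter.bi_sector += dev->start_sect;
	return generic_make_request(bio);
}
#endif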
2383
2384/**
2385 * direct_make_request - hand a buffer directly to its device driver for I/O
2386 * @bio:  The bio describing the location in memory and on the device.
2387 *
2388 * This function behaves like generic_make_request(), but does not protect
2389 * against recursion.  Must only be used if the called driver is known
2390 * to not call generic_make_request (or direct_make_request) again from
2391 * its make_request function.  (Calling direct_make_request again from
2392 * a workqueue is perfectly fine as that doesn't recurse).
2393 */
2394blk_qc_t direct_make_request(struct bio *bio)
2395{
2396        struct request_queue *q = bio->bi_disk->queue;
2397        bool nowait = bio->bi_opf & REQ_NOWAIT;
2398        blk_qc_t ret;
2399
2400        if (!generic_make_request_checks(bio))
2401                return BLK_QC_T_NONE;
2402
2403        if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
2404                if (nowait && !blk_queue_dying(q))
2405                        bio->bi_status = BLK_STS_AGAIN;
2406                else
2407                        bio->bi_status = BLK_STS_IOERR;
2408                bio_endio(bio);
2409                return BLK_QC_T_NONE;
2410        }
2411
2412        ret = q->make_request_fn(q, bio);
2413        blk_queue_exit(q);
2414        return ret;
2415}
2416EXPORT_SYMBOL_GPL(direct_make_request);
2417
2418/**
2419 * submit_bio - submit a bio to the block device layer for I/O
2420 * @bio: The &struct bio which describes the I/O
2421 *
2422 * submit_bio() is very similar in purpose to generic_make_request(), and
2423 * uses that function to do most of the work. Both are fairly rough
2424 * interfaces; @bio must be set up beforehand and ready for I/O.
2425 *
2426 */
2427blk_qc_t submit_bio(struct bio *bio)
2428{
2429        /*
2430         * If it's a regular read/write or a barrier with data attached,
2431         * go through the normal accounting stuff before submission.
2432         */
2433        if (bio_has_data(bio)) {
2434                unsigned int count;
2435
2436                if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
2437                        count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
2438                else
2439                        count = bio_sectors(bio);
2440
2441                if (op_is_write(bio_op(bio))) {
2442                        count_vm_events(PGPGOUT, count);
2443                } else {
2444                        task_io_account_read(bio->bi_iter.bi_size);
2445                        count_vm_events(PGPGIN, count);
2446                }
2447
2448                if (unlikely(block_dump)) {
2449                        char b[BDEVNAME_SIZE];
2450                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
2451                        current->comm, task_pid_nr(current),
2452                                op_is_write(bio_op(bio)) ? "WRITE" : "READ",
2453                                (unsigned long long)bio->bi_iter.bi_sector,
2454                                bio_devname(bio, b), count);
2455                }
2456        }
2457
2458        return generic_make_request(bio);
2459}
2460EXPORT_SYMBOL(submit_bio);
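
/*
 * Hedged usage sketch (not part of blk-core.c): reading one page
 * synchronously.  submit_bio_wait() fills in bi_end_io/bi_private itself and
 * sleeps until the bio completes.  example_read_page() and its arguments are
 * illustrative assumptions.
 */
#if 0	/* example only */
static int example_read_page(struct block_device *bdev, sector_t sector,
			     struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);	/* calls submit_bio() and waits */
	bio_put(bio);
	return ret;			/* 0 on success, negative errno on error */
}
#endif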
2461
2462bool blk_poll(struct request_queue *q, blk_qc_t cookie)
2463{
2464        if (!q->poll_fn || !blk_qc_t_valid(cookie))
2465                return false;
2466
2467        if (current->plug)
2468                blk_flush_plug_list(current->plug, false);
2469        return q->poll_fn(q, cookie);
2470}
2471EXPORT_SYMBOL_GPL(blk_poll);
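
/*
 * Hedged usage sketch (not part of blk-core.c): a synchronous direct I/O
 * style caller can spin on the cookie returned by submit_bio() instead of
 * sleeping, roughly as the blkdev direct I/O path does.  The completion and
 * example_poll_end_io() are illustrative assumptions; if the queue has no
 * poll_fn, blk_poll() simply returns false and the interrupt-driven
 * completion still fires.
 */
#if 0	/* example only */
static void example_poll_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static void example_submit_and_poll(struct block_device *bdev, struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK(done);
	blk_qc_t cookie;

	bio->bi_private = &done;
	bio->bi_end_io = example_poll_end_io;
	cookie = submit_bio(bio);

	while (!completion_done(&done)) {
		if (!blk_poll(bdev_get_queue(bdev), cookie))
			cpu_relax();	/* polling unsupported or no progress */
	}
}
#endif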
2472
2473/**
2474 * blk_cloned_rq_check_limits - Helper function to check a cloned request
2475 *                              for the new queue limits
2476 * @q:  the queue
2477 * @rq: the request being checked
2478 *
2479 * Description:
2480 *    @rq may have been made based on weaker limitations of upper-level queues
2481 *    in request stacking drivers, and it may violate the limitations of @q.
2482 *    Since the block layer and the underlying device driver trust @rq
2483 *    after it is inserted into @q, it should be checked against @q before
2484 *    the insertion using this generic function.
2485 *
2486 *    Request stacking drivers like request-based dm may change the queue
2487 *    limits when retrying requests on other queues. Those requests need
2488 *    to be checked against the new queue limits again during dispatch.
2489 */
2490static int blk_cloned_rq_check_limits(struct request_queue *q,
2491                                      struct request *rq)
2492{
2493        if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
2494                printk(KERN_ERR "%s: over max size limit.\n", __func__);
2495                return -EIO;
2496        }
2497
2498        /*
2499         * The queue's settings related to segment counting, like q->bounce_pfn,
2500         * may differ from those of other stacking queues.
2501         * Recalculate it to check the request correctly against this queue's
2502         * limits.
2503         */
2504        blk_recalc_rq_segments(rq);
2505        if (rq->nr_phys_segments > queue_max_segments(q)) {
2506                printk(KERN_ERR "%s: over max segments limit.\n", __func__);
2507                return -EIO;
2508        }
2509
2510        return 0;
2511}
2512
2513/**
2514 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2515 * @q:  the queue to submit the request
2516 * @rq: the request being queued
2517 */
2518blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
2519{
2520        unsigned long flags;
2521        int where = ELEVATOR_INSERT_BACK;
2522
2523        if (blk_cloned_rq_check_limits(q, rq))
2524                return BLK_STS_IOERR;
2525
2526        if (rq->rq_disk &&
2527            should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
2528                return BLK_STS_IOERR;
2529
2530        if (q->mq_ops) {
2531                if (blk_queue_io_stat(q))
2532                        blk_account_io_start(rq, true);
2533                /*
2534                 * Since we have a scheduler attached to the top device,
2535                 * bypass a potential scheduler on the bottom device for
2536                 * insert.
2537                 */
2538                return blk_mq_request_issue_directly(rq);
2539        }
2540
2541        spin_lock_irqsave(q->queue_lock, flags);
2542        if (unlikely(blk_queue_dying(q))) {
2543                spin_unlock_irqrestore(q->queue_lock, flags);
2544                return BLK_STS_IOERR;
2545        }
2546
2547        /*
2548         * The submitted request must be dequeued before calling this function
2549         * because it will be linked to another request_queue.
2550         */
2551        BUG_ON(blk_queued_rq(rq));
2552
2553        if (op_is_flush(rq->cmd_flags))
2554                where = ELEVATOR_INSERT_FLUSH;
2555
2556        add_acct_request(q, rq, where);
2557        if (where == ELEVATOR_INSERT_FLUSH)
2558                __blk_run_queue(q);
2559        spin_unlock_irqrestore(q->queue_lock, flags);
2560
2561        return BLK_STS_OK;
2562}
2563EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
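
/*
 * Hedged usage sketch (not part of blk-core.c): a request stacking driver
 * (request-based dm is the in-tree user) dispatching a prepared clone to the
 * lower queue it selected.  example_map_clone() and the requeue handling are
 * illustrative assumptions.
 */
#if 0	/* example only */
static void example_dispatch_clone(struct request *orig)
{
	struct request *clone = example_map_clone(orig);	/* set up by the stacker */
	blk_status_t ret;

	ret = blk_insert_cloned_request(clone->q, clone);
	if (ret != BLK_STS_OK) {
		/* undo the clone and requeue or fail the original request */
	}
}
#endif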
2564
2565/**
2566 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
2567 * @rq: request to examine
2568 *
2569 * Description:
2570 *     A request could be a merge of IOs which require different failure
2571 *     handling.  This function determines the number of bytes which
2572 *     can be failed from the beginning of the request without
2573 *     crossing into an area which needs to be retried further.
2574 *
2575 * Return:
2576 *     The number of bytes to fail.
2577 */
2578unsigned int blk_rq_err_bytes(const struct request *rq)
2579{
2580        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
2581        unsigned int bytes = 0;
2582        struct bio *bio;
2583
2584        if (!(rq->rq_flags & RQF_MIXED_MERGE))
2585                return blk_rq_bytes(rq);
2586
2587        /*
2588         * Currently the only 'mixing' which can happen is between
2589         * different failfast types.  We can safely fail portions
2590         * which have all the failfast bits that the first one has -
2591         * the ones which are at least as eager to fail as the first
2592         * one.
2593         */
2594        for (bio = rq->bio; bio; bio = bio->bi_next) {
2595                if ((bio->bi_opf & ff) != ff)
2596                        break;
2597                bytes += bio->bi_iter.bi_size;
2598        }
2599
2600        /* this could lead to an infinite loop */
2601        BUG_ON(blk_rq_bytes(rq) && !bytes);
2602        return bytes;
2603}
2604EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
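
/*
 * Hedged usage sketch (not part of blk-core.c): on a medium error a legacy
 * driver can fail just the leading failfast portion of a mixed-merge request
 * and leave the remainder to be retried.  With the queue lock held:
 *
 *	if (!__blk_end_request(rq, BLK_STS_IOERR, blk_rq_err_bytes(rq)))
 *		return;
 *
 * A %false return means the whole request was completed; otherwise rq still
 * holds the bytes that must be retried.
 */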
2605
2606void blk_account_io_completion(struct request *req, unsigned int bytes)
2607{
2608        if (blk_do_io_stat(req)) {
2609                const int rw = rq_data_dir(req);
2610                struct hd_struct *part;
2611                int cpu;
2612
2613                cpu = part_stat_lock();
2614                part = req->part;
2615                part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2616                part_stat_unlock();
2617        }
2618}
2619
2620void blk_account_io_done(struct request *req)
2621{
2622        /*
2623         * Account IO completion.  flush_rq isn't accounted as a
2624         * normal IO on queueing or completion.  Accounting the
2625         * containing request is enough.
2626         */
2627        if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
2628                unsigned long duration = jiffies - req->start_time;
2629                const int rw = rq_data_dir(req);
2630                struct hd_struct *part;
2631                int cpu;
2632
2633                cpu = part_stat_lock();
2634                part = req->part;
2635
2636                part_stat_inc(cpu, part, ios[rw]);
2637                part_stat_add(cpu, part, ticks[rw], duration);
2638                part_round_stats(req->q, cpu, part);
2639                part_dec_in_flight(req->q, part, rw);
2640
2641                hd_struct_put(part);
2642                part_stat_unlock();
2643        }
2644}
2645
2646#ifdef CONFIG_PM
2647/*
2648 * Don't process normal requests when queue is suspended
2649 * or in the process of suspending/resuming
2650 */
2651static bool blk_pm_allow_request(struct request *rq)
2652{
2653        switch (rq->q->rpm_status) {
2654        case RPM_RESUMING:
2655        case RPM_SUSPENDING:
2656                return rq->rq_flags & RQF_PM;
2657        case RPM_SUSPENDED:
2658                return false;
2659        }
2660
2661        return true;
2662}
2663#else
2664static bool blk_pm_allow_request(struct request *rq)
2665{
2666        return true;
2667}
2668#endif
2669
2670void blk_account_io_start(struct request *rq, bool new_io)
2671{
2672        struct hd_struct *part;
2673        int rw = rq_data_dir(rq);
2674        int cpu;
2675
2676        if (!blk_do_io_stat(rq))
2677                return;
2678
2679        cpu = part_stat_lock();
2680
2681        if (!new_io) {
2682                part = rq->part;
2683                part_stat_inc(cpu, part, merges[rw]);
2684        } else {
2685                part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
2686                if (!hd_struct_try_get(part)) {
2687                        /*
2688                         * The partition is already being removed;
2689                         * the request will be accounted to the disk only.
2690                         *
2691                         * We take a reference on disk->part0 although that
2692                         * partition will never be deleted, so we can treat
2693                         * it as any other partition.
2694                         */
2695                        part = &rq->rq_disk->part0;
2696                        hd_struct_get(part);
2697                }
2698                part_round_stats(rq->q, cpu, part);
2699                part_inc_in_flight(rq->q, part, rw);
2700                rq->part = part;
2701        }
2702
2703        part_stat_unlock();
2704}
2705
2706static struct request *elv_next_request(struct request_queue *q)
2707{
2708        struct request *rq;
2709        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
2710
2711        WARN_ON_ONCE(q->mq_ops);
2712
2713        while (1) {
2714                list_for_each_entry(rq, &q->queue_head, queuelist) {
2715                        if (blk_pm_allow_request(rq))
2716                                return rq;
2717
2718                        if (rq->rq_flags & RQF_SOFTBARRIER)
2719                                break;
2720                }
2721
2722                /*
2723                 * If a flush request is running and the flush request isn't
2724                 * queueable in the drive, we can hold the queue until the flush
2725                 * request is finished. Even if we don't do this, the driver can't
2726                 * dispatch the next requests and will requeue them. Doing this can
2727                 * also improve throughput. For example, take requests flush1,
2728                 * write1, flush2: flush1 is dispatched, then the queue is held and
2729                 * write1 isn't inserted into the queue. After flush1 finishes,
2730                 * flush2 is dispatched. Since the disk cache is already clean,
2731                 * flush2 finishes very quickly, so it looks as if flush2 was
2732                 * folded into flush1.
2733                 * Since the queue is held, a flag is set to indicate that the
2734                 * queue should be restarted later. Please see flush_end_io() for
2735                 * details.
2736                 */
2737                if (fq->flush_pending_idx != fq->flush_running_idx &&
2738                                !queue_flush_queueable(q)) {
2739                        fq->flush_queue_delayed = 1;
2740                        return NULL;
2741                }
2742                if (unlikely(blk_queue_bypass(q)) ||
2743                    !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
2744                        return NULL;
2745        }
2746}
2747
2748/**
2749 * blk_peek_request - peek at the top of a request queue
2750 * @q: request queue to peek at
2751 *
2752 * Description:
2753 *     Return the request at the top of @q.  The returned request
2754 *     should be started using blk_start_request() before the LLD starts
2755 *     processing it.
2756 *
2757 * Return:
2758 *     Pointer to the request at the top of @q if available.  Null
2759 *     otherwise.
2760 */
2761struct request *blk_peek_request(struct request_queue *q)
2762{
2763        struct request *rq;
2764        int ret;
2765
2766        lockdep_assert_held(q->queue_lock);
2767        WARN_ON_ONCE(q->mq_ops);
2768
2769        while ((rq = elv_next_request(q)) != NULL) {
2770                if (!(rq->rq_flags & RQF_STARTED)) {
2771                        /*
2772                         * This is the first time the device driver
2773                         * sees this request (possibly after
2774                         * requeueing).  Notify IO scheduler.
2775                         */
2776                        if (rq->rq_flags & RQF_SORTED)
2777                                elv_activate_rq(q, rq);
2778
2779                        /*
2780                         * Just mark it as started even if we don't start
2781                         * it; a request that has been delayed should
2782                         * not be passed by new incoming requests.
2783                         */
2784                        rq->rq_flags |= RQF_STARTED;
2785                        trace_block_rq_issue(q, rq);
2786                }
2787
2788                if (!q->boundary_rq || q->boundary_rq == rq) {
2789                        q->end_sector = rq_end_sector(rq);
2790                        q->boundary_rq = NULL;
2791                }
2792
2793                if (rq->rq_flags & RQF_DONTPREP)
2794                        break;
2795
2796                if (q->dma_drain_size && blk_rq_bytes(rq)) {
2797                        /*
2798                         * Make sure space for the drain appears.  We
2799                         * know we can do this because max_hw_segments
2800                         * has been adjusted to be one fewer than the
2801                         * device can handle.
2802                         */
2803                        rq->nr_phys_segments++;
2804                }
2805
2806                if (!q->prep_rq_fn)
2807                        break;
2808
2809                ret = q->prep_rq_fn(q, rq);
2810                if (ret == BLKPREP_OK) {
2811                        break;
2812                } else if (ret == BLKPREP_DEFER) {
2813                        /*
2814                         * the request may have been (partially) prepped.
2815                         * we need to keep this request in the front to
2816                         * avoid resource deadlock.  RQF_STARTED will
2817                         * prevent other fs requests from passing this one.
2818                         */
2819                        if (q->dma_drain_size && blk_rq_bytes(rq) &&
2820                            !(rq->rq_flags & RQF_DONTPREP)) {
2821                                /*
2822                                 * remove the space for the drain we added
2823                                 * so that we don't add it again
2824                                 */
2825                                --rq->nr_phys_segments;
2826                        }
2827
2828                        rq = NULL;
2829                        break;
2830                } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
2831                        rq->rq_flags |= RQF_QUIET;
2832                        /*
2833                         * Mark this request as started so we don't trigger
2834                         * any debug logic in the end I/O path.
2835                         */
2836                        blk_start_request(rq);
2837                        __blk_end_request_all(rq, ret == BLKPREP_INVALID ?
2838                                        BLK_STS_TARGET : BLK_STS_IOERR);
2839                } else {
2840                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2841                        break;
2842                }
2843        }
2844
2845        return rq;
2846}
2847EXPORT_SYMBOL(blk_peek_request);
2848
2849static void blk_dequeue_request(struct request *rq)
2850{
2851        struct request_queue *q = rq->q;
2852
2853        BUG_ON(list_empty(&rq->queuelist));
2854        BUG_ON(ELV_ON_HASH(rq));
2855
2856        list_del_init(&rq->queuelist);
2857
2858        /*
2859         * The time frame between a request being removed from the lists
2860         * and when it is freed is accounted as I/O that is in progress on
2861         * the driver side.
2862         */
2863        if (blk_account_rq(rq)) {
2864                q->in_flight[rq_is_sync(rq)]++;
2865                set_io_start_time_ns(rq);
2866        }
2867}
2868
2869/**
2870 * blk_start_request - start request processing on the driver
2871 * @req: request to dequeue
2872 *
2873 * Description:
2874 *     Dequeue @req and start timeout timer on it.  This hands off the
2875 *     request to the driver.
2876 */
2877void blk_start_request(struct request *req)
2878{
2879        lockdep_assert_held(req->q->queue_lock);
2880        WARN_ON_ONCE(req->q->mq_ops);
2881
2882        blk_dequeue_request(req);
2883
2884        if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
2885                blk_stat_set_issue(&req->issue_stat, blk_rq_sectors(req));
2886                req->rq_flags |= RQF_STATS;
2887                wbt_issue(req->q->rq_wb, &req->issue_stat);
2888        }
2889
2890        BUG_ON(blk_rq_is_complete(req));
2891        blk_add_timer(req);
2892}
2893EXPORT_SYMBOL(blk_start_request);
2894
2895/**
2896 * blk_fetch_request - fetch a request from a request queue
2897 * @q: request queue to fetch a request from
2898 *
2899 * Description:
2900 *     Return the request at the top of @q.  The request is started on
2901 *     return and LLD can start processing it immediately.
2902 *
2903 * Return:
2904 *     Pointer to the request at the top of @q if available.  Null
2905 *     otherwise.
2906 */
2907struct request *blk_fetch_request(struct request_queue *q)
2908{
2909        struct request *rq;
2910
2911        lockdep_assert_held(q->queue_lock);
2912        WARN_ON_ONCE(q->mq_ops);
2913
2914        rq = blk_peek_request(q);
2915        if (rq)
2916                blk_start_request(rq);
2917        return rq;
2918}
2919EXPORT_SYMBOL(blk_fetch_request);
2920
2921/*
2922 * Steal bios from a request and add them to a bio list.
2923 * The request must not have been partially completed before.
2924 */
2925void blk_steal_bios(struct bio_list *list, struct request *rq)
2926{
2927        if (rq->bio) {
2928                if (list->tail)
2929                        list->tail->bi_next = rq->bio;
2930                else
2931                        list->head = rq->bio;
2932                list->tail = rq->biotail;
2933
2934                rq->bio = NULL;
2935                rq->biotail = NULL;
2936        }
2937
2938        rq->__data_len = 0;
2939}
2940EXPORT_SYMBOL_GPL(blk_steal_bios);
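
/*
 * Hedged usage sketch (not part of blk-core.c): a multipath-style driver can
 * detach the bios from a failed request with blk_steal_bios() and resubmit
 * them later on another path, roughly as the NVMe multipath code does.  The
 * requeue list, its lock and the later resubmission are illustrative
 * assumptions.
 */
#if 0	/* example only */
static void example_failover_req(struct request *req,
				 struct bio_list *requeue_list, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	blk_steal_bios(requeue_list, req);	/* req->bio is NULL afterwards */
	spin_unlock_irqrestore(lock, flags);

	blk_mq_end_request(req, BLK_STS_OK);	/* complete the now bio-less request */
	/* later: pop bios off requeue_list and generic_make_request() them */
}
#endif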
2941
2942/**
2943 * blk_update_request - Special helper function for request stacking drivers
2944 * @req:      the request being processed
2945 * @error:    block status code
2946 * @nr_bytes: number of bytes to complete @req
2947 *
2948 * Description:
2949 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
2950 *     the request structure even if @req doesn't have leftover.
2951 *     If @req has leftover, sets it up for the next range of segments.
2952 *
2953 *     This special helper function is only for request stacking drivers
2954 *     (e.g. request-based dm) so that they can handle partial completion.
2955 *     Actual device drivers should use blk_end_request instead.
2956 *
2957 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2958 *     %false return from this function.
2959 *
2960 * Return:
2961 *     %false - this request doesn't have any more data
2962 *     %true  - this request has more data
2963 **/
2964bool blk_update_request(struct request *req, blk_status_t error,
2965                unsigned int nr_bytes)
2966{
2967        int total_bytes;
2968
2969        trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
2970
2971        if (!req->bio)
2972                return false;
2973
2974        if (unlikely(error && !blk_rq_is_passthrough(req) &&
2975                     !(req->rq_flags & RQF_QUIET)))
2976                print_req_error(req, error);
2977
2978        blk_account_io_completion(req, nr_bytes);
2979
2980        total_bytes = 0;
2981        while (req->bio) {
2982                struct bio *bio = req->bio;
2983                unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
2984
2985                if (bio_bytes == bio->bi_iter.bi_size)
2986                        req->bio = bio->bi_next;
2987
2988                /* Completion has already been traced */
2989                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
2990                req_bio_endio(req, bio, bio_bytes, error);
2991
2992                total_bytes += bio_bytes;
2993                nr_bytes -= bio_bytes;
2994
2995                if (!nr_bytes)
2996                        break;
2997        }
2998
2999        /*
3000         * completely done
3001         */
3002        if (!req->bio) {
3003                /*
3004                 * Reset counters so that the request stacking driver
3005                 * can find how many bytes remain in the request
3006                 * later.
3007                 */
3008                req->__data_len = 0;
3009                return false;
3010        }
3011
3012        req->__data_len -= total_bytes;
3013
3014        /* update sector only for requests with clear definition of sector */
3015        if (!blk_rq_is_passthrough(req))
3016                req->__sector += total_bytes >> 9;
3017
3018        /* mixed attributes always follow the first bio */
3019        if (req->rq_flags & RQF_MIXED_MERGE) {
3020                req->cmd_flags &= ~REQ_FAILFAST_MASK;
3021                req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
3022        }
3023
3024        if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
3025                /*
3026                 * If total number of sectors is less than the first segment
3027                 * size, something has gone terribly wrong.
3028                 */
3029                if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
3030                        blk_dump_rq_flags(req, "request botched");
3031                        req->__data_len = blk_rq_cur_bytes(req);
3032                }
3033
3034                /* recalculate the number of segments */
3035                blk_recalc_rq_segments(req);
3036        }
3037
3038        return true;
3039}
3040EXPORT_SYMBOL_GPL(blk_update_request);
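
/*
 * Illustrative sketch (not part of the original source): a request stacking
 * driver feeding a partial completion of 'done' bytes from a lower device
 * back into the original request 'orig_rq'.  Both names are hypothetical;
 * request-based dm is a real user of this pattern.  While
 * blk_update_request() returns %true the original request still has bytes
 * outstanding; once it returns %false the request can be finished through
 * the normal completion path:
 *
 *	if (blk_update_request(orig_rq, BLK_STS_OK, done))
 *		return;
 *	blk_end_request_all(orig_rq, BLK_STS_OK);
 */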
3041
3042static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
3043                                    unsigned int nr_bytes,
3044                                    unsigned int bidi_bytes)
3045{
3046        if (blk_update_request(rq, error, nr_bytes))
3047                return true;
3048
3049        /* Bidi request must be completed as a whole */
3050        if (unlikely(blk_bidi_rq(rq)) &&
3051            blk_update_request(rq->next_rq, error, bidi_bytes))
3052                return true;
3053
3054        if (blk_queue_add_random(rq->q))
3055                add_disk_randomness(rq->rq_disk);
3056
3057        return false;
3058}
3059
3060/**
3061 * blk_unprep_request - unprepare a request
3062 * @req:        the request
3063 *
3064 * This function makes a request ready for complete resubmission (or
3065 * completion).  It happens only after all error handling is complete,
3066 * so it represents the appropriate moment to deallocate any resources
3067 * that were allocated to the request in the prep_rq_fn.  The queue
3068 * lock is held when calling this.
3069 */
3070void blk_unprep_request(struct request *req)
3071{
3072        struct request_queue *q = req->q;
3073
3074        req->rq_flags &= ~RQF_DONTPREP;
3075        if (q->unprep_rq_fn)
3076                q->unprep_rq_fn(q, req);
3077}
3078EXPORT_SYMBOL_GPL(blk_unprep_request);
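
/*
 * Illustrative sketch (not part of the original source): a driver that
 * allocates a per-request resource in its prep_rq_fn and sets RQF_DONTPREP
 * would register a matching unprep function so the resource is released once
 * error handling has finished.  example_unprep() and the use of rq->special
 * as the resource pointer are hypothetical:
 *
 *	static void example_unprep(struct request_queue *q, struct request *rq)
 *	{
 *		kfree(rq->special);
 *		rq->special = NULL;
 *	}
 *
 *	blk_queue_unprep_rq(q, example_unprep);
 */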
3079
3080void blk_finish_request(struct request *req, blk_status_t error)
3081{
3082        struct request_queue *q = req->q;
3083
3084        lockdep_assert_held(req->q->queue_lock);
3085        WARN_ON_ONCE(q->mq_ops);
3086
3087        if (req->rq_flags & RQF_STATS)
3088                blk_stat_add(req);
3089
3090        if (req->rq_flags & RQF_QUEUED)
3091                blk_queue_end_tag(q, req);
3092
3093        BUG_ON(blk_queued_rq(req));
3094
3095        if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
3096                laptop_io_completion(req->q->backing_dev_info);
3097
3098        blk_delete_timer(req);
3099
3100        if (req->rq_flags & RQF_DONTPREP)
3101                blk_unprep_request(req);
3102
3103        blk_account_io_done(req);
3104
3105        if (req->end_io) {
3106                wbt_done(req->q->rq_wb, &req->issue_stat);
3107                req->end_io(req, error);
3108        } else {
3109                if (blk_bidi_rq(req))
3110                        __blk_put_request(req->next_rq->q, req->next_rq);
3111
3112                __blk_put_request(q, req);
3113        }
3114}
3115EXPORT_SYMBOL(blk_finish_request);
3116
3117/**
3118 * blk_end_bidi_request - Complete a bidi request
3119 * @rq:         the request to complete
3120 * @error:      block status code
3121 * @nr_bytes:   number of bytes to complete @rq
3122 * @bidi_bytes: number of bytes to complete @rq->next_rq
3123 *
3124 * Description:
3125 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
3126 *     Drivers that support bidi can safely call this function for any
3127 *     type of request, bidi or uni.  In the latter case @bidi_bytes is
3128 *     just ignored.
3129 *
3130 * Return:
3131 *     %false - we are done with this request
3132 *     %true  - still buffers pending for this request
3133 **/
3134static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
3135                                 unsigned int nr_bytes, unsigned int bidi_bytes)
3136{
3137        struct request_queue *q = rq->q;
3138        unsigned long flags;
3139
3140        WARN_ON_ONCE(q->mq_ops);
3141
3142        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
3143                return true;
3144
3145        spin_lock_irqsave(q->queue_lock, flags);
3146        blk_finish_request(rq, error);
3147        spin_unlock_irqrestore(q->queue_lock, flags);
3148
3149        return false;
3150}
3151
3152/**
3153 * __blk_end_bidi_request - Complete a bidi request with queue lock held
3154 * @rq:         the request to complete
3155 * @error:      block status code
3156 * @nr_bytes:   number of bytes to complete @rq
3157 * @bidi_bytes: number of bytes to complete @rq->next_rq
3158 *
3159 * Description:
3160 *     Identical to blk_end_bidi_request() except that queue lock is
3161 *     assumed to be locked on entry and remains so on return.
3162 *
3163 * Return:
3164 *     %false - we are done with this request
3165 *     %true  - still buffers pending for this request
3166 **/
3167static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
3168                                   unsigned int nr_bytes, unsigned int bidi_bytes)
3169{
3170        lockdep_assert_held(rq->q->queue_lock);
3171        WARN_ON_ONCE(rq->q->mq_ops);
3172
3173        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
3174                return true;
3175
3176        blk_finish_request(rq, error);
3177
3178        return false;
3179}
3180
3181/**
3182 * blk_end_request - Helper function for drivers to complete the request.
3183 * @rq:       the request being processed
3184 * @error:    block status code
3185 * @nr_bytes: number of bytes to complete
3186 *
3187 * Description:
3188 *     Ends I/O on a number of bytes attached to @rq.
3189 *     If @rq has leftover, sets it up for the next range of segments.
3190 *
3191 * Return:
3192 *     %false - we are done with this request
3193 *     %true  - still buffers pending for this request
3194 **/
3195bool blk_end_request(struct request *rq, blk_status_t error,
3196                unsigned int nr_bytes)
3197{
3198        WARN_ON_ONCE(rq->q->mq_ops);
3199        return blk_end_bidi_request(rq, error, nr_bytes, 0);
3200}
3201EXPORT_SYMBOL(blk_end_request);
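
/*
 * Illustrative sketch (not part of the original source): a legacy
 * (single-queue) driver completing 'bytes' of its current request from the
 * interrupt handler without holding the queue lock.  'rq', 'dev' and
 * example_start_next_request() are hypothetical:
 *
 *	if (!blk_end_request(rq, BLK_STS_OK, bytes)) {
 *		dev->rq = NULL;
 *		example_start_next_request(dev);
 *	}
 */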
3202
3203/**
3204 * blk_end_request_all - Helper function for drivers to finish the request.
3205 * @rq: the request to finish
3206 * @error: block status code
3207 *
3208 * Description:
3209 *     Completely finish @rq.
3210 */
3211void blk_end_request_all(struct request *rq, blk_status_t error)
3212{
3213        bool pending;
3214        unsigned int bidi_bytes = 0;
3215
3216        if (unlikely(blk_bidi_rq(rq)))
3217                bidi_bytes = blk_rq_bytes(rq->next_rq);
3218
3219        pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
3220        BUG_ON(pending);
3221}
3222EXPORT_SYMBOL(blk_end_request_all);
3223
3224/**
3225 * __blk_end_request - Helper function for drivers to complete the request.
3226 * @rq:       the request being processed
3227 * @error:    block status code
3228 * @nr_bytes: number of bytes to complete
3229 *
3230 * Description:
3231 *     Must be called with queue lock held, unlike blk_end_request().
3232 *
3233 * Return:
3234 *     %false - we are done with this request
3235 *     %true  - still buffers pending for this request
3236 **/
3237bool __blk_end_request(struct request *rq, blk_status_t error,
3238                unsigned int nr_bytes)
3239{
3240        lockdep_assert_held(rq->q->queue_lock);
3241        WARN_ON_ONCE(rq->q->mq_ops);
3242
3243        return __blk_end_bidi_request(rq, error, nr_bytes, 0);
3244}
3245EXPORT_SYMBOL(__blk_end_request);
3246
3247/**
3248 * __blk_end_request_all - Helper function for drivers to finish the request.
3249 * @rq: the request to finish
3250 * @error:    block status code
3251 *
3252 * Description:
3253 *     Completely finish @rq.  Must be called with queue lock held.
3254 */
3255void __blk_end_request_all(struct request *rq, blk_status_t error)
3256{
3257        bool pending;
3258        unsigned int bidi_bytes = 0;
3259
3260        lockdep_assert_held(rq->q->queue_lock);
3261        WARN_ON_ONCE(rq->q->mq_ops);
3262
3263        if (unlikely(blk_bidi_rq(rq)))
3264                bidi_bytes = blk_rq_bytes(rq->next_rq);
3265
3266        pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
3267        BUG_ON(pending);
3268}
3269EXPORT_SYMBOL(__blk_end_request_all);
3270
3271/**
3272 * __blk_end_request_cur - Helper function to finish the current request chunk.
3273 * @rq: the request to finish the current chunk for
3274 * @error:    block status code
3275 *
3276 * Description:
3277 *     Complete the current consecutively mapped chunk from @rq.  Must
3278 *     be called with queue lock held.
3279 *
3280 * Return:
3281 *     %false - we are done with this request
3282 *     %true  - still buffers pending for this request
3283 */
3284bool __blk_end_request_cur(struct request *rq, blk_status_t error)
3285{
3286        return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
3287}
3288EXPORT_SYMBOL(__blk_end_request_cur);
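
/*
 * Illustrative sketch (not part of the original source): an old-style
 * chunk-at-a-time driver finishing the chunk it just transferred and, when
 * nothing is left, fetching the next request with the legacy
 * blk_fetch_request() helper.  The surrounding 'q', 'rq' and 'flags'
 * handling is hypothetical:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	if (!__blk_end_request_cur(rq, BLK_STS_OK))
 *		rq = blk_fetch_request(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */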
3289
3290void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
3291                     struct bio *bio)
3292{
3293        if (bio_has_data(bio))
3294                rq->nr_phys_segments = bio_phys_segments(q, bio);
3295        else if (bio_op(bio) == REQ_OP_DISCARD)
3296                rq->nr_phys_segments = 1;
3297
3298        rq->__data_len = bio->bi_iter.bi_size;
3299        rq->bio = rq->biotail = bio;
3300
3301        if (bio->bi_disk)
3302                rq->rq_disk = bio->bi_disk;
3303}
3304
3305#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
3306/**
3307 * rq_flush_dcache_pages - Helper function to flush all pages in a request
3308 * @rq: the request to be flushed
3309 *
3310 * Description:
3311 *     Flush all pages in @rq.
3312 */
3313void rq_flush_dcache_pages(struct request *rq)
3314{
3315        struct req_iterator iter;
3316        struct bio_vec bvec;
3317
3318        rq_for_each_segment(bvec, rq, iter)
3319                flush_dcache_page(bvec.bv_page);
3320}
3321EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
3322#endif
3323
3324/**
3325 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
3326 * @q : the queue of the device being checked
3327 *
3328 * Description:
3329 *    Check if underlying low-level drivers of a device are busy.
3330 *    If the drivers want to export their busy state, they must set their
3331 *    own exporting function using blk_queue_lld_busy() first.
3332 *
3333 *    Basically, this function is used only by request stacking drivers
3334 *    to stop dispatching requests to underlying devices when underlying
3335 *    devices are busy.  This behavior allows more I/O merging on the queue
3336 *    of the request stacking driver and prevents I/O throughput regressions
3337 *    under bursty I/O load.
3338 *
3339 * Return:
3340 *    0 - Not busy (The request stacking driver should dispatch requests)
3341 *    1 - Busy (The request stacking driver should stop dispatching requests)
3342 */
3343int blk_lld_busy(struct request_queue *q)
3344{
3345        if (q->lld_busy_fn)
3346                return q->lld_busy_fn(q);
3347
3348        return 0;
3349}
3350EXPORT_SYMBOL_GPL(blk_lld_busy);
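
/*
 * Illustrative sketch (not part of the original source): a low-level driver
 * exporting its busy state with blk_queue_lld_busy(), which a request
 * stacking driver then reads through blk_lld_busy() before dispatching.
 * example_lld_busy(), struct example_dev and its fields are hypothetical:
 *
 *	static int example_lld_busy(struct request_queue *q)
 *	{
 *		struct example_dev *dev = q->queuedata;
 *
 *		return dev->in_flight >= dev->busy_threshold;
 *	}
 *
 *	blk_queue_lld_busy(q, example_lld_busy);
 */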
3351
3352/**
3353 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3354 * @rq: the clone request to be cleaned up
3355 *
3356 * Description:
3357 *     Free all bios in @rq for a cloned request.
3358 */
3359void blk_rq_unprep_clone(struct request *rq)
3360{
3361        struct bio *bio;
3362
3363        while ((bio = rq->bio) != NULL) {
3364                rq->bio = bio->bi_next;
3365
3366                bio_put(bio);
3367        }
3368}
3369EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3370
3371/*
3372 * Copy attributes of the original request to the clone request.
3373 * The actual data parts (e.g. ->cmd, ->sense) are not copied.
3374 */
3375static void __blk_rq_prep_clone(struct request *dst, struct request *src)
3376{
3377        dst->cpu = src->cpu;
3378        dst->__sector = blk_rq_pos(src);
3379        dst->__data_len = blk_rq_bytes(src);
3380        dst->nr_phys_segments = src->nr_phys_segments;
3381        dst->ioprio = src->ioprio;
3382        dst->extra_len = src->extra_len;
3383}
3384
3385/**
3386 * blk_rq_prep_clone - Helper function to setup clone request
3387 * @rq: the request to be setup
3388 * @rq_src: original request to be cloned
3389 * @bs: bio_set that bios for clone are allocated from
3390 * @gfp_mask: memory allocation mask for bio
3391 * @bio_ctr: setup function to be called for each clone bio.
3392 *           Returns %0 for success, non %0 for failure.
3393 * @data: private data to be passed to @bio_ctr
3394 *
3395 * Description:
3396 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3397 *     The actual data parts of @rq_src (e.g. ->cmd, ->sense)
3398 *     are not copied, and copying such parts is the caller's responsibility.
3399 *     Also, pages which the original bios are pointing to are not copied
3400 *     and the cloned bios just point to the same pages.
3401 *     So cloned bios must be completed before original bios, which means
3402 *     the caller must complete @rq before @rq_src.
3403 */
3404int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3405                      struct bio_set *bs, gfp_t gfp_mask,
3406                      int (*bio_ctr)(struct bio *, struct bio *, void *),
3407                      void *data)
3408{
3409        struct bio *bio, *bio_src;
3410
3411        if (!bs)
3412                bs = fs_bio_set;
3413
3414        __rq_for_each_bio(bio_src, rq_src) {
3415                bio = bio_clone_fast(bio_src, gfp_mask, bs);
3416                if (!bio)
3417                        goto free_and_out;
3418
3419                if (bio_ctr && bio_ctr(bio, bio_src, data))
3420                        goto free_and_out;
3421
3422                if (rq->bio) {
3423                        rq->biotail->bi_next = bio;
3424                        rq->biotail = bio;
3425                } else
3426                        rq->bio = rq->biotail = bio;
3427        }
3428
3429        __blk_rq_prep_clone(rq, rq_src);
3430
3431        return 0;
3432
3433free_and_out:
3434        if (bio)
3435                bio_put(bio);
3436        blk_rq_unprep_clone(rq);
3437
3438        return -ENOMEM;
3439}
3440EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
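
/*
 * Illustrative sketch (not part of the original source): a request stacking
 * driver cloning an incoming request before handing it to a lower device.
 * example_bio_ctr(), example_clone_endio(), 'info', 'example_bs' and the
 * pre-allocated 'clone_rq' are hypothetical; request-based dm does the
 * equivalent with its own bio constructor:
 *
 *	static int example_bio_ctr(struct bio *clone, struct bio *orig,
 *				   void *data)
 *	{
 *		struct example_io *info = data;
 *
 *		clone->bi_end_io = example_clone_endio;
 *		clone->bi_private = info;
 *		return 0;
 *	}
 *
 *	blk_rq_init(NULL, clone_rq);
 *	if (blk_rq_prep_clone(clone_rq, orig_rq, example_bs, GFP_ATOMIC,
 *			      example_bio_ctr, info))
 *		return -ENOMEM;
 */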
3441
3442int kblockd_schedule_work(struct work_struct *work)
3443{
3444        return queue_work(kblockd_workqueue, work);
3445}
3446EXPORT_SYMBOL(kblockd_schedule_work);
3447
3448int kblockd_schedule_work_on(int cpu, struct work_struct *work)
3449{
3450        return queue_work_on(cpu, kblockd_workqueue, work);
3451}
3452EXPORT_SYMBOL(kblockd_schedule_work_on);
3453
3454int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
3455                                unsigned long delay)
3456{
3457        return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
3458}
3459EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
3460
3461/**
3462 * blk_start_plug - initialize blk_plug and track it inside the task_struct
3463 * @plug:       The &struct blk_plug that needs to be initialized
3464 *
3465 * Description:
3466 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
3467 *   pending I/O should the task end up blocking between blk_start_plug() and
3468 *   blk_finish_plug(). This is important from a performance perspective, but
3469 *   also ensures that we don't deadlock. For instance, if the task is blocking
3470 *   for a memory allocation, memory reclaim could end up wanting to free a
3471 *   page belonging to that request that is currently residing in our private
3472 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
3473 *   this kind of deadlock.
3474 */
3475void blk_start_plug(struct blk_plug *plug)
3476{
3477        struct task_struct *tsk = current;
3478
3479        /*
3480         * If this is a nested plug, don't actually assign it.
3481         */
3482        if (tsk->plug)
3483                return;
3484
3485        INIT_LIST_HEAD(&plug->list);
3486        INIT_LIST_HEAD(&plug->mq_list);
3487        INIT_LIST_HEAD(&plug->cb_list);
3488        /*
3489         * Store ordering should not be needed here, since a potential
3490         * preempt will imply a full memory barrier
3491         */
3492        tsk->plug = plug;
3493}
3494EXPORT_SYMBOL(blk_start_plug);
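
/*
 * Illustrative sketch (not part of the original source): a submitter
 * batching several bios under one plug so they can be merged and are
 * dispatched together when the plug is finished (or when the task blocks).
 * The 'bios' array and 'nr' are hypothetical:
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */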
3495
3496static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
3497{
3498        struct request *rqa = container_of(a, struct request, queuelist);
3499        struct request *rqb = container_of(b, struct request, queuelist);
3500
3501        return !(rqa->q < rqb->q ||
3502                (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
3503}
3504
3505/*
3506 * If 'from_schedule' is true, then postpone the dispatch of requests
3507 * until a safe kblockd context. We do this to avoid unexpectedly large
3508 * additional stack usage in driver dispatch, in places where the original
3509 * plugger did not intend it.
3510 */
3511static void queue_unplugged(struct request_queue *q, unsigned int depth,
3512                            bool from_schedule)
3513        __releases(q->queue_lock)
3514{
3515        lockdep_assert_held(q->queue_lock);
3516
3517        trace_block_unplug(q, depth, !from_schedule);
3518
3519        if (from_schedule)
3520                blk_run_queue_async(q);
3521        else
3522                __blk_run_queue(q);
3523        spin_unlock(q->queue_lock);
3524}
3525
3526static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
3527{
3528        LIST_HEAD(callbacks);
3529
3530        while (!list_empty(&plug->cb_list)) {
3531                list_splice_init(&plug->cb_list, &callbacks);
3532
3533                while (!list_empty(&callbacks)) {
3534                        struct blk_plug_cb *cb = list_first_entry(&callbacks,
3535                                                          struct blk_plug_cb,
3536                                                          list);
3537                        list_del(&cb->list);
3538                        cb->callback(cb, from_schedule);
3539                }
3540        }
3541}
3542
3543struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
3544                                      int size)
3545{
3546        struct blk_plug *plug = current->plug;
3547        struct blk_plug_cb *cb;
3548
3549        if (!plug)
3550                return NULL;
3551
3552        list_for_each_entry(cb, &plug->cb_list, list)
3553                if (cb->callback == unplug && cb->data == data)
3554                        return cb;
3555
3556        /* Not currently on the callback list */
3557        BUG_ON(size < sizeof(*cb));
3558        cb = kzalloc(size, GFP_ATOMIC);
3559        if (cb) {
3560                cb->data = data;
3561                cb->callback = unplug;
3562                list_add(&cb->list, &plug->cb_list);
3563        }
3564        return cb;
3565}
3566EXPORT_SYMBOL(blk_check_plugged);
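
/*
 * Illustrative sketch (not part of the original source): a stacking driver
 * (md does something very similar) deferring work until unplug time by
 * attaching a callback to the current plug.  The callback owns the
 * allocated blk_plug_cb and must free it.  example_unplug(), struct
 * example_dev and example_dispatch() are hypothetical:
 *
 *	static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
 *	{
 *		struct example_dev *dev = cb->data;
 *
 *		kfree(cb);
 *		example_dispatch(dev, from_schedule);
 *	}
 *
 *	if (!blk_check_plugged(example_unplug, dev, sizeof(struct blk_plug_cb)))
 *		example_dispatch(dev, false);
 */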
3567
3568void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3569{
3570        struct request_queue *q;
3571        unsigned long flags;
3572        struct request *rq;
3573        LIST_HEAD(list);
3574        unsigned int depth;
3575
3576        flush_plug_callbacks(plug, from_schedule);
3577
3578        if (!list_empty(&plug->mq_list))
3579                blk_mq_flush_plug_list(plug, from_schedule);
3580
3581        if (list_empty(&plug->list))
3582                return;
3583
3584        list_splice_init(&plug->list, &list);
3585
3586        list_sort(NULL, &list, plug_rq_cmp);
3587
3588        q = NULL;
3589        depth = 0;
3590
3591        /*
3592         * Save and disable interrupts here, to avoid doing it for every
3593         * queue lock we have to take.
3594         */
3595        local_irq_save(flags);
3596        while (!list_empty(&list)) {
3597                rq = list_entry_rq(list.next);
3598                list_del_init(&rq->queuelist);
3599                BUG_ON(!rq->q);
3600                if (rq->q != q) {
3601                        /*
3602                         * This drops the queue lock
3603                         */
3604                        if (q)
3605                                queue_unplugged(q, depth, from_schedule);
3606                        q = rq->q;
3607                        depth = 0;
3608                        spin_lock(q->queue_lock);
3609                }
3610
3611                /*
3612                 * Short-circuit if @q is dead
3613                 */
3614                if (unlikely(blk_queue_dying(q))) {
3615                        __blk_end_request_all(rq, BLK_STS_IOERR);
3616                        continue;
3617                }
3618
3619                /*
3620                 * rq is already accounted, so use raw insert
3621                 */
3622                if (op_is_flush(rq->cmd_flags))
3623                        __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3624                else
3625                        __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
3626
3627                depth++;
3628        }
3629
3630        /*
3631         * This drops the queue lock
3632         */
3633        if (q)
3634                queue_unplugged(q, depth, from_schedule);
3635
3636        local_irq_restore(flags);
3637}
3638
3639void blk_finish_plug(struct blk_plug *plug)
3640{
3641        if (plug != current->plug)
3642                return;
3643        blk_flush_plug_list(plug, false);
3644
3645        current->plug = NULL;
3646}
3647EXPORT_SYMBOL(blk_finish_plug);
3648
3649#ifdef CONFIG_PM
3650/**
3651 * blk_pm_runtime_init - Block layer runtime PM initialization routine
3652 * @q: the queue of the device
3653 * @dev: the device the queue belongs to
3654 *
3655 * Description:
3656 *    Initialize runtime-PM-related fields for @q and start auto suspend for
3657 *    @dev. Drivers that want to take advantage of request-based runtime PM
3658 *    should call this function after @dev has been initialized, and its
3659 *    request queue @q has been allocated, and runtime PM for it cannot
3660 *    happen yet (either because it is disabled/forbidden or its usage_count
3661 *    is > 0). In most cases the driver should call this before any I/O occurs.
3662 *
3663 *    This function takes care of setting up autosuspend for the device; the
3664 *    autosuspend delay is set to -1 to make runtime suspend impossible until
3665 *    an updated value is set either by the user or by the driver. Drivers do
3666 *    not need to touch other autosuspend settings.
3667 *
3668 *    Block layer runtime PM is request based, so it only works for drivers
3669 *    that use requests as their I/O unit, not those that use bios directly.
3670 */
3671void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3672{
3673        /* RQF_PM and ->rpm_status are not supported in blk-mq yet */
3674        if (q->mq_ops)
3675                return;
3676
3677        q->dev = dev;
3678        q->rpm_status = RPM_ACTIVE;
3679        pm_runtime_set_autosuspend_delay(q->dev, -1);
3680        pm_runtime_use_autosuspend(q->dev);
3681}
3682EXPORT_SYMBOL(blk_pm_runtime_init);
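
/*
 * Illustrative sketch (not part of the original source): a driver wiring up
 * request-based runtime PM at probe time.  The 5 second autosuspend delay
 * and the 'q'/'dev' names are hypothetical; a driver may equally leave the
 * delay at -1 and let userspace configure it later:
 *
 *	blk_pm_runtime_init(q, dev);
 *	pm_runtime_set_autosuspend_delay(dev, 5000);
 *	pm_runtime_allow(dev);
 */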
3683
3684/**
3685 * blk_pre_runtime_suspend - Pre runtime suspend check
3686 * @q: the queue of the device
3687 *
3688 * Description:
3689 *    This function will check if runtime suspend is allowed for the device
3690 *    by examining if there are any requests pending in the queue. If there
3691 *    are requests pending, the device can not be runtime suspended; otherwise,
3692 *    the queue's status will be updated to SUSPENDING and the driver can
3693 *    proceed to suspend the device.
3694 *
3695 *    In the not-allowed case, we mark last busy for the device so that the
3696 *    runtime PM core will try to autosuspend it again some time later.
3697 *
3698 *    This function should be called near the start of the device's
3699 *    runtime_suspend callback.
3700 *
3701 * Return:
3702 *    0         - OK to runtime suspend the device
3703 *    -EBUSY    - Device should not be runtime suspended
3704 */
3705int blk_pre_runtime_suspend(struct request_queue *q)
3706{
3707        int ret = 0;
3708
3709        if (!q->dev)
3710                return ret;
3711
3712        spin_lock_irq(q->queue_lock);
3713        if (q->nr_pending) {
3714                ret = -EBUSY;
3715                pm_runtime_mark_last_busy(q->dev);
3716        } else {
3717                q->rpm_status = RPM_SUSPENDING;
3718        }
3719        spin_unlock_irq(q->queue_lock);
3720        return ret;
3721}
3722EXPORT_SYMBOL(blk_pre_runtime_suspend);
3723
3724/**
3725 * blk_post_runtime_suspend - Post runtime suspend processing
3726 * @q: the queue of the device
3727 * @err: return value of the device's runtime_suspend function
3728 *
3729 * Description:
3730 *    Update the queue's runtime status according to the return value of the
3731 *    device's runtime suspend function and mark last busy for the device so
3732 *    that the PM core will try to autosuspend the device at a later time.
3733 *
3734 *    This function should be called near the end of the device's
3735 *    runtime_suspend callback.
3736 */
3737void blk_post_runtime_suspend(struct request_queue *q, int err)
3738{
3739        if (!q->dev)
3740                return;
3741
3742        spin_lock_irq(q->queue_lock);
3743        if (!err) {
3744                q->rpm_status = RPM_SUSPENDED;
3745        } else {
3746                q->rpm_status = RPM_ACTIVE;
3747                pm_runtime_mark_last_busy(q->dev);
3748        }
3749        spin_unlock_irq(q->queue_lock);
3750}
3751EXPORT_SYMBOL(blk_post_runtime_suspend);
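
/*
 * Illustrative sketch (not part of the original source): the shape of a
 * driver's runtime_suspend callback using the two helpers above.  struct
 * example_dev, the dev_get_drvdata() usage and example_power_down() are
 * hypothetical:
 *
 *	static int example_runtime_suspend(struct device *dev)
 *	{
 *		struct example_dev *edev = dev_get_drvdata(dev);
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(edev->queue);
 *		if (err)
 *			return err;
 *		err = example_power_down(edev);
 *		blk_post_runtime_suspend(edev->queue, err);
 *		return err;
 *	}
 */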
3752
3753/**
3754 * blk_pre_runtime_resume - Pre runtime resume processing
3755 * @q: the queue of the device
3756 *
3757 * Description:
3758 *    Update the queue's runtime status to RESUMING in preparation for the
3759 *    runtime resume of the device.
3760 *
3761 *    This function should be called near the start of the device's
3762 *    runtime_resume callback.
3763 */
3764void blk_pre_runtime_resume(struct request_queue *q)
3765{
3766        if (!q->dev)
3767                return;
3768
3769        spin_lock_irq(q->queue_lock);
3770        q->rpm_status = RPM_RESUMING;
3771        spin_unlock_irq(q->queue_lock);
3772}
3773EXPORT_SYMBOL(blk_pre_runtime_resume);
3774
3775/**
3776 * blk_post_runtime_resume - Post runtime resume processing
3777 * @q: the queue of the device
3778 * @err: return value of the device's runtime_resume function
3779 *
3780 * Description:
3781 *    Update the queue's runtime status according to the return value of the
3782 *    device's runtime_resume function. If it is successfully resumed, process
3783 *    the requests that were queued up while the device was resuming, and
3784 *    then mark last busy and initiate autosuspend for it.
3785 *
3786 *    This function should be called near the end of the device's
3787 *    runtime_resume callback.
3788 */
3789void blk_post_runtime_resume(struct request_queue *q, int err)
3790{
3791        if (!q->dev)
3792                return;
3793
3794        spin_lock_irq(q->queue_lock);
3795        if (!err) {
3796                q->rpm_status = RPM_ACTIVE;
3797                __blk_run_queue(q);
3798                pm_runtime_mark_last_busy(q->dev);
3799                pm_request_autosuspend(q->dev);
3800        } else {
3801                q->rpm_status = RPM_SUSPENDED;
3802        }
3803        spin_unlock_irq(q->queue_lock);
3804}
3805EXPORT_SYMBOL(blk_post_runtime_resume);
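
/*
 * Illustrative sketch (not part of the original source): the matching
 * runtime_resume callback, again with hypothetical driver helpers:
 *
 *	static int example_runtime_resume(struct device *dev)
 *	{
 *		struct example_dev *edev = dev_get_drvdata(dev);
 *		int err;
 *
 *		blk_pre_runtime_resume(edev->queue);
 *		err = example_power_up(edev);
 *		blk_post_runtime_resume(edev->queue, err);
 *		return err;
 *	}
 */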
3806
3807/**
3808 * blk_set_runtime_active - Force runtime status of the queue to be active
3809 * @q: the queue of the device
3810 *
3811 * If the device is left runtime suspended during system suspend, the resume
3812 * hook typically resumes the device and corrects its runtime status
3813 * accordingly. However, that does not affect the queue runtime PM status,
3814 * which is still "suspended". This prevents the queue's requests from
3815 * being processed.
3816 *
3817 * This function can be used in a driver's resume hook to correct the queue's
3818 * runtime PM status and re-enable peeking at requests from the queue. It
3819 * should be called before the first request is added to the queue.
3820 */
3821void blk_set_runtime_active(struct request_queue *q)
3822{
3823        spin_lock_irq(q->queue_lock);
3824        q->rpm_status = RPM_ACTIVE;
3825        pm_runtime_mark_last_busy(q->dev);
3826        pm_request_autosuspend(q->dev);
3827        spin_unlock_irq(q->queue_lock);
3828}
3829EXPORT_SYMBOL(blk_set_runtime_active);
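
/*
 * Illustrative sketch (not part of the original source): calling
 * blk_set_runtime_active() from a driver's system resume hook once the
 * device has been powered up, so the queue starts handing out requests
 * again.  struct example_dev and example_power_up() are hypothetical:
 *
 *	static int example_resume(struct device *dev)
 *	{
 *		struct example_dev *edev = dev_get_drvdata(dev);
 *		int err;
 *
 *		err = example_power_up(edev);
 *		if (!err)
 *			blk_set_runtime_active(edev->queue);
 *		return err;
 *	}
 */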
3830#endif
3831
3832int __init blk_dev_init(void)
3833{
3834        BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
3835        BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3836                        FIELD_SIZEOF(struct request, cmd_flags));
3837        BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3838                        FIELD_SIZEOF(struct bio, bi_opf));
3839
3840        /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3841        kblockd_workqueue = alloc_workqueue("kblockd",
3842                                            WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
3843        if (!kblockd_workqueue)
3844                panic("Failed to create kblockd\n");
3845
3846        request_cachep = kmem_cache_create("blkdev_requests",
3847                        sizeof(struct request), 0, SLAB_PANIC, NULL);
3848
3849        blk_requestq_cachep = kmem_cache_create("request_queue",
3850                        sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3851
3852#ifdef CONFIG_DEBUG_FS
3853        blk_debugfs_root = debugfs_create_dir("block", NULL);
3854#endif
3855
3856        return 0;
3857}
3858