linux/include/linux/blk-mq.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>
#include <linux/lockdep.h>

struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
        struct {
                /** @lock: Protects the dispatch list. */
                spinlock_t              lock;
                /**
                 * @dispatch: Used for requests that are ready to be
                 * dispatched to the hardware but for some reason (e.g. lack of
                 * resources) could not be sent to the hardware. As soon as the
                 * driver can send new requests, requests on this list are
                 * sent first, for fairer dispatch.
                 */
                struct list_head        dispatch;
                /**
                 * @state: BLK_MQ_S_* flags. Defines the state of the hw
                 * queue (active, scheduled to restart, stopped).
                 */
                unsigned long           state;
        } ____cacheline_aligned_in_smp;

        /**
         * @run_work: Used for scheduling a hardware queue run at a later time.
         */
        struct delayed_work     run_work;
        /** @cpumask: Map of available CPUs where this hctx can run. */
        cpumask_var_t           cpumask;
        /**
         * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
         * selection from @cpumask.
         */
        int                     next_cpu;
        /**
         * @next_cpu_batch: Counter of how many queue runs are left in the
         * batch before switching to the next CPU.
         */
        int                     next_cpu_batch;

        /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
        unsigned long           flags;

        /**
         * @sched_data: Pointer owned by the IO scheduler attached to a request
         * queue. It's up to the IO scheduler how to use this pointer.
         */
        void                    *sched_data;
        /**
         * @queue: Pointer to the request queue that owns this hardware context.
         */
        struct request_queue    *queue;
        /** @fq: Queue of requests that need to perform a flush operation. */
        struct blk_flush_queue  *fq;

        /**
         * @driver_data: Pointer to data owned by the block driver that created
         * this hctx
         */
        void                    *driver_data;

        /**
         * @ctx_map: Bitmap for each software queue. If bit is on, there is a
         * pending request in that software queue.
         */
        struct sbitmap          ctx_map;

        /**
         * @dispatch_from: Software queue to be used when no scheduler was
         * selected.
         */
        struct blk_mq_ctx       *dispatch_from;
        /**
         * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
         * decide if the hw_queue is busy, using an Exponential Weighted Moving
         * Average algorithm.
         */
        unsigned int            dispatch_busy;

        /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
        unsigned short          type;
        /** @nr_ctx: Number of software queues. */
        unsigned short          nr_ctx;
        /** @ctxs: Array of software queues. */
        struct blk_mq_ctx       **ctxs;

        /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
        spinlock_t              dispatch_wait_lock;
        /**
         * @dispatch_wait: Waitqueue entry used to wait when no tag is
         * available, so the dispatch can be retried once tags are freed.
         */
        wait_queue_entry_t      dispatch_wait;

        /**
         * @wait_index: Index used to pick the next tag waitqueue on which
         * @dispatch_wait is placed.
         */
        atomic_t                wait_index;

        /**
         * @tags: Tags owned by the block driver. A tag in this set is only
         * assigned when a request is dispatched from a hardware queue.
         */
        struct blk_mq_tags      *tags;
        /**
         * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
         * scheduler associated with a request queue, a tag is assigned when
         * that request is allocated. Otherwise, this member is not used.
         */
        struct blk_mq_tags      *sched_tags;

        /** @queued: Number of queued requests. */
        unsigned long           queued;
        /** @run: Number of dispatched requests. */
        unsigned long           run;
#define BLK_MQ_MAX_DISPATCH_ORDER       7
        /**
         * @dispatched: Histogram of the number of requests dispatched per
         * queue run (power-of-two buckets).
         */
        unsigned long           dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

        /** @numa_node: NUMA node the storage adapter has been connected to. */
        unsigned int            numa_node;
        /** @queue_num: Index of this hardware queue. */
        unsigned int            queue_num;

        /**
         * @nr_active: Number of active requests. Only used when a tag set is
         * shared across request queues.
         */
        atomic_t                nr_active;

        /**
         * @cpuhp_online: List node for the CPU hotplug callback that is
         * invoked when a CPU of this hctx is going offline.
         */
        struct hlist_node       cpuhp_online;
        /**
         * @cpuhp_dead: List node for the CPU hotplug callback that requeues
         * requests left behind when a CPU dies.
         */
        struct hlist_node       cpuhp_dead;
        /** @kobj: Kernel object for sysfs. */
        struct kobject          kobj;

        /** @poll_considered: Count of times blk_poll() was called. */
        unsigned long           poll_considered;
        /** @poll_invoked: Count how many requests blk_poll() polled. */
        unsigned long           poll_invoked;
        /** @poll_success: Count how many polled requests were completed. */
        unsigned long           poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
        /**
         * @debugfs_dir: debugfs directory for this hardware queue. Named
         * hctx<queue_num>.
         */
        struct dentry           *debugfs_dir;
        /** @sched_debugfs_dir: debugfs directory for the scheduler. */
        struct dentry           *sched_debugfs_dir;
#endif

        /**
         * @hctx_list: if this hctx is not in use, this is an entry in
         * q->unused_hctx_list.
         */
        struct list_head        hctx_list;

        /**
         * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
         * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
         * blk_mq_hw_ctx_size().
         */
        struct srcu_struct      srcu[];
};
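
/*
 * Example (editorial sketch, not part of the original header): a driver's
 * ->init_hctx() callback is the usual place to attach per-hardware-queue
 * driver state to @driver_data. The example_* names are hypothetical.
 */
#if 0
static int example_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
                             unsigned int hctx_idx)
{
        struct example_dev *dev = driver_data;  /* tag_set.driver_data */

        /* Pair this hctx with the matching hardware submission queue. */
        hctx->driver_data = &dev->hw_queues[hctx_idx];
        return 0;
}
#endif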

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *      with nr_cpu_ids elements. Each element has a value in the range
 *      [@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *      driver to map each hardware queue type (enum hctx_type) onto a distinct
 *      set of hardware queues.
 */
struct blk_mq_queue_map {
        unsigned int *mq_map;
        unsigned int nr_queues;
        unsigned int queue_offset;
};
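
/*
 * Example (editorial sketch, not part of the original header): a driver's
 * ->map_queues() callback typically hands each map in its tag set (defined
 * further below) to blk_mq_map_queues() for a default CPU-to-hw-queue spread.
 * The function name example_map_queues is hypothetical.
 */
#if 0
static int example_map_queues(struct blk_mq_tag_set *set)
{
        int i;

        /* Spread CPUs over the hw queues of every map the driver registered. */
        for (i = 0; i < set->nr_maps; i++)
                blk_mq_map_queues(&set->map[i]);

        return 0;
}
#endif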

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:  All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:     Just for READ I/O.
 * @HCTX_TYPE_POLL:     Polled I/O of any kind.
 * @HCTX_MAX_TYPES:     Number of types of hctx.
 */
enum hctx_type {
        HCTX_TYPE_DEFAULT,
        HCTX_TYPE_READ,
        HCTX_TYPE_POLL,

        HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @map:           One or more ctx -> hctx mappings. One map exists for each
 *                 hardware queue type (enum hctx_type) that the driver wishes
 *                 to support. There are no restrictions on maps being of the
 *                 same size, and it's perfectly legal to share maps between
 *                 types.
 * @nr_maps:       Number of elements in the @map array. A number in the range
 *                 [1, HCTX_MAX_TYPES].
 * @ops:           Pointers to functions that implement block driver behavior.
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *                 owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *                 allocations.
 * @cmd_size:      Number of additional bytes to allocate per request. The block
 *                 driver owns these additional bytes.
 * @numa_node:     NUMA node the storage adapter has been connected to.
 * @timeout:       Request processing timeout in jiffies.
 * @flags:         Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *                 tag set.
 * @active_queues_shared_sbitmap:
 *                 Number of active request queues per tag set.
 * @__bitmap_tags: A shared tags sbitmap, used over all hctx's.
 * @__breserved_tags:
 *                 A shared reserved tags sbitmap, used over all hctx's.
 * @tags:          Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *                 elements.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:      List of the request queues that use this tag set. See also
 *                 request_queue.tag_set_list.
 */
struct blk_mq_tag_set {
        struct blk_mq_queue_map map[HCTX_MAX_TYPES];
        unsigned int            nr_maps;
        const struct blk_mq_ops *ops;
        unsigned int            nr_hw_queues;
        unsigned int            queue_depth;
        unsigned int            reserved_tags;
        unsigned int            cmd_size;
        int                     numa_node;
        unsigned int            timeout;
        unsigned int            flags;
        void                    *driver_data;
        atomic_t                active_queues_shared_sbitmap;

        struct sbitmap_queue    __bitmap_tags;
        struct sbitmap_queue    __breserved_tags;
        struct blk_mq_tags      **tags;

        struct mutex            tag_list_lock;
        struct list_head        tag_list;
};
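
/*
 * Example (editorial sketch, not part of the original header): minimal tag
 * set setup as a driver might do at probe time. The names example_dev,
 * example_mq_ops and example_cmd are hypothetical; error handling is
 * abbreviated.
 */
#if 0
static int example_setup_tag_set(struct example_dev *dev)
{
        struct blk_mq_tag_set *set = &dev->tag_set;

        memset(set, 0, sizeof(*set));
        set->ops = &example_mq_ops;             /* struct blk_mq_ops, see below */
        set->nr_hw_queues = 1;                  /* single submission queue */
        set->queue_depth = 64;                  /* tags per hardware queue */
        set->numa_node = NUMA_NO_NODE;
        set->cmd_size = sizeof(struct example_cmd);     /* per-request PDU */
        set->timeout = 30 * HZ;
        set->flags = BLK_MQ_F_SHOULD_MERGE;
        set->driver_data = dev;

        return blk_mq_alloc_tag_set(set);       /* undone by blk_mq_free_tag_set() */
}
#endif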

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: True if this is the last request in the current batch being queued;
 *        if false, more requests will follow before the hardware needs to be
 *        notified.
 */
struct blk_mq_queue_data {
        struct request *rq;
        bool last;
};

typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
                bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
        /**
         * @queue_rq: Queue a new request from block IO.
         */
        blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
                                 const struct blk_mq_queue_data *);

        /**
         * @commit_rqs: If a driver uses bd->last to judge when to submit
         * requests to hardware, it must define this function. In case of errors
         * that make us stop issuing further requests, this hook serves the
         * purpose of kicking the hardware (which the last request otherwise
         * would have done).
         */
        void (*commit_rqs)(struct blk_mq_hw_ctx *);

        /**
         * @get_budget: Reserve a budget before queueing a request. Once
         * .queue_rq has run, it is the driver's responsibility to release the
         * reserved budget. The failure case of .get_budget also has to be
         * handled to avoid I/O deadlock.
         */
        int (*get_budget)(struct request_queue *);

        /**
         * @put_budget: Release the reserved budget.
         */
        void (*put_budget)(struct request_queue *, int);

        /**
         * @set_rq_budget_token: Store the request's budget token.
         */
        void (*set_rq_budget_token)(struct request *, int);
        /**
         * @get_rq_budget_token: Retrieve the request's budget token.
         */
        int (*get_rq_budget_token)(struct request *);

        /**
         * @timeout: Called on request timeout.
         */
        enum blk_eh_timer_return (*timeout)(struct request *, bool);

        /**
         * @poll: Called to poll for completion of a specific tag.
         */
        int (*poll)(struct blk_mq_hw_ctx *);

        /**
         * @complete: Mark the request as complete.
         */
        void (*complete)(struct request *);

        /**
         * @init_hctx: Called when the block layer side of a hardware queue has
         * been set up, allowing the driver to allocate/init matching
         * structures.
         */
        int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
        /**
         * @exit_hctx: Ditto for exit/teardown.
         */
        void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

        /**
         * @init_request: Called for every command allocated by the block layer
         * to allow the driver to set up driver specific data.
         *
         * Tags greater than or equal to queue_depth are reserved for setting
         * up flush requests.
         */
        int (*init_request)(struct blk_mq_tag_set *set, struct request *,
                            unsigned int, unsigned int);
        /**
         * @exit_request: Ditto for exit/teardown.
         */
        void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
                             unsigned int);

        /**
         * @initialize_rq_fn: Called from inside blk_get_request().
         */
        void (*initialize_rq_fn)(struct request *rq);

        /**
         * @cleanup_rq: Called before freeing a request that has not been
         * completed; usually used to free driver-private data.
         */
        void (*cleanup_rq)(struct request *);

        /**
         * @busy: If set, returns whether or not this queue currently is busy.
         */
        bool (*busy)(struct request_queue *);

        /**
         * @map_queues: Allows drivers to specify their own queue mapping by
         * overriding the setup-time function that builds the mq_map.
         */
        int (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
        /**
         * @show_rq: Used by the debugfs implementation to show driver-specific
         * information about a request.
         */
        void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
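
/*
 * Example (editorial sketch, not part of the original header): a minimal
 * blk_mq_ops table with a ->queue_rq() implementation. The example_* names
 * and the example_hw_submit() helper are hypothetical; a real driver would
 * map and program the hardware here.
 */
#if 0
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct example_cmd *cmd = blk_mq_rq_to_pdu(rq); /* per-request PDU */

        blk_mq_start_request(rq);               /* mark the request as started */

        if (example_hw_submit(hctx->driver_data, cmd, bd->last))
                return BLK_STS_RESOURCE;        /* hw busy: blk-mq will retry */

        return BLK_STS_OK;                      /* completion happens later via
                                                 * blk_mq_complete_request()   */
}

static const struct blk_mq_ops example_mq_ops = {
        .queue_rq       = example_queue_rq,
        .complete       = example_complete_rq,  /* called on the completion path */
        .map_queues     = example_map_queues,   /* see the sketch further above */
};
#endif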
 396
 397enum {
 398        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
 399        BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
 400        /*
 401         * Set when this device requires underlying blk-mq device for
 402         * completing IO:
 403         */
 404        BLK_MQ_F_STACKING       = 1 << 2,
 405        BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
 406        BLK_MQ_F_BLOCKING       = 1 << 5,
 407        /* Do not allow an I/O scheduler to be configured. */
 408        BLK_MQ_F_NO_SCHED       = 1 << 6,
 409        /*
 410         * Select 'none' during queue registration in case of a single hwq
 411         * or shared hwqs instead of 'mq-deadline'.
 412         */
 413        BLK_MQ_F_NO_SCHED_BY_DEFAULT    = 1 << 7,
 414        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
 415        BLK_MQ_F_ALLOC_POLICY_BITS = 1,
 416
 417        BLK_MQ_S_STOPPED        = 0,
 418        BLK_MQ_S_TAG_ACTIVE     = 1,
 419        BLK_MQ_S_SCHED_RESTART  = 2,
 420
 421        /* hw queue is inactive after all its CPUs become offline */
 422        BLK_MQ_S_INACTIVE       = 3,
 423
 424        BLK_MQ_MAX_DEPTH        = 10240,
 425
 426        BLK_MQ_CPU_WORK_BATCH   = 8,
 427};
 428#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
 429        ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
 430                ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
 431#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
 432        ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
 433                << BLK_MQ_F_ALLOC_POLICY_START_BIT)
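
/*
 * Example (editorial sketch, not part of the original header): how a driver
 * can encode a tag allocation policy into the tag set flags. This assumes the
 * BLK_TAG_ALLOC_RR constant from <linux/blkdev.h> (round-robin allocation).
 */
#if 0
static void example_set_alloc_policy(struct blk_mq_tag_set *set)
{
        /* Request round-robin tag allocation instead of the FIFO default. */
        set->flags |= BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
}
#endif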

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
                struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)                               \
({                                                                      \
        static struct lock_class_key __key;                             \
                                                                        \
        __blk_mq_alloc_disk(set, queuedata, &__key);                    \
})
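
/*
 * Example (editorial sketch, not part of the original header): allocating a
 * gendisk plus request queue from a tag set at probe time. The example_*
 * names are hypothetical; error handling is abbreviated.
 */
#if 0
static int example_add_disk(struct example_dev *dev)
{
        struct gendisk *disk;

        disk = blk_mq_alloc_disk(&dev->tag_set, dev);   /* queuedata = dev */
        if (IS_ERR(disk))
                return PTR_ERR(disk);

        disk->fops = &example_block_ops;        /* hypothetical block_device_operations */
        sprintf(disk->disk_name, "example0");
        set_capacity(disk, dev->nr_sectors);
        add_disk(disk);
        return 0;
}
#endif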
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                struct request_queue *q);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
                const struct blk_mq_ops *ops, unsigned int queue_depth,
                unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
        /* return when out of requests */
        BLK_MQ_REQ_NOWAIT       = (__force blk_mq_req_flags_t)(1 << 0),
        /* allocate from reserved pool */
        BLK_MQ_REQ_RESERVED     = (__force blk_mq_req_flags_t)(1 << 1),
        /* set RQF_PM */
        BLK_MQ_REQ_PM           = (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
                blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
                unsigned int op, blk_mq_req_flags_t flags,
                unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
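
/*
 * Example (editorial sketch, not part of the original header): allocating and
 * freeing a driver-private (passthrough) request without sleeping. The
 * function name is hypothetical; the queue q is obtained elsewhere.
 */
#if 0
static int example_issue_private_cmd(struct request_queue *q)
{
        struct request *rq;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(rq))
                return PTR_ERR(rq);     /* e.g. -EWOULDBLOCK: no free tag */

        /* ... fill in the PDU via blk_mq_rq_to_pdu(rq) and issue it ... */

        blk_mq_free_request(rq);
        return 0;
}
#endif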

enum {
        BLK_MQ_UNIQUE_TAG_BITS = 16,
        BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
        return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
        return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
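
/*
 * Example (editorial sketch, not part of the original header): a unique tag
 * packs the hardware queue index into the upper 16 bits and the per-queue tag
 * into the lower 16 bits, so a driver can recover both from a single value.
 */
#if 0
        u32 unique = blk_mq_unique_tag(rq);
        u16 hwq = blk_mq_unique_tag_to_hwq(unique);     /* hardware queue index */
        u16 tag = blk_mq_unique_tag_to_tag(unique);     /* tag within that queue */
#endif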

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
        return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
        return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
        return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}
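
/*
 * Example (editorial sketch, not part of the original header): a ->timeout()
 * handler can use the request state helpers to decide whether the request
 * already completed and only needs more time. example_abort_cmd() is a
 * hypothetical driver helper.
 */
#if 0
static enum blk_eh_timer_return example_timeout_rq(struct request *rq,
                                                   bool reserved)
{
        /* Completion raced with the timeout: let it finish normally. */
        if (blk_mq_request_completed(rq))
                return BLK_EH_DONE;

        example_abort_cmd(blk_mq_rq_to_pdu(rq));
        return BLK_EH_RESET_TIMER;      /* give the abort time to complete */
}
#endif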

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout);
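
/*
 * Example (editorial sketch, not part of the original header): typical use of
 * the completion and freeze/quiesce helpers. The interrupt half hands a
 * finished command back to the block layer; the reconfiguration half drains
 * the queue before changing parameters. example_* names are hypothetical.
 */
#if 0
/* Interrupt path: complete the request that owns this command. */
static void example_handle_irq_completion(struct example_cmd *cmd)
{
        struct request *rq = blk_mq_rq_from_pdu(cmd);

        blk_mq_complete_request(rq);    /* invokes ->complete(), which typically
                                         * ends with blk_mq_end_request()      */
}

/* Reconfiguration path: no requests are in flight between freeze/unfreeze. */
static void example_reconfigure(struct example_dev *dev)
{
        blk_mq_freeze_queue(dev->disk->queue);
        blk_mq_quiesce_queue(dev->disk->queue); /* also stop ->queue_rq() calls */

        example_apply_new_settings(dev);

        blk_mq_unquiesce_queue(dev->disk->queue);
        blk_mq_unfreeze_queue(dev->disk->queue);
}
#endif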

int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
        if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
            test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
                return __blk_should_fake_timeout(q);
        return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
        return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
        return rq + 1;
}
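
/*
 * Example (editorial sketch, not part of the original header): with
 * tag_set.cmd_size = sizeof(struct example_cmd), every request carries an
 * example_cmd PDU directly behind it, and the two helpers convert both ways.
 */
#if 0
        struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);         /* request -> PDU */
        struct request *same_rq = blk_mq_rq_from_pdu(cmd);      /* PDU -> request */
#endif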

#define queue_for_each_hw_ctx(q, hctx, i)                               \
        for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
             ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)                                 \
        for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
             ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
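
/*
 * Example (editorial sketch, not part of the original header): iterating the
 * hardware queues of a request queue q, e.g. to stop them all by hand.
 */
#if 0
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_stop_hw_queue(hctx);
#endif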

static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
                struct request *rq)
{
        if (rq->tag != -1)
                return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);

        return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
                        BLK_QC_T_INTERNAL;
}

static inline void blk_mq_cleanup_rq(struct request *rq)
{
        if (rq->q->mq_ops->cleanup_rq)
                rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
                unsigned int nr_segs)
{
        rq->nr_phys_segments = nr_segs;
        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;
        rq->ioprio = bio_prio(bio);

        if (bio->bi_bdev)
                rq->rq_disk = bio->bi_bdev->bd_disk;
}

blk_qc_t blk_mq_submit_bio(struct bio *bio);
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
                struct lock_class_key *key);

#endif