linux/include/linux/blk-mq.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef BLK_MQ_H
   3#define BLK_MQ_H
   4
   5#include <linux/blkdev.h>
   6#include <linux/sbitmap.h>
   7#include <linux/srcu.h>
   8#include <linux/lockdep.h>
   9
  10struct blk_mq_tags;
  11struct blk_flush_queue;
  12
  13/**
  14 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
  15 * block device
  16 */
  17struct blk_mq_hw_ctx {
  18        struct {
  19                /** @lock: Protects the dispatch list. */
  20                spinlock_t              lock;
  21                /**
  22                 * @dispatch: Used for requests that are ready to be
  23                 * dispatched to the hardware but for some reason (e.g. lack of
  24                 * resources) could not be sent to the hardware. As soon as the
   25                 * driver can send new requests, requests on this list will
  26                 * be sent first for a fairer dispatch.
  27                 */
  28                struct list_head        dispatch;
  29                 /**
  30                  * @state: BLK_MQ_S_* flags. Defines the state of the hw
  31                  * queue (active, scheduled to restart, stopped).
  32                  */
  33                unsigned long           state;
  34        } ____cacheline_aligned_in_smp;
  35
  36        /**
  37         * @run_work: Used for scheduling a hardware queue run at a later time.
  38         */
  39        struct delayed_work     run_work;
  40        /** @cpumask: Map of available CPUs where this hctx can run. */
  41        cpumask_var_t           cpumask;
  42        /**
  43         * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
  44         * selection from @cpumask.
  45         */
  46        int                     next_cpu;
  47        /**
   48         * @next_cpu_batch: Counter of how many queue runs are left in the
   49         * batch before switching to the next CPU.
  50         */
  51        int                     next_cpu_batch;
  52
  53        /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
  54        unsigned long           flags;
  55
  56        /**
  57         * @sched_data: Pointer owned by the IO scheduler attached to a request
  58         * queue. It's up to the IO scheduler how to use this pointer.
  59         */
  60        void                    *sched_data;
  61        /**
  62         * @queue: Pointer to the request queue that owns this hardware context.
  63         */
  64        struct request_queue    *queue;
  65        /** @fq: Queue of requests that need to perform a flush operation. */
  66        struct blk_flush_queue  *fq;
  67
  68        /**
  69         * @driver_data: Pointer to data owned by the block driver that created
  70         * this hctx
  71         */
  72        void                    *driver_data;
  73
  74        /**
  75         * @ctx_map: Bitmap for each software queue. If bit is on, there is a
  76         * pending request in that software queue.
  77         */
  78        struct sbitmap          ctx_map;
  79
  80        /**
  81         * @dispatch_from: Software queue to be used when no scheduler was
  82         * selected.
  83         */
  84        struct blk_mq_ctx       *dispatch_from;
  85        /**
  86         * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
   87         * decide if the hw queue is busy, using an Exponential Weighted Moving
  88         * Average algorithm.
  89         */
  90        unsigned int            dispatch_busy;
  91
  92        /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
  93        unsigned short          type;
  94        /** @nr_ctx: Number of software queues. */
  95        unsigned short          nr_ctx;
  96        /** @ctxs: Array of software queues. */
  97        struct blk_mq_ctx       **ctxs;
  98
  99        /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
 100        spinlock_t              dispatch_wait_lock;
 101        /**
  102         * @dispatch_wait: Waitqueue entry used when there is no tag available
  103         * at the moment, to wait for another try in the future.
 104         */
 105        wait_queue_entry_t      dispatch_wait;
 106
 107        /**
 108         * @wait_index: Index of next available dispatch_wait queue to insert
 109         * requests.
 110         */
 111        atomic_t                wait_index;
 112
 113        /**
  114         * @tags: Tags owned by the block driver. A tag in this set is only
 115         * assigned when a request is dispatched from a hardware queue.
 116         */
 117        struct blk_mq_tags      *tags;
 118        /**
 119         * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
 120         * scheduler associated with a request queue, a tag is assigned when
 121         * that request is allocated. Else, this member is not used.
 122         */
 123        struct blk_mq_tags      *sched_tags;
 124
 125        /** @queued: Number of queued requests. */
 126        unsigned long           queued;
  127        /** @run: Number of times this hardware queue was run. */
 128        unsigned long           run;
 129#define BLK_MQ_MAX_DISPATCH_ORDER       7
  130        /** @dispatched: Histogram of requests dispatched per queue run (log2 buckets). */
 131        unsigned long           dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 132
 133        /** @numa_node: NUMA node the storage adapter has been connected to. */
 134        unsigned int            numa_node;
 135        /** @queue_num: Index of this hardware queue. */
 136        unsigned int            queue_num;
 137
 138        /**
 139         * @nr_active: Number of active requests. Only used when a tag set is
 140         * shared across request queues.
 141         */
 142        atomic_t                nr_active;
 143
  144        /** @cpuhp_online: List node for CPU hotplug callbacks when a CPU goes offline. */
 145        struct hlist_node       cpuhp_online;
  146        /** @cpuhp_dead: List node for CPU hotplug callbacks after a CPU has died. */
 147        struct hlist_node       cpuhp_dead;
 148        /** @kobj: Kernel object for sysfs. */
 149        struct kobject          kobj;
 150
  151        /** @poll_considered: Number of times blk_poll() was called. */
 152        unsigned long           poll_considered;
  153        /** @poll_invoked: Number of requests blk_poll() has polled for. */
 154        unsigned long           poll_invoked;
  155        /** @poll_success: Number of polled requests that were completed. */
 156        unsigned long           poll_success;
 157
 158#ifdef CONFIG_BLK_DEBUG_FS
 159        /**
 160         * @debugfs_dir: debugfs directory for this hardware queue. Named
  161         * as hctx<hctx_number>.
 162         */
 163        struct dentry           *debugfs_dir;
 164        /** @sched_debugfs_dir: debugfs directory for the scheduler. */
 165        struct dentry           *sched_debugfs_dir;
 166#endif
 167
 168        /**
 169         * @hctx_list: if this hctx is not in use, this is an entry in
 170         * q->unused_hctx_list.
 171         */
 172        struct list_head        hctx_list;
 173
 174        /**
 175         * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
 176         * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
 177         * blk_mq_hw_ctx_size().
 178         */
 179        struct srcu_struct      srcu[];
 180};
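/*
 * Example (illustrative sketch): a hypothetical driver can use the
 * BLK_MQ_S_STOPPED machinery by parking a hardware queue when the device is
 * out of resources and restarting it from its completion path.
 * struct example_dev and its "full" condition are invented for illustration;
 * blk_mq_stop_hw_queue(), blk_mq_start_request() and
 * blk_mq_start_stopped_hw_queues() are declared later in this header.
 */
struct example_dev {
        struct request_queue    *queue;         /* hypothetical device state */
        bool                    full;           /* hypothetical "no room" flag */
};

static inline blk_status_t example_queue_rq_when_full(struct blk_mq_hw_ctx *hctx,
                                                      struct request *rq)
{
        struct example_dev *dev = hctx->queue->queuedata;

        if (dev->full) {
                /* Sets BLK_MQ_S_STOPPED; the hctx is not run until restarted. */
                blk_mq_stop_hw_queue(hctx);
                return BLK_STS_DEV_RESOURCE;
        }

        blk_mq_start_request(rq);
        /* ...hand rq to the hardware here... */
        return BLK_STS_OK;
}

static inline void example_completion_irq(struct example_dev *dev)
{
        /* Completions freed device resources; let stopped hw queues run again. */
        blk_mq_start_stopped_hw_queues(dev->queue, true);
}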
 181
 182/**
 183 * struct blk_mq_queue_map - Map software queues to hardware queues
 184 * @mq_map:       CPU ID to hardware queue index map. This is an array
 185 *      with nr_cpu_ids elements. Each element has a value in the range
 186 *      [@queue_offset, @queue_offset + @nr_queues).
 187 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 188 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 189 *      driver to map each hardware queue type (enum hctx_type) onto a distinct
 190 *      set of hardware queues.
 191 */
 192struct blk_mq_queue_map {
 193        unsigned int *mq_map;
 194        unsigned int nr_queues;
 195        unsigned int queue_offset;
 196};
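/*
 * Example (illustrative sketch): what the fields above describe, spelled out
 * as a trivial round-robin CPU to hardware queue assignment. A real driver
 * would normally let blk_mq_map_queues() (declared later in this header) do
 * this from its ->map_queues callback; example_fill_queue_map() is invented.
 */
static inline void example_fill_queue_map(struct blk_mq_queue_map *qmap)
{
        unsigned int cpu;

        /* Spread all possible CPUs round-robin over the hardware queues. */
        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = qmap->queue_offset + cpu % qmap->nr_queues;
}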
 197
 198/**
 199 * enum hctx_type - Type of hardware queue
 200 * @HCTX_TYPE_DEFAULT:  All I/O not otherwise accounted for.
 201 * @HCTX_TYPE_READ:     Just for READ I/O.
 202 * @HCTX_TYPE_POLL:     Polled I/O of any kind.
 203 * @HCTX_MAX_TYPES:     Number of types of hctx.
 204 */
 205enum hctx_type {
 206        HCTX_TYPE_DEFAULT,
 207        HCTX_TYPE_READ,
 208        HCTX_TYPE_POLL,
 209
 210        HCTX_MAX_TYPES,
 211};
 212
 213/**
 214 * struct blk_mq_tag_set - tag set that can be shared between request queues
 215 * @map:           One or more ctx -> hctx mappings. One map exists for each
 216 *                 hardware queue type (enum hctx_type) that the driver wishes
 217 *                 to support. There are no restrictions on maps being of the
 218 *                 same size, and it's perfectly legal to share maps between
 219 *                 types.
 220 * @nr_maps:       Number of elements in the @map array. A number in the range
 221 *                 [1, HCTX_MAX_TYPES].
 222 * @ops:           Pointers to functions that implement block driver behavior.
 223 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 224 *                 owns this data structure.
 225 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 226 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 227 *                 allocations.
 228 * @cmd_size:      Number of additional bytes to allocate per request. The block
 229 *                 driver owns these additional bytes.
 230 * @numa_node:     NUMA node the storage adapter has been connected to.
 231 * @timeout:       Request processing timeout in jiffies.
 232 * @flags:         Zero or more BLK_MQ_F_* flags.
 233 * @driver_data:   Pointer to data owned by the block driver that created this
 234 *                 tag set.
 235 * @active_queues_shared_sbitmap:
  236 *                 Number of active request queues per tag set.
  237 * @__bitmap_tags: A shared tags sbitmap, used over all hctxs.
  238 * @__breserved_tags:
  239 *                 A shared reserved tags sbitmap, used over all hctxs.
 240 * @tags:          Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 241 *                 elements.
 242 * @tag_list_lock: Serializes tag_list accesses.
 243 * @tag_list:      List of the request queues that use this tag set. See also
 244 *                 request_queue.tag_set_list.
 245 */
 246struct blk_mq_tag_set {
 247        struct blk_mq_queue_map map[HCTX_MAX_TYPES];
 248        unsigned int            nr_maps;
 249        const struct blk_mq_ops *ops;
 250        unsigned int            nr_hw_queues;
 251        unsigned int            queue_depth;
 252        unsigned int            reserved_tags;
 253        unsigned int            cmd_size;
 254        int                     numa_node;
 255        unsigned int            timeout;
 256        unsigned int            flags;
 257        void                    *driver_data;
 258        atomic_t                active_queues_shared_sbitmap;
 259
 260        struct sbitmap_queue    __bitmap_tags;
 261        struct sbitmap_queue    __breserved_tags;
 262        struct blk_mq_tags      **tags;
 263
 264        struct mutex            tag_list_lock;
 265        struct list_head        tag_list;
 266};
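/*
 * Example (illustrative sketch): minimal initialization of a tag set by a
 * hypothetical driver. The queue depth, command size and timeout are made-up
 * values; blk_mq_alloc_tag_set(), BLK_MQ_F_SHOULD_MERGE and struct blk_mq_ops
 * are declared later in this header.
 */
static inline int example_setup_tag_set(struct blk_mq_tag_set *set,
                                        const struct blk_mq_ops *ops)
{
        memset(set, 0, sizeof(*set));
        set->ops = ops;                 /* driver callbacks */
        set->nr_maps = 1;               /* only HCTX_TYPE_DEFAULT */
        set->nr_hw_queues = 1;
        set->queue_depth = 128;         /* tags per hardware queue */
        set->cmd_size = 64;             /* hypothetical per-request PDU size */
        set->numa_node = NUMA_NO_NODE;
        set->timeout = 30 * HZ;         /* in jiffies */
        set->flags = BLK_MQ_F_SHOULD_MERGE;

        return blk_mq_alloc_tag_set(set);
}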
 267
 268/**
 269 * struct blk_mq_queue_data - Data about a request inserted in a queue
 270 *
 271 * @rq:   Request pointer.
  272 * @last: True if this is the last request in the queue.
 273 */
 274struct blk_mq_queue_data {
 275        struct request *rq;
 276        bool last;
 277};
 278
 279typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
 280                bool);
 281typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
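/*
 * Example (illustrative sketch): a busy_tag_iter_fn that just counts
 * in-flight requests. A hypothetical driver would pass it to
 * blk_mq_tagset_busy_iter() (declared later in this header), e.g.:
 *
 *      unsigned int count = 0;
 *
 *      blk_mq_tagset_busy_iter(&dev->tag_set, example_count_inflight, &count);
 */
static inline bool example_count_inflight(struct request *rq, void *priv,
                                          bool reserved)
{
        unsigned int *count = priv;

        (*count)++;
        return true;    /* keep iterating */
}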
 282
 283/**
  284 * struct blk_mq_ops - Callback functions that implement block driver
 285 * behaviour.
 286 */
 287struct blk_mq_ops {
 288        /**
 289         * @queue_rq: Queue a new request from block IO.
 290         */
 291        blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
 292                                 const struct blk_mq_queue_data *);
 293
 294        /**
 295         * @commit_rqs: If a driver uses bd->last to judge when to submit
 296         * requests to hardware, it must define this function. In case of errors
 297         * that make us stop issuing further requests, this hook serves the
 298         * purpose of kicking the hardware (which the last request otherwise
 299         * would have done).
 300         */
 301        void (*commit_rqs)(struct blk_mq_hw_ctx *);
 302
 303        /**
  304         * @get_budget: Reserve a budget before queueing a request. Once .queue_rq
  305         * has run, it is the driver's responsibility to release the
  306         * reserved budget. The failure case of .get_budget must also be
  307         * handled to avoid I/O deadlock.
 308         */
 309        int (*get_budget)(struct request_queue *);
 310
 311        /**
 312         * @put_budget: Release the reserved budget.
 313         */
 314        void (*put_budget)(struct request_queue *, int);
 315
 316        /**
 317         * @set_rq_budget_token: store rq's budget token
 318         */
 319        void (*set_rq_budget_token)(struct request *, int);
 320        /**
 321         * @get_rq_budget_token: retrieve rq's budget token
 322         */
 323        int (*get_rq_budget_token)(struct request *);
 324
 325        /**
 326         * @timeout: Called on request timeout.
 327         */
 328        enum blk_eh_timer_return (*timeout)(struct request *, bool);
 329
 330        /**
 331         * @poll: Called to poll for completion of a specific tag.
 332         */
 333        int (*poll)(struct blk_mq_hw_ctx *);
 334
 335        /**
 336         * @complete: Mark the request as complete.
 337         */
 338        void (*complete)(struct request *);
 339
 340        /**
 341         * @init_hctx: Called when the block layer side of a hardware queue has
 342         * been set up, allowing the driver to allocate/init matching
 343         * structures.
 344         */
 345        int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
 346        /**
 347         * @exit_hctx: Ditto for exit/teardown.
 348         */
 349        void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
 350
 351        /**
 352         * @init_request: Called for every command allocated by the block layer
 353         * to allow the driver to set up driver specific data.
 354         *
  355         * A tag greater than or equal to queue_depth is used for setting up
  356         * a flush request.
 357         */
 358        int (*init_request)(struct blk_mq_tag_set *set, struct request *,
 359                            unsigned int, unsigned int);
 360        /**
 361         * @exit_request: Ditto for exit/teardown.
 362         */
 363        void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
 364                             unsigned int);
 365
 366        /**
 367         * @initialize_rq_fn: Called from inside blk_get_request().
 368         */
 369        void (*initialize_rq_fn)(struct request *rq);
 370
 371        /**
  372         * @cleanup_rq: Called before freeing a request that has not completed
  373         * yet, usually to free the driver private data.
 374         */
 375        void (*cleanup_rq)(struct request *);
 376
 377        /**
 378         * @busy: If set, returns whether or not this queue currently is busy.
 379         */
 380        bool (*busy)(struct request_queue *);
 381
 382        /**
  383         * @map_queues: This allows drivers to specify their own queue mapping by
 384         * overriding the setup-time function that builds the mq_map.
 385         */
 386        int (*map_queues)(struct blk_mq_tag_set *set);
 387
 388#ifdef CONFIG_BLK_DEBUG_FS
 389        /**
 390         * @show_rq: Used by the debugfs implementation to show driver-specific
 391         * information about a request.
 392         */
 393        void (*show_rq)(struct seq_file *m, struct request *rq);
 394#endif
 395};
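/*
 * Example (illustrative sketch): the smallest useful wiring of these
 * callbacks for a hypothetical driver that completes requests immediately.
 * blk_mq_start_request() and blk_mq_end_request() are declared later in this
 * header; the data transfer itself is left out.
 */
static inline blk_status_t example_ops_queue_rq(struct blk_mq_hw_ctx *hctx,
                                                const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);
        /* ...move the data for rq here... */
        blk_mq_end_request(rq, BLK_STS_OK);
        return BLK_STS_OK;
}

/* Hypothetical ops table; only the mandatory ->queue_rq is filled in. */
static const struct blk_mq_ops example_mq_ops __maybe_unused = {
        .queue_rq       = example_ops_queue_rq,
};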
 396
 397enum {
 398        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
 399        BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
 400        /*
  401         * Set when this device requires an underlying blk-mq device for
  402         * completing IO.
 403         */
 404        BLK_MQ_F_STACKING       = 1 << 2,
 405        BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
 406        BLK_MQ_F_BLOCKING       = 1 << 5,
 407        BLK_MQ_F_NO_SCHED       = 1 << 6,
 408        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
 409        BLK_MQ_F_ALLOC_POLICY_BITS = 1,
 410
 411        BLK_MQ_S_STOPPED        = 0,
 412        BLK_MQ_S_TAG_ACTIVE     = 1,
 413        BLK_MQ_S_SCHED_RESTART  = 2,
 414
 415        /* hw queue is inactive after all its CPUs become offline */
 416        BLK_MQ_S_INACTIVE       = 3,
 417
 418        BLK_MQ_MAX_DEPTH        = 10240,
 419
 420        BLK_MQ_CPU_WORK_BATCH   = 8,
 421};
 422#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
 423        ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
 424                ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
 425#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
 426        ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
 427                << BLK_MQ_F_ALLOC_POLICY_START_BIT)
 428
 429#define blk_mq_alloc_disk(set, queuedata)                               \
 430({                                                                      \
 431        static struct lock_class_key __key;                             \
 432        struct gendisk *__disk = __blk_mq_alloc_disk(set, queuedata);   \
 433                                                                        \
 434        if (!IS_ERR(__disk))                                            \
 435                lockdep_init_map(&__disk->lockdep_map,                  \
 436                        "(bio completion)", &__key, 0);                 \
 437        __disk;                                                         \
 438})
 439struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
 440                void *queuedata);
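/*
 * Example (illustrative sketch): typical use of blk_mq_alloc_disk() from a
 * hypothetical probe path, once the tag set has been allocated. The disk
 * name, capacity and example_add_disk() itself are invented; set_capacity()
 * and add_disk() come from the genhd interface.
 */
static inline struct gendisk *example_add_disk(struct blk_mq_tag_set *set,
                                               void *drvdata,
                                               const struct block_device_operations *fops)
{
        struct gendisk *disk;

        disk = blk_mq_alloc_disk(set, drvdata); /* also allocates disk->queue */
        if (IS_ERR(disk))
                return disk;

        disk->fops = fops;
        snprintf(disk->disk_name, sizeof(disk->disk_name), "example0");
        set_capacity(disk, 2048);       /* hypothetical size, in 512-byte sectors */
        add_disk(disk);

        return disk;
}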
 441struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 442int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 443                struct request_queue *q);
 444void blk_mq_unregister_dev(struct device *, struct request_queue *);
 445
 446int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
 447int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
 448                const struct blk_mq_ops *ops, unsigned int queue_depth,
 449                unsigned int set_flags);
 450void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
 451
 452void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 453
 454void blk_mq_free_request(struct request *rq);
 455
 456bool blk_mq_queue_inflight(struct request_queue *q);
 457
 458enum {
 459        /* return when out of requests */
 460        BLK_MQ_REQ_NOWAIT       = (__force blk_mq_req_flags_t)(1 << 0),
 461        /* allocate from reserved pool */
 462        BLK_MQ_REQ_RESERVED     = (__force blk_mq_req_flags_t)(1 << 1),
 463        /* set RQF_PM */
 464        BLK_MQ_REQ_PM           = (__force blk_mq_req_flags_t)(1 << 2),
 465};
 466
 467struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 468                blk_mq_req_flags_t flags);
 469struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 470                unsigned int op, blk_mq_req_flags_t flags,
 471                unsigned int hctx_idx);
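/*
 * Example (illustrative sketch): allocating and freeing a driver-private
 * request without sleeping. example_alloc_one_request() is invented; what
 * the request would be used for in between is left out.
 */
static inline int example_alloc_one_request(struct request_queue *q)
{
        struct request *rq;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(rq))
                return PTR_ERR(rq);     /* e.g. when out of tags */

        /* ...fill in and execute the command here... */

        blk_mq_free_request(rq);
        return 0;
}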
 472struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 473
 474enum {
 475        BLK_MQ_UNIQUE_TAG_BITS = 16,
 476        BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
 477};
 478
 479u32 blk_mq_unique_tag(struct request *rq);
 480
 481static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
 482{
 483        return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
 484}
 485
 486static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 487{
 488        return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
 489}
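/*
 * Example (illustrative sketch): how a hypothetical LLD round-trips the
 * unique tag encoding, recovering the hardware queue index and the per-queue
 * tag from one 32-bit value.
 */
static inline void example_decode_unique_tag(struct request *rq,
                                             u16 *hwq, u16 *tag)
{
        u32 unique_tag = blk_mq_unique_tag(rq);

        *hwq = blk_mq_unique_tag_to_hwq(unique_tag);    /* upper 16 bits */
        *tag = blk_mq_unique_tag_to_tag(unique_tag);    /* lower 16 bits */
}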
 490
 491/**
 492 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 493 * @rq: target request.
 494 */
 495static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
 496{
 497        return READ_ONCE(rq->state);
 498}
 499
 500static inline int blk_mq_request_started(struct request *rq)
 501{
 502        return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
 503}
 504
 505static inline int blk_mq_request_completed(struct request *rq)
 506{
 507        return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
 508}
 509
 510/*
 512 * Set the state to complete when completing a request from inside ->queue_rq.
 513 * This is used by drivers that want to ensure special complete actions that
 514 * need access to the request are called on failure, e.g. by nvme for
 515 * multipathing.
 516 */
 517static inline void blk_mq_set_request_complete(struct request *rq)
 518{
 519        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
 520}
 521
 522void blk_mq_start_request(struct request *rq);
 523void blk_mq_end_request(struct request *rq, blk_status_t error);
 524void __blk_mq_end_request(struct request *rq, blk_status_t error);
 525
 526void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 527void blk_mq_kick_requeue_list(struct request_queue *q);
 528void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 529void blk_mq_complete_request(struct request *rq);
 530bool blk_mq_complete_request_remote(struct request *rq);
 531bool blk_mq_queue_stopped(struct request_queue *q);
 532void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 533void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 534void blk_mq_stop_hw_queues(struct request_queue *q);
 535void blk_mq_start_hw_queues(struct request_queue *q);
 536void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 537void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 538void blk_mq_quiesce_queue(struct request_queue *q);
 539void blk_mq_unquiesce_queue(struct request_queue *q);
 540void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 541void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 542void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 543void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
 544void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 545                busy_tag_iter_fn *fn, void *priv);
 546void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
 547void blk_mq_freeze_queue(struct request_queue *q);
 548void blk_mq_unfreeze_queue(struct request_queue *q);
 549void blk_freeze_queue_start(struct request_queue *q);
 550void blk_mq_freeze_queue_wait(struct request_queue *q);
 551int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 552                                     unsigned long timeout);
 553
 554int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
 555void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 556
 557void blk_mq_quiesce_queue_nowait(struct request_queue *q);
 558
 559unsigned int blk_mq_rq_cpu(struct request *rq);
 560
 561bool __blk_should_fake_timeout(struct request_queue *q);
 562static inline bool blk_should_fake_timeout(struct request_queue *q)
 563{
 564        if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
 565            test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
 566                return __blk_should_fake_timeout(q);
 567        return false;
 568}
 569
 570/**
 571 * blk_mq_rq_from_pdu - cast a PDU to a request
  572 * @pdu: the PDU (Protocol Data Unit) to be cast
 573 *
 574 * Return: request
 575 *
 576 * Driver command data is immediately after the request. So subtract request
 577 * size to get back to the original request.
 578 */
 579static inline struct request *blk_mq_rq_from_pdu(void *pdu)
 580{
 581        return pdu - sizeof(struct request);
 582}
 583
 584/**
 585 * blk_mq_rq_to_pdu - cast a request to a PDU
  586 * @rq: the request to be cast
 587 *
 588 * Return: pointer to the PDU
 589 *
 590 * Driver command data is immediately after the request. So add request to get
 591 * the PDU.
 592 */
 593static inline void *blk_mq_rq_to_pdu(struct request *rq)
 594{
 595        return rq + 1;
 596}
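/*
 * Example (illustrative sketch): round-tripping between a request and its
 * driver PDU. struct example_pdu is invented and only valid if the tag set
 * was created with cmd_size == sizeof(struct example_pdu).
 */
struct example_pdu {
        int     status;         /* hypothetical per-command state */
};

static inline void example_use_pdu(struct request *rq)
{
        struct example_pdu *pdu = blk_mq_rq_to_pdu(rq);

        pdu->status = 0;
        /* Later, e.g. in a completion handler that only has the PDU: */
        blk_mq_complete_request(blk_mq_rq_from_pdu(pdu));
}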
 597
 598#define queue_for_each_hw_ctx(q, hctx, i)                               \
 599        for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
 600             ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
 601
 602#define hctx_for_each_ctx(hctx, ctx, i)                                 \
 603        for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
 604             ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
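/*
 * Example (illustrative sketch): the iteration macros above in use, counting
 * how many of a request queue's hardware queues are polled queues.
 * example_count_poll_hctxs() is invented.
 */
static inline unsigned int example_count_poll_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int poll = 0;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (hctx->type == HCTX_TYPE_POLL)
                        poll++;

        return poll;
}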
 605
 606static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
 607                struct request *rq)
 608{
 609        if (rq->tag != -1)
 610                return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
 611
 612        return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
 613                        BLK_QC_T_INTERNAL;
 614}
 615
 616static inline void blk_mq_cleanup_rq(struct request *rq)
 617{
 618        if (rq->q->mq_ops->cleanup_rq)
 619                rq->q->mq_ops->cleanup_rq(rq);
 620}
 621
 622static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
 623                unsigned int nr_segs)
 624{
 625        rq->nr_phys_segments = nr_segs;
 626        rq->__data_len = bio->bi_iter.bi_size;
 627        rq->bio = rq->biotail = bio;
 628        rq->ioprio = bio_prio(bio);
 629
 630        if (bio->bi_bdev)
 631                rq->rq_disk = bio->bi_bdev->bd_disk;
 632}
 633
 634blk_qc_t blk_mq_submit_bio(struct bio *bio);
 635void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
 636                struct lock_class_key *key);
 637
 638#endif
 639