linux/include/linux/blkdev.h
<<
>>
Prefs
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_BLKDEV_H
   3#define _LINUX_BLKDEV_H
   4
   5#include <linux/sched.h>
   6#include <linux/sched/clock.h>
   7#include <linux/major.h>
   8#include <linux/genhd.h>
   9#include <linux/list.h>
  10#include <linux/llist.h>
  11#include <linux/minmax.h>
  12#include <linux/timer.h>
  13#include <linux/workqueue.h>
  14#include <linux/backing-dev-defs.h>
  15#include <linux/wait.h>
  16#include <linux/mempool.h>
  17#include <linux/pfn.h>
  18#include <linux/bio.h>
  19#include <linux/stringify.h>
  20#include <linux/gfp.h>
  21#include <linux/bsg.h>
  22#include <linux/smp.h>
  23#include <linux/rcupdate.h>
  24#include <linux/percpu-refcount.h>
  25#include <linux/scatterlist.h>
  26#include <linux/blkzoned.h>
  27#include <linux/pm.h>
  28#include <linux/sbitmap.h>
  29
  30struct module;
  31struct scsi_ioctl_command;
  32
  33struct request_queue;
  34struct elevator_queue;
  35struct blk_trace;
  36struct request;
  37struct sg_io_hdr;
  38struct bsg_job;
  39struct blkcg_gq;
  40struct blk_flush_queue;
  41struct pr_ops;
  42struct rq_qos;
  43struct blk_queue_stats;
  44struct blk_stat_callback;
  45struct blk_keyslot_manager;
  46
  47#define BLKDEV_MIN_RQ   4
  48#define BLKDEV_MAX_RQ   128     /* Default maximum */
  49
  50/* Must be consistent with blk_mq_poll_stats_bkt() */
  51#define BLK_MQ_POLL_STATS_BKTS 16
  52
  53/* Doing classic polling */
  54#define BLK_MQ_POLL_CLASSIC -1
  55
  56/*
  57 * Maximum number of blkcg policies allowed to be registered concurrently.
  58 * Defined here to simplify include dependency.
  59 */
  60#define BLKCG_MAX_POLS          6
  61
  62typedef void (rq_end_io_fn)(struct request *, blk_status_t);
  63
  64/*
  65 * request flags */
  66typedef __u32 __bitwise req_flags_t;
  67
  68/* drive already may have started this one */
  69#define RQF_STARTED             ((__force req_flags_t)(1 << 1))
  70/* may not be passed by ioscheduler */
  71#define RQF_SOFTBARRIER         ((__force req_flags_t)(1 << 3))
  72/* request for flush sequence */
  73#define RQF_FLUSH_SEQ           ((__force req_flags_t)(1 << 4))
  74/* merge of different types, fail separately */
  75#define RQF_MIXED_MERGE         ((__force req_flags_t)(1 << 5))
  76/* track inflight for MQ */
  77#define RQF_MQ_INFLIGHT         ((__force req_flags_t)(1 << 6))
  78/* don't call prep for this one */
  79#define RQF_DONTPREP            ((__force req_flags_t)(1 << 7))
  80/* vaguely specified driver internal error.  Ignored by the block layer */
  81#define RQF_FAILED              ((__force req_flags_t)(1 << 10))
  82/* don't warn about errors */
  83#define RQF_QUIET               ((__force req_flags_t)(1 << 11))
  84/* elevator private data attached */
  85#define RQF_ELVPRIV             ((__force req_flags_t)(1 << 12))
  86/* account into disk and partition IO statistics */
  87#define RQF_IO_STAT             ((__force req_flags_t)(1 << 13))
  88/* runtime pm request */
  89#define RQF_PM                  ((__force req_flags_t)(1 << 15))
  90/* on IO scheduler merge hash */
  91#define RQF_HASHED              ((__force req_flags_t)(1 << 16))
  92/* track IO completion time */
  93#define RQF_STATS               ((__force req_flags_t)(1 << 17))
  94/* Look at ->special_vec for the actual data payload instead of the
  95   bio chain. */
  96#define RQF_SPECIAL_PAYLOAD     ((__force req_flags_t)(1 << 18))
  97/* The per-zone write lock is held for this request */
  98#define RQF_ZONE_WRITE_LOCKED   ((__force req_flags_t)(1 << 19))
  99/* already slept for hybrid poll */
 100#define RQF_MQ_POLL_SLEPT       ((__force req_flags_t)(1 << 20))
 101/* ->timeout has been called, don't expire again */
 102#define RQF_TIMED_OUT           ((__force req_flags_t)(1 << 21))
 103
 104/* flags that prevent us from merging requests: */
 105#define RQF_NOMERGE_FLAGS \
 106        (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
 107
 108/*
 109 * Request state for blk-mq.
 110 */
 111enum mq_rq_state {
 112        MQ_RQ_IDLE              = 0,
 113        MQ_RQ_IN_FLIGHT         = 1,
 114        MQ_RQ_COMPLETE          = 2,
 115};
 116
 117/*
 118 * Try to put the fields that are referenced together in the same cacheline.
 119 *
 120 * If you modify this structure, make sure to update blk_rq_init() and
 121 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 122 */
 123struct request {
 124        struct request_queue *q;
 125        struct blk_mq_ctx *mq_ctx;
 126        struct blk_mq_hw_ctx *mq_hctx;
 127
 128        unsigned int cmd_flags;         /* op and common flags */
 129        req_flags_t rq_flags;
 130
 131        int tag;
 132        int internal_tag;
 133
 134        /* the following two fields are internal, NEVER access directly */
 135        unsigned int __data_len;        /* total data len */
 136        sector_t __sector;              /* sector cursor */
 137
 138        struct bio *bio;
 139        struct bio *biotail;
 140
 141        struct list_head queuelist;
 142
 143        /*
 144         * The hash is used inside the scheduler, and killed once the
 145         * request reaches the dispatch list. The ipi_list is only used
 146         * to queue the request for softirq completion, which is long
 147         * after the request has been unhashed (and even removed from
 148         * the dispatch list).
 149         */
 150        union {
 151                struct hlist_node hash; /* merge hash */
 152                struct llist_node ipi_list;
 153        };
 154
 155        /*
 156         * The rb_node is only used inside the io scheduler, requests
 157         * are pruned when moved to the dispatch queue. So let the
 158         * completion_data share space with the rb_node.
 159         */
 160        union {
 161                struct rb_node rb_node; /* sort/lookup */
 162                struct bio_vec special_vec;
 163                void *completion_data;
 164                int error_count; /* for legacy drivers, don't use */
 165        };
 166
 167        /*
 168         * Three pointers are available for the IO schedulers, if they need
 169         * more they have to dynamically allocate it.  Flush requests are
 170         * never put on the IO scheduler. So let the flush fields share
 171         * space with the elevator data.
 172         */
 173        union {
 174                struct {
 175                        struct io_cq            *icq;
 176                        void                    *priv[2];
 177                } elv;
 178
 179                struct {
 180                        unsigned int            seq;
 181                        struct list_head        list;
 182                        rq_end_io_fn            *saved_end_io;
 183                } flush;
 184        };
 185
 186        struct gendisk *rq_disk;
 187        struct block_device *part;
 188#ifdef CONFIG_BLK_RQ_ALLOC_TIME
 189        /* Time that the first bio started allocating this request. */
 190        u64 alloc_time_ns;
 191#endif
 192        /* Time that this request was allocated for this IO. */
 193        u64 start_time_ns;
 194        /* Time that I/O was submitted to the device. */
 195        u64 io_start_time_ns;
 196
 197#ifdef CONFIG_BLK_WBT
 198        unsigned short wbt_flags;
 199#endif
 200        /*
 201         * rq sectors used for blk stats. It has the same value
 202         * with blk_rq_sectors(rq), except that it never be zeroed
 203         * by completion.
 204         */
 205        unsigned short stats_sectors;
 206
 207        /*
 208         * Number of scatter-gather DMA addr+len pairs after
 209         * physical address coalescing is performed.
 210         */
 211        unsigned short nr_phys_segments;
 212
 213#if defined(CONFIG_BLK_DEV_INTEGRITY)
 214        unsigned short nr_integrity_segments;
 215#endif
 216
 217#ifdef CONFIG_BLK_INLINE_ENCRYPTION
 218        struct bio_crypt_ctx *crypt_ctx;
 219        struct blk_ksm_keyslot *crypt_keyslot;
 220#endif
 221
 222        unsigned short write_hint;
 223        unsigned short ioprio;
 224
 225        enum mq_rq_state state;
 226        refcount_t ref;
 227
 228        unsigned int timeout;
 229        unsigned long deadline;
 230
 231        union {
 232                struct __call_single_data csd;
 233                u64 fifo_time;
 234        };
 235
 236        /*
 237         * completion callback.
 238         */
 239        rq_end_io_fn *end_io;
 240        void *end_io_data;
 241};
 242
 243static inline bool blk_op_is_passthrough(unsigned int op)
 244{
 245        op &= REQ_OP_MASK;
 246        return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
 247}
 248
 249static inline bool blk_rq_is_passthrough(struct request *rq)
 250{
 251        return blk_op_is_passthrough(req_op(rq));
 252}
 253
 254static inline unsigned short req_get_ioprio(struct request *req)
 255{
 256        return req->ioprio;
 257}
 258
 259#include <linux/elevator.h>
 260
 261struct blk_queue_ctx;
 262
 263struct bio_vec;
 264
 265enum blk_eh_timer_return {
 266        BLK_EH_DONE,            /* drivers has completed the command */
 267        BLK_EH_RESET_TIMER,     /* reset timer and try again */
 268};
 269
 270enum blk_queue_state {
 271        Queue_down,
 272        Queue_up,
 273};
 274
 275#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
 276#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
 277
 278#define BLK_SCSI_MAX_CMDS       (256)
 279#define BLK_SCSI_CMD_PER_LONG   (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
 280
 281/*
 282 * Zoned block device models (zoned limit).
 283 *
 284 * Note: This needs to be ordered from the least to the most severe
 285 * restrictions for the inheritance in blk_stack_limits() to work.
 286 */
 287enum blk_zoned_model {
 288        BLK_ZONED_NONE = 0,     /* Regular block device */
 289        BLK_ZONED_HA,           /* Host-aware zoned block device */
 290        BLK_ZONED_HM,           /* Host-managed zoned block device */
 291};
 292
 293/*
 294 * BLK_BOUNCE_NONE:     never bounce (default)
 295 * BLK_BOUNCE_HIGH:     bounce all highmem pages
 296 */
 297enum blk_bounce {
 298        BLK_BOUNCE_NONE,
 299        BLK_BOUNCE_HIGH,
 300};
 301
 302struct queue_limits {
 303        enum blk_bounce         bounce;
 304        unsigned long           seg_boundary_mask;
 305        unsigned long           virt_boundary_mask;
 306
 307        unsigned int            max_hw_sectors;
 308        unsigned int            max_dev_sectors;
 309        unsigned int            chunk_sectors;
 310        unsigned int            max_sectors;
 311        unsigned int            max_segment_size;
 312        unsigned int            physical_block_size;
 313        unsigned int            logical_block_size;
 314        unsigned int            alignment_offset;
 315        unsigned int            io_min;
 316        unsigned int            io_opt;
 317        unsigned int            max_discard_sectors;
 318        unsigned int            max_hw_discard_sectors;
 319        unsigned int            max_write_same_sectors;
 320        unsigned int            max_write_zeroes_sectors;
 321        unsigned int            max_zone_append_sectors;
 322        unsigned int            discard_granularity;
 323        unsigned int            discard_alignment;
 324        unsigned int            zone_write_granularity;
 325
 326        unsigned short          max_segments;
 327        unsigned short          max_integrity_segments;
 328        unsigned short          max_discard_segments;
 329
 330        unsigned char           misaligned;
 331        unsigned char           discard_misaligned;
 332        unsigned char           raid_partial_stripes_expensive;
 333        enum blk_zoned_model    zoned;
 334};
 335
 336typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
 337                               void *data);
 338
 339void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
 340
 341#ifdef CONFIG_BLK_DEV_ZONED
 342
 343#define BLK_ALL_ZONES  ((unsigned int)-1)
 344int blkdev_report_zones(struct block_device *bdev, sector_t sector,
 345                        unsigned int nr_zones, report_zones_cb cb, void *data);
 346unsigned int blkdev_nr_zones(struct gendisk *disk);
 347extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
 348                            sector_t sectors, sector_t nr_sectors,
 349                            gfp_t gfp_mask);
 350int blk_revalidate_disk_zones(struct gendisk *disk,
 351                              void (*update_driver_data)(struct gendisk *disk));
 352
 353extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
 354                                     unsigned int cmd, unsigned long arg);
 355extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 356                                  unsigned int cmd, unsigned long arg);
 357
 358#else /* CONFIG_BLK_DEV_ZONED */
 359
 360static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
 361{
 362        return 0;
 363}
 364
 365static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
 366                                            fmode_t mode, unsigned int cmd,
 367                                            unsigned long arg)
 368{
 369        return -ENOTTY;
 370}
 371
 372static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
 373                                         fmode_t mode, unsigned int cmd,
 374                                         unsigned long arg)
 375{
 376        return -ENOTTY;
 377}
 378
 379#endif /* CONFIG_BLK_DEV_ZONED */
 380
 381struct request_queue {
 382        struct request          *last_merge;
 383        struct elevator_queue   *elevator;
 384
 385        struct percpu_ref       q_usage_counter;
 386
 387        struct blk_queue_stats  *stats;
 388        struct rq_qos           *rq_qos;
 389
 390        const struct blk_mq_ops *mq_ops;
 391
 392        /* sw queues */
 393        struct blk_mq_ctx __percpu      *queue_ctx;
 394
 395        unsigned int            queue_depth;
 396
 397        /* hw dispatch queues */
 398        struct blk_mq_hw_ctx    **queue_hw_ctx;
 399        unsigned int            nr_hw_queues;
 400
 401        struct backing_dev_info *backing_dev_info;
 402
 403        /*
 404         * The queue owner gets to use this for whatever they like.
 405         * ll_rw_blk doesn't touch it.
 406         */
 407        void                    *queuedata;
 408
 409        /*
 410         * various queue flags, see QUEUE_* below
 411         */
 412        unsigned long           queue_flags;
 413        /*
 414         * Number of contexts that have called blk_set_pm_only(). If this
 415         * counter is above zero then only RQF_PM requests are processed.
 416         */
 417        atomic_t                pm_only;
 418
 419        /*
 420         * ida allocated id for this queue.  Used to index queues from
 421         * ioctx.
 422         */
 423        int                     id;
 424
 425        spinlock_t              queue_lock;
 426
 427        /*
 428         * queue kobject
 429         */
 430        struct kobject kobj;
 431
 432        /*
 433         * mq queue kobject
 434         */
 435        struct kobject *mq_kobj;
 436
 437#ifdef  CONFIG_BLK_DEV_INTEGRITY
 438        struct blk_integrity integrity;
 439#endif  /* CONFIG_BLK_DEV_INTEGRITY */
 440
 441#ifdef CONFIG_PM
 442        struct device           *dev;
 443        enum rpm_status         rpm_status;
 444#endif
 445
 446        /*
 447         * queue settings
 448         */
 449        unsigned long           nr_requests;    /* Max # of requests */
 450
 451        unsigned int            dma_pad_mask;
 452        unsigned int            dma_alignment;
 453
 454#ifdef CONFIG_BLK_INLINE_ENCRYPTION
 455        /* Inline crypto capabilities */
 456        struct blk_keyslot_manager *ksm;
 457#endif
 458
 459        unsigned int            rq_timeout;
 460        int                     poll_nsec;
 461
 462        struct blk_stat_callback        *poll_cb;
 463        struct blk_rq_stat      poll_stat[BLK_MQ_POLL_STATS_BKTS];
 464
 465        struct timer_list       timeout;
 466        struct work_struct      timeout_work;
 467
 468        atomic_t                nr_active_requests_shared_sbitmap;
 469
 470        struct sbitmap_queue    sched_bitmap_tags;
 471        struct sbitmap_queue    sched_breserved_tags;
 472
 473        struct list_head        icq_list;
 474#ifdef CONFIG_BLK_CGROUP
 475        DECLARE_BITMAP          (blkcg_pols, BLKCG_MAX_POLS);
 476        struct blkcg_gq         *root_blkg;
 477        struct list_head        blkg_list;
 478#endif
 479
 480        struct queue_limits     limits;
 481
 482        unsigned int            required_elevator_features;
 483
 484#ifdef CONFIG_BLK_DEV_ZONED
 485        /*
 486         * Zoned block device information for request dispatch control.
 487         * nr_zones is the total number of zones of the device. This is always
 488         * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
 489         * bits which indicates if a zone is conventional (bit set) or
 490         * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
 491         * bits which indicates if a zone is write locked, that is, if a write
 492         * request targeting the zone was dispatched. All three fields are
 493         * initialized by the low level device driver (e.g. scsi/sd.c).
 494         * Stacking drivers (device mappers) may or may not initialize
 495         * these fields.
 496         *
 497         * Reads of this information must be protected with blk_queue_enter() /
 498         * blk_queue_exit(). Modifying this information is only allowed while
 499         * no requests are being processed. See also blk_mq_freeze_queue() and
 500         * blk_mq_unfreeze_queue().
 501         */
 502        unsigned int            nr_zones;
 503        unsigned long           *conv_zones_bitmap;
 504        unsigned long           *seq_zones_wlock;
 505        unsigned int            max_open_zones;
 506        unsigned int            max_active_zones;
 507#endif /* CONFIG_BLK_DEV_ZONED */
 508
 509        /*
 510         * sg stuff
 511         */
 512        unsigned int            sg_timeout;
 513        unsigned int            sg_reserved_size;
 514        int                     node;
 515        struct mutex            debugfs_mutex;
 516#ifdef CONFIG_BLK_DEV_IO_TRACE
 517        struct blk_trace __rcu  *blk_trace;
 518#endif
 519        /*
 520         * for flush operations
 521         */
 522        struct blk_flush_queue  *fq;
 523
 524        struct list_head        requeue_list;
 525        spinlock_t              requeue_lock;
 526        struct delayed_work     requeue_work;
 527
 528        struct mutex            sysfs_lock;
 529        struct mutex            sysfs_dir_lock;
 530
 531        /*
 532         * for reusing dead hctx instance in case of updating
 533         * nr_hw_queues
 534         */
 535        struct list_head        unused_hctx_list;
 536        spinlock_t              unused_hctx_lock;
 537
 538        int                     mq_freeze_depth;
 539
 540#if defined(CONFIG_BLK_DEV_BSG)
 541        struct bsg_class_device bsg_dev;
 542#endif
 543
 544#ifdef CONFIG_BLK_DEV_THROTTLING
 545        /* Throttle data */
 546        struct throtl_data *td;
 547#endif
 548        struct rcu_head         rcu_head;
 549        wait_queue_head_t       mq_freeze_wq;
 550        /*
 551         * Protect concurrent access to q_usage_counter by
 552         * percpu_ref_kill() and percpu_ref_reinit().
 553         */
 554        struct mutex            mq_freeze_lock;
 555
 556        struct blk_mq_tag_set   *tag_set;
 557        struct list_head        tag_set_list;
 558        struct bio_set          bio_split;
 559
 560        struct dentry           *debugfs_dir;
 561
 562#ifdef CONFIG_BLK_DEBUG_FS
 563        struct dentry           *sched_debugfs_dir;
 564        struct dentry           *rqos_debugfs_dir;
 565#endif
 566
 567        bool                    mq_sysfs_init_done;
 568
 569        size_t                  cmd_size;
 570
 571#define BLK_MAX_WRITE_HINTS     5
 572        u64                     write_hints[BLK_MAX_WRITE_HINTS];
 573};
 574
 575/* Keep blk_queue_flag_name[] in sync with the definitions below */
 576#define QUEUE_FLAG_STOPPED      0       /* queue is stopped */
 577#define QUEUE_FLAG_DYING        1       /* queue being torn down */
 578#define QUEUE_FLAG_NOMERGES     3       /* disable merge attempts */
 579#define QUEUE_FLAG_SAME_COMP    4       /* complete on same CPU-group */
 580#define QUEUE_FLAG_FAIL_IO      5       /* fake timeout */
 581#define QUEUE_FLAG_NONROT       6       /* non-rotational device (SSD) */
 582#define QUEUE_FLAG_VIRT         QUEUE_FLAG_NONROT /* paravirt device */
 583#define QUEUE_FLAG_IO_STAT      7       /* do disk/partitions IO accounting */
 584#define QUEUE_FLAG_DISCARD      8       /* supports DISCARD */
 585#define QUEUE_FLAG_NOXMERGES    9       /* No extended merges */
 586#define QUEUE_FLAG_ADD_RANDOM   10      /* Contributes to random pool */
 587#define QUEUE_FLAG_SECERASE     11      /* supports secure erase */
 588#define QUEUE_FLAG_SAME_FORCE   12      /* force complete on same CPU */
 589#define QUEUE_FLAG_DEAD         13      /* queue tear-down finished */
 590#define QUEUE_FLAG_INIT_DONE    14      /* queue is initialized */
 591#define QUEUE_FLAG_STABLE_WRITES 15     /* don't modify blks until WB is done */
 592#define QUEUE_FLAG_POLL         16      /* IO polling enabled if set */
 593#define QUEUE_FLAG_WC           17      /* Write back caching */
 594#define QUEUE_FLAG_FUA          18      /* device supports FUA writes */
 595#define QUEUE_FLAG_DAX          19      /* device supports DAX */
 596#define QUEUE_FLAG_STATS        20      /* track IO start and completion times */
 597#define QUEUE_FLAG_POLL_STATS   21      /* collecting stats for hybrid polling */
 598#define QUEUE_FLAG_REGISTERED   22      /* queue has been registered to a disk */
 599#define QUEUE_FLAG_SCSI_PASSTHROUGH 23  /* queue supports SCSI commands */
 600#define QUEUE_FLAG_QUIESCED     24      /* queue has been quiesced */
 601#define QUEUE_FLAG_PCI_P2PDMA   25      /* device supports PCI p2p requests */
 602#define QUEUE_FLAG_ZONE_RESETALL 26     /* supports Zone Reset All */
 603#define QUEUE_FLAG_RQ_ALLOC_TIME 27     /* record rq->alloc_time_ns */
 604#define QUEUE_FLAG_HCTX_ACTIVE  28      /* at least one blk-mq hctx is active */
 605#define QUEUE_FLAG_NOWAIT       29      /* device supports NOWAIT */
 606
 607#define QUEUE_FLAG_MQ_DEFAULT   ((1 << QUEUE_FLAG_IO_STAT) |            \
 608                                 (1 << QUEUE_FLAG_SAME_COMP) |          \
 609                                 (1 << QUEUE_FLAG_NOWAIT))
 610
 611void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
 612void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
 613bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 614
 615#define blk_queue_stopped(q)    test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 616#define blk_queue_dying(q)      test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 617#define blk_queue_dead(q)       test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 618#define blk_queue_init_done(q)  test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
 619#define blk_queue_nomerges(q)   test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 620#define blk_queue_noxmerges(q)  \
 621        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 622#define blk_queue_nonrot(q)     test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 623#define blk_queue_stable_writes(q) \
 624        test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
 625#define blk_queue_io_stat(q)    test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 626#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 627#define blk_queue_discard(q)    test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 628#define blk_queue_zone_resetall(q)      \
 629        test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
 630#define blk_queue_secure_erase(q) \
 631        (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 632#define blk_queue_dax(q)        test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
 633#define blk_queue_scsi_passthrough(q)   \
 634        test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
 635#define blk_queue_pci_p2pdma(q) \
 636        test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
 637#ifdef CONFIG_BLK_RQ_ALLOC_TIME
 638#define blk_queue_rq_alloc_time(q)      \
 639        test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
 640#else
 641#define blk_queue_rq_alloc_time(q)      false
 642#endif
 643
 644#define blk_noretry_request(rq) \
 645        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
 646                             REQ_FAILFAST_DRIVER))
 647#define blk_queue_quiesced(q)   test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 648#define blk_queue_pm_only(q)    atomic_read(&(q)->pm_only)
 649#define blk_queue_fua(q)        test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 650#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
 651#define blk_queue_nowait(q)     test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
 652
 653extern void blk_set_pm_only(struct request_queue *q);
 654extern void blk_clear_pm_only(struct request_queue *q);
 655
 656#define list_entry_rq(ptr)      list_entry((ptr), struct request, queuelist)
 657
 658#define rq_data_dir(rq)         (op_is_write(req_op(rq)) ? WRITE : READ)
 659
 660#define rq_dma_dir(rq) \
 661        (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
 662
 663#define dma_map_bvec(dev, bv, dir, attrs) \
 664        dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
 665        (dir), (attrs))
 666
 667#define queue_to_disk(q)        (dev_to_disk(kobj_to_dev((q)->kobj.parent)))
 668
 669static inline bool queue_is_mq(struct request_queue *q)
 670{
 671        return q->mq_ops;
 672}
 673
 674#ifdef CONFIG_PM
 675static inline enum rpm_status queue_rpm_status(struct request_queue *q)
 676{
 677        return q->rpm_status;
 678}
 679#else
 680static inline enum rpm_status queue_rpm_status(struct request_queue *q)
 681{
 682        return RPM_ACTIVE;
 683}
 684#endif
 685
 686static inline enum blk_zoned_model
 687blk_queue_zoned_model(struct request_queue *q)
 688{
 689        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
 690                return q->limits.zoned;
 691        return BLK_ZONED_NONE;
 692}
 693
 694static inline bool blk_queue_is_zoned(struct request_queue *q)
 695{
 696        switch (blk_queue_zoned_model(q)) {
 697        case BLK_ZONED_HA:
 698        case BLK_ZONED_HM:
 699                return true;
 700        default:
 701                return false;
 702        }
 703}
 704
 705static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
 706{
 707        return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
 708}
 709
 710#ifdef CONFIG_BLK_DEV_ZONED
 711static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
 712{
 713        return blk_queue_is_zoned(q) ? q->nr_zones : 0;
 714}
 715
 716static inline unsigned int blk_queue_zone_no(struct request_queue *q,
 717                                             sector_t sector)
 718{
 719        if (!blk_queue_is_zoned(q))
 720                return 0;
 721        return sector >> ilog2(q->limits.chunk_sectors);
 722}
 723
 724static inline bool blk_queue_zone_is_seq(struct request_queue *q,
 725                                         sector_t sector)
 726{
 727        if (!blk_queue_is_zoned(q))
 728                return false;
 729        if (!q->conv_zones_bitmap)
 730                return true;
 731        return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
 732}
 733
 734static inline void blk_queue_max_open_zones(struct request_queue *q,
 735                unsigned int max_open_zones)
 736{
 737        q->max_open_zones = max_open_zones;
 738}
 739
 740static inline unsigned int queue_max_open_zones(const struct request_queue *q)
 741{
 742        return q->max_open_zones;
 743}
 744
 745static inline void blk_queue_max_active_zones(struct request_queue *q,
 746                unsigned int max_active_zones)
 747{
 748        q->max_active_zones = max_active_zones;
 749}
 750
 751static inline unsigned int queue_max_active_zones(const struct request_queue *q)
 752{
 753        return q->max_active_zones;
 754}
 755#else /* CONFIG_BLK_DEV_ZONED */
 756static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
 757{
 758        return 0;
 759}
 760static inline bool blk_queue_zone_is_seq(struct request_queue *q,
 761                                         sector_t sector)
 762{
 763        return false;
 764}
 765static inline unsigned int blk_queue_zone_no(struct request_queue *q,
 766                                             sector_t sector)
 767{
 768        return 0;
 769}
 770static inline unsigned int queue_max_open_zones(const struct request_queue *q)
 771{
 772        return 0;
 773}
 774static inline unsigned int queue_max_active_zones(const struct request_queue *q)
 775{
 776        return 0;
 777}
 778#endif /* CONFIG_BLK_DEV_ZONED */
 779
 780static inline bool rq_is_sync(struct request *rq)
 781{
 782        return op_is_sync(rq->cmd_flags);
 783}
 784
 785static inline bool rq_mergeable(struct request *rq)
 786{
 787        if (blk_rq_is_passthrough(rq))
 788                return false;
 789
 790        if (req_op(rq) == REQ_OP_FLUSH)
 791                return false;
 792
 793        if (req_op(rq) == REQ_OP_WRITE_ZEROES)
 794                return false;
 795
 796        if (req_op(rq) == REQ_OP_ZONE_APPEND)
 797                return false;
 798
 799        if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
 800                return false;
 801        if (rq->rq_flags & RQF_NOMERGE_FLAGS)
 802                return false;
 803
 804        return true;
 805}
 806
 807static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
 808{
 809        if (bio_page(a) == bio_page(b) &&
 810            bio_offset(a) == bio_offset(b))
 811                return true;
 812
 813        return false;
 814}
 815
 816static inline unsigned int blk_queue_depth(struct request_queue *q)
 817{
 818        if (q->queue_depth)
 819                return q->queue_depth;
 820
 821        return q->nr_requests;
 822}
 823
 824/*
 825 * default timeout for SG_IO if none specified
 826 */
 827#define BLK_DEFAULT_SG_TIMEOUT  (60 * HZ)
 828#define BLK_MIN_SG_TIMEOUT      (7 * HZ)
 829
 830struct rq_map_data {
 831        struct page **pages;
 832        int page_order;
 833        int nr_entries;
 834        unsigned long offset;
 835        int null_mapped;
 836        int from_user;
 837};
 838
 839struct req_iterator {
 840        struct bvec_iter iter;
 841        struct bio *bio;
 842};
 843
 844/* This should not be used directly - use rq_for_each_segment */
 845#define for_each_bio(_bio)              \
 846        for (; _bio; _bio = _bio->bi_next)
 847#define __rq_for_each_bio(_bio, rq)     \
 848        if ((rq->bio))                  \
 849                for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
 850
 851#define rq_for_each_segment(bvl, _rq, _iter)                    \
 852        __rq_for_each_bio(_iter.bio, _rq)                       \
 853                bio_for_each_segment(bvl, _iter.bio, _iter.iter)
 854
 855#define rq_for_each_bvec(bvl, _rq, _iter)                       \
 856        __rq_for_each_bio(_iter.bio, _rq)                       \
 857                bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
 858
 859#define rq_iter_last(bvec, _iter)                               \
 860                (_iter.bio->bi_next == NULL &&                  \
 861                 bio_iter_last(bvec, _iter.iter))
 862
 863#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 864# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
 865#endif
 866#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 867extern void rq_flush_dcache_pages(struct request *rq);
 868#else
 869static inline void rq_flush_dcache_pages(struct request *rq)
 870{
 871}
 872#endif
 873
 874extern int blk_register_queue(struct gendisk *disk);
 875extern void blk_unregister_queue(struct gendisk *disk);
 876blk_qc_t submit_bio_noacct(struct bio *bio);
 877extern void blk_rq_init(struct request_queue *q, struct request *rq);
 878extern void blk_put_request(struct request *);
 879extern struct request *blk_get_request(struct request_queue *, unsigned int op,
 880                                       blk_mq_req_flags_t flags);
 881extern int blk_lld_busy(struct request_queue *q);
 882extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 883                             struct bio_set *bs, gfp_t gfp_mask,
 884                             int (*bio_ctr)(struct bio *, struct bio *, void *),
 885                             void *data);
 886extern void blk_rq_unprep_clone(struct request *rq);
 887extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
 888                                     struct request *rq);
 889int blk_rq_append_bio(struct request *rq, struct bio *bio);
 890extern void blk_queue_split(struct bio **);
 891extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 892extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
 893                              unsigned int, void __user *);
 894extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 895                          unsigned int, void __user *);
 896extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 897                         struct scsi_ioctl_command __user *);
 898extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
 899extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
 900
 901extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
 902extern void blk_queue_exit(struct request_queue *q);
 903extern void blk_sync_queue(struct request_queue *q);
 904extern int blk_rq_map_user(struct request_queue *, struct request *,
 905                           struct rq_map_data *, void __user *, unsigned long,
 906                           gfp_t);
 907extern int blk_rq_unmap_user(struct bio *);
 908extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
 909extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 910                               struct rq_map_data *, const struct iov_iter *,
 911                               gfp_t);
 912extern void blk_execute_rq_nowait(struct gendisk *,
 913                                  struct request *, int, rq_end_io_fn *);
 914
 915blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq,
 916                            int at_head);
 917
 918/* Helper to convert REQ_OP_XXX to its string format XXX */
 919extern const char *blk_op_str(unsigned int op);
 920
 921int blk_status_to_errno(blk_status_t status);
 922blk_status_t errno_to_blk_status(int errno);
 923
 924int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
 925
 926static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 927{
 928        return bdev->bd_disk->queue;    /* this is never NULL */
 929}
 930
 931/*
 932 * The basic unit of block I/O is a sector. It is used in a number of contexts
 933 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 934 * bytes. Variables of type sector_t represent an offset or size that is a
 935 * multiple of 512 bytes. Hence these two constants.
 936 */
 937#ifndef SECTOR_SHIFT
 938#define SECTOR_SHIFT 9
 939#endif
 940#ifndef SECTOR_SIZE
 941#define SECTOR_SIZE (1 << SECTOR_SHIFT)
 942#endif
 943
 944/*
 945 * blk_rq_pos()                 : the current sector
 946 * blk_rq_bytes()               : bytes left in the entire request
 947 * blk_rq_cur_bytes()           : bytes left in the current segment
 948 * blk_rq_err_bytes()           : bytes left till the next error boundary
 949 * blk_rq_sectors()             : sectors left in the entire request
 950 * blk_rq_cur_sectors()         : sectors left in the current segment
 951 * blk_rq_stats_sectors()       : sectors of the entire request used for stats
 952 */
 953static inline sector_t blk_rq_pos(const struct request *rq)
 954{
 955        return rq->__sector;
 956}
 957
 958static inline unsigned int blk_rq_bytes(const struct request *rq)
 959{
 960        return rq->__data_len;
 961}
 962
 963static inline int blk_rq_cur_bytes(const struct request *rq)
 964{
 965        return rq->bio ? bio_cur_bytes(rq->bio) : 0;
 966}
 967
 968extern unsigned int blk_rq_err_bytes(const struct request *rq);
 969
 970static inline unsigned int blk_rq_sectors(const struct request *rq)
 971{
 972        return blk_rq_bytes(rq) >> SECTOR_SHIFT;
 973}
 974
 975static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 976{
 977        return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
 978}
 979
 980static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
 981{
 982        return rq->stats_sectors;
 983}
 984
 985#ifdef CONFIG_BLK_DEV_ZONED
 986
 987/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
 988const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
 989
 990static inline unsigned int bio_zone_no(struct bio *bio)
 991{
 992        return blk_queue_zone_no(bdev_get_queue(bio->bi_bdev),
 993                                 bio->bi_iter.bi_sector);
 994}
 995
 996static inline unsigned int bio_zone_is_seq(struct bio *bio)
 997{
 998        return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev),
 999                                     bio->bi_iter.bi_sector);
1000}
1001
1002static inline unsigned int blk_rq_zone_no(struct request *rq)
1003{
1004        return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
1005}
1006
1007static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
1008{
1009        return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
1010}
1011#endif /* CONFIG_BLK_DEV_ZONED */
1012
1013/*
1014 * Some commands like WRITE SAME have a payload or data transfer size which
1015 * is different from the size of the request.  Any driver that supports such
1016 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
1017 * calculate the data transfer size.
1018 */
1019static inline unsigned int blk_rq_payload_bytes(struct request *rq)
1020{
1021        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1022                return rq->special_vec.bv_len;
1023        return blk_rq_bytes(rq);
1024}
1025
1026/*
1027 * Return the first full biovec in the request.  The caller needs to check that
1028 * there are any bvecs before calling this helper.
1029 */
1030static inline struct bio_vec req_bvec(struct request *rq)
1031{
1032        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1033                return rq->special_vec;
1034        return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
1035}
1036
1037static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
1038                                                     int op)
1039{
1040        if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
1041                return min(q->limits.max_discard_sectors,
1042                           UINT_MAX >> SECTOR_SHIFT);
1043
1044        if (unlikely(op == REQ_OP_WRITE_SAME))
1045                return q->limits.max_write_same_sectors;
1046
1047        if (unlikely(op == REQ_OP_WRITE_ZEROES))
1048                return q->limits.max_write_zeroes_sectors;
1049
1050        return q->limits.max_sectors;
1051}
1052
1053/*
1054 * Return maximum size of a request at given offset. Only valid for
1055 * file system requests.
1056 */
1057static inline unsigned int blk_max_size_offset(struct request_queue *q,
1058                                               sector_t offset,
1059                                               unsigned int chunk_sectors)
1060{
1061        if (!chunk_sectors) {
1062                if (q->limits.chunk_sectors)
1063                        chunk_sectors = q->limits.chunk_sectors;
1064                else
1065                        return q->limits.max_sectors;
1066        }
1067
1068        if (likely(is_power_of_2(chunk_sectors)))
1069                chunk_sectors -= offset & (chunk_sectors - 1);
1070        else
1071                chunk_sectors -= sector_div(offset, chunk_sectors);
1072
1073        return min(q->limits.max_sectors, chunk_sectors);
1074}
1075
1076static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
1077                                                  sector_t offset)
1078{
1079        struct request_queue *q = rq->q;
1080
1081        if (blk_rq_is_passthrough(rq))
1082                return q->limits.max_hw_sectors;
1083
1084        if (!q->limits.chunk_sectors ||
1085            req_op(rq) == REQ_OP_DISCARD ||
1086            req_op(rq) == REQ_OP_SECURE_ERASE)
1087                return blk_queue_get_max_sectors(q, req_op(rq));
1088
1089        return min(blk_max_size_offset(q, offset, 0),
1090                        blk_queue_get_max_sectors(q, req_op(rq)));
1091}
1092
1093static inline unsigned int blk_rq_count_bios(struct request *rq)
1094{
1095        unsigned int nr_bios = 0;
1096        struct bio *bio;
1097
1098        __rq_for_each_bio(bio, rq)
1099                nr_bios++;
1100
1101        return nr_bios;
1102}
1103
1104void blk_steal_bios(struct bio_list *list, struct request *rq);
1105
1106/*
1107 * Request completion related functions.
1108 *
1109 * blk_update_request() completes given number of bytes and updates
1110 * the request without completing it.
1111 */
1112extern bool blk_update_request(struct request *rq, blk_status_t error,
1113                               unsigned int nr_bytes);
1114
1115extern void blk_abort_request(struct request *);
1116
1117/*
1118 * Access functions for manipulating queue properties
1119 */
1120extern void blk_cleanup_queue(struct request_queue *);
1121void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
1122extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1123extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
1124extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1125extern void blk_queue_max_discard_segments(struct request_queue *,
1126                unsigned short);
1127extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
1128extern void blk_queue_max_discard_sectors(struct request_queue *q,
1129                unsigned int max_discard_sectors);
1130extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1131                unsigned int max_write_same_sectors);
1132extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
1133                unsigned int max_write_same_sectors);
1134extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
1135extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
1136                unsigned int max_zone_append_sectors);
1137extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
1138void blk_queue_zone_write_granularity(struct request_queue *q,
1139                                      unsigned int size);
1140extern void blk_queue_alignment_offset(struct request_queue *q,
1141                                       unsigned int alignment);
1142void blk_queue_update_readahead(struct request_queue *q);
1143extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
1144extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1145extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
1146extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1147extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
1148extern void blk_set_default_limits(struct queue_limits *lim);
1149extern void blk_set_stacking_limits(struct queue_limits *lim);
1150extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1151                            sector_t offset);
1152extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1153                              sector_t offset);
1154extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1155extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1156extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
1157extern void blk_queue_dma_alignment(struct request_queue *, int);
1158extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1159extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1160extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
1161extern void blk_queue_required_elevator_features(struct request_queue *q,
1162                                                 unsigned int features);
1163extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
1164                                              struct device *dev);
1165
1166/*
1167 * Number of physical segments as sent to the device.
1168 *
1169 * Normally this is the number of discontiguous data segments sent by the
1170 * submitter.  But for data-less command like discard we might have no
1171 * actual data segments submitted, but the driver might have to add it's
1172 * own special payload.  In that case we still return 1 here so that this
1173 * special payload will be mapped.
1174 */
1175static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1176{
1177        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1178                return 1;
1179        return rq->nr_phys_segments;
1180}
1181
1182/*
1183 * Number of discard segments (or ranges) the driver needs to fill in.
1184 * Each discard bio merged into a request is counted as one segment.
1185 */
1186static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1187{
1188        return max_t(unsigned short, rq->nr_phys_segments, 1);
1189}
1190
1191int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1192                struct scatterlist *sglist, struct scatterlist **last_sg);
1193static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1194                struct scatterlist *sglist)
1195{
1196        struct scatterlist *last_sg = NULL;
1197
1198        return __blk_rq_map_sg(q, rq, sglist, &last_sg);
1199}
1200extern void blk_dump_rq_flags(struct request *, char *);
1201
1202bool __must_check blk_get_queue(struct request_queue *);
1203extern void blk_put_queue(struct request_queue *);
1204extern void blk_set_queue_dying(struct request_queue *);
1205
1206#ifdef CONFIG_BLOCK
1207/*
1208 * blk_plug permits building a queue of related requests by holding the I/O
1209 * fragments for a short period. This allows merging of sequential requests
1210 * into single larger request. As the requests are moved from a per-task list to
1211 * the device's request_queue in a batch, this results in improved scalability
1212 * as the lock contention for request_queue lock is reduced.
1213 *
1214 * It is ok not to disable preemption when adding the request to the plug list
1215 * or when attempting a merge, because blk_schedule_flush_list() will only flush
1216 * the plug list when the task sleeps by itself. For details, please see
1217 * schedule() where blk_schedule_flush_plug() is called.
1218 */
1219struct blk_plug {
1220        struct list_head mq_list; /* blk-mq requests */
1221        struct list_head cb_list; /* md requires an unplug callback */
1222        unsigned short rq_count;
1223        bool multiple_queues;
1224        bool nowait;
1225};
1226#define BLK_MAX_REQUEST_COUNT 16
1227#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
1228
1229struct blk_plug_cb;
1230typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
1231struct blk_plug_cb {
1232        struct list_head list;
1233        blk_plug_cb_fn callback;
1234        void *data;
1235};
1236extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1237                                             void *data, int size);
1238extern void blk_start_plug(struct blk_plug *);
1239extern void blk_finish_plug(struct blk_plug *);
1240extern void blk_flush_plug_list(struct blk_plug *, bool);
1241
1242static inline void blk_flush_plug(struct task_struct *tsk)
1243{
1244        struct blk_plug *plug = tsk->plug;
1245
1246        if (plug)
1247                blk_flush_plug_list(plug, false);
1248}
1249
1250static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1251{
1252        struct blk_plug *plug = tsk->plug;
1253
1254        if (plug)
1255                blk_flush_plug_list(plug, true);
1256}
1257
1258static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1259{
1260        struct blk_plug *plug = tsk->plug;
1261
1262        return plug &&
1263                 (!list_empty(&plug->mq_list) ||
1264                 !list_empty(&plug->cb_list));
1265}
1266
1267int blkdev_issue_flush(struct block_device *bdev);
1268long nr_blockdev_pages(void);
1269#else /* CONFIG_BLOCK */
1270struct blk_plug {
1271};
1272
1273static inline void blk_start_plug(struct blk_plug *plug)
1274{
1275}
1276
1277static inline void blk_finish_plug(struct blk_plug *plug)
1278{
1279}
1280
1281static inline void blk_flush_plug(struct task_struct *task)
1282{
1283}
1284
1285static inline void blk_schedule_flush_plug(struct task_struct *task)
1286{
1287}
1288
1289
1290static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1291{
1292        return false;
1293}
1294
1295static inline int blkdev_issue_flush(struct block_device *bdev)
1296{
1297        return 0;
1298}
1299
1300static inline long nr_blockdev_pages(void)
1301{
1302        return 0;
1303}
1304#endif /* CONFIG_BLOCK */
1305
1306extern void blk_io_schedule(void);
1307
1308extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1309                sector_t nr_sects, gfp_t gfp_mask, struct page *page);
1310
1311#define BLKDEV_DISCARD_SECURE   (1 << 0)        /* issue a secure erase */
1312
1313extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1314                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
1315extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1316                sector_t nr_sects, gfp_t gfp_mask, int flags,
1317                struct bio **biop);
1318
1319#define BLKDEV_ZERO_NOUNMAP     (1 << 0)  /* do not free blocks */
1320#define BLKDEV_ZERO_NOFALLBACK  (1 << 1)  /* don't write explicit zeroes */
1321
1322extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1323                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
1324                unsigned flags);
1325extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1326                sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
1327
1328static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1329                sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1330{
1331        return blkdev_issue_discard(sb->s_bdev,
1332                                    block << (sb->s_blocksize_bits -
1333                                              SECTOR_SHIFT),
1334                                    nr_blocks << (sb->s_blocksize_bits -
1335                                                  SECTOR_SHIFT),
1336                                    gfp_mask, flags);
1337}
1338static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1339                sector_t nr_blocks, gfp_t gfp_mask)
1340{
1341        return blkdev_issue_zeroout(sb->s_bdev,
1342                                    block << (sb->s_blocksize_bits -
1343                                              SECTOR_SHIFT),
1344                                    nr_blocks << (sb->s_blocksize_bits -
1345                                                  SECTOR_SHIFT),
1346                                    gfp_mask, 0);
1347}
1348
1349extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
1350
1351static inline bool bdev_is_partition(struct block_device *bdev)
1352{
1353        return bdev->bd_partno;
1354}
1355
1356enum blk_default_limits {
1357        BLK_MAX_SEGMENTS        = 128,
1358        BLK_SAFE_MAX_SECTORS    = 255,
1359        BLK_DEF_MAX_SECTORS     = 2560,
1360        BLK_MAX_SEGMENT_SIZE    = 65536,
1361        BLK_SEG_BOUNDARY_MASK   = 0xFFFFFFFFUL,
1362};
1363
1364static inline unsigned long queue_segment_boundary(const struct request_queue *q)
1365{
1366        return q->limits.seg_boundary_mask;
1367}
1368
1369static inline unsigned long queue_virt_boundary(const struct request_queue *q)
1370{
1371        return q->limits.virt_boundary_mask;
1372}
1373
1374static inline unsigned int queue_max_sectors(const struct request_queue *q)
1375{
1376        return q->limits.max_sectors;
1377}
1378
1379static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
1380{
1381        return q->limits.max_hw_sectors;
1382}
1383
1384static inline unsigned short queue_max_segments(const struct request_queue *q)
1385{
1386        return q->limits.max_segments;
1387}
1388
1389static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
1390{
1391        return q->limits.max_discard_segments;
1392}
1393
1394static inline unsigned int queue_max_segment_size(const struct request_queue *q)
1395{
1396        return q->limits.max_segment_size;
1397}
1398
1399static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
1400{
1401
1402        const struct queue_limits *l = &q->limits;
1403
1404        return min(l->max_zone_append_sectors, l->max_sectors);
1405}
1406
1407static inline unsigned queue_logical_block_size(const struct request_queue *q)
1408{
1409        int retval = 512;
1410
1411        if (q && q->limits.logical_block_size)
1412                retval = q->limits.logical_block_size;
1413
1414        return retval;
1415}
1416
1417static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
1418{
1419        return queue_logical_block_size(bdev_get_queue(bdev));
1420}
1421
1422static inline unsigned int queue_physical_block_size(const struct request_queue *q)
1423{
1424        return q->limits.physical_block_size;
1425}
1426
1427static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1428{
1429        return queue_physical_block_size(bdev_get_queue(bdev));
1430}
1431
1432static inline unsigned int queue_io_min(const struct request_queue *q)
1433{
1434        return q->limits.io_min;
1435}
1436
1437static inline int bdev_io_min(struct block_device *bdev)
1438{
1439        return queue_io_min(bdev_get_queue(bdev));
1440}
1441
1442static inline unsigned int queue_io_opt(const struct request_queue *q)
1443{
1444        return q->limits.io_opt;
1445}
1446
1447static inline int bdev_io_opt(struct block_device *bdev)
1448{
1449        return queue_io_opt(bdev_get_queue(bdev));
1450}
1451
1452static inline unsigned int
1453queue_zone_write_granularity(const struct request_queue *q)
1454{
1455        return q->limits.zone_write_granularity;
1456}
1457
1458static inline unsigned int
1459bdev_zone_write_granularity(struct block_device *bdev)
1460{
1461        return queue_zone_write_granularity(bdev_get_queue(bdev));
1462}
1463
1464static inline int queue_alignment_offset(const struct request_queue *q)
1465{
1466        if (q->limits.misaligned)
1467                return -1;
1468
1469        return q->limits.alignment_offset;
1470}
1471
1472static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
1473{
1474        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1475        unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
1476                << SECTOR_SHIFT;
1477
1478        return (granularity + lim->alignment_offset - alignment) % granularity;
1479}
1480
1481static inline int bdev_alignment_offset(struct block_device *bdev)
1482{
1483        struct request_queue *q = bdev_get_queue(bdev);
1484
1485        if (q->limits.misaligned)
1486                return -1;
1487        if (bdev_is_partition(bdev))
1488                return queue_limit_alignment_offset(&q->limits,
1489                                bdev->bd_start_sect);
1490        return q->limits.alignment_offset;
1491}
1492
1493static inline int queue_discard_alignment(const struct request_queue *q)
1494{
1495        if (q->limits.discard_misaligned)
1496                return -1;
1497
1498        return q->limits.discard_alignment;
1499}
1500
1501static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
1502{
1503        unsigned int alignment, granularity, offset;
1504
1505        if (!lim->max_discard_sectors)
1506                return 0;
1507
1508        /* Why are these in bytes, not sectors? */
1509        alignment = lim->discard_alignment >> SECTOR_SHIFT;
1510        granularity = lim->discard_granularity >> SECTOR_SHIFT;
1511        if (!granularity)
1512                return 0;
1513
1514        /* Offset of the partition start in 'granularity' sectors */
1515        offset = sector_div(sector, granularity);
1516
1517        /* And why do we do this modulus *again* in blkdev_issue_discard()? */
1518        offset = (granularity + alignment - offset) % granularity;
1519
1520        /* Turn it back into bytes, gaah */
1521        return offset << SECTOR_SHIFT;
1522}
1523
1524static inline int bdev_discard_alignment(struct block_device *bdev)
1525{
1526        struct request_queue *q = bdev_get_queue(bdev);
1527
1528        if (bdev_is_partition(bdev))
1529                return queue_limit_discard_alignment(&q->limits,
1530                                bdev->bd_start_sect);
1531        return q->limits.discard_alignment;
1532}
1533
1534static inline unsigned int bdev_write_same(struct block_device *bdev)
1535{
1536        struct request_queue *q = bdev_get_queue(bdev);
1537
1538        if (q)
1539                return q->limits.max_write_same_sectors;
1540
1541        return 0;
1542}
1543
1544static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1545{
1546        struct request_queue *q = bdev_get_queue(bdev);
1547
1548        if (q)
1549                return q->limits.max_write_zeroes_sectors;
1550
1551        return 0;
1552}
1553
1554static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
1555{
1556        struct request_queue *q = bdev_get_queue(bdev);
1557
1558        if (q)
1559                return blk_queue_zoned_model(q);
1560
1561        return BLK_ZONED_NONE;
1562}
1563
1564static inline bool bdev_is_zoned(struct block_device *bdev)
1565{
1566        struct request_queue *q = bdev_get_queue(bdev);
1567
1568        if (q)
1569                return blk_queue_is_zoned(q);
1570
1571        return false;
1572}
1573
1574static inline sector_t bdev_zone_sectors(struct block_device *bdev)
1575{
1576        struct request_queue *q = bdev_get_queue(bdev);
1577
1578        if (q)
1579                return blk_queue_zone_sectors(q);
1580        return 0;
1581}
1582
1583static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
1584{
1585        struct request_queue *q = bdev_get_queue(bdev);
1586
1587        if (q)
1588                return queue_max_open_zones(q);
1589        return 0;
1590}
1591
1592static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
1593{
1594        struct request_queue *q = bdev_get_queue(bdev);
1595
1596        if (q)
1597                return queue_max_active_zones(q);
1598        return 0;
1599}
1600
1601static inline int queue_dma_alignment(const struct request_queue *q)
1602{
1603        return q ? q->dma_alignment : 511;
1604}
1605
1606static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
1607                                 unsigned int len)
1608{
1609        unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1610        return !(addr & alignment) && !(len & alignment);
1611}
1612
1613/* assumes size > 256 */
1614static inline unsigned int blksize_bits(unsigned int size)
1615{
1616        unsigned int bits = 8;
1617        do {
1618                bits++;
1619                size >>= 1;
1620        } while (size > 256);
1621        return bits;
1622}
1623
1624static inline unsigned int block_size(struct block_device *bdev)
1625{
1626        return 1 << bdev->bd_inode->i_blkbits;
1627}
1628
1629int kblockd_schedule_work(struct work_struct *work);
1630int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1631
1632#define MODULE_ALIAS_BLOCKDEV(major,minor) \
1633        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1634#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1635        MODULE_ALIAS("block-major-" __stringify(major) "-*")
1636
1637#if defined(CONFIG_BLK_DEV_INTEGRITY)
1638
1639enum blk_integrity_flags {
1640        BLK_INTEGRITY_VERIFY            = 1 << 0,
1641        BLK_INTEGRITY_GENERATE          = 1 << 1,
1642        BLK_INTEGRITY_DEVICE_CAPABLE    = 1 << 2,
1643        BLK_INTEGRITY_IP_CHECKSUM       = 1 << 3,
1644};
1645
1646struct blk_integrity_iter {
1647        void                    *prot_buf;
1648        void                    *data_buf;
1649        sector_t                seed;
1650        unsigned int            data_size;
1651        unsigned short          interval;
1652        const char              *disk_name;
1653};
1654
1655typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
1656typedef void (integrity_prepare_fn) (struct request *);
1657typedef void (integrity_complete_fn) (struct request *, unsigned int);
1658
1659struct blk_integrity_profile {
1660        integrity_processing_fn         *generate_fn;
1661        integrity_processing_fn         *verify_fn;
1662        integrity_prepare_fn            *prepare_fn;
1663        integrity_complete_fn           *complete_fn;
1664        const char                      *name;
1665};
1666
1667extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
1668extern void blk_integrity_unregister(struct gendisk *);
1669extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
1670extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1671                                   struct scatterlist *);
1672extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1673
1674static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1675{
1676        struct blk_integrity *bi = &disk->queue->integrity;
1677
1678        if (!bi->profile)
1679                return NULL;
1680
1681        return bi;
1682}
1683
1684static inline
1685struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1686{
1687        return blk_get_integrity(bdev->bd_disk);
1688}
1689
1690static inline bool
1691blk_integrity_queue_supports_integrity(struct request_queue *q)
1692{
1693        return q->integrity.profile;
1694}
1695
1696static inline bool blk_integrity_rq(struct request *rq)
1697{
1698        return rq->cmd_flags & REQ_INTEGRITY;
1699}
1700
1701static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1702                                                    unsigned int segs)
1703{
1704        q->limits.max_integrity_segments = segs;
1705}
1706
1707static inline unsigned short
1708queue_max_integrity_segments(const struct request_queue *q)
1709{
1710        return q->limits.max_integrity_segments;
1711}
1712
1713/**
1714 * bio_integrity_intervals - Return number of integrity intervals for a bio
1715 * @bi:         blk_integrity profile for device
1716 * @sectors:    Size of the bio in 512-byte sectors
1717 *
1718 * Description: The block layer calculates everything in 512 byte
1719 * sectors but integrity metadata is done in terms of the data integrity
1720 * interval size of the storage device.  Convert the block layer sectors
1721 * to the appropriate number of integrity intervals.
1722 */
1723static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
1724                                                   unsigned int sectors)
1725{
1726        return sectors >> (bi->interval_exp - 9);
1727}
1728
1729static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
1730                                               unsigned int sectors)
1731{
1732        return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
1733}
1734
1735/*
1736 * Return the first bvec that contains integrity data.  Only drivers that are
1737 * limited to a single integrity segment should use this helper.
1738 */
1739static inline struct bio_vec *rq_integrity_vec(struct request *rq)
1740{
1741        if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
1742                return NULL;
1743        return rq->bio->bi_integrity->bip_vec;
1744}
1745
1746#else /* CONFIG_BLK_DEV_INTEGRITY */
1747
1748struct bio;
1749struct block_device;
1750struct gendisk;
1751struct blk_integrity;
1752
1753static inline int blk_integrity_rq(struct request *rq)
1754{
1755        return 0;
1756}
1757static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1758                                            struct bio *b)
1759{
1760        return 0;
1761}
1762static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1763                                          struct bio *b,
1764                                          struct scatterlist *s)
1765{
1766        return 0;
1767}
1768static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1769{
1770        return NULL;
1771}
1772static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1773{
1774        return NULL;
1775}
1776static inline bool
1777blk_integrity_queue_supports_integrity(struct request_queue *q)
1778{
1779        return false;
1780}
1781static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1782{
1783        return 0;
1784}
1785static inline void blk_integrity_register(struct gendisk *d,
1786                                         struct blk_integrity *b)
1787{
1788}
1789static inline void blk_integrity_unregister(struct gendisk *d)
1790{
1791}
1792static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1793                                                    unsigned int segs)
1794{
1795}
1796static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
1797{
1798        return 0;
1799}
1800
1801static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
1802                                                   unsigned int sectors)
1803{
1804        return 0;
1805}
1806
1807static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
1808                                               unsigned int sectors)
1809{
1810        return 0;
1811}
1812
1813static inline struct bio_vec *rq_integrity_vec(struct request *rq)
1814{
1815        return NULL;
1816}
1817
1818#endif /* CONFIG_BLK_DEV_INTEGRITY */
1819
1820#ifdef CONFIG_BLK_INLINE_ENCRYPTION
1821
1822bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
1823
1824void blk_ksm_unregister(struct request_queue *q);
1825
1826#else /* CONFIG_BLK_INLINE_ENCRYPTION */
1827
1828static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm,
1829                                    struct request_queue *q)
1830{
1831        return true;
1832}
1833
1834static inline void blk_ksm_unregister(struct request_queue *q) { }
1835
1836#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
1837
1838
1839struct block_device_operations {
1840        blk_qc_t (*submit_bio) (struct bio *bio);
1841        int (*open) (struct block_device *, fmode_t);
1842        void (*release) (struct gendisk *, fmode_t);
1843        int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
1844        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1845        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1846        unsigned int (*check_events) (struct gendisk *disk,
1847                                      unsigned int clearing);
1848        void (*unlock_native_capacity) (struct gendisk *);
1849        int (*getgeo)(struct block_device *, struct hd_geometry *);
1850        int (*set_read_only)(struct block_device *bdev, bool ro);
1851        /* this callback is with swap_lock and sometimes page table lock held */
1852        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1853        int (*report_zones)(struct gendisk *, sector_t sector,
1854                        unsigned int nr_zones, report_zones_cb cb, void *data);
1855        char *(*devnode)(struct gendisk *disk, umode_t *mode);
1856        struct module *owner;
1857        const struct pr_ops *pr_ops;
1858};
1859
1860#ifdef CONFIG_COMPAT
1861extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
1862                                      unsigned int, unsigned long);
1863#else
1864#define blkdev_compat_ptr_ioctl NULL
1865#endif
1866
1867extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1868extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1869                                                struct writeback_control *);
1870
1871#ifdef CONFIG_BLK_DEV_ZONED
1872bool blk_req_needs_zone_write_lock(struct request *rq);
1873bool blk_req_zone_write_trylock(struct request *rq);
1874void __blk_req_zone_write_lock(struct request *rq);
1875void __blk_req_zone_write_unlock(struct request *rq);
1876
1877static inline void blk_req_zone_write_lock(struct request *rq)
1878{
1879        if (blk_req_needs_zone_write_lock(rq))
1880                __blk_req_zone_write_lock(rq);
1881}
1882
1883static inline void blk_req_zone_write_unlock(struct request *rq)
1884{
1885        if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
1886                __blk_req_zone_write_unlock(rq);
1887}
1888
1889static inline bool blk_req_zone_is_write_locked(struct request *rq)
1890{
1891        return rq->q->seq_zones_wlock &&
1892                test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
1893}
1894
1895static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1896{
1897        if (!blk_req_needs_zone_write_lock(rq))
1898                return true;
1899        return !blk_req_zone_is_write_locked(rq);
1900}
1901#else
1902static inline bool blk_req_needs_zone_write_lock(struct request *rq)
1903{
1904        return false;
1905}
1906
1907static inline void blk_req_zone_write_lock(struct request *rq)
1908{
1909}
1910
1911static inline void blk_req_zone_write_unlock(struct request *rq)
1912{
1913}
1914static inline bool blk_req_zone_is_write_locked(struct request *rq)
1915{
1916        return false;
1917}
1918
1919static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1920{
1921        return true;
1922}
1923#endif /* CONFIG_BLK_DEV_ZONED */
1924
1925static inline void blk_wake_io_task(struct task_struct *waiter)
1926{
1927        /*
1928         * If we're polling, the task itself is doing the completions. For
1929         * that case, we don't need to signal a wakeup, it's enough to just
1930         * mark us as RUNNING.
1931         */
1932        if (waiter == current)
1933                __set_current_state(TASK_RUNNING);
1934        else
1935                wake_up_process(waiter);
1936}
1937
1938unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1939                unsigned int op);
1940void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1941                unsigned long start_time);
1942
1943unsigned long bio_start_io_acct(struct bio *bio);
1944void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
1945                struct block_device *orig_bdev);
1946
1947/**
1948 * bio_end_io_acct - end I/O accounting for bio based drivers
1949 * @bio:        bio to end account for
1950 * @start:      start time returned by bio_start_io_acct()
1951 */
1952static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
1953{
1954        return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
1955}
1956
1957int bdev_read_only(struct block_device *bdev);
1958int set_blocksize(struct block_device *bdev, int size);
1959
1960const char *bdevname(struct block_device *bdev, char *buffer);
1961int lookup_bdev(const char *pathname, dev_t *dev);
1962
1963void blkdev_show(struct seq_file *seqf, off_t offset);
1964
1965#define BDEVNAME_SIZE   32      /* Largest string for a blockdev identifier */
1966#define BDEVT_SIZE      10      /* Largest string for MAJ:MIN for blkdev */
1967#ifdef CONFIG_BLOCK
1968#define BLKDEV_MAJOR_MAX        512
1969#else
1970#define BLKDEV_MAJOR_MAX        0
1971#endif
1972
1973struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
1974                void *holder);
1975struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
1976int bd_prepare_to_claim(struct block_device *bdev, void *holder);
1977void bd_abort_claiming(struct block_device *bdev, void *holder);
1978void blkdev_put(struct block_device *bdev, fmode_t mode);
1979
1980/* just for blk-cgroup, don't use elsewhere */
1981struct block_device *blkdev_get_no_open(dev_t dev);
1982void blkdev_put_no_open(struct block_device *bdev);
1983
1984struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
1985void bdev_add(struct block_device *bdev, dev_t dev);
1986struct block_device *I_BDEV(struct inode *inode);
1987struct block_device *bdgrab(struct block_device *bdev);
1988void bdput(struct block_device *);
1989int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
1990                loff_t lend);
1991
1992#ifdef CONFIG_BLOCK
1993void invalidate_bdev(struct block_device *bdev);
1994int sync_blockdev(struct block_device *bdev);
1995#else
1996static inline void invalidate_bdev(struct block_device *bdev)
1997{
1998}
1999static inline int sync_blockdev(struct block_device *bdev)
2000{
2001        return 0;
2002}
2003#endif
2004int fsync_bdev(struct block_device *bdev);
2005
2006int freeze_bdev(struct block_device *bdev);
2007int thaw_bdev(struct block_device *bdev);
2008
2009#endif /* _LINUX_BLKDEV_H */
2010