linux/include/linux/blkdev.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_BLKDEV_H
   3#define _LINUX_BLKDEV_H
   4
   5#include <linux/sched.h>
   6#include <linux/sched/clock.h>
   7
   8#ifdef CONFIG_BLOCK
   9
  10#include <linux/major.h>
  11#include <linux/genhd.h>
  12#include <linux/list.h>
  13#include <linux/llist.h>
  14#include <linux/timer.h>
  15#include <linux/workqueue.h>
  16#include <linux/pagemap.h>
  17#include <linux/backing-dev-defs.h>
  18#include <linux/wait.h>
  19#include <linux/mempool.h>
  20#include <linux/pfn.h>
  21#include <linux/bio.h>
  22#include <linux/stringify.h>
  23#include <linux/gfp.h>
  24#include <linux/bsg.h>
  25#include <linux/smp.h>
  26#include <linux/rcupdate.h>
  27#include <linux/percpu-refcount.h>
  28#include <linux/scatterlist.h>
  29#include <linux/blkzoned.h>
  30#include <linux/seqlock.h>
  31#include <linux/u64_stats_sync.h>
  32
  33struct module;
  34struct scsi_ioctl_command;
  35
  36struct request_queue;
  37struct elevator_queue;
  38struct blk_trace;
  39struct request;
  40struct sg_io_hdr;
  41struct bsg_job;
  42struct blkcg_gq;
  43struct blk_flush_queue;
  44struct pr_ops;
  45struct rq_wb;
  46struct blk_queue_stats;
  47struct blk_stat_callback;
  48
  49#define BLKDEV_MIN_RQ   4
  50#define BLKDEV_MAX_RQ   128     /* Default maximum */
  51
  52/* Must be consistent with blk_mq_poll_stats_bkt() */
  53#define BLK_MQ_POLL_STATS_BKTS 16
  54
  55/*
  56 * Maximum number of blkcg policies allowed to be registered concurrently.
  57 * Defined here to simplify include dependency.
  58 */
  59#define BLKCG_MAX_POLS          3
  60
  61typedef void (rq_end_io_fn)(struct request *, blk_status_t);
  62
  63#define BLK_RL_SYNCFULL         (1U << 0)
  64#define BLK_RL_ASYNCFULL        (1U << 1)
  65
  66struct request_list {
  67        struct request_queue    *q;     /* the queue this rl belongs to */
  68#ifdef CONFIG_BLK_CGROUP
  69        struct blkcg_gq         *blkg;  /* blkg this request pool belongs to */
  70#endif
  71        /*
  72         * count[], starved[], and wait[] are indexed by
  73         * BLK_RW_SYNC/BLK_RW_ASYNC
  74         */
  75        int                     count[2];
  76        int                     starved[2];
  77        mempool_t               *rq_pool;
  78        wait_queue_head_t       wait[2];
  79        unsigned int            flags;
  80};
  81
  82/* request flags */
  84typedef __u32 __bitwise req_flags_t;
  85
  86/* elevator knows about this request */
  87#define RQF_SORTED              ((__force req_flags_t)(1 << 0))
  88/* drive already may have started this one */
  89#define RQF_STARTED             ((__force req_flags_t)(1 << 1))
  90/* uses tagged queueing */
  91#define RQF_QUEUED              ((__force req_flags_t)(1 << 2))
  92/* may not be passed by ioscheduler */
  93#define RQF_SOFTBARRIER         ((__force req_flags_t)(1 << 3))
  94/* request for flush sequence */
  95#define RQF_FLUSH_SEQ           ((__force req_flags_t)(1 << 4))
  96/* merge of different types, fail separately */
  97#define RQF_MIXED_MERGE         ((__force req_flags_t)(1 << 5))
  98/* track inflight for MQ */
  99#define RQF_MQ_INFLIGHT         ((__force req_flags_t)(1 << 6))
 100/* don't call prep for this one */
 101#define RQF_DONTPREP            ((__force req_flags_t)(1 << 7))
 102/* set for "ide_preempt" requests and also for requests for which the SCSI
 103   "quiesce" state must be ignored. */
 104#define RQF_PREEMPT             ((__force req_flags_t)(1 << 8))
 105/* contains copies of user pages */
 106#define RQF_COPY_USER           ((__force req_flags_t)(1 << 9))
 107/* vaguely specified driver internal error.  Ignored by the block layer */
 108#define RQF_FAILED              ((__force req_flags_t)(1 << 10))
 109/* don't warn about errors */
 110#define RQF_QUIET               ((__force req_flags_t)(1 << 11))
 111/* elevator private data attached */
 112#define RQF_ELVPRIV             ((__force req_flags_t)(1 << 12))
 113/* account I/O stat */
 114#define RQF_IO_STAT             ((__force req_flags_t)(1 << 13))
 115/* request came from our alloc pool */
 116#define RQF_ALLOCED             ((__force req_flags_t)(1 << 14))
 117/* runtime pm request */
 118#define RQF_PM                  ((__force req_flags_t)(1 << 15))
 119/* on IO scheduler merge hash */
 120#define RQF_HASHED              ((__force req_flags_t)(1 << 16))
 121/* IO stats tracking on */
 122#define RQF_STATS               ((__force req_flags_t)(1 << 17))
 123/* Look at ->special_vec for the actual data payload instead of the
 124   bio chain. */
 125#define RQF_SPECIAL_PAYLOAD     ((__force req_flags_t)(1 << 18))
 126/* The per-zone write lock is held for this request */
 127#define RQF_ZONE_WRITE_LOCKED   ((__force req_flags_t)(1 << 19))
 128/* timeout is expired */
 129#define RQF_MQ_TIMEOUT_EXPIRED  ((__force req_flags_t)(1 << 20))
 130/* already slept for hybrid poll */
 131#define RQF_MQ_POLL_SLEPT       ((__force req_flags_t)(1 << 21))
 132
 133/* flags that prevent us from merging requests: */
 134#define RQF_NOMERGE_FLAGS \
 135        (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
 136
 137/*
 138 * Try to put the fields that are referenced together in the same cacheline.
 139 *
 140 * If you modify this structure, make sure to update blk_rq_init() and
 141 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 142 */
 143struct request {
 144        struct request_queue *q;
 145        struct blk_mq_ctx *mq_ctx;
 146
 147        int cpu;
 148        unsigned int cmd_flags;         /* op and common flags */
 149        req_flags_t rq_flags;
 150
 151        int internal_tag;
 152
 153        /* the following two fields are internal, NEVER access directly */
 154        unsigned int __data_len;        /* total data len */
 155        int tag;
 156        sector_t __sector;              /* sector cursor */
 157
 158        struct bio *bio;
 159        struct bio *biotail;
 160
 161        struct list_head queuelist;
 162
 163        /*
 164         * The hash is used inside the scheduler, and killed once the
 165         * request reaches the dispatch list. The ipi_list is only used
 166         * to queue the request for softirq completion, which is long
 167         * after the request has been unhashed (and even removed from
 168         * the dispatch list).
 169         */
 170        union {
 171                struct hlist_node hash; /* merge hash */
 172                struct list_head ipi_list;
 173        };
 174
 175        /*
 176         * The rb_node is only used inside the io scheduler, requests
 177         * are pruned when moved to the dispatch queue. So let the
 178         * completion_data share space with the rb_node.
 179         */
 180        union {
 181                struct rb_node rb_node; /* sort/lookup */
 182                struct bio_vec special_vec;
 183                void *completion_data;
 184                int error_count; /* for legacy drivers, don't use */
 185        };
 186
 187        /*
 188         * Three pointers are available for the IO schedulers, if they need
 189         * more they have to dynamically allocate it.  Flush requests are
 190         * never put on the IO scheduler. So let the flush fields share
 191         * space with the elevator data.
 192         */
 193        union {
 194                struct {
 195                        struct io_cq            *icq;
 196                        void                    *priv[2];
 197                } elv;
 198
 199                struct {
 200                        unsigned int            seq;
 201                        struct list_head        list;
 202                        rq_end_io_fn            *saved_end_io;
 203                } flush;
 204        };
 205
 206        struct gendisk *rq_disk;
 207        struct hd_struct *part;
 208        unsigned long start_time;
 209        struct blk_issue_stat issue_stat;
 210        /* Number of scatter-gather DMA addr+len pairs after
 211         * physical address coalescing is performed.
 212         */
 213        unsigned short nr_phys_segments;
 214
 215#if defined(CONFIG_BLK_DEV_INTEGRITY)
 216        unsigned short nr_integrity_segments;
 217#endif
 218
 219        unsigned short write_hint;
 220        unsigned short ioprio;
 221
 222        unsigned int timeout;
 223
 224        void *special;          /* opaque pointer available for LLD use */
 225
 226        unsigned int extra_len; /* length of alignment and padding */
 227
 228        /*
 229         * On blk-mq, the lower bits of ->gstate (generation number and
 230         * state) carry the MQ_RQ_* state value and the upper bits the
 231         * generation number which is monotonically incremented and used to
 232         * distinguish the reuse instances.
 233         *
 234         * ->gstate_seq allows updates to ->gstate and other fields
 235         * (currently ->deadline) during request start to be read
 236         * atomically from the timeout path, so that it can operate on a
 237         * coherent set of information.
 238         */
 239        seqcount_t gstate_seq;
 240        u64 gstate;
 241
 242        /*
 243         * ->aborted_gstate is used by the timeout to claim a specific
 244         * recycle instance of this request.  See blk_mq_timeout_work().
 245         */
 246        struct u64_stats_sync aborted_gstate_sync;
 247        u64 aborted_gstate;
 248
 249        /* access through blk_rq_set_deadline, blk_rq_deadline */
 250        unsigned long __deadline;
 251
 252        struct list_head timeout_list;
 253
 254        union {
 255                struct __call_single_data csd;
 256                u64 fifo_time;
 257        };
 258
 259        /*
 260         * completion callback.
 261         */
 262        rq_end_io_fn *end_io;
 263        void *end_io_data;
 264
 265        /* for bidi */
 266        struct request *next_rq;
 267
 268#ifdef CONFIG_BLK_CGROUP
 269        struct request_list *rl;                /* rl this rq is alloced from */
 270        unsigned long long start_time_ns;
 271        unsigned long long io_start_time_ns;    /* when passed to hardware */
 272#endif
 273};
 274
 275static inline bool blk_op_is_scsi(unsigned int op)
 276{
 277        return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
 278}
 279
 280static inline bool blk_op_is_private(unsigned int op)
 281{
 282        return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
 283}
 284
 285static inline bool blk_rq_is_scsi(struct request *rq)
 286{
 287        return blk_op_is_scsi(req_op(rq));
 288}
 289
 290static inline bool blk_rq_is_private(struct request *rq)
 291{
 292        return blk_op_is_private(req_op(rq));
 293}
 294
 295static inline bool blk_rq_is_passthrough(struct request *rq)
 296{
 297        return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
 298}
 299
 300static inline bool bio_is_passthrough(struct bio *bio)
 301{
 302        unsigned op = bio_op(bio);
 303
 304        return blk_op_is_scsi(op) || blk_op_is_private(op);
 305}
 306
 307static inline unsigned short req_get_ioprio(struct request *req)
 308{
 309        return req->ioprio;
 310}
 311
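/*
 * Illustrative sketch, not part of the upstream header: a driver-side
 * prep step that skips filesystem-style handling for passthrough
 * commands, using the helpers declared above.  The function name
 * "example_prep" is hypothetical.
 */
static inline int example_prep(struct request *rq)
{
        if (blk_rq_is_passthrough(rq))
                return 0;       /* raw SCSI/driver command: nothing to prepare */

        /* filesystem request: the bio chain and ioprio are meaningful */
        pr_debug("prio %d\n", (int)req_get_ioprio(rq));
        return 0;
}
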
 312#include <linux/elevator.h>
 313
 314struct blk_queue_ctx;
 315
 316typedef void (request_fn_proc) (struct request_queue *q);
 317typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
 318typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
 319typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 320typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 321
 322struct bio_vec;
 323typedef void (softirq_done_fn)(struct request *);
 324typedef int (dma_drain_needed_fn)(struct request *);
 325typedef int (lld_busy_fn) (struct request_queue *q);
 326typedef int (bsg_job_fn) (struct bsg_job *);
 327typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
 328typedef void (exit_rq_fn)(struct request_queue *, struct request *);
 329
 330enum blk_eh_timer_return {
 331        BLK_EH_NOT_HANDLED,
 332        BLK_EH_HANDLED,
 333        BLK_EH_RESET_TIMER,
 334};
 335
 336typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
 337
 338enum blk_queue_state {
 339        Queue_down,
 340        Queue_up,
 341};
 342
 343struct blk_queue_tag {
 344        struct request **tag_index;     /* map of busy tags */
 345        unsigned long *tag_map;         /* bit map of free/busy tags */
 346        int max_depth;                  /* what we will send to device */
 347        int real_max_depth;             /* what the array can hold */
 348        atomic_t refcnt;                /* map can be shared */
 349        int alloc_policy;               /* tag allocation policy */
 350        int next_tag;                   /* next tag */
 351};
 352#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
 353#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
 354
 355#define BLK_SCSI_MAX_CMDS       (256)
 356#define BLK_SCSI_CMD_PER_LONG   (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
 357
 358/*
 359 * Zoned block device models (zoned limit).
 360 */
 361enum blk_zoned_model {
 362        BLK_ZONED_NONE, /* Regular block device */
 363        BLK_ZONED_HA,   /* Host-aware zoned block device */
 364        BLK_ZONED_HM,   /* Host-managed zoned block device */
 365};
 366
 367struct queue_limits {
 368        unsigned long           bounce_pfn;
 369        unsigned long           seg_boundary_mask;
 370        unsigned long           virt_boundary_mask;
 371
 372        unsigned int            max_hw_sectors;
 373        unsigned int            max_dev_sectors;
 374        unsigned int            chunk_sectors;
 375        unsigned int            max_sectors;
 376        unsigned int            max_segment_size;
 377        unsigned int            physical_block_size;
 378        unsigned int            alignment_offset;
 379        unsigned int            io_min;
 380        unsigned int            io_opt;
 381        unsigned int            max_discard_sectors;
 382        unsigned int            max_hw_discard_sectors;
 383        unsigned int            max_write_same_sectors;
 384        unsigned int            max_write_zeroes_sectors;
 385        unsigned int            discard_granularity;
 386        unsigned int            discard_alignment;
 387
 388        unsigned short          logical_block_size;
 389        unsigned short          max_segments;
 390        unsigned short          max_integrity_segments;
 391        unsigned short          max_discard_segments;
 392
 393        unsigned char           misaligned;
 394        unsigned char           discard_misaligned;
 395        unsigned char           cluster;
 396        unsigned char           raid_partial_stripes_expensive;
 397        enum blk_zoned_model    zoned;
 398};
 399
 400#ifdef CONFIG_BLK_DEV_ZONED
 401
 402struct blk_zone_report_hdr {
 403        unsigned int    nr_zones;
 404        u8              padding[60];
 405};
 406
 407extern int blkdev_report_zones(struct block_device *bdev,
 408                               sector_t sector, struct blk_zone *zones,
 409                               unsigned int *nr_zones, gfp_t gfp_mask);
 410extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
 411                              sector_t nr_sectors, gfp_t gfp_mask);
 412
 413extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
 414                                     unsigned int cmd, unsigned long arg);
 415extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
 416                                    unsigned int cmd, unsigned long arg);
 417
 418#else /* CONFIG_BLK_DEV_ZONED */
 419
 420static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
 421                                            fmode_t mode, unsigned int cmd,
 422                                            unsigned long arg)
 423{
 424        return -ENOTTY;
 425}
 426
 427static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
 428                                           fmode_t mode, unsigned int cmd,
 429                                           unsigned long arg)
 430{
 431        return -ENOTTY;
 432}
 433
 434#endif /* CONFIG_BLK_DEV_ZONED */
 435
 436struct request_queue {
 437        /*
 438         * Together with queue_head for cacheline sharing
 439         */
 440        struct list_head        queue_head;
 441        struct request          *last_merge;
 442        struct elevator_queue   *elevator;
 443        int                     nr_rqs[2];      /* # allocated [a]sync rqs */
 444        int                     nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
 445
 446        atomic_t                shared_hctx_restart;
 447
 448        struct blk_queue_stats  *stats;
 449        struct rq_wb            *rq_wb;
 450
 451        /*
 452         * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
 453         * is used, root blkg allocates from @q->root_rl and all other
 454         * blkgs from their own blkg->rl.  Which one to use should be
 455         * determined using bio_request_list().
 456         */
 457        struct request_list     root_rl;
 458
 459        request_fn_proc         *request_fn;
 460        make_request_fn         *make_request_fn;
 461        poll_q_fn               *poll_fn;
 462        prep_rq_fn              *prep_rq_fn;
 463        unprep_rq_fn            *unprep_rq_fn;
 464        softirq_done_fn         *softirq_done_fn;
 465        rq_timed_out_fn         *rq_timed_out_fn;
 466        dma_drain_needed_fn     *dma_drain_needed;
 467        lld_busy_fn             *lld_busy_fn;
 468        /* Called just after a request is allocated */
 469        init_rq_fn              *init_rq_fn;
 470        /* Called just before a request is freed */
 471        exit_rq_fn              *exit_rq_fn;
 472        /* Called from inside blk_get_request() */
 473        void (*initialize_rq_fn)(struct request *rq);
 474
 475        const struct blk_mq_ops *mq_ops;
 476
 477        unsigned int            *mq_map;
 478
 479        /* sw queues */
 480        struct blk_mq_ctx __percpu      *queue_ctx;
 481        unsigned int            nr_queues;
 482
 483        unsigned int            queue_depth;
 484
 485        /* hw dispatch queues */
 486        struct blk_mq_hw_ctx    **queue_hw_ctx;
 487        unsigned int            nr_hw_queues;
 488
 489        /*
 490         * Dispatch queue sorting
 491         */
 492        sector_t                end_sector;
 493        struct request          *boundary_rq;
 494
 495        /*
 496         * Delayed queue handling
 497         */
 498        struct delayed_work     delay_work;
 499
 500        struct backing_dev_info *backing_dev_info;
 501
 502        /*
 503         * The queue owner gets to use this for whatever they like.
 504         * ll_rw_blk doesn't touch it.
 505         */
 506        void                    *queuedata;
 507
 508        /*
 509         * various queue flags, see QUEUE_* below
 510         */
 511        unsigned long           queue_flags;
 512
 513        /*
 514         * ida allocated id for this queue.  Used to index queues from
 515         * ioctx.
 516         */
 517        int                     id;
 518
 519        /*
 520         * queue needs bounce pages for pages above this limit
 521         */
 522        gfp_t                   bounce_gfp;
 523
 524        /*
 525         * protects queue structures from reentrancy. ->__queue_lock should
  526         * _never_ be used directly; it is queue private. Always use
  527         * ->queue_lock.
 528         */
 529        spinlock_t              __queue_lock;
 530        spinlock_t              *queue_lock;
 531
 532        /*
 533         * queue kobject
 534         */
 535        struct kobject kobj;
 536
 537        /*
 538         * mq queue kobject
 539         */
 540        struct kobject mq_kobj;
 541
 542#ifdef  CONFIG_BLK_DEV_INTEGRITY
 543        struct blk_integrity integrity;
 544#endif  /* CONFIG_BLK_DEV_INTEGRITY */
 545
 546#ifdef CONFIG_PM
 547        struct device           *dev;
 548        int                     rpm_status;
 549        unsigned int            nr_pending;
 550#endif
 551
 552        /*
 553         * queue settings
 554         */
 555        unsigned long           nr_requests;    /* Max # of requests */
 556        unsigned int            nr_congestion_on;
 557        unsigned int            nr_congestion_off;
 558        unsigned int            nr_batching;
 559
 560        unsigned int            dma_drain_size;
 561        void                    *dma_drain_buffer;
 562        unsigned int            dma_pad_mask;
 563        unsigned int            dma_alignment;
 564
 565        struct blk_queue_tag    *queue_tags;
 566        struct list_head        tag_busy_list;
 567
 568        unsigned int            nr_sorted;
 569        unsigned int            in_flight[2];
 570
 571        /*
 572         * Number of active block driver functions for which blk_drain_queue()
 573         * must wait. Must be incremented around functions that unlock the
 574         * queue_lock internally, e.g. scsi_request_fn().
 575         */
 576        unsigned int            request_fn_active;
 577
 578        unsigned int            rq_timeout;
 579        int                     poll_nsec;
 580
 581        struct blk_stat_callback        *poll_cb;
 582        struct blk_rq_stat      poll_stat[BLK_MQ_POLL_STATS_BKTS];
 583
 584        struct timer_list       timeout;
 585        struct work_struct      timeout_work;
 586        struct list_head        timeout_list;
 587
 588        struct list_head        icq_list;
 589#ifdef CONFIG_BLK_CGROUP
 590        DECLARE_BITMAP          (blkcg_pols, BLKCG_MAX_POLS);
 591        struct blkcg_gq         *root_blkg;
 592        struct list_head        blkg_list;
 593#endif
 594
 595        struct queue_limits     limits;
 596
 597        /*
 598         * Zoned block device information for request dispatch control.
 599         * nr_zones is the total number of zones of the device. This is always
 600         * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
 601         * bits which indicates if a zone is conventional (bit clear) or
 602         * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
 603         * bits which indicates if a zone is write locked, that is, if a write
 604         * request targeting the zone was dispatched. All three fields are
 605         * initialized by the low level device driver (e.g. scsi/sd.c).
 606         * Stacking drivers (device mappers) may or may not initialize
 607         * these fields.
 608         *
 609         * Reads of this information must be protected with blk_queue_enter() /
 610         * blk_queue_exit(). Modifying this information is only allowed while
 611         * no requests are being processed. See also blk_mq_freeze_queue() and
 612         * blk_mq_unfreeze_queue().
 613         */
 614        unsigned int            nr_zones;
 615        unsigned long           *seq_zones_bitmap;
 616        unsigned long           *seq_zones_wlock;
 617
 618        /*
 619         * sg stuff
 620         */
 621        unsigned int            sg_timeout;
 622        unsigned int            sg_reserved_size;
 623        int                     node;
 624#ifdef CONFIG_BLK_DEV_IO_TRACE
 625        struct blk_trace        *blk_trace;
 626        struct mutex            blk_trace_mutex;
 627#endif
 628        /*
 629         * for flush operations
 630         */
 631        struct blk_flush_queue  *fq;
 632
 633        struct list_head        requeue_list;
 634        spinlock_t              requeue_lock;
 635        struct delayed_work     requeue_work;
 636
 637        struct mutex            sysfs_lock;
 638
 639        int                     bypass_depth;
 640        atomic_t                mq_freeze_depth;
 641
 642#if defined(CONFIG_BLK_DEV_BSG)
 643        bsg_job_fn              *bsg_job_fn;
 644        struct bsg_class_device bsg_dev;
 645#endif
 646
 647#ifdef CONFIG_BLK_DEV_THROTTLING
 648        /* Throttle data */
 649        struct throtl_data *td;
 650#endif
 651        struct rcu_head         rcu_head;
 652        wait_queue_head_t       mq_freeze_wq;
 653        struct percpu_ref       q_usage_counter;
 654        struct list_head        all_q_node;
 655
 656        struct blk_mq_tag_set   *tag_set;
 657        struct list_head        tag_set_list;
 658        struct bio_set          *bio_split;
 659
 660#ifdef CONFIG_BLK_DEBUG_FS
 661        struct dentry           *debugfs_dir;
 662        struct dentry           *sched_debugfs_dir;
 663#endif
 664
 665        bool                    mq_sysfs_init_done;
 666
 667        size_t                  cmd_size;
 668        void                    *rq_alloc_data;
 669
 670        struct work_struct      release_work;
 671
 672#define BLK_MAX_WRITE_HINTS     5
 673        u64                     write_hints[BLK_MAX_WRITE_HINTS];
 674};
 675
 676#define QUEUE_FLAG_QUEUED       0       /* uses generic tag queueing */
 677#define QUEUE_FLAG_STOPPED      1       /* queue is stopped */
 678#define QUEUE_FLAG_DYING        2       /* queue being torn down */
 679#define QUEUE_FLAG_BYPASS       3       /* act as dumb FIFO queue */
 680#define QUEUE_FLAG_BIDI         4       /* queue supports bidi requests */
 681#define QUEUE_FLAG_NOMERGES     5       /* disable merge attempts */
 682#define QUEUE_FLAG_SAME_COMP    6       /* complete on same CPU-group */
 683#define QUEUE_FLAG_FAIL_IO      7       /* fake timeout */
 684#define QUEUE_FLAG_NONROT       9       /* non-rotational device (SSD) */
 685#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 686#define QUEUE_FLAG_IO_STAT     10       /* do IO stats */
 687#define QUEUE_FLAG_DISCARD     11       /* supports DISCARD */
 688#define QUEUE_FLAG_NOXMERGES   12       /* No extended merges */
 689#define QUEUE_FLAG_ADD_RANDOM  13       /* Contributes to random pool */
 690#define QUEUE_FLAG_SECERASE    14       /* supports secure erase */
 691#define QUEUE_FLAG_SAME_FORCE  15       /* force complete on same CPU */
 692#define QUEUE_FLAG_DEAD        16       /* queue tear-down finished */
 693#define QUEUE_FLAG_INIT_DONE   17       /* queue is initialized */
 694#define QUEUE_FLAG_NO_SG_MERGE 18       /* don't attempt to merge SG segments */
 695#define QUEUE_FLAG_POLL        19       /* IO polling enabled if set */
 696#define QUEUE_FLAG_WC          20       /* Write back caching */
 697#define QUEUE_FLAG_FUA         21       /* device supports FUA writes */
 698#define QUEUE_FLAG_FLUSH_NQ    22       /* flush not queueable */
 699#define QUEUE_FLAG_DAX         23       /* device supports DAX */
 700#define QUEUE_FLAG_STATS       24       /* track rq completion times */
 701#define QUEUE_FLAG_POLL_STATS  25       /* collecting stats for hybrid polling */
 702#define QUEUE_FLAG_REGISTERED  26       /* queue has been registered to a disk */
 703#define QUEUE_FLAG_SCSI_PASSTHROUGH 27  /* queue supports SCSI commands */
 704#define QUEUE_FLAG_QUIESCED    28       /* queue has been quiesced */
 705#define QUEUE_FLAG_PREEMPT_ONLY 29      /* only process REQ_PREEMPT requests */
 706
 707#define QUEUE_FLAG_DEFAULT      ((1 << QUEUE_FLAG_IO_STAT) |            \
 708                                 (1 << QUEUE_FLAG_SAME_COMP)    |       \
 709                                 (1 << QUEUE_FLAG_ADD_RANDOM))
 710
 711#define QUEUE_FLAG_MQ_DEFAULT   ((1 << QUEUE_FLAG_IO_STAT) |            \
 712                                 (1 << QUEUE_FLAG_SAME_COMP)    |       \
 713                                 (1 << QUEUE_FLAG_POLL))
 714
 715void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
 716void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
 717bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 718bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 719
 720#define blk_queue_tagged(q)     test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 721#define blk_queue_stopped(q)    test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 722#define blk_queue_dying(q)      test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 723#define blk_queue_dead(q)       test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 724#define blk_queue_bypass(q)     test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 725#define blk_queue_init_done(q)  test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
 726#define blk_queue_nomerges(q)   test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 727#define blk_queue_noxmerges(q)  \
 728        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 729#define blk_queue_nonrot(q)     test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 730#define blk_queue_io_stat(q)    test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 731#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 732#define blk_queue_discard(q)    test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 733#define blk_queue_secure_erase(q) \
 734        (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 735#define blk_queue_dax(q)        test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
 736#define blk_queue_scsi_passthrough(q)   \
 737        test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
 738
 739#define blk_noretry_request(rq) \
 740        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
 741                             REQ_FAILFAST_DRIVER))
 742#define blk_queue_quiesced(q)   test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 743#define blk_queue_preempt_only(q)                               \
 744        test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
 745#define blk_queue_fua(q)        test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 746
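/*
 * Illustrative sketch, not part of the upstream header: how a driver
 * might advertise device properties with the flag helpers declared
 * above and test them with the accessor macros.  "example_mark_ssd"
 * is a hypothetical name; the queue is assumed to come from the
 * driver's probe path.
 */
static inline void example_mark_ssd(struct request_queue *q)
{
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);       /* no seek penalty */
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); /* no entropy contribution */

        if (blk_queue_nonrot(q))
                pr_debug("queue marked non-rotational\n");
}
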
 747extern int blk_set_preempt_only(struct request_queue *q);
 748extern void blk_clear_preempt_only(struct request_queue *q);
 749
 750static inline int queue_in_flight(struct request_queue *q)
 751{
 752        return q->in_flight[0] + q->in_flight[1];
 753}
 754
 755static inline bool blk_account_rq(struct request *rq)
 756{
 757        return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
 758}
 759
 760#define blk_rq_cpu_valid(rq)    ((rq)->cpu != -1)
 761#define blk_bidi_rq(rq)         ((rq)->next_rq != NULL)
 762/* rq->queuelist of dequeued request must be list_empty() */
 763#define blk_queued_rq(rq)       (!list_empty(&(rq)->queuelist))
 764
 765#define list_entry_rq(ptr)      list_entry((ptr), struct request, queuelist)
 766
 767#define rq_data_dir(rq)         (op_is_write(req_op(rq)) ? WRITE : READ)
 768
 769/*
 770 * Driver can handle struct request, if it either has an old style
 771 * request_fn defined, or is blk-mq based.
 772 */
 773static inline bool queue_is_rq_based(struct request_queue *q)
 774{
 775        return q->request_fn || q->mq_ops;
 776}
 777
 778static inline unsigned int blk_queue_cluster(struct request_queue *q)
 779{
 780        return q->limits.cluster;
 781}
 782
 783static inline enum blk_zoned_model
 784blk_queue_zoned_model(struct request_queue *q)
 785{
 786        return q->limits.zoned;
 787}
 788
 789static inline bool blk_queue_is_zoned(struct request_queue *q)
 790{
 791        switch (blk_queue_zoned_model(q)) {
 792        case BLK_ZONED_HA:
 793        case BLK_ZONED_HM:
 794                return true;
 795        default:
 796                return false;
 797        }
 798}
 799
 800static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
 801{
 802        return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
 803}
 804
 805static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
 806{
 807        return q->nr_zones;
 808}
 809
 810static inline unsigned int blk_queue_zone_no(struct request_queue *q,
 811                                             sector_t sector)
 812{
 813        if (!blk_queue_is_zoned(q))
 814                return 0;
 815        return sector >> ilog2(q->limits.chunk_sectors);
 816}
 817
 818static inline bool blk_queue_zone_is_seq(struct request_queue *q,
 819                                         sector_t sector)
 820{
 821        if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
 822                return false;
 823        return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
 824}
 825
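/*
 * Illustrative sketch, not part of the upstream header: walking a zoned
 * device with the zone helpers above.  It assumes the low level driver
 * has populated nr_zones, seq_zones_bitmap and chunk_sectors as described
 * in the request_queue comment; "example_dump_zones" is a hypothetical name.
 */
static inline void example_dump_zones(struct request_queue *q)
{
        unsigned int zone_sectors = blk_queue_zone_sectors(q);
        sector_t sector = 0;
        unsigned int i;

        for (i = 0; i < blk_queue_nr_zones(q); i++) {
                pr_debug("zone %u (sector %llu): %s\n",
                         blk_queue_zone_no(q, sector),
                         (unsigned long long)sector,
                         blk_queue_zone_is_seq(q, sector) ?
                                "sequential" : "conventional");
                sector += zone_sectors;
        }
}
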
 826static inline bool rq_is_sync(struct request *rq)
 827{
 828        return op_is_sync(rq->cmd_flags);
 829}
 830
 831static inline bool blk_rl_full(struct request_list *rl, bool sync)
 832{
 833        unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
 834
 835        return rl->flags & flag;
 836}
 837
 838static inline void blk_set_rl_full(struct request_list *rl, bool sync)
 839{
 840        unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
 841
 842        rl->flags |= flag;
 843}
 844
 845static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
 846{
 847        unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
 848
 849        rl->flags &= ~flag;
 850}
 851
 852static inline bool rq_mergeable(struct request *rq)
 853{
 854        if (blk_rq_is_passthrough(rq))
 855                return false;
 856
 857        if (req_op(rq) == REQ_OP_FLUSH)
 858                return false;
 859
 860        if (req_op(rq) == REQ_OP_WRITE_ZEROES)
 861                return false;
 862
 863        if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
 864                return false;
 865        if (rq->rq_flags & RQF_NOMERGE_FLAGS)
 866                return false;
 867
 868        return true;
 869}
 870
 871static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
 872{
 873        if (bio_page(a) == bio_page(b) &&
 874            bio_offset(a) == bio_offset(b))
 875                return true;
 876
 877        return false;
 878}
 879
 880static inline unsigned int blk_queue_depth(struct request_queue *q)
 881{
 882        if (q->queue_depth)
 883                return q->queue_depth;
 884
 885        return q->nr_requests;
 886}
 887
 888/*
 889 * q->prep_rq_fn return values
 890 */
 891enum {
 892        BLKPREP_OK,             /* serve it */
 893        BLKPREP_KILL,           /* fatal error, kill, return -EIO */
 894        BLKPREP_DEFER,          /* leave on queue */
 895        BLKPREP_INVALID,        /* invalid command, kill, return -EREMOTEIO */
 896};
 897
 898extern unsigned long blk_max_low_pfn, blk_max_pfn;
 899
 900/*
 901 * standard bounce addresses:
 902 *
 903 * BLK_BOUNCE_HIGH      : bounce all highmem pages
 904 * BLK_BOUNCE_ANY       : don't bounce anything
 905 * BLK_BOUNCE_ISA       : bounce pages above ISA DMA boundary
 906 */
 907
 908#if BITS_PER_LONG == 32
 909#define BLK_BOUNCE_HIGH         ((u64)blk_max_low_pfn << PAGE_SHIFT)
 910#else
 911#define BLK_BOUNCE_HIGH         -1ULL
 912#endif
 913#define BLK_BOUNCE_ANY          (-1ULL)
 914#define BLK_BOUNCE_ISA          (DMA_BIT_MASK(24))
 915
 916/*
 917 * default timeout for SG_IO if none specified
 918 */
 919#define BLK_DEFAULT_SG_TIMEOUT  (60 * HZ)
 920#define BLK_MIN_SG_TIMEOUT      (7 * HZ)
 921
 922struct rq_map_data {
 923        struct page **pages;
 924        int page_order;
 925        int nr_entries;
 926        unsigned long offset;
 927        int null_mapped;
 928        int from_user;
 929};
 930
 931struct req_iterator {
 932        struct bvec_iter iter;
 933        struct bio *bio;
 934};
 935
 936/* This should not be used directly - use rq_for_each_segment */
 937#define for_each_bio(_bio)              \
 938        for (; _bio; _bio = _bio->bi_next)
 939#define __rq_for_each_bio(_bio, rq)     \
 940        if ((rq->bio))                  \
 941                for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
 942
 943#define rq_for_each_segment(bvl, _rq, _iter)                    \
 944        __rq_for_each_bio(_iter.bio, _rq)                       \
 945                bio_for_each_segment(bvl, _iter.bio, _iter.iter)
 946
 947#define rq_iter_last(bvec, _iter)                               \
 948                (_iter.bio->bi_next == NULL &&                  \
 949                 bio_iter_last(bvec, _iter.iter))
 950
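/*
 * Illustrative sketch, not part of the upstream header: using
 * rq_for_each_segment() to walk every bio_vec of a request, e.g. to
 * total up the bytes a driver is about to map.  "example_rq_bytes"
 * is a hypothetical name.
 */
static inline unsigned int example_rq_bytes(struct request *rq)
{
        struct req_iterator iter;
        struct bio_vec bvec;
        unsigned int bytes = 0;

        rq_for_each_segment(bvec, rq, iter)
                bytes += bvec.bv_len;

        return bytes;
}
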
 951#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 952# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
 953#endif
 954#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 955extern void rq_flush_dcache_pages(struct request *rq);
 956#else
 957static inline void rq_flush_dcache_pages(struct request *rq)
 958{
 959}
 960#endif
 961
 962extern int blk_register_queue(struct gendisk *disk);
 963extern void blk_unregister_queue(struct gendisk *disk);
 964extern blk_qc_t generic_make_request(struct bio *bio);
 965extern blk_qc_t direct_make_request(struct bio *bio);
 966extern void blk_rq_init(struct request_queue *q, struct request *rq);
 967extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
 968extern void blk_put_request(struct request *);
 969extern void __blk_put_request(struct request_queue *, struct request *);
 970extern struct request *blk_get_request_flags(struct request_queue *,
 971                                             unsigned int op,
 972                                             blk_mq_req_flags_t flags);
 973extern struct request *blk_get_request(struct request_queue *, unsigned int op,
 974                                       gfp_t gfp_mask);
 975extern void blk_requeue_request(struct request_queue *, struct request *);
 976extern int blk_lld_busy(struct request_queue *q);
 977extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 978                             struct bio_set *bs, gfp_t gfp_mask,
 979                             int (*bio_ctr)(struct bio *, struct bio *, void *),
 980                             void *data);
 981extern void blk_rq_unprep_clone(struct request *rq);
 982extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
 983                                     struct request *rq);
 984extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
 985extern void blk_delay_queue(struct request_queue *, unsigned long);
 986extern void blk_queue_split(struct request_queue *, struct bio **);
 987extern void blk_recount_segments(struct request_queue *, struct bio *);
 988extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 989extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
 990                              unsigned int, void __user *);
 991extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 992                          unsigned int, void __user *);
 993extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 994                         struct scsi_ioctl_command __user *);
 995
 996extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
 997extern void blk_queue_exit(struct request_queue *q);
 998extern void blk_start_queue(struct request_queue *q);
 999extern void blk_start_queue_async(struct request_queue *q);
1000extern void blk_stop_queue(struct request_queue *q);
1001extern void blk_sync_queue(struct request_queue *q);
1002extern void __blk_stop_queue(struct request_queue *q);
1003extern void __blk_run_queue(struct request_queue *q);
1004extern void __blk_run_queue_uncond(struct request_queue *q);
1005extern void blk_run_queue(struct request_queue *);
1006extern void blk_run_queue_async(struct request_queue *q);
1007extern int blk_rq_map_user(struct request_queue *, struct request *,
1008                           struct rq_map_data *, void __user *, unsigned long,
1009                           gfp_t);
1010extern int blk_rq_unmap_user(struct bio *);
1011extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
1012extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
1013                               struct rq_map_data *, const struct iov_iter *,
1014                               gfp_t);
1015extern void blk_execute_rq(struct request_queue *, struct gendisk *,
1016                          struct request *, int);
1017extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
1018                                  struct request *, int, rq_end_io_fn *);
1019
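/*
 * Illustrative sketch, not part of the upstream header: the usual
 * allocate / execute / free cycle for a driver-private passthrough
 * request, built only from the helpers declared above.  Error handling
 * is minimal and "example_sync_drv_request" is a hypothetical name.
 */
static inline int example_sync_drv_request(struct request_queue *q,
                                           struct gendisk *disk)
{
        struct request *rq;

        rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* last argument: at_head == 0, queue at the tail */
        blk_execute_rq(q, disk, rq, 0);

        blk_put_request(rq);
        return 0;
}
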
1020int blk_status_to_errno(blk_status_t status);
1021blk_status_t errno_to_blk_status(int errno);
1022
1023bool blk_poll(struct request_queue *q, blk_qc_t cookie);
1024
1025static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
1026{
1027        return bdev->bd_disk->queue;    /* this is never NULL */
1028}
1029
1030/*
1031 * The basic unit of block I/O is a sector. It is used in a number of contexts
1032 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
1033 * bytes. Variables of type sector_t represent an offset or size that is a
1034 * multiple of 512 bytes. Hence these two constants.
1035 */
1036#ifndef SECTOR_SHIFT
1037#define SECTOR_SHIFT 9
1038#endif
1039#ifndef SECTOR_SIZE
1040#define SECTOR_SIZE (1 << SECTOR_SHIFT)
1041#endif
1042
1043/*
1044 * blk_rq_pos()                 : the current sector
1045 * blk_rq_bytes()               : bytes left in the entire request
1046 * blk_rq_cur_bytes()           : bytes left in the current segment
1047 * blk_rq_err_bytes()           : bytes left till the next error boundary
1048 * blk_rq_sectors()             : sectors left in the entire request
1049 * blk_rq_cur_sectors()         : sectors left in the current segment
1050 */
1051static inline sector_t blk_rq_pos(const struct request *rq)
1052{
1053        return rq->__sector;
1054}
1055
1056static inline unsigned int blk_rq_bytes(const struct request *rq)
1057{
1058        return rq->__data_len;
1059}
1060
1061static inline int blk_rq_cur_bytes(const struct request *rq)
1062{
1063        return rq->bio ? bio_cur_bytes(rq->bio) : 0;
1064}
1065
1066extern unsigned int blk_rq_err_bytes(const struct request *rq);
1067
1068static inline unsigned int blk_rq_sectors(const struct request *rq)
1069{
1070        return blk_rq_bytes(rq) >> SECTOR_SHIFT;
1071}
1072
1073static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1074{
1075        return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
1076}
1077
1078static inline unsigned int blk_rq_zone_no(struct request *rq)
1079{
1080        return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
1081}
1082
1083static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
1084{
1085        return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
1086}
1087
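/*
 * Illustrative sketch, not part of the upstream header: filling a
 * hardware command from the position/size accessors above.  The
 * "example_hw_cmd" structure and field names are hypothetical.
 */
struct example_hw_cmd {
        u64 lba;                /* in 512-byte sectors */
        u32 nr_sectors;
};

static inline void example_fill_hw_cmd(struct example_hw_cmd *cmd,
                                       struct request *rq)
{
        cmd->lba = blk_rq_pos(rq);
        cmd->nr_sectors = blk_rq_sectors(rq);   /* blk_rq_bytes() >> SECTOR_SHIFT */
}
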
1088/*
1089 * Some commands like WRITE SAME have a payload or data transfer size which
1090 * is different from the size of the request.  Any driver that supports such
1091 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
1092 * calculate the data transfer size.
1093 */
1094static inline unsigned int blk_rq_payload_bytes(struct request *rq)
1095{
1096        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1097                return rq->special_vec.bv_len;
1098        return blk_rq_bytes(rq);
1099}
1100
1101static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
1102                                                     int op)
1103{
1104        if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
1105                return min(q->limits.max_discard_sectors,
1106                           UINT_MAX >> SECTOR_SHIFT);
1107
1108        if (unlikely(op == REQ_OP_WRITE_SAME))
1109                return q->limits.max_write_same_sectors;
1110
1111        if (unlikely(op == REQ_OP_WRITE_ZEROES))
1112                return q->limits.max_write_zeroes_sectors;
1113
1114        return q->limits.max_sectors;
1115}
1116
1117/*
1118 * Return the maximum size of a request at a given offset. Only valid for
1119 * file system requests.
1120 */
1121static inline unsigned int blk_max_size_offset(struct request_queue *q,
1122                                               sector_t offset)
1123{
1124        if (!q->limits.chunk_sectors)
1125                return q->limits.max_sectors;
1126
1127        return q->limits.chunk_sectors -
1128                        (offset & (q->limits.chunk_sectors - 1));
1129}
1130
1131static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
1132                                                  sector_t offset)
1133{
1134        struct request_queue *q = rq->q;
1135
1136        if (blk_rq_is_passthrough(rq))
1137                return q->limits.max_hw_sectors;
1138
1139        if (!q->limits.chunk_sectors ||
1140            req_op(rq) == REQ_OP_DISCARD ||
1141            req_op(rq) == REQ_OP_SECURE_ERASE)
1142                return blk_queue_get_max_sectors(q, req_op(rq));
1143
1144        return min(blk_max_size_offset(q, offset),
1145                        blk_queue_get_max_sectors(q, req_op(rq)));
1146}
1147
1148static inline unsigned int blk_rq_count_bios(struct request *rq)
1149{
1150        unsigned int nr_bios = 0;
1151        struct bio *bio;
1152
1153        __rq_for_each_bio(bio, rq)
1154                nr_bios++;
1155
1156        return nr_bios;
1157}
1158
1159/*
1160 * Request issue related functions.
1161 */
1162extern struct request *blk_peek_request(struct request_queue *q);
1163extern void blk_start_request(struct request *rq);
1164extern struct request *blk_fetch_request(struct request_queue *q);
1165
1166void blk_steal_bios(struct bio_list *list, struct request *rq);
1167
1168/*
1169 * Request completion related functions.
1170 *
1171 * blk_update_request() completes the given number of bytes and updates
1172 * the request without completing it.
1173 *
1174 * blk_end_request() and friends.  __blk_end_request() must be called
1175 * with the request queue spinlock acquired.
1176 *
1177 * Several drivers define their own end_request and call
1178 * blk_end_request() for parts of the original function.
1179 * This prevents code duplication in drivers.
1180 */
1181extern bool blk_update_request(struct request *rq, blk_status_t error,
1182                               unsigned int nr_bytes);
1183extern void blk_finish_request(struct request *rq, blk_status_t error);
1184extern bool blk_end_request(struct request *rq, blk_status_t error,
1185                            unsigned int nr_bytes);
1186extern void blk_end_request_all(struct request *rq, blk_status_t error);
1187extern bool __blk_end_request(struct request *rq, blk_status_t error,
1188                              unsigned int nr_bytes);
1189extern void __blk_end_request_all(struct request *rq, blk_status_t error);
1190extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
1191
1192extern void blk_complete_request(struct request *);
1193extern void __blk_complete_request(struct request *);
1194extern void blk_abort_request(struct request *);
1195extern void blk_unprep_request(struct request *);
1196
1197/*
1198 * Access functions for manipulating queue properties
1199 */
1200extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
1201                                        spinlock_t *lock, int node_id);
1202extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
1203extern int blk_init_allocated_queue(struct request_queue *);
1204extern void blk_cleanup_queue(struct request_queue *);
1205extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
1206extern void blk_queue_bounce_limit(struct request_queue *, u64);
1207extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1208extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
1209extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1210extern void blk_queue_max_discard_segments(struct request_queue *,
1211                unsigned short);
1212extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
1213extern void blk_queue_max_discard_sectors(struct request_queue *q,
1214                unsigned int max_discard_sectors);
1215extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1216                unsigned int max_write_same_sectors);
1217extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
1218                unsigned int max_write_zeroes_sectors);
1219extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
1220extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
1221extern void blk_queue_alignment_offset(struct request_queue *q,
1222                                       unsigned int alignment);
1223extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
1224extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1225extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
1226extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1227extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
1228extern void blk_set_default_limits(struct queue_limits *lim);
1229extern void blk_set_stacking_limits(struct queue_limits *lim);
1230extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1231                            sector_t offset);
1232extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
1233                            sector_t offset);
1234extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1235                              sector_t offset);
1236extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1237extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
1238extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1239extern int blk_queue_dma_drain(struct request_queue *q,
1240                               dma_drain_needed_fn *dma_drain_needed,
1241                               void *buf, unsigned int size);
1242extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
1243extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1244extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
1245extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
1246extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
1247extern void blk_queue_dma_alignment(struct request_queue *, int);
1248extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1249extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
1250extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1251extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1252extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
1253extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
1254
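/*
 * Illustrative sketch, not part of the upstream header: a driver probe
 * path configuring its queue limits with the setters declared above.
 * The numbers are arbitrary and "example_init_limits" is a hypothetical
 * name.
 */
static inline void example_init_limits(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);
        blk_queue_physical_block_size(q, 4096);
        blk_queue_max_hw_sectors(q, 2048);      /* 1 MiB per request */
        blk_queue_max_segments(q, 128);
        blk_queue_io_min(q, 4096);
        blk_queue_write_cache(q, true, true);   /* volatile cache + FUA */
}
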
1255/*
1256 * Number of physical segments as sent to the device.
1257 *
1258 * Normally this is the number of discontiguous data segments sent by the
1259 * submitter.  But for data-less commands like discard we might have no
1260 * actual data segments submitted, but the driver might have to add its
1261 * own special payload.  In that case we still return 1 here so that this
1262 * special payload will be mapped.
1263 */
1264static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1265{
1266        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1267                return 1;
1268        return rq->nr_phys_segments;
1269}
1270
1271/*
1272 * Number of discard segments (or ranges) the driver needs to fill in.
1273 * Each discard bio merged into a request is counted as one segment.
1274 */
1275static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1276{
1277        return max_t(unsigned short, rq->nr_phys_segments, 1);
1278}
1279
1280extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
1281extern void blk_dump_rq_flags(struct request *, char *);
1282extern long nr_blockdev_pages(void);
1283
1284bool __must_check blk_get_queue(struct request_queue *);
1285struct request_queue *blk_alloc_queue(gfp_t);
1286struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
1287                                           spinlock_t *lock);
1288extern void blk_put_queue(struct request_queue *);
1289extern void blk_set_queue_dying(struct request_queue *);
1290
1291/*
1292 * block layer runtime pm functions
1293 */
1294#ifdef CONFIG_PM
1295extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1296extern int blk_pre_runtime_suspend(struct request_queue *q);
1297extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1298extern void blk_pre_runtime_resume(struct request_queue *q);
1299extern void blk_post_runtime_resume(struct request_queue *q, int err);
1300extern void blk_set_runtime_active(struct request_queue *q);
1301#else
1302static inline void blk_pm_runtime_init(struct request_queue *q,
1303        struct device *dev) {}
1304static inline int blk_pre_runtime_suspend(struct request_queue *q)
1305{
1306        return -ENOSYS;
1307}
1308static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
1309static inline void blk_pre_runtime_resume(struct request_queue *q) {}
1310static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
1311static inline void blk_set_runtime_active(struct request_queue *q) {}
1312#endif
1313
1314/*
1315 * blk_plug permits building a queue of related requests by holding the I/O
1316 * fragments for a short period. This allows merging of sequential requests
1317 * into a single larger request. As the requests are moved from a per-task
1318 * list to the device's request_queue in a batch, this results in improved
1319 * scalability as contention on the request_queue lock is reduced.
1320 *
1321 * It is ok not to disable preemption when adding the request to the plug list
1322 * or when attempting a merge, because blk_schedule_flush_plug() will only flush
1323 * the plug list when the task sleeps by itself. For details, please see
1324 * schedule() where blk_schedule_flush_plug() is called.
1325 */
1326struct blk_plug {
1327        struct list_head list; /* requests */
1328        struct list_head mq_list; /* blk-mq requests */
1329        struct list_head cb_list; /* md requires an unplug callback */
1330};
1331#define BLK_MAX_REQUEST_COUNT 16
1332#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
1333
1334struct blk_plug_cb;
1335typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
1336struct blk_plug_cb {
1337        struct list_head list;
1338        blk_plug_cb_fn callback;
1339        void *data;
1340};
1341extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1342                                             void *data, int size);
1343extern void blk_start_plug(struct blk_plug *);
1344extern void blk_finish_plug(struct blk_plug *);
1345extern void blk_flush_plug_list(struct blk_plug *, bool);
1346
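/*
 * Illustrative sketch, not part of the upstream header: batching a burst
 * of bios under a plug so they can be merged before reaching the queue,
 * as described in the comment above struct blk_plug.  "example_submit_batch"
 * is a hypothetical name.
 */
static inline void example_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                generic_make_request(bios[i]);
        blk_finish_plug(&plug); /* flushes the plugged requests */
}
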
1347static inline void blk_flush_plug(struct task_struct *tsk)
1348{
1349        struct blk_plug *plug = tsk->plug;
1350
1351        if (plug)
1352                blk_flush_plug_list(plug, false);
1353}
1354
1355static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1356{
1357        struct blk_plug *plug = tsk->plug;
1358
1359        if (plug)
1360                blk_flush_plug_list(plug, true);
1361}
1362
1363static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1364{
1365        struct blk_plug *plug = tsk->plug;
1366
1367        return plug &&
1368                (!list_empty(&plug->list) ||
1369                 !list_empty(&plug->mq_list) ||
1370                 !list_empty(&plug->cb_list));
1371}
1372
1373/*
1374 * tag stuff
1375 */
1376extern int blk_queue_start_tag(struct request_queue *, struct request *);
1377extern struct request *blk_queue_find_tag(struct request_queue *, int);
1378extern void blk_queue_end_tag(struct request_queue *, struct request *);
1379extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
1380extern void blk_queue_free_tags(struct request_queue *);
1381extern int blk_queue_resize_tags(struct request_queue *, int);
1382extern void blk_queue_invalidate_tags(struct request_queue *);
1383extern struct blk_queue_tag *blk_init_tags(int, int);
1384extern void blk_free_tags(struct blk_queue_tag *);
1385
1386static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
1387                                                int tag)
1388{
1389        if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
1390                return NULL;
1391        return bqt->tag_index[tag];
1392}
1393
1394extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
1395extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1396                sector_t nr_sects, gfp_t gfp_mask, struct page *page);
1397
1398#define BLKDEV_DISCARD_SECURE   (1 << 0)        /* issue a secure erase */
1399
1400extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1401                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
1402extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1403                sector_t nr_sects, gfp_t gfp_mask, int flags,
1404                struct bio **biop);
1405
1406#define BLKDEV_ZERO_NOUNMAP     (1 << 0)  /* do not free blocks */
1407#define BLKDEV_ZERO_NOFALLBACK  (1 << 1)  /* don't write explicit zeroes */
1408
1409extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1410                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
1411                unsigned flags);
1412extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1413                sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
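/*
 * Editorial example, not part of the upstream header: discarding and then
 * zeroing a range expressed in 512-byte sectors.  bdev, start_sector and
 * nr_sectors are hypothetical locals.
 *
 *	int ret;
 *
 *	ret = blkdev_issue_discard(bdev, start_sector, nr_sectors,
 *				   GFP_KERNEL, 0);
 *	if (ret == -EOPNOTSUPP)
 *		;	// the device has no discard support
 *
 *	// blkdev_issue_zeroout() guarantees zeroes are read back afterwards,
 *	// falling back to explicitly written zeroes unless
 *	// BLKDEV_ZERO_NOFALLBACK is passed:
 *	ret = blkdev_issue_zeroout(bdev, start_sector, nr_sectors,
 *				   GFP_KERNEL, 0);
 */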
1414
1415static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1416                sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1417{
1418        return blkdev_issue_discard(sb->s_bdev,
1419                                    block << (sb->s_blocksize_bits -
1420                                              SECTOR_SHIFT),
1421                                    nr_blocks << (sb->s_blocksize_bits -
1422                                                  SECTOR_SHIFT),
1423                                    gfp_mask, flags);
1424}
1425static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1426                sector_t nr_blocks, gfp_t gfp_mask)
1427{
1428        return blkdev_issue_zeroout(sb->s_bdev,
1429                                    block << (sb->s_blocksize_bits -
1430                                              SECTOR_SHIFT),
1431                                    nr_blocks << (sb->s_blocksize_bits -
1432                                                  SECTOR_SHIFT),
1433                                    gfp_mask, 0);
1434}
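/*
 * Editorial note: with a 4KiB filesystem block size (s_blocksize_bits == 12)
 * and SECTOR_SHIFT == 9, the shift above is 3, so filesystem block N starts
 * at 512-byte sector N << 3 and nr_blocks is scaled the same way.
 */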
1435
1436extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
1437
1438enum blk_default_limits {
1439        BLK_MAX_SEGMENTS        = 128,
1440        BLK_SAFE_MAX_SECTORS    = 255,
1441        BLK_DEF_MAX_SECTORS     = 2560,
1442        BLK_MAX_SEGMENT_SIZE    = 65536,
1443        BLK_SEG_BOUNDARY_MASK   = 0xFFFFFFFFUL,
1444};
1445
1446#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
1447
1448static inline unsigned long queue_segment_boundary(struct request_queue *q)
1449{
1450        return q->limits.seg_boundary_mask;
1451}
1452
1453static inline unsigned long queue_virt_boundary(struct request_queue *q)
1454{
1455        return q->limits.virt_boundary_mask;
1456}
1457
1458static inline unsigned int queue_max_sectors(struct request_queue *q)
1459{
1460        return q->limits.max_sectors;
1461}
1462
1463static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1464{
1465        return q->limits.max_hw_sectors;
1466}
1467
1468static inline unsigned short queue_max_segments(struct request_queue *q)
1469{
1470        return q->limits.max_segments;
1471}
1472
1473static inline unsigned short queue_max_discard_segments(struct request_queue *q)
1474{
1475        return q->limits.max_discard_segments;
1476}
1477
1478static inline unsigned int queue_max_segment_size(struct request_queue *q)
1479{
1480        return q->limits.max_segment_size;
1481}
1482
1483static inline unsigned short queue_logical_block_size(struct request_queue *q)
1484{
1485        int retval = 512;
1486
1487        if (q && q->limits.logical_block_size)
1488                retval = q->limits.logical_block_size;
1489
1490        return retval;
1491}
1492
1493static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
1494{
1495        return queue_logical_block_size(bdev_get_queue(bdev));
1496}
1497
1498static inline unsigned int queue_physical_block_size(struct request_queue *q)
1499{
1500        return q->limits.physical_block_size;
1501}
1502
1503static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1504{
1505        return queue_physical_block_size(bdev_get_queue(bdev));
1506}
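/*
 * Editorial note: a 512-byte-emulation ("512e") drive typically reports a
 * logical block size of 512 and a physical block size of 4096; callers that
 * want to avoid read-modify-write penalties should size and align I/O to the
 * physical block size (or io_min, below) rather than the logical one.
 */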
1507
1508static inline unsigned int queue_io_min(struct request_queue *q)
1509{
1510        return q->limits.io_min;
1511}
1512
1513static inline int bdev_io_min(struct block_device *bdev)
1514{
1515        return queue_io_min(bdev_get_queue(bdev));
1516}
1517
1518static inline unsigned int queue_io_opt(struct request_queue *q)
1519{
1520        return q->limits.io_opt;
1521}
1522
1523static inline int bdev_io_opt(struct block_device *bdev)
1524{
1525        return queue_io_opt(bdev_get_queue(bdev));
1526}
1527
1528static inline int queue_alignment_offset(struct request_queue *q)
1529{
1530        if (q->limits.misaligned)
1531                return -1;
1532
1533        return q->limits.alignment_offset;
1534}
1535
1536static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
1537{
1538        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1539        unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
1540                << SECTOR_SHIFT;
1541
1542        return (granularity + lim->alignment_offset - alignment) % granularity;
1543}
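/*
 * Editorial worked example, assuming typical values: with
 * physical_block_size == io_min == 4096 the granularity is 4096 bytes
 * (8 sectors).  For sector == 63 and lim->alignment_offset == 0:
 *
 *	alignment = (63 % 8) << SECTOR_SHIFT = 3584
 *	return (4096 + 0 - 3584) % 4096 = 512
 *
 * i.e. the first naturally aligned boundary lies 512 bytes past that sector.
 */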
1544
1545static inline int bdev_alignment_offset(struct block_device *bdev)
1546{
1547        struct request_queue *q = bdev_get_queue(bdev);
1548
1549        if (q->limits.misaligned)
1550                return -1;
1551
1552        if (bdev != bdev->bd_contains)
1553                return bdev->bd_part->alignment_offset;
1554
1555        return q->limits.alignment_offset;
1556}
1557
1558static inline int queue_discard_alignment(struct request_queue *q)
1559{
1560        if (q->limits.discard_misaligned)
1561                return -1;
1562
1563        return q->limits.discard_alignment;
1564}
1565
1566static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
1567{
1568        unsigned int alignment, granularity, offset;
1569
1570        if (!lim->max_discard_sectors)
1571                return 0;
1572
1573        /* These limits are kept in bytes; convert them to sectors here. */
1574        alignment = lim->discard_alignment >> SECTOR_SHIFT;
1575        granularity = lim->discard_granularity >> SECTOR_SHIFT;
1576        if (!granularity)
1577                return 0;
1578
1579        /* Offset of the partition start within a granularity unit, in sectors */
1580        offset = sector_div(sector, granularity);
1581
1582        /* And why do we do this modulus *again* in blkdev_issue_discard()? */
1583        offset = (granularity + alignment - offset) % granularity;
1584
1585        /* Turn it back into bytes, gaah */
1586        return offset << SECTOR_SHIFT;
1587}
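/*
 * Editorial worked example, assuming typical values: with
 * discard_granularity == 4096 (8 sectors), discard_alignment == 0 and
 * sector == 20:
 *
 *	offset = 20 % 8 = 4
 *	offset = (8 + 0 - 4) % 8 = 4
 *	return 4 << SECTOR_SHIFT = 2048
 *
 * i.e. the next discard-granularity boundary lies 2048 bytes past sector 20.
 */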
1588
1589static inline int bdev_discard_alignment(struct block_device *bdev)
1590{
1591        struct request_queue *q = bdev_get_queue(bdev);
1592
1593        if (bdev != bdev->bd_contains)
1594                return bdev->bd_part->discard_alignment;
1595
1596        return q->limits.discard_alignment;
1597}
1598
1599static inline unsigned int bdev_write_same(struct block_device *bdev)
1600{
1601        struct request_queue *q = bdev_get_queue(bdev);
1602
1603        if (q)
1604                return q->limits.max_write_same_sectors;
1605
1606        return 0;
1607}
1608
1609static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1610{
1611        struct request_queue *q = bdev_get_queue(bdev);
1612
1613        if (q)
1614                return q->limits.max_write_zeroes_sectors;
1615
1616        return 0;
1617}
1618
1619static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
1620{
1621        struct request_queue *q = bdev_get_queue(bdev);
1622
1623        if (q)
1624                return blk_queue_zoned_model(q);
1625
1626        return BLK_ZONED_NONE;
1627}
1628
1629static inline bool bdev_is_zoned(struct block_device *bdev)
1630{
1631        struct request_queue *q = bdev_get_queue(bdev);
1632
1633        if (q)
1634                return blk_queue_is_zoned(q);
1635
1636        return false;
1637}
1638
1639static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
1640{
1641        struct request_queue *q = bdev_get_queue(bdev);
1642
1643        if (q)
1644                return blk_queue_zone_sectors(q);
1645        return 0;
1646}
1647
1648static inline unsigned int bdev_nr_zones(struct block_device *bdev)
1649{
1650        struct request_queue *q = bdev_get_queue(bdev);
1651
1652        if (q)
1653                return blk_queue_nr_zones(q);
1654        return 0;
1655}
1656
1657static inline int queue_dma_alignment(struct request_queue *q)
1658{
1659        return q ? q->dma_alignment : 511;
1660}
1661
1662static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
1663                                 unsigned int len)
1664{
1665        unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1666        return !(addr & alignment) && !(len & alignment);
1667}
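/*
 * Editorial note: with the default dma_alignment of 511 and a dma_pad_mask of
 * 0, blk_rq_aligned() accepts only buffers whose address and length are both
 * multiples of 512.  Drivers with stricter requirements raise the limit with
 * blk_queue_dma_alignment().
 */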
1668
1669/* assumes size > 256 */
1670static inline unsigned int blksize_bits(unsigned int size)
1671{
1672        unsigned int bits = 8;
1673        do {
1674                bits++;
1675                size >>= 1;
1676        } while (size > 256);
1677        return bits;
1678}
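/*
 * Editorial note: for valid block sizes blksize_bits() is simply the base-2
 * logarithm, e.g. blksize_bits(512) == 9 and blksize_bits(4096) == 12.
 */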
1679
1680static inline unsigned int block_size(struct block_device *bdev)
1681{
1682        return bdev->bd_block_size;
1683}
1684
1685static inline bool queue_flush_queueable(struct request_queue *q)
1686{
1687        return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
1688}
1689
1690typedef struct {struct page *v;} Sector;
1691
1692unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1693
1694static inline void put_dev_sector(Sector p)
1695{
1696        put_page(p.v);
1697}
1698
1699static inline bool __bvec_gap_to_prev(struct request_queue *q,
1700                                struct bio_vec *bprv, unsigned int offset)
1701{
1702        return offset ||
1703                ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1704}
1705
1706/*
1707 * Check if adding a bio_vec after bprv with offset would create a gap in
1708 * the SG list. Most drivers don't care about this, but some do.
1709 */
1710static inline bool bvec_gap_to_prev(struct request_queue *q,
1711                                struct bio_vec *bprv, unsigned int offset)
1712{
1713        if (!queue_virt_boundary(q))
1714                return false;
1715        return __bvec_gap_to_prev(q, bprv, offset);
1716}
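/*
 * Editorial note: on a queue with virt_boundary_mask == 4095 (an NVMe-style
 * PRP constraint, for example) a following bvec avoids a gap only if it
 * starts at offset 0 and the previous bvec ends on a 4096-byte boundary;
 * anything else is reported as a gap, which prevents the bios from being
 * merged into one request.
 */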
1717
1718/*
1719 * Check if the two bvecs from two bios can be merged to one segment.
1720 * If yes, no need to check gap between the two bios since the 1st bio
1721 * and the 1st bvec in the 2nd bio can be handled in one segment.
1722 */
1723static inline bool bios_segs_mergeable(struct request_queue *q,
1724                struct bio *prev, struct bio_vec *prev_last_bv,
1725                struct bio_vec *next_first_bv)
1726{
1727        if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
1728                return false;
1729        if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
1730                return false;
1731        if (prev->bi_seg_back_size + next_first_bv->bv_len >
1732                        queue_max_segment_size(q))
1733                return false;
1734        return true;
1735}
1736
1737static inline bool bio_will_gap(struct request_queue *q,
1738                                struct request *prev_rq,
1739                                struct bio *prev,
1740                                struct bio *next)
1741{
1742        if (bio_has_data(prev) && queue_virt_boundary(q)) {
1743                struct bio_vec pb, nb;
1744
1745                /*
1746                 * Don't merge if the first bio starts at a non-zero
1747                 * offset; otherwise it is quite difficult to respect the
1748                 * SG gap limit. We work hard to merge the large numbers
1749                 * of small bios generated in cases like mkfs.
1750                 */
1751                if (prev_rq)
1752                        bio_get_first_bvec(prev_rq->bio, &pb);
1753                else
1754                        bio_get_first_bvec(prev, &pb);
1755                if (pb.bv_offset)
1756                        return true;
1757
1758                /*
1759                 * We do not need to worry about the merged segment ending
1760                 * on an unaligned virt boundary:
1761                 *
1762                 * - if 'pb' ends aligned, the merged segment also ends aligned
1763                 * - if 'pb' ends unaligned, the next bio must consist of
1764                 *   the single bvec 'nb', otherwise 'nb' could not have
1765                 *   merged with 'pb'
1766                 */
1767                bio_get_last_bvec(prev, &pb);
1768                bio_get_first_bvec(next, &nb);
1769
1770                if (!bios_segs_mergeable(q, prev, &pb, &nb))
1771                        return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
1772        }
1773
1774        return false;
1775}
1776
1777static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
1778{
1779        return bio_will_gap(req->q, req, req->biotail, bio);
1780}
1781
1782static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1783{
1784        return bio_will_gap(req->q, NULL, bio, req->bio);
1785}
1786
1787int kblockd_schedule_work(struct work_struct *work);
1788int kblockd_schedule_work_on(int cpu, struct work_struct *work);
1789int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1790
1791#ifdef CONFIG_BLK_CGROUP
1792/*
1793 * This should not be using sched_clock(). A real patch to fix this up is
1794 * in progress; until it lands we need to disable preemption around
1795 * sched_clock() here and in set_io_start_time_ns().
1796 */
1797static inline void set_start_time_ns(struct request *req)
1798{
1799        preempt_disable();
1800        req->start_time_ns = sched_clock();
1801        preempt_enable();
1802}
1803
1804static inline void set_io_start_time_ns(struct request *req)
1805{
1806        preempt_disable();
1807        req->io_start_time_ns = sched_clock();
1808        preempt_enable();
1809}
1810
1811static inline uint64_t rq_start_time_ns(struct request *req)
1812{
1813        return req->start_time_ns;
1814}
1815
1816static inline uint64_t rq_io_start_time_ns(struct request *req)
1817{
1818        return req->io_start_time_ns;
1819}
1820#else
1821static inline void set_start_time_ns(struct request *req) {}
1822static inline void set_io_start_time_ns(struct request *req) {}
1823static inline uint64_t rq_start_time_ns(struct request *req)
1824{
1825        return 0;
1826}
1827static inline uint64_t rq_io_start_time_ns(struct request *req)
1828{
1829        return 0;
1830}
1831#endif
1832
1833#define MODULE_ALIAS_BLOCKDEV(major,minor) \
1834        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1835#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1836        MODULE_ALIAS("block-major-" __stringify(major) "-*")
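/*
 * Editorial note: MODULE_ALIAS_BLOCKDEV_MAJOR(8) expands to
 * MODULE_ALIAS("block-major-8-*"), which lets the module be loaded
 * automatically the first time a device node with that major is opened.
 */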
1837
1838#if defined(CONFIG_BLK_DEV_INTEGRITY)
1839
1840enum blk_integrity_flags {
1841        BLK_INTEGRITY_VERIFY            = 1 << 0,
1842        BLK_INTEGRITY_GENERATE          = 1 << 1,
1843        BLK_INTEGRITY_DEVICE_CAPABLE    = 1 << 2,
1844        BLK_INTEGRITY_IP_CHECKSUM       = 1 << 3,
1845};
1846
1847struct blk_integrity_iter {
1848        void                    *prot_buf;
1849        void                    *data_buf;
1850        sector_t                seed;
1851        unsigned int            data_size;
1852        unsigned short          interval;
1853        const char              *disk_name;
1854};
1855
1856typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
1857
1858struct blk_integrity_profile {
1859        integrity_processing_fn         *generate_fn;
1860        integrity_processing_fn         *verify_fn;
1861        const char                      *name;
1862};
1863
1864extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
1865extern void blk_integrity_unregister(struct gendisk *);
1866extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
1867extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1868                                   struct scatterlist *);
1869extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1870extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1871                                   struct request *);
1872extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1873                                    struct bio *);
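/*
 * Editorial example, not part of the upstream header: how a driver might
 * register an integrity profile for its gendisk.  The my_* names are
 * hypothetical; real users usually point .profile at one of the shared
 * T10-PI profiles instead of rolling their own.
 *
 *	static const struct blk_integrity_profile my_profile = {
 *		.name		= "EXAMPLE-CSUM",
 *		.generate_fn	= my_generate,
 *		.verify_fn	= my_verify,
 *	};
 *
 *	struct blk_integrity bi = {
 *		.profile	= &my_profile,
 *		.tuple_size	= 8,	// bytes of protection info per interval
 *		.interval_exp	= ilog2(queue_logical_block_size(disk->queue)),
 *		.flags		= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
 *	};
 *
 *	blk_integrity_register(disk, &bi);	// the template is copied
 */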
1874
1875static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1876{
1877        struct blk_integrity *bi = &disk->queue->integrity;
1878
1879        if (!bi->profile)
1880                return NULL;
1881
1882        return bi;
1883}
1884
1885static inline
1886struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1887{
1888        return blk_get_integrity(bdev->bd_disk);
1889}
1890
1891static inline bool blk_integrity_rq(struct request *rq)
1892{
1893        return rq->cmd_flags & REQ_INTEGRITY;
1894}
1895
1896static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1897                                                    unsigned int segs)
1898{
1899        q->limits.max_integrity_segments = segs;
1900}
1901
1902static inline unsigned short
1903queue_max_integrity_segments(struct request_queue *q)
1904{
1905        return q->limits.max_integrity_segments;
1906}
1907
1908static inline bool integrity_req_gap_back_merge(struct request *req,
1909                                                struct bio *next)
1910{
1911        struct bio_integrity_payload *bip = bio_integrity(req->bio);
1912        struct bio_integrity_payload *bip_next = bio_integrity(next);
1913
1914        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1915                                bip_next->bip_vec[0].bv_offset);
1916}
1917
1918static inline bool integrity_req_gap_front_merge(struct request *req,
1919                                                 struct bio *bio)
1920{
1921        struct bio_integrity_payload *bip = bio_integrity(bio);
1922        struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
1923
1924        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1925                                bip_next->bip_vec[0].bv_offset);
1926}
1927
1928#else /* CONFIG_BLK_DEV_INTEGRITY */
1929
1930struct bio;
1931struct block_device;
1932struct gendisk;
1933struct blk_integrity;
1934
1935static inline int blk_integrity_rq(struct request *rq)
1936{
1937        return 0;
1938}
1939static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1940                                            struct bio *b)
1941{
1942        return 0;
1943}
1944static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1945                                          struct bio *b,
1946                                          struct scatterlist *s)
1947{
1948        return 0;
1949}
1950static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1951{
1952        return NULL;
1953}
1954static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1955{
1956        return NULL;
1957}
1958static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1959{
1960        return 0;
1961}
1962static inline void blk_integrity_register(struct gendisk *d,
1963                                         struct blk_integrity *b)
1964{
1965}
1966static inline void blk_integrity_unregister(struct gendisk *d)
1967{
1968}
1969static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1970                                                    unsigned int segs)
1971{
1972}
1973static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
1974{
1975        return 0;
1976}
1977static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1978                                          struct request *r1,
1979                                          struct request *r2)
1980{
1981        return true;
1982}
1983static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1984                                           struct request *r,
1985                                           struct bio *b)
1986{
1987        return true;
1988}
1989
1990static inline bool integrity_req_gap_back_merge(struct request *req,
1991                                                struct bio *next)
1992{
1993        return false;
1994}
1995static inline bool integrity_req_gap_front_merge(struct request *req,
1996                                                 struct bio *bio)
1997{
1998        return false;
1999}
2000
2001#endif /* CONFIG_BLK_DEV_INTEGRITY */
2002
2003struct block_device_operations {
2004        int (*open) (struct block_device *, fmode_t);
2005        void (*release) (struct gendisk *, fmode_t);
2006        int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
2007        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
2008        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
2009        unsigned int (*check_events) (struct gendisk *disk,
2010                                      unsigned int clearing);
2011        /* ->media_changed() is DEPRECATED, use ->check_events() instead */
2012        int (*media_changed) (struct gendisk *);
2013        void (*unlock_native_capacity) (struct gendisk *);
2014        int (*revalidate_disk) (struct gendisk *);
2015        int (*getgeo)(struct block_device *, struct hd_geometry *);
2016        /* this callback is called with swap_lock held, and sometimes also the page table lock */
2017        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
2018        struct module *owner;
2019        const struct pr_ops *pr_ops;
2020};
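/*
 * Editorial example, not part of the upstream header: a minimal operations
 * table for a simple driver.  The mydrv_* callbacks are hypothetical; every
 * hook other than .owner is optional and may be left NULL.
 *
 *	static const struct block_device_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= mydrv_open,
 *		.release	= mydrv_release,
 *		.getgeo		= mydrv_getgeo,
 *	};
 *
 *	// later, before add_disk():
 *	disk->fops = &mydrv_fops;
 */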
2021
2022extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
2023                                 unsigned long);
2024extern int bdev_read_page(struct block_device *, sector_t, struct page *);
2025extern int bdev_write_page(struct block_device *, sector_t, struct page *,
2026                                                struct writeback_control *);
2027
2028#ifdef CONFIG_BLK_DEV_ZONED
2029bool blk_req_needs_zone_write_lock(struct request *rq);
2030void __blk_req_zone_write_lock(struct request *rq);
2031void __blk_req_zone_write_unlock(struct request *rq);
2032
2033static inline void blk_req_zone_write_lock(struct request *rq)
2034{
2035        if (blk_req_needs_zone_write_lock(rq))
2036                __blk_req_zone_write_lock(rq);
2037}
2038
2039static inline void blk_req_zone_write_unlock(struct request *rq)
2040{
2041        if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
2042                __blk_req_zone_write_unlock(rq);
2043}
2044
2045static inline bool blk_req_zone_is_write_locked(struct request *rq)
2046{
2047        return rq->q->seq_zones_wlock &&
2048                test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
2049}
2050
2051static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
2052{
2053        if (!blk_req_needs_zone_write_lock(rq))
2054                return true;
2055        return !blk_req_zone_is_write_locked(rq);
2056}
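/*
 * Editorial example, not part of the upstream header: how an I/O scheduler
 * dispatch path serialises writes to sequential zones, in the style of
 * mq-deadline.
 *
 *	if (!blk_req_can_dispatch_to_zone(rq))
 *		return NULL;		// another write owns the zone, pick
 *					// a different request for now
 *
 *	blk_req_zone_write_lock(rq);	// take the per-zone write lock
 *	// ... hand rq to the driver ...
 *
 *	// on completion or requeue of rq:
 *	blk_req_zone_write_unlock(rq);
 */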
2057#else
2058static inline bool blk_req_needs_zone_write_lock(struct request *rq)
2059{
2060        return false;
2061}
2062
2063static inline void blk_req_zone_write_lock(struct request *rq)
2064{
2065}
2066
2067static inline void blk_req_zone_write_unlock(struct request *rq)
2068{
2069}
2070static inline bool blk_req_zone_is_write_locked(struct request *rq)
2071{
2072        return false;
2073}
2074
2075static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
2076{
2077        return true;
2078}
2079#endif /* CONFIG_BLK_DEV_ZONED */
2080
2081#else /* CONFIG_BLOCK */
2082
2083struct block_device;
2084
2085/*
2086 * stubs for when the block layer is configured out
2087 */
2088#define buffer_heads_over_limit 0
2089
2090static inline long nr_blockdev_pages(void)
2091{
2092        return 0;
2093}
2094
2095struct blk_plug {
2096};
2097
2098static inline void blk_start_plug(struct blk_plug *plug)
2099{
2100}
2101
2102static inline void blk_finish_plug(struct blk_plug *plug)
2103{
2104}
2105
2106static inline void blk_flush_plug(struct task_struct *task)
2107{
2108}
2109
2110static inline void blk_schedule_flush_plug(struct task_struct *task)
2111{
2112}
2113
2114
2115static inline bool blk_needs_flush_plug(struct task_struct *tsk)
2116{
2117        return false;
2118}
2119
2120static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
2121                                     sector_t *error_sector)
2122{
2123        return 0;
2124}
2125
2126#endif /* CONFIG_BLOCK */
2127
2128#endif
2129