linux/include/linux/blk_types.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP         ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT         ((__force blk_status_t)2)
#define BLK_STS_NOSPC           ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT       ((__force blk_status_t)4)
#define BLK_STS_TARGET          ((__force blk_status_t)5)
#define BLK_STS_NEXUS           ((__force blk_status_t)6)
#define BLK_STS_MEDIUM          ((__force blk_status_t)7)
#define BLK_STS_PROTECTION      ((__force blk_status_t)8)
#define BLK_STS_RESOURCE        ((__force blk_status_t)9)
#define BLK_STS_IOERR           ((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE      ((__force blk_status_t)11)

#define BLK_STS_AGAIN           ((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that in-flight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE    ((__force blk_status_t)13)
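
/*
 * Illustrative sketch (not part of this header): a blk-mq driver's
 * ->queue_rq() handler might map allocation failures onto these two
 * codes.  my_dev and my_alloc_device_tag() are hypothetical names.
 * A failed per-device tag allocation is safe to report as
 * BLK_STS_DEV_RESOURCE, since completing in-flight IO frees a tag and
 * reruns the queue; a failed kmalloc() is a system-wide shortage and
 * must be BLK_STS_RESOURCE:
 *
 *	if (my_alloc_device_tag(my_dev) < 0)
 *		return BLK_STS_DEV_RESOURCE;
 *	buf = kmalloc(len, GFP_ATOMIC);
 *	if (!buf)
 *		return BLK_STS_RESOURCE;
 */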

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
        switch (error) {
        case BLK_STS_NOTSUPP:
        case BLK_STS_NOSPC:
        case BLK_STS_TARGET:
        case BLK_STS_NEXUS:
        case BLK_STS_MEDIUM:
        case BLK_STS_PROTECTION:
                return false;
        }

        /* Anything else could be a path failure, so should be retried */
        return true;
}
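
/*
 * Example use (a sketch, not taken from mainline): a stacking driver's
 * clone completion handler could decide between failing the parent bio
 * upwards and retrying it on another path.  my_retry_on_other_path()
 * is a hypothetical helper.
 *
 *	static void my_clone_end_io(struct bio *clone)
 *	{
 *		struct bio *parent = clone->bi_private;
 *
 *		if (clone->bi_status && blk_path_error(clone->bi_status)) {
 *			my_retry_on_other_path(parent);
 *		} else {
 *			parent->bi_status = clone->bi_status;
 *			bio_endio(parent);
 *		}
 *		bio_put(clone);
 *	}
 */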

struct blk_issue_stat {
        u64 stat;
};

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct gendisk          *bi_disk;
        unsigned int            bi_opf;         /* bottom bits req flags,
                                                 * top bits REQ_OP. Use
                                                 * accessors.
                                                 */
        unsigned short          bi_flags;       /* status, etc. and bvec pool number */
        unsigned short          bi_ioprio;
        unsigned short          bi_write_hint;
        blk_status_t            bi_status;
        u8                      bi_partno;

        /* Number of segments in this BIO after
         * physical address coalescing is performed.
         */
        unsigned int            bi_phys_segments;

        /*
         * To keep track of the max segment size, we account for the
         * sizes of the first and last mergeable segments in this bio.
         */
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;

        struct bvec_iter        bi_iter;

        atomic_t                __bi_remaining;
        bio_end_io_t            *bi_end_io;

        void                    *bi_private;
#ifdef CONFIG_BLK_CGROUP
        /*
         * Optional ioc and css associated with this bio.  Put on bio
         * release.  Read comment on top of bio_associate_current().
         */
        struct io_context       *bi_ioc;
        struct cgroup_subsys_state *bi_css;
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        void                    *bi_cg_private;
        struct blk_issue_stat   bi_issue_stat;
#endif
#endif
        union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
                struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
        };

        unsigned short          bi_vcnt;        /* how many bio_vec's */

        /*
         * Everything starting with bi_max_vecs will be preserved by bio_reset()
         */

        unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */

        atomic_t                __bi_cnt;       /* pin count */

        struct bio_vec          *bi_io_vec;     /* the actual vec list */

        struct bio_set          *bi_pool;

        /*
         * We can inline a number of vecs at the end of the bio, to avoid
         * double allocations for a small number of bio_vecs. This member
         * MUST obviously be kept at the very end of the bio.
         */
        struct bio_vec          bi_inline_vecs[0];
};
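
/*
 * A minimal sketch of consuming a bio's payload.  It assumes the
 * bio_for_each_segment() iterator from <linux/bio.h>, which advances a
 * bvec_iter over bi_io_vec, and that every page is a lowmem page so
 * page_address() is valid:
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter) {
 *		void *p = page_address(bvec.bv_page) + bvec.bv_offset;
 *		my_process_bytes(p, bvec.bv_len);
 *	}
 *
 * my_process_bytes() is a stand-in for whatever the driver does with
 * each segment.
 */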

#define BIO_RESET_BYTES         offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
#define BIO_SEG_VALID   1       /* bi_phys_segments valid */
#define BIO_CLONED      2       /* doesn't own data */
#define BIO_BOUNCED     3       /* bio is a bounce bio */
#define BIO_USER_MAPPED 4       /* contains user pages */
#define BIO_NULL_MAPPED 5       /* contains invalid user pages */
#define BIO_QUIET       6       /* Make BIO Quiet */
#define BIO_CHAIN       7       /* chained bio, ->bi_remaining in effect */
#define BIO_REFFED      8       /* bio has elevated ->bi_cnt */
#define BIO_THROTTLED   9       /* This bio has already been subjected to
                                 * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
                                 * of this bio. */
/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR            6
#define BVEC_POOL_MAX           (BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS          (3)
#define BVEC_POOL_OFFSET        (16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)      ((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS  BVEC_POOL_OFFSET
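
/*
 * The resulting bi_flags layout (16 bits), spelled out:
 *
 *   bits 15..13: BVEC_POOL_IDX(), i.e. pool index + 1 (0 means no
 *                bvecs to free)
 *   bits 12..0:  BIO_* flag bits from the list above
 *
 * For example, a bio whose bvecs came from pool 2 and which has
 * BIO_CLONED set would have bi_flags == (3 << BVEC_POOL_OFFSET) |
 * (1 << BIO_CLONED) == 0x6004.
 */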

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS     8
#define REQ_OP_MASK     ((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS   24

enum req_opf {
        /* read sectors from the device */
        REQ_OP_READ             = 0,
        /* write sectors to the device */
        REQ_OP_WRITE            = 1,
        /* flush the volatile write cache */
        REQ_OP_FLUSH            = 2,
        /* discard sectors */
        REQ_OP_DISCARD          = 3,
        /* get zone information */
        REQ_OP_ZONE_REPORT      = 4,
        /* securely erase sectors */
        REQ_OP_SECURE_ERASE     = 5,
        /* reset a zone write pointer */
        REQ_OP_ZONE_RESET       = 6,
        /* write the same sector many times */
        REQ_OP_WRITE_SAME       = 7,
        /* write the zero-filled sector many times */
        REQ_OP_WRITE_ZEROES     = 9,

        /* SCSI passthrough using struct scsi_request */
        REQ_OP_SCSI_IN          = 32,
        REQ_OP_SCSI_OUT         = 33,
        /* Driver private requests */
        REQ_OP_DRV_IN           = 34,
        REQ_OP_DRV_OUT          = 35,

        REQ_OP_LAST,
};
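
/*
 * Worked example of the direction rule above: REQ_OP_READ (0) and
 * REQ_OP_ZONE_REPORT (4) have bit 0 clear, so they transfer FROM the
 * device; REQ_OP_WRITE (1) and REQ_OP_SECURE_ERASE (5) have bit 0 set,
 * so they transfer TO the device.  op_is_write() below tests exactly
 * this bit.  A full bi_opf value combines one op with any flags, e.g.:
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 * bio_op() then recovers REQ_OP_WRITE by masking with REQ_OP_MASK.
 */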

enum req_flag_bits {
        __REQ_FAILFAST_DEV =    /* no driver retries of device errors */
                REQ_OP_BITS,
        __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
        __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
        __REQ_SYNC,             /* request is sync (sync write or read) */
        __REQ_META,             /* metadata io request */
        __REQ_PRIO,             /* boost priority in cfq */
        __REQ_NOMERGE,          /* don't touch this for merging */
        __REQ_IDLE,             /* anticipate more IO after this one */
        __REQ_INTEGRITY,        /* I/O includes block integrity payload */
        __REQ_FUA,              /* forced unit access */
        __REQ_PREFLUSH,         /* request for cache flush */
        __REQ_RAHEAD,           /* read ahead, can fail anytime */
        __REQ_BACKGROUND,       /* background IO */
        __REQ_NOWAIT,           /* Don't wait if request will block */

        /* command specific flags for REQ_OP_WRITE_ZEROES: */
        __REQ_NOUNMAP,          /* do not free blocks when zeroing */

        /* for driver use */
        __REQ_DRV,

        __REQ_NR_BITS,          /* stops here */
};

#define REQ_FAILFAST_DEV        (1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT  (1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER     (1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC                (1ULL << __REQ_SYNC)
#define REQ_META                (1ULL << __REQ_META)
#define REQ_PRIO                (1ULL << __REQ_PRIO)
#define REQ_NOMERGE             (1ULL << __REQ_NOMERGE)
#define REQ_IDLE                (1ULL << __REQ_IDLE)
#define REQ_INTEGRITY           (1ULL << __REQ_INTEGRITY)
#define REQ_FUA                 (1ULL << __REQ_FUA)
#define REQ_PREFLUSH            (1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD              (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND          (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT              (1ULL << __REQ_NOWAIT)

#define REQ_NOUNMAP             (1ULL << __REQ_NOUNMAP)

#define REQ_DRV                 (1ULL << __REQ_DRV)

#define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
        (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define bio_op(bio) \
        ((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
        ((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
                unsigned op_flags)
{
        bio->bi_opf = op | op_flags;
}
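
/*
 * New code should assign bi_opf directly instead, e.g.:
 *
 *	bio->bi_opf = REQ_OP_READ | REQ_RAHEAD;
 */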

static inline bool op_is_write(unsigned int op)
{
        return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
        return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
        return (op & REQ_OP_MASK) == REQ_OP_READ ||
                (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
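
/*
 * Worked examples for the two helpers above:
 *
 *	op_is_flush(REQ_OP_WRITE | REQ_PREFLUSH)	true
 *	op_is_flush(REQ_OP_WRITE)			false
 *	op_is_sync(REQ_OP_READ)				true (reads always sync)
 *	op_is_sync(REQ_OP_WRITE)			false
 *	op_is_sync(REQ_OP_WRITE | REQ_SYNC)		true
 */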

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE           -1U
#define BLK_QC_T_SHIFT          16
#define BLK_QC_T_INTERNAL       (1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
        return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
                                       bool internal)
{
        blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);

        if (internal)
                ret |= BLK_QC_T_INTERNAL;

        return ret;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
        return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
        return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
        return (cookie & BLK_QC_T_INTERNAL) != 0;
}
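
/*
 * Cookie layout, for reference: the tag lives in bits 15..0, the hw
 * queue number in bits 30..16, and BLK_QC_T_INTERNAL (bit 31) marks
 * scheduler-internal tags.  Round trip:
 *
 *	blk_qc_t c = blk_tag_to_qc_t(5, 2, false);	(c == 0x20005)
 *	blk_qc_t_to_queue_num(c)			(== 2)
 *	blk_qc_t_to_tag(c)				(== 5)
 *	blk_qc_t_is_internal(c)				(== false)
 */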

struct blk_rq_stat {
        u64 mean;
        u64 min;
        u64 max;
        u32 nr_samples;
        u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */