linux/include/linux/blk_types.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Include this file directly only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP         ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT         ((__force blk_status_t)2)
#define BLK_STS_NOSPC           ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT       ((__force blk_status_t)4)
#define BLK_STS_TARGET          ((__force blk_status_t)5)
#define BLK_STS_NEXUS           ((__force blk_status_t)6)
#define BLK_STS_MEDIUM          ((__force blk_status_t)7)
#define BLK_STS_PROTECTION      ((__force blk_status_t)8)
#define BLK_STS_RESOURCE        ((__force blk_status_t)9)
#define BLK_STS_IOERR           ((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

#define BLK_STS_AGAIN           ((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again.  This is typically the case for device specific
 * resources that are consumed for IO.  If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource.  For resources of wider scope, allocation
 * failure can happen without having pending IO.  This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight.  Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE    ((__force blk_status_t)13)

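/*
 * A minimal sketch (not part of this header) of how a blk-mq driver's
 * ->queue_rq() might choose between the two resource errors; my_dev,
 * my_cmd, my_dev_get_tag and my_dev_submit are hypothetical names:
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct my_dev *dev = hctx->queue->queuedata;
 *		struct my_cmd *cmd;
 *
 *		if (!my_dev_get_tag(dev))
 *			// a per-device tag is freed when in-flight IO
 *			// completes, so a queue rerun is guaranteed
 *			return BLK_STS_DEV_RESOURCE;
 *
 *		cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
 *		if (!cmd)
 *			// system-wide resource, no completion is guaranteed
 *			// to free it; let the block layer back off and retry
 *			return BLK_STS_RESOURCE;
 *
 *		return my_dev_submit(dev, cmd, bd->rq);
 *	}
 */
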
/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
        switch (error) {
        case BLK_STS_NOTSUPP:
        case BLK_STS_NOSPC:
        case BLK_STS_TARGET:
        case BLK_STS_NEXUS:
        case BLK_STS_MEDIUM:
        case BLK_STS_PROTECTION:
                return false;
        }

        /* Anything else could be a path failure, so should be retried */
        return true;
}

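/*
 * Sketch of the intended use, modelled on multipath failover logic;
 * my_failover_req and my_end_req are hypothetical helpers:
 *
 *	static void my_complete_rq(struct request *req, blk_status_t error)
 *	{
 *		if (error && blk_path_error(error)) {
 *			// the error may be path related, so retry the
 *			// request on another path
 *			my_failover_req(req);
 *			return;
 *		}
 *		// success, or an error a different path cannot fix
 *		my_end_req(req, error);
 *	}
 */
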
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
        (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
        u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
        return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
        return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
        return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
                                       sector_t size)
{
        size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
        issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
                        (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
                        ((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

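/*
 * Layout sketch: bio_issue packs three fields into one u64.  For an
 * illustrative 4 KiB bio (8 sectors):
 *
 *	struct bio_issue issue = { .value = 0 };
 *
 *	bio_issue_init(&issue, 4096 >> 9);
 *	// bit  63:    reserved (e.g. BIO_ISSUE_THROTL_SKIP_LATENCY)
 *	// bits 62-51: bio_issue_size(&issue) == 8
 *	// bits 50-0:  bio_issue_time(&issue), ktime_get_ns() truncated
 *	//             to the low 51 bits
 */
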
/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct gendisk          *bi_disk;
        unsigned int            bi_opf;         /* bottom bits req flags,
                                                 * top bits REQ_OP. Use
                                                 * accessors.
                                                 */
        unsigned short          bi_flags;       /* status, etc. and bvec pool number */
        unsigned short          bi_ioprio;
        unsigned short          bi_write_hint;
        blk_status_t            bi_status;
        u8                      bi_partno;

        /* Number of segments in this BIO after
         * physical address coalescing is performed.
         */
        unsigned int            bi_phys_segments;

        struct bvec_iter        bi_iter;

        atomic_t                __bi_remaining;
        bio_end_io_t            *bi_end_io;

        void                    *bi_private;
#ifdef CONFIG_BLK_CGROUP
        /*
         * Represents the association of the css and request_queue for the bio.
         * If a bio goes direct to device, it will not have a blkg as it will
         * not have a request_queue associated with it.  The reference is put
         * on release of the bio.
         */
        struct blkcg_gq         *bi_blkg;
        struct bio_issue        bi_issue;
#endif
        union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
                struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
        };

        unsigned short          bi_vcnt;        /* how many bio_vec's */

        /*
         * Everything starting with bi_max_vecs will be preserved by bio_reset()
         */

        unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */

        atomic_t                __bi_cnt;       /* pin count */

        struct bio_vec          *bi_io_vec;     /* the actual vec list */

        struct bio_set          *bi_pool;

        /*
         * We can inline a number of vecs at the end of the bio, to avoid
         * double allocations for a small number of bio_vecs. This member
         * MUST obviously be kept at the very end of the bio.
         */
        struct bio_vec          bi_inline_vecs[0];
};

#define BIO_RESET_BYTES         offsetof(struct bio, bi_max_vecs)

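/*
 * Simplified sketch of how bio_reset() (block/bio.c) uses this: fields
 * before bi_max_vecs are zeroed, while flags at or above BIO_RESET_BITS
 * (defined below) survive the reset:
 *
 *	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 *	bio->bi_flags = flags;
 *	atomic_set(&bio->__bi_remaining, 1);
 */
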
/*
 * bio flags
 */
enum {
        BIO_NO_PAGE_REF,        /* don't put bvec pages on release */
        BIO_SEG_VALID,          /* bi_phys_segments valid */
        BIO_CLONED,             /* doesn't own data */
        BIO_BOUNCED,            /* bio is a bounce bio */
        BIO_USER_MAPPED,        /* contains user pages */
        BIO_NULL_MAPPED,        /* contains invalid user pages */
        BIO_QUIET,              /* make bio quiet: suppress error reporting */
        BIO_CHAIN,              /* chained bio, ->bi_remaining in effect */
        BIO_REFFED,             /* bio has elevated ->bi_cnt */
        BIO_THROTTLED,          /* This bio has already been subjected to
                                 * throttling rules. Don't do it again. */
        BIO_TRACE_COMPLETION,   /* bio_endio() should trace the final completion
                                 * of this bio. */
        BIO_QUEUE_ENTERED,      /* can use blk_queue_enter_live() */
        BIO_TRACKED,            /* set if bio goes through the rq_qos path */
        BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

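/*
 * These bits live in the low part of bi_flags (the top bits hold the
 * bvec pool index, see below).  bio.h provides the accessors, roughly:
 *
 *	static inline bool bio_flagged(struct bio *bio, unsigned int bit)
 *	{
 *		return (bio->bi_flags & (1U << bit)) != 0;
 *	}
 *
 *	bio_set_flag(bio, BIO_QUIET);	// suppress error reporting
 *	if (bio_flagged(bio, BIO_CHAIN))
 *		// completion is deferred until ->__bi_remaining drops to 0
 */
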
/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR            6
#define BVEC_POOL_MAX           (BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS          (3)
#define BVEC_POOL_OFFSET        (16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)      ((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS  BVEC_POOL_OFFSET

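/*
 * Packing sketch: with BVEC_POOL_BITS == 3 and BVEC_POOL_OFFSET == 13,
 * the 16-bit bi_flags word looks like
 *
 *	15       13 12                  0
 *	+----------+--------------------+
 *	| pool + 1 |  bio flags above   |
 *	+----------+--------------------+
 *
 * so BVEC_POOL_IDX(bio) == 0 means "no bvecs to free", and new flag
 * bits must stay below BVEC_POOL_OFFSET.
 */
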
typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS     8
#define REQ_OP_MASK     ((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS   24

enum req_opf {
        /* read sectors from the device */
        REQ_OP_READ             = 0,
        /* write sectors to the device */
        REQ_OP_WRITE            = 1,
        /* flush the volatile write cache */
        REQ_OP_FLUSH            = 2,
        /* discard sectors */
        REQ_OP_DISCARD          = 3,
        /* securely erase sectors */
        REQ_OP_SECURE_ERASE     = 5,
        /* reset a zone write pointer */
        REQ_OP_ZONE_RESET       = 6,
        /* write the same sector many times */
        REQ_OP_WRITE_SAME       = 7,
        /* write the zero filled sector many times */
        REQ_OP_WRITE_ZEROES     = 9,

        /* SCSI passthrough using struct scsi_request */
        REQ_OP_SCSI_IN          = 32,
        REQ_OP_SCSI_OUT         = 33,
        /* Driver private requests */
        REQ_OP_DRV_IN           = 34,
        REQ_OP_DRV_OUT          = 35,

        REQ_OP_LAST,
};

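/*
 * Encoding sketch: an operation and its flags share one word, e.g. for
 * a synchronous write:
 *
 *	unsigned int opf = REQ_OP_WRITE | REQ_SYNC;
 *
 *	opf & REQ_OP_MASK;	// == REQ_OP_WRITE (low 8 bits)
 *	op_is_write(opf);	// true: bit 0 of the op number is set
 *	opf & ~REQ_OP_MASK;	// == REQ_SYNC (flag bits, defined below)
 */
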
enum req_flag_bits {
        __REQ_FAILFAST_DEV =    /* no driver retries of device errors */
                REQ_OP_BITS,
        __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
        __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
        __REQ_SYNC,             /* request is sync (sync write or read) */
        __REQ_META,             /* metadata io request */
        __REQ_PRIO,             /* boost priority in cfq */
        __REQ_NOMERGE,          /* don't touch this for merging */
        __REQ_IDLE,             /* anticipate more IO after this one */
        __REQ_INTEGRITY,        /* I/O includes block integrity payload */
        __REQ_FUA,              /* forced unit access */
        __REQ_PREFLUSH,         /* request for cache flush */
        __REQ_RAHEAD,           /* read ahead, can fail anytime */
        __REQ_BACKGROUND,       /* background IO */
        __REQ_NOWAIT,           /* Don't wait if request will block */

        /* command specific flags for REQ_OP_WRITE_ZEROES: */
        __REQ_NOUNMAP,          /* do not free blocks when zeroing */

        __REQ_HIPRI,            /* high priority IO, poll for completion */

        /* for driver use */
        __REQ_DRV,
        __REQ_SWAP,             /* swapping request. */
        __REQ_NR_BITS,          /* stops here */
};

#define REQ_FAILFAST_DEV        (1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT  (1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER     (1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC                (1ULL << __REQ_SYNC)
#define REQ_META                (1ULL << __REQ_META)
#define REQ_PRIO                (1ULL << __REQ_PRIO)
#define REQ_NOMERGE             (1ULL << __REQ_NOMERGE)
#define REQ_IDLE                (1ULL << __REQ_IDLE)
#define REQ_INTEGRITY           (1ULL << __REQ_INTEGRITY)
#define REQ_FUA                 (1ULL << __REQ_FUA)
#define REQ_PREFLUSH            (1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD              (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND          (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT              (1ULL << __REQ_NOWAIT)
#define REQ_NOUNMAP             (1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI               (1ULL << __REQ_HIPRI)

#define REQ_DRV                 (1ULL << __REQ_DRV)
#define REQ_SWAP                (1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
        (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

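/*
 * Typical mask usage (sketch):
 *
 *	if (bio->bi_opf & REQ_FAILFAST_MASK)
 *		// the submitter asked for no retries on this class of error
 *
 *	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
 *		// flush/FUA requests must not be merged with others
 */
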
enum stat_group {
        STAT_READ,
        STAT_WRITE,
        STAT_DISCARD,

        NR_STAT_GROUPS
};

#define bio_op(bio) \
        ((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
        ((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
                unsigned op_flags)
{
        bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
        return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
        return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
        return (op & REQ_OP_MASK) == REQ_OP_READ ||
                (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
        return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

static inline int op_stat_group(unsigned int op)
{
        if (op_is_discard(op))
                return STAT_DISCARD;
        return op_is_write(op);
}

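/*
 * Sketch of how the helpers combine for IO accounting: op_stat_group()
 * maps an operation to an index for per-group counters (STAT_READ == 0
 * and STAT_WRITE == 1 line up with op_is_write() returning 0 or 1):
 *
 *	unsigned long ios[NR_STAT_GROUPS];
 *
 *	static void my_account_io(unsigned int op)
 *	{
 *		ios[op_stat_group(op)]++;	// read, write or discard
 *	}
 */
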
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE           -1U
#define BLK_QC_T_SHIFT          16
#define BLK_QC_T_INTERNAL       (1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
        return cookie != BLK_QC_T_NONE;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
        return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
        return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
        return (cookie & BLK_QC_T_INTERNAL) != 0;
}

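/*
 * Cookie layout sketch: blk-mq encodes the hardware queue number in the
 * high bits and the tag in the low 16:
 *
 *	blk_qc_t cookie = (queue_num << BLK_QC_T_SHIFT) | tag;
 *
 *	blk_qc_t_to_queue_num(cookie);	// == queue_num
 *	blk_qc_t_to_tag(cookie);	// == tag
 *	blk_qc_t_is_internal(cookie);	// bit 31: scheduler-internal tag?
 */
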
struct blk_rq_stat {
        u64 mean;               /* mean of the samples in this window, in ns */
        u64 min;                /* fastest sample seen, in ns */
        u64 max;                /* slowest sample seen, in ns */
        u32 nr_samples;         /* number of samples in this window */
        u64 batch;              /* running sum used to compute the mean */
};

#endif /* __LINUX_BLK_TYPES_H */