linux/block/blk.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME  (HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ   32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT         (5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
        unsigned int            flush_queue_delayed:1;
        unsigned int            flush_pending_idx:1;
        unsigned int            flush_running_idx:1;
        unsigned long           flush_pending_since;
        struct list_head        flush_queue[2];
        struct list_head        flush_data_in_flight;
        struct request          *flush_rq;

        /*
         * flush_rq shares its tag with orig_rq; the two can't be
         * active at the same time.
         */
        struct request          *orig_rq;
        spinlock_t              mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

/*
 * @q->queue_lock is set while a queue is being initialized. Since we know
 * that no other threads access the queue object before @q->queue_lock has
 * been set, it is safe to manipulate queue flags without holding the
 * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
 * blk_init_allocated_queue().
 */
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
        if (q->queue_lock)
                lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
                                           struct request_queue *q)
{
        if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
            kref_read(&q->kobj.kref))
                lockdep_assert_held(q->queue_lock);
        __set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
                                             struct request_queue *q)
{
        if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
            kref_read(&q->kobj.kref))
                lockdep_assert_held(q->queue_lock);
        __clear_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
                                            struct request_queue *q)
{
        queue_lockdep_assert_held(q);

        if (test_bit(flag, &q->queue_flags)) {
                __clear_bit(flag, &q->queue_flags);
                return 1;
        }

        return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
                                          struct request_queue *q)
{
        queue_lockdep_assert_held(q);

        if (!test_bit(flag, &q->queue_flags)) {
                __set_bit(flag, &q->queue_flags);
                return 0;
        }

        return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
        queue_lockdep_assert_held(q);
        __set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
        queue_lockdep_assert_held(q);
        __clear_bit(flag, &q->queue_flags);
}
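
/*
 * Illustrative sketch, not part of the original header: once a queue is live,
 * legacy (non-mq) callers are expected to hold the queue lock around the flag
 * helpers above so the lockdep assertions stay quiet.  The helper name below
 * is hypothetical.
 */
static inline void example_mark_queue_stopped(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);  /* lock held, assertion satisfied */
        spin_unlock_irqrestore(q->queue_lock, flags);
}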

static inline struct blk_flush_queue *blk_get_flush_queue(
                struct request_queue *q, struct blk_mq_ctx *ctx)
{
        if (q->mq_ops)
                return blk_mq_map_queue(q, ctx->cpu)->fq;
        return q->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
        kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
                int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
                gfp_t gfp_mask);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
        /*
         * Given that running in generic_make_request() context
         * guarantees that a live reference against q_usage_counter has
         * been established, further references under that same context
         * need not check that the queue has been frozen (marked dead).
         */
        percpu_ref_get(&q->q_usage_counter);
}
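
/*
 * Illustrative sketch, not part of the original header: code that is already
 * running under generic_make_request() holds a live q_usage_counter
 * reference, so it may take an extra one without the freeze check and drop
 * it later with blk_queue_exit().  The helper name is hypothetical.
 */
static inline void example_borrow_queue_ref(struct request_queue *q)
{
        blk_queue_enter_live(q);        /* no freeze check needed here */
        /* ... hand @q to work that runs while this reference is held ... */
        blk_queue_exit(q);              /* paired release */
}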

static inline bool biovec_phys_mergeable(struct request_queue *q,
                struct bio_vec *vec1, struct bio_vec *vec2)
{
        unsigned long mask = queue_segment_boundary(q);
        phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
        phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

        if (addr1 + vec1->bv_len != addr2)
                return false;
        if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
                return false;
        if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
                return false;
        return true;
}
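
/*
 * Worked example (illustrative, not part of the original header): with a 4K
 * segment boundary the mask is 0xfff.  Two physically contiguous vecs
 * covering 0x1f00..0x1fff and 0x2000..0x20ff fail the last check, because
 * (0x1f00 | 0xfff) == 0x1fff while (0x20ff | 0xfff) == 0x2fff: the merged
 * segment would straddle the boundary, so the vecs are kept separate.
 */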

static inline bool __bvec_gap_to_prev(struct request_queue *q,
                struct bio_vec *bprv, unsigned int offset)
{
        return (offset & queue_virt_boundary(q)) ||
                ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
                struct bio_vec *bprv, unsigned int offset)
{
        if (!queue_virt_boundary(q))
                return false;
        return __bvec_gap_to_prev(q, bprv, offset);
}
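
/*
 * Illustrative sketch, not part of the original header (hypothetical helper,
 * modelled on the checks the merge code performs): refuse to append @next to
 * @prev when the transition between their adjacent bvecs would cross the
 * queue's virtual boundary.
 */
static inline bool example_bios_would_gap(struct request_queue *q,
                struct bio *prev, struct bio *next)
{
        struct bio_vec pb, nb;

        bio_get_last_bvec(prev, &pb);   /* last segment already queued */
        bio_get_first_bvec(next, &nb);  /* first segment being appended */
        return bvec_gap_to_prev(q, &pb, nb.bv_offset);
}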

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
        if (bio_integrity(bio))
                return __bio_integrity_endio(bio);
        return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
                struct bio *next)
{
        struct bio_integrity_payload *bip = bio_integrity(req->bio);
        struct bio_integrity_payload *bip_next = bio_integrity(next);

        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
                                bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
                struct bio *bio)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);
        struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
                                bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool integrity_req_gap_back_merge(struct request *req,
                struct bio *next)
{
        return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
                struct bio *bio)
{
        return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
        return true;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);


bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
                             struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
                            struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                            unsigned int *request_count,
                            struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * The EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds. Steal the bottom bit of the
 * __deadline field for this.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
        return test_and_set_bit(0, &rq->__deadline);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
        clear_bit(0, &rq->__deadline);
}

static inline bool blk_rq_is_complete(struct request *rq)
{
        return test_bit(0, &rq->__deadline);
}
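
/*
 * Illustrative sketch, not part of the original header: on the legacy path
 * both the timeout handler and normal completion try to claim a request;
 * only the caller that sees the 0 -> 1 transition of the stolen bit may go
 * on to complete it.  The helper name is hypothetical.
 */
static inline bool example_claim_rq_for_completion(struct request *rq)
{
        return !blk_mark_rq_complete(rq);       /* true: we own the completion */
}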

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.sq.elevator_activate_req_fn)
                e->type->ops.sq.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.sq.elevator_deactivate_req_fn)
                e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}

int elevator_init(struct request_queue *);
int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
                              struct elevator_type *new_e);
void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
                                const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
        return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                                struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
        return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
        return q->nr_congestion_off;
}

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *      a) it's attached to a gendisk, and
 *      b) the queue had IO stats enabled when this request was started, and
 *      c) it's a file system request
 */
static inline bool blk_do_io_stat(struct request *rq)
{
        return rq->rq_disk &&
               (rq->rq_flags & RQF_IO_STAT) &&
                !blk_rq_is_passthrough(rq);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
                q->last_merge = NULL;
}

/*
 * Steal a bit from this field for atomic "complete" marking on the legacy IO
 * path. Note that setting the deadline clears the bottom bit, potentially
 * clearing the completed bit. Users of the field have to be OK with this
 * (the current ones are).
 */
static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
{
        rq->__deadline = time & ~0x1UL;
}

static inline unsigned long blk_rq_deadline(struct request *rq)
{
        return rq->__deadline & ~0x1UL;
}
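
/*
 * Illustrative sketch, not part of the original header: arming a new deadline
 * rewrites the whole field and therefore clears the stolen "complete" bit, so
 * it must only happen while the submitter exclusively owns the request.  The
 * helper name is hypothetical.
 */
static inline void example_arm_legacy_timeout(struct request *rq)
{
        blk_rq_set_deadline(rq, jiffies + BLK_MAX_TIMEOUT);     /* clears bit 0 */
        WARN_ON_ONCE(blk_rq_is_complete(rq));
}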

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it has to be aligned to the logical
 * block size, which is the smallest unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
        return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
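
/*
 * Worked example (illustrative, not part of the original header): with
 * 4096-byte logical blocks this is round_down(4294967295, 4096) =
 * 4294963200 bytes, i.e. 8388600 sectors of 512 bytes, so a single bio tops
 * out just below 4 GiB.
 */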

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
                             gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio.  May return
 * %NULL if %current->io_context doesn't exist.
 */
static inline struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
        if (bio && bio->bi_ioc)
                return bio->bi_ioc;
#endif
        return current->io_context;
}

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the resulting %current->io_context, which may be %NULL if
 * the allocation failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
        WARN_ON_ONCE(irqs_disabled());
        if (unlikely(!current->io_context))
                create_task_io_context(current, gfp_mask, node);
        return current->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
        const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
        return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

extern void blk_drain_queue(struct request_queue *q);

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

#endif /* BLK_INTERNAL_H */