linux/block/blk-mq.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
        struct kobject kobj;
        struct blk_mq_ctx __percpu      *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_lists[HCTX_MAX_TYPES];
        } ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned short          index_hw[HCTX_MAX_TYPES];
        struct blk_mq_hw_ctx    *hctxs[HCTX_MAX_TYPES];

        /* incremented at dispatch time */
        unsigned long           rq_dispatched[2];
        unsigned long           rq_merged;

        /* incremented at completion time */
        unsigned long           ____cacheline_aligned_in_smp rq_completed[2];

        struct request_queue    *queue;
        struct blk_mq_ctxs      *ctxs;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
                             unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
                                        unsigned int hctx_idx,
                                        unsigned int nr_tags,
                                        unsigned int reserved_tags,
                                        unsigned int flags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                                bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                                  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          enum hctx_type type,
                                                          unsigned int cpu)
{
        return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     unsigned int flags,
                                                     struct blk_mq_ctx *ctx)
{
        enum hctx_type type = HCTX_TYPE_DEFAULT;

        /*
         * The caller ensures that if REQ_HIPRI is set, polling must be
         * enabled.
         */
        if (flags & REQ_HIPRI)
                type = HCTX_TYPE_POLL;
        else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;

        return ctx->hctxs[type];
}
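
/*
 * Example (an illustrative sketch only, not an additional interface): a
 * submitter that has already resolved its per-CPU software queue ctx picks
 * the hardware queue for a bio roughly as
 *
 *	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * A plain REQ_OP_READ then lands on HCTX_TYPE_READ (when the driver maps a
 * dedicated read set), REQ_HIPRI lands on HCTX_TYPE_POLL, and everything
 * else falls back to HCTX_TYPE_DEFAULT.
 */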

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                                  unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctxs are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        unsigned int cmd_flags;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
{
        return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->q->elevator)
                return data->hctx->sched_tags;

        return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
                struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
                unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
                                              int budget_token)
{
        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(q);
        return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
        if (token < 0)
                return;

        if (rq->q->mq_ops->set_rq_budget_token)
                rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
        if (rq->q->mq_ops->get_rq_budget_token)
                return rq->q->mq_ops->get_rq_budget_token(rq);
        return -1;
}
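
/*
 * Illustrative dispatch-side flow (a sketch of how the budget helpers fit
 * together, not a new interface; budget_token, rq and the dispatched flag
 * are placeholders): grab a budget before issuing, stash the returned token
 * in the request, and give the budget back if the request cannot be
 * dispatched after all:
 *
 *	budget_token = blk_mq_get_dispatch_budget(q);
 *	if (budget_token < 0)
 *		return;		(out of budget, back off)
 *	blk_mq_set_rq_budget_token(rq, budget_token);
 *	if (!dispatched)
 *		blk_mq_put_dispatch_budget(q, budget_token);
 */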

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_sbitmap_shared(hctx->flags))
                atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
        else
                atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_sbitmap_shared(hctx->flags))
                atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
        else
                atomic_dec(&hctx->nr_active);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_sbitmap_shared(hctx->flags))
                return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
        return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = BLK_MQ_NO_TAG;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                __blk_mq_dec_active_requests(hctx);
        }
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool blk_mq_get_driver_tag(struct request *rq);

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO issuing
 * order. While this is not a problem with regular block devices, this ordering
 * change can cause write BIO failures with zoned block devices as these
 * require sequential write patterns to zones. Prevent this from happening by
 * ignoring the plug state of a BIO issuing context if the target request queue
 * is for a zoned block device and the BIO to plug is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
                                           struct bio *bio)
{
        /*
         * For regular block devices or read operations, use the context plug
         * which may be NULL if blk_start_plug() was not executed.
         */
        if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
                return current->plug;

        /* Zoned block device write operation case: do not plug the BIO */
        return NULL;
}
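
/*
 * Illustrative use (a rough sketch, not an additional interface; the real
 * submission path in blk-mq.c is more involved): callers obtain the plug
 * through this helper instead of dereferencing current->plug directly, so
 * that writes to zoned devices are never plugged:
 *
 *	struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *	if (plug)
 *		list_add_tail(&rq->queuelist, &plug->mq_list);
 *	else
 *		blk_mq_sched_insert_request(rq, false, true, true);
 */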

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
        while (!list_empty(list)) {
                struct request *rq = list_entry_rq(list->next);

                list_del_init(&rq->queuelist);
                blk_mq_free_request(rq);
        }
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        if (blk_mq_is_sbitmap_shared(hctx->flags)) {
                struct request_queue *q = hctx->queue;
                struct blk_mq_tag_set *set = q->tag_set;

                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return true;
                users = atomic_read(&set->active_queues_shared_sbitmap);
        } else {
                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return true;
                users = atomic_read(&hctx->tags->active_queues);
        }

        if (!users)
                return true;

        /*
         * Allow at least some tags
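         * (e.g. with a total depth of 256 and 8 active users, each user may
         * hold up to max(256 / 8, 4) = 32 tags).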
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return __blk_mq_active_requests(hctx) < depth;
}

#endif