linux/block/blk-mq.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_list;
        }  ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned int            index_hw;

        /* incremented at dispatch time */
        unsigned long           rq_dispatched[2];
        unsigned long           rq_merged;

        /* incremented at completion time */
        unsigned long           ____cacheline_aligned_in_smp rq_completed[2];

        struct request_queue    *queue;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;

/*
 * Bits for request->gstate.  The lower two bits carry MQ_RQ_* state value
 * and the upper bits the generation number.
 */
enum mq_rq_state {
        MQ_RQ_IDLE              = 0,
        MQ_RQ_IN_FLIGHT         = 1,
        MQ_RQ_COMPLETE          = 2,

        MQ_RQ_STATE_BITS        = 2,
        MQ_RQ_STATE_MASK        = (1 << MQ_RQ_STATE_BITS) - 1,
        MQ_RQ_GEN_INC           = 1 << MQ_RQ_STATE_BITS,
};
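
/*
 * Example (derived from the definitions above): with MQ_RQ_STATE_BITS == 2,
 * a gstate value of 0x6 decodes to generation 1 (0x6 >> MQ_RQ_STATE_BITS)
 * and state MQ_RQ_COMPLETE (0x6 & MQ_RQ_STATE_MASK).
 */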

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
                                bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
                                        unsigned int hctx_idx,
                                        unsigned int nr_tags,
                                        unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                                bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                int cpu)
{
        return q->queue_hw_ctx[q->mq_map[cpu]];
}
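
/*
 * Typical internal usage: resolve the hardware queue backing a software
 * queue from its CPU, as the driver-tag helpers at the bottom of this file
 * do:
 *
 *      hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
 */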

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline int blk_mq_rq_state(struct request *rq)
{
        return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
}

/**
 * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
 * @rq: target request.
 * @state: new state to set.
 *
 * Set @rq's state to @state.  The caller is responsible for ensuring that
 * there are no other updaters.  A request can transition into IN_FLIGHT
 * only from IDLE and doing so increments the generation number.
 */
static inline void blk_mq_rq_update_state(struct request *rq,
                                          enum mq_rq_state state)
{
        u64 old_val = READ_ONCE(rq->gstate);
        u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;

        if (state == MQ_RQ_IN_FLIGHT) {
                WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
                new_val += MQ_RQ_GEN_INC;
        }

        /* avoid exposing interim values */
        WRITE_ONCE(rq->gstate, new_val);
}
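
/*
 * Life-cycle sketch implied by the comment above (illustrative, not a
 * verbatim excerpt from blk-mq.c): a request moves IDLE -> IN_FLIGHT when
 * issued, which bumps the generation, then IN_FLIGHT -> COMPLETE on
 * completion, and back to IDLE once it can be reused:
 *
 *      blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);
 *      ...
 *      blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);
 *      ...
 *      blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
 */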

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
        put_cpu();
}
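
/*
 * blk_mq_get_ctx()/blk_mq_put_ctx() wrap get_cpu()/put_cpu(), so callers
 * must pair them and must not sleep in between.  A usage sketch (assumed,
 * not copied from blk-mq.c):
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *      ...
 *      blk_mq_put_ctx(ctx);
 */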

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

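/*
 * Pick the tags a request is allocated from: scheduler-owned sched_tags for
 * internal (BLK_MQ_REQ_INTERNAL) allocations, driver tags otherwise.
 */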
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->flags & BLK_MQ_REQ_INTERNAL)
                return data->hctx->sched_tags;

        return data->hctx->tags;
}

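/* True if the hardware queue has been stopped via BLK_MQ_S_STOPPED. */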
static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
                      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
                         unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(hctx);
        return true;
}
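
/*
 * Dispatch-side usage sketch for the two budget helpers above (an
 * illustration, not a verbatim excerpt from the dispatch code): take a
 * budget before issuing a request and return it if the issue cannot
 * proceed:
 *
 *      if (!blk_mq_get_dispatch_budget(hctx))
 *              return;
 *      if (!blk_mq_get_driver_tag(rq, NULL, false)) {
 *              blk_mq_put_dispatch_budget(hctx);
 *              return;
 *      }
 */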
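
/*
 * Release the driver tag back to the hardware queue and drop the request's
 * contribution to nr_active if it was accounted as in flight.
 */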
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = -1;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                atomic_dec(&hctx->nr_active);
        }
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
                                       struct request *rq)
{
        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        __blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        struct blk_mq_hw_ctx *hctx;

        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
        __blk_mq_put_driver_tag(hctx, rq);
}

#endif