linux/block/blk-mq.h
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

/* per-CPU software queue state */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_list;
        }  ____cacheline_aligned_in_smp;

        /* CPU owning this software queue, and its index in hctx->ctxs[] */
        unsigned int            cpu;
        unsigned int            index_hw;

        /* last tag allocated from this software queue, used as an allocation hint */
        unsigned int            last_tag ____cacheline_aligned_in_smp;

        /* incremented at dispatch time */
        unsigned long           rq_dispatched[2];
        unsigned long           rq_merged;

        /* incremented at completion time */
        unsigned long           ____cacheline_aligned_in_smp rq_completed[2];

        struct request_queue    *queue;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
                              int (*fn)(void *, unsigned long, unsigned int),
                              void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
                                   const struct cpumask *online_mask);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

/*
 * Basic implementation of a sparser bitmap, allowing the user to spread
 * the bits over more cachelines.
 */
struct blk_align_bitmap {
        unsigned long word;
        unsigned long depth;
} ____cacheline_aligned_in_smp;
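
/*
 * Illustrative sketch only, not part of the original interface (the helper
 * name and the bits_per_word parameter are hypothetical): with a fixed
 * number of bits stored per blk_align_bitmap entry, bit 'nr' lives in
 * entry nr / bits_per_word, so writers touching distant bits dirty
 * different cachelines instead of contending on a single word.
 */
static inline void blk_mq_example_set_sparse_bit(struct blk_align_bitmap *map,
                                                 unsigned int bits_per_word,
                                                 unsigned int nr)
{
        struct blk_align_bitmap *bm = &map[nr / bits_per_word];

        set_bit(nr % bits_per_word, &bm->word);
}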

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctx's are persistent. This does mean
 * that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
        put_cpu();
}
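
/*
 * Illustrative sketch only (the helper name is hypothetical): callers pair
 * blk_mq_get_ctx() with blk_mq_put_ctx().  The get pins the current CPU via
 * get_cpu(), so the put must happen before the caller can sleep.
 */
static inline unsigned long blk_mq_example_read_merged(struct request_queue *q)
{
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
        unsigned long merged = ctx->rq_merged;

        blk_mq_put_ctx(ctx);
        return merged;
}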

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        unsigned int flags;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
                struct request_queue *q, unsigned int flags,
                struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
        data->q = q;
        data->flags = flags;
        data->ctx = ctx;
        data->hctx = hctx;
}
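
/*
 * Illustrative sketch only, mirroring the usual allocation-side pattern
 * (the helper name is hypothetical, and the ->map_queue() lookup assumes
 * the driver callback of this era): pin a software queue, find the
 * hardware queue it maps to, and pack both into blk_mq_alloc_data.  Real
 * callers allocate the request before dropping the ctx with
 * blk_mq_put_ctx().
 */
static inline void blk_mq_example_fill_alloc_data(struct blk_mq_alloc_data *data,
                struct request_queue *q, unsigned int flags)
{
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
        struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);

        blk_mq_set_alloc_data(data, q, flags, ctx, hctx);

        /* ... allocate a request against data->hctx, then ... */
        blk_mq_put_ctx(ctx);
}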

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}
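
/*
 * Illustrative sketch only (hypothetical helper): hardware queues with no
 * software queues or tags mapped to them have nothing to dispatch, so
 * callers use the check above to skip them before kicking the queue.
 */
static inline void blk_mq_example_kick_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_hw_queue_mapped(hctx))
                blk_mq_run_hw_queue(hctx, true);
}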

#endif