#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

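	/* incremented at dispatch time */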
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

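	/* incremented at completion time */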
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);

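/*
 * Internal helpers for allocating/freeing the request map
 */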
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

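/*
 * Internal helpers for request insertion into sw queues
 */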
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

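/*
 * CPU -> queue mappings
 */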
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

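/*
 * Look up the hardware context a given CPU dispatches to, via the
 * per-queue mq_map table of CPU -> hctx indexes.
 */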
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

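/*
 * sysfs helpers
 */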
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

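/*
 * This assumes a per-cpu software queue mapping. get_cpu() pins the caller
 * to the current CPU until the matching blk_mq_put_ctx(); the ctx itself is
 * persistent, but it is not guaranteed to match the hardware queue the CPU
 * is currently mapped to.
 */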
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
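
/*
 * blk_mq_put_ctx() pairs with blk_mq_get_ctx(): the ctx argument is
 * unused, and put_cpu() just re-enables preemption. Typical use:
 *
 *	ctx = blk_mq_get_ctx(q);
 *	...
 *	blk_mq_put_ctx(ctx);
 */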

struct blk_mq_alloc_data {
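	/* input parameter */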
	struct request_queue *q;
	unsigned int flags;
	unsigned int shallow_depth;

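	/* input & output parameter */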
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

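/*
 * Requests allocated for an I/O scheduler (BLK_MQ_REQ_INTERNAL) are tagged
 * from the scheduler tag set; everything else comes from the driver tags.
 */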
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);

#endif