#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/rh_kabi.h>

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	RH_KABI_DEPRECATE(unsigned int, ipi_redirect)

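	/* incremented at dispatch time */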
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

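	/* incremented at completion time */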
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
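
/*
 * Internal helpers for allocating/freeing the request map
 */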
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
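
/*
 * Internal helpers for inserting requests into the per-cpu software queues
 */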
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
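
/*
 * CPU hotplug helpers
 */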
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      int (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);
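
/*
 * CPU -> queue mappings
 */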
int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
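
/*
 * sysfs helpers
 */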
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}
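
/*
 * Return the software queue (blk_mq_ctx) for the current CPU.  get_cpu()
 * disables preemption, so the caller must release the context with
 * blk_mq_put_ctx().
 */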
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
struct blk_mq_alloc_data {
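	/* input parameter */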
	struct request_queue *q;
	unsigned int flags;
	unsigned int shallow_depth;
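
	/* input & output parameter */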
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
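
/*
 * Internal helpers for request allocation, initialization and freeing
 */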
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			     struct request *rq);
void blk_mq_finish_request(struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw);

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
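
/*
 * Dispatch budget: if the driver implements get_budget/put_budget in its
 * aux_ops, honour them; otherwise the budget is always granted.
 */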
static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->aux_ops && q->mq_ops->aux_ops->put_budget)
		q->mq_ops->aux_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->aux_ops && q->mq_ops->aux_ops->get_budget)
		return q->mq_ops->aux_ops->get_budget(hctx);
	return true;
}
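
/*
 * Release a driver tag back to the hardware queue and undo the inflight
 * accounting taken when the tag was assigned.
 */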
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->cmd_flags & REQ_MQ_INFLIGHT) {
		rq->cmd_flags &= ~REQ_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq_aux(rq)->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq_aux(rq)->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

#endif