/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>
#include <linux/lockdep.h>

struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t		lock;
		/**
		 * @dispatch: Requests that are ready to be dispatched to the
		 * hardware but could not be sent (e.g. for lack of
		 * resources). They are dispatched first the next time the
		 * driver can accept new requests.
		 */
		struct list_head	dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long		state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work	run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t		cpumask;
	/**
	 * @next_cpu: Used for round-robin CPU selection from @cpumask when
	 * running the queue from a work item.
	 */
	int			next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many works left in the batch before
	 * changing to the next CPU.
	 */
	int			next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Define the behaviour of the queue. */
	unsigned long		flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void			*sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue	*queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue	*fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void			*driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap		ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx	*dispatch_from;
	/**
	 * @dispatch_busy: Exponentially weighted moving average used to
	 * decide whether this hardware queue is busy.
	 */
	unsigned int		dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short		type;
	/** @nr_ctx: Number of software queues mapped to this hardware queue. */
	unsigned short		nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx	**ctxs;

	/** @dispatch_wait_lock: Lock for the dispatch_wait queue. */
	spinlock_t		dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t	dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t		wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag at this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags	*tags;
	/**
	 * @sched_tags: Tags owned by the IO scheduler. If there is an IO
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags	*sched_tags;

	/** @queued: Number of queued requests. */
	unsigned long		queued;
	/** @run: Number of times this hardware queue has been run. */
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	/** @dispatched: Histogram of dispatch batch sizes per queue run. */
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int		numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int		queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t		nr_active;

	/** @cpuhp_online: Node for the CPUHP_AP_BLK_MQ_ONLINE hotplug state. */
	struct hlist_node	cpuhp_online;
	/** @cpuhp_dead: Node for the CPUHP_BLK_MQ_DEAD hotplug state. */
	struct hlist_node	cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject		kobj;

	/** @poll_considered: Count times blk_poll() was called. */
	unsigned long		poll_considered;
	/** @poll_invoked: Count how many requests blk_poll() polled. */
	unsigned long		poll_invoked;
	/** @poll_success: Count how many polled requests were completed. */
	unsigned long		poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	/** @debugfs_dir: debugfs directory for this hardware queue. */
	struct dentry		*debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the attached scheduler. */
	struct dentry		*sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: If this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head	hctx_list;

	/**
	 * @srcu: Sleepable RCU. Used as lock when the type of the hardware
	 * queue is blocking (BLK_MQ_F_BLOCKING). Must be the last member.
	 */
	struct srcu_struct	srcu[];
};
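
/*
 * Illustrative sketch: a driver usually attaches its own per-queue context
 * through @driver_data from the ->init_hctx() callback (the void * argument
 * is the tag set's driver_data) and reads it back in ->queue_rq().  The
 * names my_dev, hw_queues and my_init_hctx below are hypothetical.
 *
 *	static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 *				unsigned int hctx_idx)
 *	{
 *		struct my_dev *dev = data;
 *
 *		hctx->driver_data = &dev->hw_queues[hctx_idx];
 *		return 0;
 *	}
 */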

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};
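
/*
 * Illustrative sketch: a driver that exposes both regular and polled
 * hardware queues could fill one blk_mq_queue_map per hctx type from its
 * ->map_queues() callback, assuming it set nr_maps high enough to cover
 * HCTX_TYPE_POLL.  "poll_queues" is a hypothetical driver parameter.
 *
 *	static int my_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		struct blk_mq_queue_map *def = &set->map[HCTX_TYPE_DEFAULT];
 *		struct blk_mq_queue_map *poll = &set->map[HCTX_TYPE_POLL];
 *
 *		def->queue_offset = 0;
 *		def->nr_queues = set->nr_hw_queues - poll_queues;
 *		poll->queue_offset = def->nr_queues;
 *		poll->nr_queues = poll_queues;
 *
 *		blk_mq_map_queues(def);
 *		blk_mq_map_queues(poll);
 *		return 0;
 *	}
 */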

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. Maps need not be of the same size, and it is
 *		   legal to share maps between types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The
 *		   block driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @active_queues_shared_sbitmap:
 *		   Number of active request queues per tag set.
 * @__bitmap_tags: A shared tags sbitmap, used over all hctx's.
 * @__breserved_tags:
 *		   A shared reserved tags sbitmap, used over all hctx's.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 */
struct blk_mq_tag_set {
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;
	atomic_t		active_queues_shared_sbitmap;

	struct sbitmap_queue	__bitmap_tags;
	struct sbitmap_queue	__breserved_tags;
	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
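
/*
 * Illustrative sketch: minimal tag set setup before creating a request
 * queue or gendisk.  my_dev, my_cmd and my_mq_ops are hypothetical driver
 * objects; a simple single-queue device typically only needs the fields
 * shown here.
 *
 *	dev->tag_set.ops = &my_mq_ops;
 *	dev->tag_set.nr_hw_queues = 1;
 *	dev->tag_set.queue_depth = 128;
 *	dev->tag_set.numa_node = NUMA_NO_NODE;
 *	dev->tag_set.cmd_size = sizeof(struct my_cmd);
 *	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *	dev->tag_set.driver_data = dev;
 *
 *	ret = blk_mq_alloc_tag_set(&dev->tag_set);
 *	if (ret)
 *		return ret;
 */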

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
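
/*
 * Illustrative sketch: a busy_tag_iter_fn is typically passed to
 * blk_mq_tagset_busy_iter() (declared below) to walk all requests currently
 * owned by the driver, e.g. to count how many are in flight.  The function
 * and counter names are hypothetical.
 *
 *	static bool my_count_inflight(struct request *rq, void *data,
 *				      bool reserved)
 *	{
 *		unsigned int *inflight = data;
 *
 *		if (blk_mq_request_started(rq))
 *			(*inflight)++;
 *		return true;
 *	}
 *
 *	unsigned int inflight = 0;
 *
 *	blk_mq_tagset_busy_iter(&dev->tag_set, my_count_inflight, &inflight);
 */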

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of
	 * errors that make us stop issuing further requests, this hook
	 * serves the purpose of kicking the hardware (which the last
	 * request otherwise would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @get_budget: Reserve budget before queueing a request. Once
	 * .queue_rq is run, it is the driver's responsibility to release the
	 * reserved budget. The failure case of .get_budget must also be
	 * handled to avoid I/O deadlocks.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: Store the request's budget token.
	 */
	void (*set_rq_budget_token)(struct request *, int);

	/**
	 * @get_rq_budget_token: Retrieve the request's budget token.
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *, bool);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * Tag greater than or equal to queue_depth is for setting up
	 * flush request.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @initialize_rq_fn: Called from inside blk_get_request().
	 */
	void (*initialize_rq_fn)(struct request *rq);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't completed
	 * yet, and usually for freeing the driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	int (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
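
/*
 * Illustrative sketch of the central ->queue_rq() callback: start the
 * request, hand it to the hardware and report the outcome as a blk_status_t.
 * Returning BLK_STS_RESOURCE makes the block layer retry the request later.
 * my_hw_submit() and struct my_cmd are hypothetical.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		blk_mq_start_request(rq);
 *		if (!my_hw_submit(hctx->driver_data, cmd, bd->last))
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 */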

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO:
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	/*
	 * Select 'none' scheduler by default. IO scheduler can still be
	 * changed via sysfs.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
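
/*
 * Illustrative sketch: the tag allocation policy is packed into the
 * BLK_MQ_F_ALLOC_POLICY_* bit range of the tag set flags. Assuming the
 * BLK_TAG_ALLOC_* values from <linux/blkdev.h>, a round-robin policy would
 * be requested and read back as:
 *
 *	set->flags |= BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
 *	policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
 */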

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)				\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_mq_alloc_disk(set, queuedata, &__key);			\
})
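
/*
 * Illustrative sketch: once blk_mq_alloc_tag_set() has succeeded, a driver
 * can create a gendisk together with its request_queue in one step.  The
 * dev and my_block_ops names are hypothetical.
 *
 *	disk = blk_mq_alloc_disk(&dev->tag_set, dev);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &my_block_ops;
 *	dev->queue = disk->queue;
 */
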
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
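
/*
 * Illustrative sketch: allocating a driver-internal request outside the
 * normal bio submission path and releasing it again.  Error handling beyond
 * the ERR_PTR check is omitted.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */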

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
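
/*
 * Illustrative sketch: a driver can hand the 32-bit unique tag to its
 * hardware and split it back into the hardware queue index and per-queue
 * tag when the completion comes back.
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	...
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */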

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be casted
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be casted
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
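
/*
 * Illustrative sketch: with cmd_size set in the tag set, every request is
 * followed directly by a driver PDU, so the two conversions above are plain
 * pointer arithmetic.  struct my_cmd is hypothetical.
 *
 *	set->cmd_size = sizeof(struct my_cmd);
 *	...
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);	(e.g. in ->queue_rq())
 *	struct request *rq = blk_mq_rq_from_pdu(cmd);	(e.g. on completion)
 */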

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	if (rq->tag != -1)
		return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);

	return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
			BLK_QC_T_INTERNAL;
}

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}

blk_qc_t blk_mq_submit_bio(struct bio *bio);
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

#endif