1
2#ifndef BLK_MQ_H
3#define BLK_MQ_H
4
5#include <linux/blkdev.h>
6#include <linux/sbitmap.h>
7#include <linux/srcu.h>
8#include <linux/lockdep.h>
9
10struct blk_mq_tags;
11struct blk_flush_queue;
12
13
14
15
16
/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t		lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests at this list will
		 * be sent first for a fairer dispatch.
		 */
		struct list_head	dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long		state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work	run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t		cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int			next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many works left in the batch before
	 * changing to the next CPU.
	 */
	int			next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long		flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void			*sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue	*queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue	*fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void			*driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap		ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx	*dispatch_from;
	/**
	 * @dispatch_busy: Number used to decide if a hardware queue is busy,
	 * maintained as an exponentially weighted moving average.
	 */
	unsigned int		dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short		type;

	/** @nr_ctx: Number of software queues. */
	unsigned short		nr_ctx;

	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx	**ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t		dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t	dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t		wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag at this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags	*tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags	*sched_tags;

	/** @queued: Number of queued requests. */
	unsigned long		queued;
	/** @run: Number of dispatched requests. */
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	/** @dispatched: Number of dispatch requests by queue. */
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int		numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int		queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t		nr_active;

	/** @cpuhp_online: List to store request if CPU is going to be removed. */
	struct hlist_node	cpuhp_online;
	/** @cpuhp_dead: List to store request if some CPU die. */
	struct hlist_node	cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject		kobj;

	/** @poll_considered: Count times blk_poll() was called. */
	unsigned long		poll_considered;
	/** @poll_invoked: Count how many requests blk_poll() polled. */
	unsigned long		poll_invoked;
	/** @poll_success: Count how many polled requests were completed. */
	unsigned long		poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue.
	 */
	struct dentry		*debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry		*sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head	hctx_list;

	/**
	 * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
	 * blocking (BLK_MQ_F_BLOCKING). Flexible array, so it must be the
	 * last member of the struct.
	 */
	struct srcu_struct	srcu[];
};
181
182
183
184
185
186
187
188
189
190
191
/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};
197
198
199
200
201
202
203
204
/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @active_queues_shared_sbitmap:
 *		   Number of active request queues per tag set.
 * @__bitmap_tags: A shared tags sbitmap, used over all hctx's.
 * @__breserved_tags:
 *		   A shared reserved tags sbitmap, used over all hctx's.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set.
 */
struct blk_mq_tag_set {
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;
	atomic_t		active_queues_shared_sbitmap;

	struct sbitmap_queue	__bitmap_tags;
	struct sbitmap_queue	__breserved_tags;
	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
267
268
269
270
271
272
273
/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

/*
 * Callback types for iterating busy requests (per-hctx and per-tagset
 * variants). NOTE(review): the bool return presumably controls whether
 * iteration continues — confirm against the iterator implementations in
 * block/blk-mq-tag.c.
 */
typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
282
283
284
285
286
/**
 * struct blk_mq_ops - Callback functions that implements block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @get_budget: Reserve budget before queue request, once .queue_rq is
	 * run, it is driver's responsibility to release the
	 * reserved budget. Also we have to handle failure case
	 * of .get_budget for avoiding I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *, bool);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * Tag greater than or equal to queue_depth is for setting up
	 * flush request.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @initialize_rq_fn: Called from inside blk_get_request().
	 */
	void (*initialize_rq_fn)(struct request *rq);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't completed
	 * yet, and usually for freeing the driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers specify their own queue mapping by
	 * overriding the setup-time function that builds the mq_map.
	 */
	int (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
396
/* BLK_MQ_F_* are tag-set flags; BLK_MQ_S_* are per-hctx state bits. */
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO:
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	/* Tag allocation policy is encoded in flag bits starting here. */
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
/* Extract the tag allocation policy from a BLK_MQ_F_* flags word. */
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
/* Encode a tag allocation policy into the BLK_MQ_F_* flag bits. */
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
428
/*
 * blk_mq_alloc_disk - allocate a gendisk for a tag set, wiring up a
 * per-call-site lockdep class for bio completion. The static __key lives
 * once per expansion site, which is why this is a macro and not a function.
 */
#define blk_mq_alloc_disk(set, queuedata)				\
({									\
	static struct lock_class_key __key;				\
	struct gendisk *__disk = __blk_mq_alloc_disk(set, queuedata);	\
									\
	if (!IS_ERR(__disk))						\
		lockdep_init_map(&__disk->lockdep_map,			\
			"(bio completion)", &__key, 0);			\
	__disk;								\
})
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
		void *queuedata);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

/* Tag set setup/teardown. */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);
457
enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};
466
/* Request allocation and tag lookup. */
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

/* Layout of the "unique tag": hw queue index in the high bits, tag low. */
enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};
478
479u32 blk_mq_unique_tag(struct request *rq);
480
481static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
482{
483 return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
484}
485
486static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
487{
488 return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
489}
490
491
492
493
494
/*
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request.
 * Uses READ_ONCE() because the state is updated concurrently.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}
499
500static inline int blk_mq_request_started(struct request *rq)
501{
502 return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
503}
504
505static inline int blk_mq_request_completed(struct request *rq)
506{
507 return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
508}
509
510
511
512
513
514
515
516
/*
 * blk_mq_set_request_complete() - force a request into the COMPLETE state.
 * WRITE_ONCE() is used because rq->state is read concurrently via
 * blk_mq_rq_state().
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}
521
/* Request lifecycle: start, end, requeue and completion. */
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
/* Hardware queue control: stop/start/run and quiesce. */
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
/* Tag-set iteration and queue freezing. */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);
560
561bool __blk_should_fake_timeout(struct request_queue *q);
562static inline bool blk_should_fake_timeout(struct request_queue *q)
563{
564 if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
565 test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
566 return __blk_should_fake_timeout(q);
567 return false;
568}
569
570
571
572
573
574
575
576
577
578
579static inline struct request *blk_mq_rq_from_pdu(void *pdu)
580{
581 return pdu - sizeof(struct request);
582}
583
584
585
586
587
588
589
590
591
592
593static inline void *blk_mq_rq_to_pdu(struct request *rq)
594{
595 return rq + 1;
596}
597
/* Iterate over all hardware queues of request queue @q. */
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

/* Iterate over all software queues mapped to hardware queue @hctx. */
#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
605
606static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
607 struct request *rq)
608{
609 if (rq->tag != -1)
610 return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
611
612 return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
613 BLK_QC_T_INTERNAL;
614}
615
/*
 * blk_mq_cleanup_rq() - invoke the driver's optional ->cleanup_rq hook for
 * @rq. No-op when the driver does not provide one.
 */
static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}
621
622static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
623 unsigned int nr_segs)
624{
625 rq->nr_phys_segments = nr_segs;
626 rq->__data_len = bio->bi_iter.bi_size;
627 rq->bio = rq->biotail = bio;
628 rq->ioprio = bio_prio(bio);
629
630 if (bio->bi_bdev)
631 rq->rq_disk = bio->bi_bdev->bd_disk;
632}
633
/* Bio submission entry point and flush-queue lockdep helper. */
blk_qc_t blk_mq_submit_bio(struct bio *bio);
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);
637
638#endif
639