// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to
 * support fairer distribution of tags between multiple submitters when
 * a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

/*
 * If a previously inactive queue goes active, bump the active user count.
 * The count is used by hctx_may_queue() to distribute tags fairly among
 * the queues sharing a tag map.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			atomic_inc(&hctx->tags->active_queues);
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
		    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			atomic_inc(&hctx->tags->active_queues);
	}

	return true;
}

/*
 * Wakeup all potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

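/*
 * Attempt a single tag allocation from @bt without blocking. The
 * hctx_may_queue() fairness limit only applies when no elevator is
 * attached (an elevator limits queue depth itself) and the allocation
 * is not from the reserved pool.
 */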
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
	    !hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

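/*
 * Batched tag allocation. Only the simple case is handled here: no
 * shallow depth limit, no reserved tags and no shared tag map, since
 * those all need the fairness logic in blk_mq_get_tag(). Returns a mask
 * of allocated tags; *offset is set to the first allocated tag,
 * adjusted for the reserved tags preceding the normal tag space.
 */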
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt = &tags->bitmap_tags;
	unsigned long ret;

	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		return 0;
	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
	*offset += tags->nr_reserved_tags;
	return ret;
}

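/*
 * Allocate a single tag, sleeping if none is available and the caller
 * did not pass BLK_MQ_REQ_NOWAIT. Returns the tag offset into the
 * combined (reserved + normal) tag space, or BLK_MQ_NO_TAG on failure.
 */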
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
					      data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If destination hw queue is changed, fake wake up on
		 * previous queue for compensating the wake up miss, so
		 * other allocations on previous queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive.  The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}

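/*
 * Release a tag back to its sbitmap. @tag is in the combined tag space,
 * so normal tags are adjusted by nr_reserved_tags before being cleared.
 */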
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
				  tag_array, nr_tags);
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

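/*
 * Look up the request occupying @bitnr and take a reference on it.
 * tags->lock serializes the lookup against the tag map's rqs[] array
 * being updated or torn down. Returns NULL if the tag is not backed by
 * a valid request.
 */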
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;
	bool ret = true;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == hctx->queue && rq->mq_hctx == hctx)
		ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED	(1 << 0)	/* iterating the reserved tags */
#define BT_TAG_ITER_STARTED	(1 << 1)	/* only visit started requests */
#define BT_TAG_ITER_STATIC_RQS	(1 << 2)	/* use static_rqs[], no refcounting */

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data, reserved);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags;

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

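/*
 * busy_tag_iter_fn callback used by blk_mq_tagset_wait_completed_request()
 * below: counts requests that have been marked completed but not yet freed.
 */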
static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - wait until all completed req's
 * complete function is run
 * @tagset:	Tag set to drain completed request
 *
 * Note: This function has to be run after all IO queues are shutdown
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
	blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

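/*
 * Initialize the normal and reserved sbitmaps of a tag map: the normal
 * map gets queue_depth - reserved bits, the reserved map gets the
 * remaining @reserved bits.
 */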
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			struct sbitmap_queue *breserved_tags,
			unsigned int queue_depth, unsigned int reserved,
			int node, int alloc_policy)
{
	unsigned int depth = queue_depth - reserved;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(breserved_tags, reserved, round_robin, node))
		goto free_bitmap_tags;

	return 0;

free_bitmap_tags:
	sbitmap_queue_free(bitmap_tags);
	return -ENOMEM;
}

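/*
 * Allocate and initialize a struct blk_mq_tags, including both sbitmaps.
 * Returns NULL if total_tags exceeds BLK_MQ_TAG_MAX or on allocation
 * failure.
 */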
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
				total_tags, reserved_tags, node,
				alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

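/*
 * Update the depth of a hardware queue's tag map. Shrinking only resizes
 * the sbitmap; growing beyond the original allocation requires @can_grow
 * and replaces the tag map together with its statically allocated
 * requests.
 */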
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		/*
		 * Only the sbitmap needs resizing since we allocated the max
		 * initially.
		 */
		if (blk_mq_is_shared_tags(set->flags))
			return 0;

		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
		if (!new)
			return -ENOMEM;

		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

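/*
 * Resize helpers for shared tag maps: one for tags shared across the
 * whole tag set, one for the scheduler tags shared by a request queue.
 */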
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
	struct blk_mq_tags *tags = set->shared_tags;

	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique only within a hardware queue,
 * so the same tag value may be in use on several hardware queues of the
 * same request queue at once. Combine the hardware queue number with the
 * tag to produce a value that is unique across the whole request queue.
 * The hardware queue index can be recovered with blk_mq_unique_tag_to_hwq()
 * and the per-queue tag with blk_mq_unique_tag_to_tag().
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);