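// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */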
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}
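
/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */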
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
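
	/*
	 * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
	 * in blk_mq_run_hw_queue(). Its pair is the barrier in
	 * blk_mq_dispatch_rq_list(): without it, the dispatch side could see
	 * SCHED_RESTART already cleared yet miss a request that was just
	 * added to hctx->dispatch.
	 */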
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */
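
/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */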
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
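			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */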
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);
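
		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */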
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;
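
		/*
		 * If we cannot get a tag for the request, stop dequeueing
		 * requests from the IO scheduler. We are unlikely to be able
		 * to submit them anyway and it creates a false impression
		 * for scheduling heuristics that the device can take more IO.
		 */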
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
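		/*
		 * Requests from different hctx may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list by hctx, so that requests
		 * belonging to the same hctx become adjacent and can be
		 * dispatched batch by batch.
		 */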
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
	} while (ret == 1);

	return ret;
}

static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}
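
/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */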
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
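			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */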
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);
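
		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */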
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	const bool has_sched = q->elevator;
	int ret = 0;
	LIST_HEAD(rq_list);
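
	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for more fair dispatch.
	 */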
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}
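
	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */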
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue requests one by one from the sw queue if the queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;
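
	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */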
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
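
/*
 * Iterate the software-queue request list and see if the bio can be
 * merged with any of the pending requests.
 */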
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio)) {
		ctx->rq_merged++;
		return true;
	}

	return false;
}

bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(hctx, bio);

	type = hctx->type;
	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
	    !list_empty_careful(&ctx->rq_lists[type])) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, hctx, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
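	/*
	 * Dispatch flush and passthrough requests directly.
	 *
	 * A passthrough request has to be added to hctx->dispatch directly:
	 * the device may be in a state where it can't handle FS requests,
	 * so STS_RESOURCE is always returned and the request ends up on
	 * hctx->dispatch. However, a passthrough request may be required at
	 * that very time to fix the problem. If it were added to the
	 * scheduler queue, there would be no chance to dispatch it, given
	 * that we prioritize requests in hctx->dispatch.
	 */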
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	return false;
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

	if (blk_mq_sched_bypass_insert(hctx, rq)) {
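		/*
		 * Firstly, a normal IO request is inserted to the scheduler
		 * queue or sw queue, while we add the flush request to the
		 * dispatch queue (hctx->dispatch) directly, and there is at
		 * most one in-flight flush request per hw queue, so it
		 * doesn't matter whether the flush request goes to the tail
		 * or front of the dispatch queue.
		 *
		 * Secondly, in case of NCQ, the flush request is a non-NCQ
		 * command, and queueing it will fail while any normal IO
		 * request (NCQ command) is in flight. Adding the flush
		 * request to the front of hctx->dispatch may add extra
		 * latency to it compared with adding to the tail, but the
		 * chance of flush merging is increased and fewer flush
		 * requests are issued to the controller.
		 *
		 * So queue RQF_FLUSH_SEQ requests at the head so that flush
		 * heavy workloads benefit on NCQ hardware.
		 */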
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;
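
	/*
	 * blk_mq_sched_insert_requests() is called from the flush plug
	 * path, which does not pin the hardware queue. Grab the queue
	 * usage counter so that neither the queue nor the hctx can be
	 * torn down underneath us while we insert and run requests.
	 */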
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e) {
		e->type->ops.insert_requests(hctx, list, false);
	} else {
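		/*
		 * Try to issue requests directly if the hw queue isn't busy
		 * in case of the 'none' scheduler; this way we may save one
		 * extra enqueue & dequeue to the sw queue.
		 */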
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
 out:
	percpu_ref_put(&q->q_usage_counter);
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags, set->flags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret) {
		blk_mq_free_rq_map(hctx->sched_tags, set->flags);
		hctx->sched_tags = NULL;
	}

	return ret;
}

/* called in the queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			blk_mq_free_rq_map(hctx->sched_tags, hctx->flags);
			hctx->sched_tags = NULL;
		}
	}
}

static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
	struct blk_mq_hw_ctx *hctx;
	int ret, i;
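
	/*
	 * Set initial depth at max so that we don't need to reallocate for
	 * updating nr_requests.
	 */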
	ret = blk_mq_init_bitmaps(&queue->sched_bitmap_tags,
				  &queue->sched_breserved_tags,
				  MAX_SCHED_RQ, set->reserved_tags,
				  set->numa_node, alloc_policy);
	if (ret)
		return ret;

	queue_for_each_hw_ctx(queue, hctx, i) {
		hctx->sched_tags->bitmap_tags =
			&queue->sched_bitmap_tags;
		hctx->sched_tags->breserved_tags =
			&queue->sched_breserved_tags;
	}

	sbitmap_queue_resize(&queue->sched_bitmap_tags,
			     queue->nr_requests - set->reserved_tags);

	return 0;
}

static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
{
	sbitmap_queue_free(&queue->sched_bitmap_tags);
	sbitmap_queue_free(&queue->sched_breserved_tags);
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}
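
	/*
	 * Default to double of smaller one between hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */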
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err_free_tags;
	}

	if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
		ret = blk_mq_init_sched_shared_sbitmap(q);
		if (ret)
			goto err_free_tags;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_sbitmap;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_requests(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err_free_sbitmap:
	if (blk_mq_is_sbitmap_shared(q->tag_set->flags))
		blk_mq_exit_sched_shared_sbitmap(q);
err_free_tags:
	blk_mq_sched_free_requests(q);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}
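
/*
 * called from either blk_cleanup_queue() or elevator_switch(); the tagset
 * is required for freeing requests
 */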
void blk_mq_sched_free_requests(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags)
			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
	}
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	if (blk_mq_is_sbitmap_shared(flags))
		blk_mq_exit_sched_shared_sbitmap(q);
	q->elevator = NULL;
}