// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, bytes, bucket;

	ddir = rq_data_dir(rq);
	bytes = blk_rq_bytes(rq);

	bucket = ddir + 2*(ilog2(bytes) - 9);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}
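
/*
 * Example of the bucketing above (derived from the formula, for
 * illustration only): a 4096-byte request has ilog2(4096) == 12, so a
 * read (ddir == 0) lands in bucket 0 + 2 * (12 - 9) == 6 and the
 * corresponding write (ddir == 1) in bucket 7.  Requests smaller than
 * 512 bytes compute a negative bucket and are rejected with -1.
 */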

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct hd_struct *part;
	unsigned int *inflight;
};

static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	/*
	 * index[0] counts the specific partition that was asked for.
	 */
	if (rq->part == mi->part)
		mi->inflight[0]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
{
	unsigned inflight[2];
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return inflight[0];
}

static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
				     struct request *rq, void *priv,
				     bool reserved)
{
	struct mq_inflight *mi = priv;

	if (rq->part == mi->part)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
}

void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
				  percpu_ref_is_zero(&q->q_usage_counter),
				  timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
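
/*
 * Illustrative freeze/unfreeze pairing (not part of this file): a driver
 * that needs to change queue state with no requests in flight would
 * typically bracket the update like this, where "q" stands for any live
 * request_queue:
 *
 *	blk_mq_freeze_queue(q);
 *	... update tag depth, queue limits, scheduler, etc ...
 *	blk_mq_unfreeze_queue(q);
 *
 * Freezing waits for q_usage_counter to drop to zero, so it must not be
 * called from a context that itself holds a queue reference it cannot drop.
 */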

/*
 * Note: this helper only marks the queue quiesced; it does not wait for
 * outstanding dispatches to finish.  Callers that need that guarantee
 * should use blk_mq_quiesce_queue().
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent that the struct request end_io()
 * callback function is invoked. Once this function is returned, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function recovers the queue into the state before quiescing
 * was done by blk_mq_quiesce_queue().
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

/*
 * Only need start/end time stamping if we have stats enabled, or using
 * an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) || rq->q->elevator;
}

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, unsigned int op)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];
	req_flags_t rq_flags = 0;

	if (data->flags & BLK_MQ_REQ_INTERNAL) {
		rq->tag = -1;
		rq->internal_tag = tag;
	} else {
		if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
			rq_flags = RQF_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}
		rq->tag = tag;
		rq->internal_tag = -1;
		data->hctx->tags->rqs[rq->tag] = rq;
	}

	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->mq_hctx = data->hctx;
	rq->rq_flags = rq_flags;
	rq->cmd_flags = op;
	if (data->flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->io_start_time_ns = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif

	rq->extra_len = 0;
	WRITE_ONCE(rq->deadline, 0);

	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;

	data->ctx->rq_dispatched[op_is_sync(op)]++;
	refcount_set(&rq->ref, 1);
	return rq;
}

static struct request *blk_mq_get_request(struct request_queue *q,
					  struct bio *bio,
					  struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;
	bool clear_ctx_on_error = false;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx)) {
		data->ctx = blk_mq_get_ctx(q);
		clear_ctx_on_error = true;
	}
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
						data->ctx);
	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	} else {
		blk_mq_tag_busy(data->hctx);
	}

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		if (clear_ctx_on_error)
			data->ctx = NULL;
		blk_queue_exit(q);
		return NULL;
	}

	rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags);
	if (!op_is_flush(data->cmd_flags)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.prepare_request) {
			if (e->type->icq_cache)
				blk_mq_sched_assign_ioc(rq);

			e->type->ops.prepare_request(rq, bio);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}
	data->hctx->queued++;
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
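
/*
 * Illustrative caller (not part of this file): a driver issuing a
 * passthrough command might allocate, execute and free a request along
 * the lines of the sketch below; error handling and command setup are
 * elided and the opcode is only an example.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// ...fill in the driver-specific command payload...
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_mq_free_request(rq);
 */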
434
435struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
436 unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
437{
438 struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
439 struct request *rq;
440 unsigned int cpu;
441 int ret;
442
443
444
445
446
447
448
449 if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
450 return ERR_PTR(-EINVAL);
451
452 if (hctx_idx >= q->nr_hw_queues)
453 return ERR_PTR(-EIO);
454
455 ret = blk_queue_enter(q, flags);
456 if (ret)
457 return ERR_PTR(ret);
458
459
460
461
462
463 alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
464 if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
465 blk_queue_exit(q);
466 return ERR_PTR(-EXDEV);
467 }
468 cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
469 alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
470
471 rq = blk_mq_get_request(q, NULL, &alloc_data);
472 blk_queue_exit(q);
473
474 if (!rq)
475 return ERR_PTR(-EWOULDBLOCK);
476
477 return rq;
478}
479EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
480
481static void __blk_mq_free_request(struct request *rq)
482{
483 struct request_queue *q = rq->q;
484 struct blk_mq_ctx *ctx = rq->mq_ctx;
485 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
486 const int sched_tag = rq->internal_tag;
487
488 blk_pm_mark_last_busy(rq);
489 rq->mq_hctx = NULL;
490 if (rq->tag != -1)
491 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
492 if (sched_tag != -1)
493 blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
494 blk_mq_sched_restart(hctx);
495 blk_queue_exit(q);
496}
497
498void blk_mq_free_request(struct request *rq)
499{
500 struct request_queue *q = rq->q;
501 struct elevator_queue *e = q->elevator;
502 struct blk_mq_ctx *ctx = rq->mq_ctx;
503 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
504
505 if (rq->rq_flags & RQF_ELVPRIV) {
506 if (e && e->type->ops.finish_request)
507 e->type->ops.finish_request(rq);
508 if (rq->elv.icq) {
509 put_io_context(rq->elv.icq->ioc);
510 rq->elv.icq = NULL;
511 }
512 }
513
514 ctx->rq_completed[rq_is_sync(rq)]++;
515 if (rq->rq_flags & RQF_MQ_INFLIGHT)
516 atomic_dec(&hctx->nr_active);
517
518 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
519 laptop_io_completion(q->backing_dev_info);
520
521 rq_qos_done(q, rq);
522
523 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
524 if (refcount_dec_and_test(&rq->ref))
525 __blk_mq_free_request(rq);
526}
527EXPORT_SYMBOL_GPL(blk_mq_free_request);
528
529inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
530{
531 u64 now = 0;
532
533 if (blk_mq_need_time_stamp(rq))
534 now = ktime_get_ns();
535
536 if (rq->rq_flags & RQF_STATS) {
537 blk_mq_poll_stats_start(rq->q);
538 blk_stat_add(rq, now);
539 }
540
541 if (rq->internal_tag != -1)
542 blk_mq_sched_completed_request(rq, now);
543
544 blk_account_io_done(rq, now);
545
546 if (rq->end_io) {
547 rq_qos_done(rq->q, rq);
548 rq->end_io(rq, error);
549 } else {
550 blk_mq_free_request(rq);
551 }
552}
553EXPORT_SYMBOL(__blk_mq_end_request);
554
555void blk_mq_end_request(struct request *rq, blk_status_t error)
556{
557 if (blk_update_request(rq, error, blk_rq_bytes(rq)))
558 BUG();
559 __blk_mq_end_request(rq, error);
560}
561EXPORT_SYMBOL(blk_mq_end_request);
562
563static void __blk_mq_complete_request_remote(void *data)
564{
565 struct request *rq = data;
566 struct request_queue *q = rq->q;
567
568 q->mq_ops->complete(rq);
569}
570
571static void __blk_mq_complete_request(struct request *rq)
572{
573 struct blk_mq_ctx *ctx = rq->mq_ctx;
574 struct request_queue *q = rq->q;
575 bool shared = false;
576 int cpu;
577
578 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
579
580
581
582
583
584
585
586
587
588 if (q->nr_hw_queues == 1) {
589 __blk_complete_request(rq);
590 return;
591 }
592
593
594
595
596
597 if ((rq->cmd_flags & REQ_HIPRI) ||
598 !test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
599 q->mq_ops->complete(rq);
600 return;
601 }
602
603 cpu = get_cpu();
604 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
605 shared = cpus_share_cache(cpu, ctx->cpu);
606
607 if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
608 rq->csd.func = __blk_mq_complete_request_remote;
609 rq->csd.info = rq;
610 rq->csd.flags = 0;
611 smp_call_function_single_async(ctx->cpu, &rq->csd);
612 } else {
613 q->mq_ops->complete(rq);
614 }
615 put_cpu();
616}
617
618static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
619 __releases(hctx->srcu)
620{
621 if (!(hctx->flags & BLK_MQ_F_BLOCKING))
622 rcu_read_unlock();
623 else
624 srcu_read_unlock(hctx->srcu, srcu_idx);
625}
626
627static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
628 __acquires(hctx->srcu)
629{
630 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
631
632 *srcu_idx = 0;
633 rcu_read_lock();
634 } else
635 *srcu_idx = srcu_read_lock(hctx->srcu);
636}
637
/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
bool blk_mq_complete_request(struct request *rq)
{
	if (unlikely(blk_should_fake_timeout(rq->q)))
		return false;
	__blk_mq_complete_request(rq);
	return true;
}
EXPORT_SYMBOL(blk_mq_complete_request);
654
655void blk_mq_complete_request_sync(struct request *rq)
656{
657 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
658 rq->q->mq_ops->complete(rq);
659}
660EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);
661
662int blk_mq_request_started(struct request *rq)
663{
664 return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
665}
666EXPORT_SYMBOL_GPL(blk_mq_request_started);
667
668void blk_mq_start_request(struct request *rq)
669{
670 struct request_queue *q = rq->q;
671
672 trace_block_rq_issue(q, rq);
673
674 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
675 rq->io_start_time_ns = ktime_get_ns();
676#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
677 rq->throtl_size = blk_rq_sectors(rq);
678#endif
679 rq->rq_flags |= RQF_STATS;
680 rq_qos_issue(q, rq);
681 }
682
683 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
684
685 blk_add_timer(rq);
686 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
687
688 if (q->dma_drain_size && blk_rq_bytes(rq)) {
689
690
691
692
693
694 rq->nr_phys_segments++;
695 }
696}
697EXPORT_SYMBOL(blk_mq_start_request);
698
699static void __blk_mq_requeue_request(struct request *rq)
700{
701 struct request_queue *q = rq->q;
702
703 blk_mq_put_driver_tag(rq);
704
705 trace_block_rq_requeue(q, rq);
706 rq_qos_requeue(q, rq);
707
708 if (blk_mq_request_started(rq)) {
709 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
710 rq->rq_flags &= ~RQF_TIMED_OUT;
711 if (q->dma_drain_size && blk_rq_bytes(rq))
712 rq->nr_phys_segments--;
713 }
714}
715
716void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
717{
718 __blk_mq_requeue_request(rq);
719
720
721 blk_mq_sched_requeue_request(rq);
722
723 BUG_ON(!list_empty(&rq->queuelist));
724 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
725}
726EXPORT_SYMBOL(blk_mq_requeue_request);
727
728static void blk_mq_requeue_work(struct work_struct *work)
729{
730 struct request_queue *q =
731 container_of(work, struct request_queue, requeue_work.work);
732 LIST_HEAD(rq_list);
733 struct request *rq, *next;
734
735 spin_lock_irq(&q->requeue_lock);
736 list_splice_init(&q->requeue_list, &rq_list);
737 spin_unlock_irq(&q->requeue_lock);
738
739 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
740 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
741 continue;
742
743 rq->rq_flags &= ~RQF_SOFTBARRIER;
744 list_del_init(&rq->queuelist);
745
746
747
748
749
750 if (rq->rq_flags & RQF_DONTPREP)
751 blk_mq_request_bypass_insert(rq, false);
752 else
753 blk_mq_sched_insert_request(rq, true, false, false);
754 }
755
756 while (!list_empty(&rq_list)) {
757 rq = list_entry(rq_list.next, struct request, queuelist);
758 list_del_init(&rq->queuelist);
759 blk_mq_sched_insert_request(rq, false, false, false);
760 }
761
762 blk_mq_run_hw_queues(q, false);
763}
764
765void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
766 bool kick_requeue_list)
767{
768 struct request_queue *q = rq->q;
769 unsigned long flags;
770
771
772
773
774
775 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
776
777 spin_lock_irqsave(&q->requeue_lock, flags);
778 if (at_head) {
779 rq->rq_flags |= RQF_SOFTBARRIER;
780 list_add(&rq->queuelist, &q->requeue_list);
781 } else {
782 list_add_tail(&rq->queuelist, &q->requeue_list);
783 }
784 spin_unlock_irqrestore(&q->requeue_lock, flags);
785
786 if (kick_requeue_list)
787 blk_mq_kick_requeue_list(q);
788}
789
790void blk_mq_kick_requeue_list(struct request_queue *q)
791{
792 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
793}
794EXPORT_SYMBOL(blk_mq_kick_requeue_list);
795
796void blk_mq_delay_kick_requeue_list(struct request_queue *q,
797 unsigned long msecs)
798{
799 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
800 msecs_to_jiffies(msecs));
801}
802EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
803
804struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
805{
806 if (tag < tags->nr_tags) {
807 prefetch(tags->rqs[tag]);
808 return tags->rqs[tag];
809 }
810
811 return NULL;
812}
813EXPORT_SYMBOL(blk_mq_tag_to_rq);
814
815static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
816 void *priv, bool reserved)
817{
818
819
820
821
822 if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
823 bool *busy = priv;
824
825 *busy = true;
826 return false;
827 }
828
829 return true;
830}
831
832bool blk_mq_queue_inflight(struct request_queue *q)
833{
834 bool busy = false;
835
836 blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
837 return busy;
838}
839EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
840
841static void blk_mq_rq_timed_out(struct request *req, bool reserved)
842{
843 req->rq_flags |= RQF_TIMED_OUT;
844 if (req->q->mq_ops->timeout) {
845 enum blk_eh_timer_return ret;
846
847 ret = req->q->mq_ops->timeout(req, reserved);
848 if (ret == BLK_EH_DONE)
849 return;
850 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
851 }
852
853 blk_add_timer(req);
854}
855
856static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
857{
858 unsigned long deadline;
859
860 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
861 return false;
862 if (rq->rq_flags & RQF_TIMED_OUT)
863 return false;
864
865 deadline = READ_ONCE(rq->deadline);
866 if (time_after_eq(jiffies, deadline))
867 return true;
868
869 if (*next == 0)
870 *next = deadline;
871 else if (time_after(*next, deadline))
872 *next = deadline;
873 return false;
874}
875
876static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
877 struct request *rq, void *priv, bool reserved)
878{
879 unsigned long *next = priv;
880
881
882
883
884
885 if (!blk_mq_req_expired(rq, next))
886 return true;
887
888
889
890
891
892
893
894
895
896
897 if (!refcount_inc_not_zero(&rq->ref))
898 return true;
899
900
901
902
903
904
905
906 if (blk_mq_req_expired(rq, next))
907 blk_mq_rq_timed_out(rq, reserved);
908 if (refcount_dec_and_test(&rq->ref))
909 __blk_mq_free_request(rq);
910
911 return true;
912}
913
914static void blk_mq_timeout_work(struct work_struct *work)
915{
916 struct request_queue *q =
917 container_of(work, struct request_queue, timeout_work);
918 unsigned long next = 0;
919 struct blk_mq_hw_ctx *hctx;
920 int i;
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935 if (!percpu_ref_tryget(&q->q_usage_counter))
936 return;
937
938 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
939
940 if (next != 0) {
941 mod_timer(&q->timeout, next);
942 } else {
943
944
945
946
947
948
949 queue_for_each_hw_ctx(q, hctx, i) {
950
951 if (blk_mq_hw_queue_mapped(hctx))
952 blk_mq_tag_idle(hctx);
953 }
954 }
955 blk_queue_exit(q);
956}
957
958struct flush_busy_ctx_data {
959 struct blk_mq_hw_ctx *hctx;
960 struct list_head *list;
961};
962
963static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
964{
965 struct flush_busy_ctx_data *flush_data = data;
966 struct blk_mq_hw_ctx *hctx = flush_data->hctx;
967 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
968 enum hctx_type type = hctx->type;
969
970 spin_lock(&ctx->lock);
971 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
972 sbitmap_clear_bit(sb, bitnr);
973 spin_unlock(&ctx->lock);
974 return true;
975}
976
977
978
979
980
981void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
982{
983 struct flush_busy_ctx_data data = {
984 .hctx = hctx,
985 .list = list,
986 };
987
988 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
989}
990EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
991
992struct dispatch_rq_data {
993 struct blk_mq_hw_ctx *hctx;
994 struct request *rq;
995};
996
997static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
998 void *data)
999{
1000 struct dispatch_rq_data *dispatch_data = data;
1001 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1002 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1003 enum hctx_type type = hctx->type;
1004
1005 spin_lock(&ctx->lock);
1006 if (!list_empty(&ctx->rq_lists[type])) {
1007 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1008 list_del_init(&dispatch_data->rq->queuelist);
1009 if (list_empty(&ctx->rq_lists[type]))
1010 sbitmap_clear_bit(sb, bitnr);
1011 }
1012 spin_unlock(&ctx->lock);
1013
1014 return !dispatch_data->rq;
1015}
1016
1017struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1018 struct blk_mq_ctx *start)
1019{
1020 unsigned off = start ? start->index_hw[hctx->type] : 0;
1021 struct dispatch_rq_data data = {
1022 .hctx = hctx,
1023 .rq = NULL,
1024 };
1025
1026 __sbitmap_for_each_set(&hctx->ctx_map, off,
1027 dispatch_rq_from_ctx, &data);
1028
1029 return data.rq;
1030}
1031
static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
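
/*
 * For illustration: queued_to_index() turns a dispatch batch size into a
 * histogram slot for hctx->dispatched[].  A batch of 1 maps to slot
 * ilog2(1) + 1 == 1, a batch of 4 to ilog2(4) + 1 == 3, and very large
 * batches are clamped to BLK_MQ_MAX_DISPATCH_ORDER - 1.
 */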
1039
1040bool blk_mq_get_driver_tag(struct request *rq)
1041{
1042 struct blk_mq_alloc_data data = {
1043 .q = rq->q,
1044 .hctx = rq->mq_hctx,
1045 .flags = BLK_MQ_REQ_NOWAIT,
1046 .cmd_flags = rq->cmd_flags,
1047 };
1048 bool shared;
1049
1050 if (rq->tag != -1)
1051 goto done;
1052
1053 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
1054 data.flags |= BLK_MQ_REQ_RESERVED;
1055
1056 shared = blk_mq_tag_busy(data.hctx);
1057 rq->tag = blk_mq_get_tag(&data);
1058 if (rq->tag >= 0) {
1059 if (shared) {
1060 rq->rq_flags |= RQF_MQ_INFLIGHT;
1061 atomic_inc(&data.hctx->nr_active);
1062 }
1063 data.hctx->tags->rqs[rq->tag] = rq;
1064 }
1065
1066done:
1067 return rq->tag != -1;
1068}
1069
1070static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1071 int flags, void *key)
1072{
1073 struct blk_mq_hw_ctx *hctx;
1074
1075 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1076
1077 spin_lock(&hctx->dispatch_wait_lock);
1078 if (!list_empty(&wait->entry)) {
1079 struct sbitmap_queue *sbq;
1080
1081 list_del_init(&wait->entry);
1082 sbq = &hctx->tags->bitmap_tags;
1083 atomic_dec(&sbq->ws_active);
1084 }
1085 spin_unlock(&hctx->dispatch_wait_lock);
1086
1087 blk_mq_run_hw_queue(hctx, true);
1088 return 1;
1089}
1090
1091
1092
1093
1094
1095
1096
1097static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1098 struct request *rq)
1099{
1100 struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
1101 struct wait_queue_head *wq;
1102 wait_queue_entry_t *wait;
1103 bool ret;
1104
1105 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
1106 blk_mq_sched_mark_restart_hctx(hctx);
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116 return blk_mq_get_driver_tag(rq);
1117 }
1118
1119 wait = &hctx->dispatch_wait;
1120 if (!list_empty_careful(&wait->entry))
1121 return false;
1122
1123 wq = &bt_wait_ptr(sbq, hctx)->wait;
1124
1125 spin_lock_irq(&wq->lock);
1126 spin_lock(&hctx->dispatch_wait_lock);
1127 if (!list_empty(&wait->entry)) {
1128 spin_unlock(&hctx->dispatch_wait_lock);
1129 spin_unlock_irq(&wq->lock);
1130 return false;
1131 }
1132
1133 atomic_inc(&sbq->ws_active);
1134 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1135 __add_wait_queue(wq, wait);
1136
1137
1138
1139
1140
1141
1142 ret = blk_mq_get_driver_tag(rq);
1143 if (!ret) {
1144 spin_unlock(&hctx->dispatch_wait_lock);
1145 spin_unlock_irq(&wq->lock);
1146 return false;
1147 }
1148
1149
1150
1151
1152
1153 list_del_init(&wait->entry);
1154 atomic_dec(&sbq->ws_active);
1155 spin_unlock(&hctx->dispatch_wait_lock);
1156 spin_unlock_irq(&wq->lock);
1157
1158 return true;
1159}
1160
#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4

/*
 * Track how busy a hardware queue is with a simple exponential weighted
 * moving average (EWMA).  Each new sample is weighted 1/8 and the previous
 * value 7/8; "busy" samples add 1 << FACTOR so the average does not
 * collapse to zero from integer truncation.
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	if (hctx->queue->elevator)
		return;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}
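
/*
 * Worked example of the update above (for illustration only): starting
 * from dispatch_busy == 0, a run of busy samples evolves as
 * (0 * 7 + 16) / 8 == 2, then (2 * 7 + 16) / 8 == 3, then 4, 5, ...,
 * converging towards 16; a run of idle samples decays as ewma * 7 / 8
 * back towards 0.
 */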
1189
1190#define BLK_MQ_RESOURCE_DELAY 3
1191
1192
1193
1194
1195bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
1196 bool got_budget)
1197{
1198 struct blk_mq_hw_ctx *hctx;
1199 struct request *rq, *nxt;
1200 bool no_tag = false;
1201 int errors, queued;
1202 blk_status_t ret = BLK_STS_OK;
1203
1204 if (list_empty(list))
1205 return false;
1206
1207 WARN_ON(!list_is_singular(list) && got_budget);
1208
1209
1210
1211
1212 errors = queued = 0;
1213 do {
1214 struct blk_mq_queue_data bd;
1215
1216 rq = list_first_entry(list, struct request, queuelist);
1217
1218 hctx = rq->mq_hctx;
1219 if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
1220 break;
1221
1222 if (!blk_mq_get_driver_tag(rq)) {
1223
1224
1225
1226
1227
1228
1229
1230 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1231 blk_mq_put_dispatch_budget(hctx);
1232
1233
1234
1235
1236 if (hctx->flags & BLK_MQ_F_TAG_SHARED)
1237 no_tag = true;
1238 break;
1239 }
1240 }
1241
1242 list_del_init(&rq->queuelist);
1243
1244 bd.rq = rq;
1245
1246
1247
1248
1249
1250 if (list_empty(list))
1251 bd.last = true;
1252 else {
1253 nxt = list_first_entry(list, struct request, queuelist);
1254 bd.last = !blk_mq_get_driver_tag(nxt);
1255 }
1256
1257 ret = q->mq_ops->queue_rq(hctx, &bd);
1258 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
1259
1260
1261
1262
1263
1264 if (!list_empty(list)) {
1265 nxt = list_first_entry(list, struct request, queuelist);
1266 blk_mq_put_driver_tag(nxt);
1267 }
1268 list_add(&rq->queuelist, list);
1269 __blk_mq_requeue_request(rq);
1270 break;
1271 }
1272
1273 if (unlikely(ret != BLK_STS_OK)) {
1274 errors++;
1275 blk_mq_end_request(rq, BLK_STS_IOERR);
1276 continue;
1277 }
1278
1279 queued++;
1280 } while (!list_empty(list));
1281
1282 hctx->dispatched[queued_to_index(queued)]++;
1283
1284
1285
1286
1287
1288 if (!list_empty(list)) {
1289 bool needs_restart;
1290
1291
1292
1293
1294
1295
1296 if (q->mq_ops->commit_rqs)
1297 q->mq_ops->commit_rqs(hctx);
1298
1299 spin_lock(&hctx->lock);
1300 list_splice_init(list, &hctx->dispatch);
1301 spin_unlock(&hctx->lock);
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327 needs_restart = blk_mq_sched_needs_restart(hctx);
1328 if (!needs_restart ||
1329 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1330 blk_mq_run_hw_queue(hctx, true);
1331 else if (needs_restart && (ret == BLK_STS_RESOURCE))
1332 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1333
1334 blk_mq_update_dispatch_busy(hctx, true);
1335 return false;
1336 } else
1337 blk_mq_update_dispatch_busy(hctx, false);
1338
1339
1340
1341
1342
1343 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1344 return false;
1345
1346 return (queued + errors) != 0;
1347}
1348
1349static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1350{
1351 int srcu_idx;
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370 if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1371 cpu_online(hctx->next_cpu)) {
1372 printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
1373 raw_smp_processor_id(),
1374 cpumask_empty(hctx->cpumask) ? "inactive": "active");
1375 dump_stack();
1376 }
1377
1378
1379
1380
1381
1382 WARN_ON_ONCE(in_interrupt());
1383
1384 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1385
1386 hctx_lock(hctx, &srcu_idx);
1387 blk_mq_sched_dispatch_requests(hctx);
1388 hctx_unlock(hctx, srcu_idx);
1389}
1390
1391static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
1392{
1393 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1394
1395 if (cpu >= nr_cpu_ids)
1396 cpu = cpumask_first(hctx->cpumask);
1397 return cpu;
1398}
1399
1400
1401
1402
1403
1404
1405
1406static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1407{
1408 bool tried = false;
1409 int next_cpu = hctx->next_cpu;
1410
1411 if (hctx->queue->nr_hw_queues == 1)
1412 return WORK_CPU_UNBOUND;
1413
1414 if (--hctx->next_cpu_batch <= 0) {
1415select_cpu:
1416 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
1417 cpu_online_mask);
1418 if (next_cpu >= nr_cpu_ids)
1419 next_cpu = blk_mq_first_mapped_cpu(hctx);
1420 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1421 }
1422
1423
1424
1425
1426
1427 if (!cpu_online(next_cpu)) {
1428 if (!tried) {
1429 tried = true;
1430 goto select_cpu;
1431 }
1432
1433
1434
1435
1436
1437 hctx->next_cpu = next_cpu;
1438 hctx->next_cpu_batch = 1;
1439 return WORK_CPU_UNBOUND;
1440 }
1441
1442 hctx->next_cpu = next_cpu;
1443 return next_cpu;
1444}
1445
1446static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1447 unsigned long msecs)
1448{
1449 if (unlikely(blk_mq_hctx_stopped(hctx)))
1450 return;
1451
1452 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1453 int cpu = get_cpu();
1454 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1455 __blk_mq_run_hw_queue(hctx);
1456 put_cpu();
1457 return;
1458 }
1459
1460 put_cpu();
1461 }
1462
1463 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1464 msecs_to_jiffies(msecs));
1465}
1466
1467void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1468{
1469 __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1470}
1471EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1472
1473bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1474{
1475 int srcu_idx;
1476 bool need_run;
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486 hctx_lock(hctx, &srcu_idx);
1487 need_run = !blk_queue_quiesced(hctx->queue) &&
1488 blk_mq_hctx_has_pending(hctx);
1489 hctx_unlock(hctx, srcu_idx);
1490
1491 if (need_run) {
1492 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1493 return true;
1494 }
1495
1496 return false;
1497}
1498EXPORT_SYMBOL(blk_mq_run_hw_queue);
1499
1500void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1501{
1502 struct blk_mq_hw_ctx *hctx;
1503 int i;
1504
1505 queue_for_each_hw_ctx(q, hctx, i) {
1506 if (blk_mq_hctx_stopped(hctx))
1507 continue;
1508
1509 blk_mq_run_hw_queue(hctx, async);
1510 }
1511}
1512EXPORT_SYMBOL(blk_mq_run_hw_queues);
1513
1514
1515
1516
1517
1518
1519
1520
1521bool blk_mq_queue_stopped(struct request_queue *q)
1522{
1523 struct blk_mq_hw_ctx *hctx;
1524 int i;
1525
1526 queue_for_each_hw_ctx(q, hctx, i)
1527 if (blk_mq_hctx_stopped(hctx))
1528 return true;
1529
1530 return false;
1531}
1532EXPORT_SYMBOL(blk_mq_queue_stopped);
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1544{
1545 cancel_delayed_work(&hctx->run_work);
1546
1547 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1548}
1549EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560void blk_mq_stop_hw_queues(struct request_queue *q)
1561{
1562 struct blk_mq_hw_ctx *hctx;
1563 int i;
1564
1565 queue_for_each_hw_ctx(q, hctx, i)
1566 blk_mq_stop_hw_queue(hctx);
1567}
1568EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1569
1570void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1571{
1572 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1573
1574 blk_mq_run_hw_queue(hctx, false);
1575}
1576EXPORT_SYMBOL(blk_mq_start_hw_queue);
1577
1578void blk_mq_start_hw_queues(struct request_queue *q)
1579{
1580 struct blk_mq_hw_ctx *hctx;
1581 int i;
1582
1583 queue_for_each_hw_ctx(q, hctx, i)
1584 blk_mq_start_hw_queue(hctx);
1585}
1586EXPORT_SYMBOL(blk_mq_start_hw_queues);
1587
1588void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1589{
1590 if (!blk_mq_hctx_stopped(hctx))
1591 return;
1592
1593 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1594 blk_mq_run_hw_queue(hctx, async);
1595}
1596EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1597
1598void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1599{
1600 struct blk_mq_hw_ctx *hctx;
1601 int i;
1602
1603 queue_for_each_hw_ctx(q, hctx, i)
1604 blk_mq_start_stopped_hw_queue(hctx, async);
1605}
1606EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1607
1608static void blk_mq_run_work_fn(struct work_struct *work)
1609{
1610 struct blk_mq_hw_ctx *hctx;
1611
1612 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1613
1614
1615
1616
1617 if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
1618 return;
1619
1620 __blk_mq_run_hw_queue(hctx);
1621}
1622
1623static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1624 struct request *rq,
1625 bool at_head)
1626{
1627 struct blk_mq_ctx *ctx = rq->mq_ctx;
1628 enum hctx_type type = hctx->type;
1629
1630 lockdep_assert_held(&ctx->lock);
1631
1632 trace_block_rq_insert(hctx->queue, rq);
1633
1634 if (at_head)
1635 list_add(&rq->queuelist, &ctx->rq_lists[type]);
1636 else
1637 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
1638}
1639
1640void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1641 bool at_head)
1642{
1643 struct blk_mq_ctx *ctx = rq->mq_ctx;
1644
1645 lockdep_assert_held(&ctx->lock);
1646
1647 __blk_mq_insert_req_list(hctx, rq, at_head);
1648 blk_mq_hctx_mark_pending(hctx, ctx);
1649}
1650
1651
1652
1653
1654
1655void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
1656{
1657 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1658
1659 spin_lock(&hctx->lock);
1660 list_add_tail(&rq->queuelist, &hctx->dispatch);
1661 spin_unlock(&hctx->lock);
1662
1663 if (run_queue)
1664 blk_mq_run_hw_queue(hctx, false);
1665}
1666
1667void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1668 struct list_head *list)
1669
1670{
1671 struct request *rq;
1672 enum hctx_type type = hctx->type;
1673
1674
1675
1676
1677
1678 list_for_each_entry(rq, list, queuelist) {
1679 BUG_ON(rq->mq_ctx != ctx);
1680 trace_block_rq_insert(hctx->queue, rq);
1681 }
1682
1683 spin_lock(&ctx->lock);
1684 list_splice_tail_init(list, &ctx->rq_lists[type]);
1685 blk_mq_hctx_mark_pending(hctx, ctx);
1686 spin_unlock(&ctx->lock);
1687}
1688
1689static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
1690{
1691 struct request *rqa = container_of(a, struct request, queuelist);
1692 struct request *rqb = container_of(b, struct request, queuelist);
1693
1694 if (rqa->mq_ctx < rqb->mq_ctx)
1695 return -1;
1696 else if (rqa->mq_ctx > rqb->mq_ctx)
1697 return 1;
1698 else if (rqa->mq_hctx < rqb->mq_hctx)
1699 return -1;
1700 else if (rqa->mq_hctx > rqb->mq_hctx)
1701 return 1;
1702
1703 return blk_rq_pos(rqa) > blk_rq_pos(rqb);
1704}
1705
1706void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1707{
1708 struct blk_mq_hw_ctx *this_hctx;
1709 struct blk_mq_ctx *this_ctx;
1710 struct request_queue *this_q;
1711 struct request *rq;
1712 LIST_HEAD(list);
1713 LIST_HEAD(rq_list);
1714 unsigned int depth;
1715
1716 list_splice_init(&plug->mq_list, &list);
1717
1718 if (plug->rq_count > 2 && plug->multiple_queues)
1719 list_sort(NULL, &list, plug_rq_cmp);
1720
1721 plug->rq_count = 0;
1722
1723 this_q = NULL;
1724 this_hctx = NULL;
1725 this_ctx = NULL;
1726 depth = 0;
1727
1728 while (!list_empty(&list)) {
1729 rq = list_entry_rq(list.next);
1730 list_del_init(&rq->queuelist);
1731 BUG_ON(!rq->q);
1732 if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
1733 if (this_hctx) {
1734 trace_block_unplug(this_q, depth, !from_schedule);
1735 blk_mq_sched_insert_requests(this_hctx, this_ctx,
1736 &rq_list,
1737 from_schedule);
1738 }
1739
1740 this_q = rq->q;
1741 this_ctx = rq->mq_ctx;
1742 this_hctx = rq->mq_hctx;
1743 depth = 0;
1744 }
1745
1746 depth++;
1747 list_add_tail(&rq->queuelist, &rq_list);
1748 }
1749
1750
1751
1752
1753
1754 if (this_hctx) {
1755 trace_block_unplug(this_q, depth, !from_schedule);
1756 blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
1757 from_schedule);
1758 }
1759}
1760
1761static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
1762 unsigned int nr_segs)
1763{
1764 if (bio->bi_opf & REQ_RAHEAD)
1765 rq->cmd_flags |= REQ_FAILFAST_MASK;
1766
1767 rq->__sector = bio->bi_iter.bi_sector;
1768 rq->write_hint = bio->bi_write_hint;
1769 blk_rq_bio_prep(rq, bio, nr_segs);
1770
1771 blk_account_io_start(rq, true);
1772}
1773
1774static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1775 struct request *rq,
1776 blk_qc_t *cookie, bool last)
1777{
1778 struct request_queue *q = rq->q;
1779 struct blk_mq_queue_data bd = {
1780 .rq = rq,
1781 .last = last,
1782 };
1783 blk_qc_t new_cookie;
1784 blk_status_t ret;
1785
1786 new_cookie = request_to_qc_t(hctx, rq);
1787
1788
1789
1790
1791
1792
1793 ret = q->mq_ops->queue_rq(hctx, &bd);
1794 switch (ret) {
1795 case BLK_STS_OK:
1796 blk_mq_update_dispatch_busy(hctx, false);
1797 *cookie = new_cookie;
1798 break;
1799 case BLK_STS_RESOURCE:
1800 case BLK_STS_DEV_RESOURCE:
1801 blk_mq_update_dispatch_busy(hctx, true);
1802 __blk_mq_requeue_request(rq);
1803 break;
1804 default:
1805 blk_mq_update_dispatch_busy(hctx, false);
1806 *cookie = BLK_QC_T_NONE;
1807 break;
1808 }
1809
1810 return ret;
1811}
1812
1813static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1814 struct request *rq,
1815 blk_qc_t *cookie,
1816 bool bypass_insert, bool last)
1817{
1818 struct request_queue *q = rq->q;
1819 bool run_queue = true;
1820
1821
1822
1823
1824
1825
1826
1827
1828 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1829 run_queue = false;
1830 bypass_insert = false;
1831 goto insert;
1832 }
1833
1834 if (q->elevator && !bypass_insert)
1835 goto insert;
1836
1837 if (!blk_mq_get_dispatch_budget(hctx))
1838 goto insert;
1839
1840 if (!blk_mq_get_driver_tag(rq)) {
1841 blk_mq_put_dispatch_budget(hctx);
1842 goto insert;
1843 }
1844
1845 return __blk_mq_issue_directly(hctx, rq, cookie, last);
1846insert:
1847 if (bypass_insert)
1848 return BLK_STS_RESOURCE;
1849
1850 blk_mq_request_bypass_insert(rq, run_queue);
1851 return BLK_STS_OK;
1852}
1853
1854static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1855 struct request *rq, blk_qc_t *cookie)
1856{
1857 blk_status_t ret;
1858 int srcu_idx;
1859
1860 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1861
1862 hctx_lock(hctx, &srcu_idx);
1863
1864 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
1865 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1866 blk_mq_request_bypass_insert(rq, true);
1867 else if (ret != BLK_STS_OK)
1868 blk_mq_end_request(rq, ret);
1869
1870 hctx_unlock(hctx, srcu_idx);
1871}
1872
1873blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
1874{
1875 blk_status_t ret;
1876 int srcu_idx;
1877 blk_qc_t unused_cookie;
1878 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1879
1880 hctx_lock(hctx, &srcu_idx);
1881 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
1882 hctx_unlock(hctx, srcu_idx);
1883
1884 return ret;
1885}
1886
1887void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
1888 struct list_head *list)
1889{
1890 while (!list_empty(list)) {
1891 blk_status_t ret;
1892 struct request *rq = list_first_entry(list, struct request,
1893 queuelist);
1894
1895 list_del_init(&rq->queuelist);
1896 ret = blk_mq_request_issue_directly(rq, list_empty(list));
1897 if (ret != BLK_STS_OK) {
1898 if (ret == BLK_STS_RESOURCE ||
1899 ret == BLK_STS_DEV_RESOURCE) {
1900 blk_mq_request_bypass_insert(rq,
1901 list_empty(list));
1902 break;
1903 }
1904 blk_mq_end_request(rq, ret);
1905 }
1906 }
1907
1908
1909
1910
1911
1912
1913 if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
1914 hctx->queue->mq_ops->commit_rqs(hctx);
1915}
1916
1917static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1918{
1919 list_add_tail(&rq->queuelist, &plug->mq_list);
1920 plug->rq_count++;
1921 if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
1922 struct request *tmp;
1923
1924 tmp = list_first_entry(&plug->mq_list, struct request,
1925 queuelist);
1926 if (tmp->q != rq->q)
1927 plug->multiple_queues = true;
1928 }
1929}
1930
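/*
 * Bio submission entry point for blk-mq.  After splitting, bounce and
 * merge attempts, a request is allocated and then takes one of several
 * paths: flush/FUA requests go through blk_insert_flush(), plugged
 * requests are queued on the plug list (possibly issuing a previous
 * request for the same queue directly), idle queues without an elevator
 * issue directly to the driver, and everything else goes through the
 * I/O scheduler via blk_mq_sched_insert_request().
 */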
1931static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1932{
1933 const int is_sync = op_is_sync(bio->bi_opf);
1934 const int is_flush_fua = op_is_flush(bio->bi_opf);
1935 struct blk_mq_alloc_data data = { .flags = 0};
1936 struct request *rq;
1937 struct blk_plug *plug;
1938 struct request *same_queue_rq = NULL;
1939 unsigned int nr_segs;
1940 blk_qc_t cookie;
1941
1942 blk_queue_bounce(q, &bio);
1943 __blk_queue_split(q, &bio, &nr_segs);
1944
1945 if (!bio_integrity_prep(bio))
1946 return BLK_QC_T_NONE;
1947
1948 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1949 blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
1950 return BLK_QC_T_NONE;
1951
1952 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
1953 return BLK_QC_T_NONE;
1954
1955 rq_qos_throttle(q, bio);
1956
1957 data.cmd_flags = bio->bi_opf;
1958 rq = blk_mq_get_request(q, bio, &data);
1959 if (unlikely(!rq)) {
1960 rq_qos_cleanup(q, bio);
1961 if (bio->bi_opf & REQ_NOWAIT)
1962 bio_wouldblock_error(bio);
1963 return BLK_QC_T_NONE;
1964 }
1965
1966 trace_block_getrq(q, bio, bio->bi_opf);
1967
1968 rq_qos_track(q, rq, bio);
1969
1970 cookie = request_to_qc_t(data.hctx, rq);
1971
1972 blk_mq_bio_to_request(rq, bio, nr_segs);
1973
1974 plug = blk_mq_plug(q, bio);
1975 if (unlikely(is_flush_fua)) {
1976
1977 blk_insert_flush(rq);
1978 blk_mq_run_hw_queue(data.hctx, true);
1979 } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
1980
1981
1982
1983
1984 unsigned int request_count = plug->rq_count;
1985 struct request *last = NULL;
1986
1987 if (!request_count)
1988 trace_block_plug(q);
1989 else
1990 last = list_entry_rq(plug->mq_list.prev);
1991
1992 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1993 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1994 blk_flush_plug_list(plug, false);
1995 trace_block_plug(q);
1996 }
1997
1998 blk_add_rq_to_plug(plug, rq);
1999 } else if (plug && !blk_queue_nomerges(q)) {
2000
2001
2002
2003
2004
2005
2006
2007 if (list_empty(&plug->mq_list))
2008 same_queue_rq = NULL;
2009 if (same_queue_rq) {
2010 list_del_init(&same_queue_rq->queuelist);
2011 plug->rq_count--;
2012 }
2013 blk_add_rq_to_plug(plug, rq);
2014 trace_block_plug(q);
2015
2016 if (same_queue_rq) {
2017 data.hctx = same_queue_rq->mq_hctx;
2018 trace_block_unplug(q, 1, true);
2019 blk_mq_try_issue_directly(data.hctx, same_queue_rq,
2020 &cookie);
2021 }
2022 } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
2023 !data.hctx->dispatch_busy)) {
2024 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
2025 } else {
2026 blk_mq_sched_insert_request(rq, false, true, true);
2027 }
2028
2029 return cookie;
2030}
2031
2032void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2033 unsigned int hctx_idx)
2034{
2035 struct page *page;
2036
2037 if (tags->rqs && set->ops->exit_request) {
2038 int i;
2039
2040 for (i = 0; i < tags->nr_tags; i++) {
2041 struct request *rq = tags->static_rqs[i];
2042
2043 if (!rq)
2044 continue;
2045 set->ops->exit_request(set, rq, hctx_idx);
2046 tags->static_rqs[i] = NULL;
2047 }
2048 }
2049
2050 while (!list_empty(&tags->page_list)) {
2051 page = list_first_entry(&tags->page_list, struct page, lru);
2052 list_del_init(&page->lru);
2053
2054
2055
2056
2057 kmemleak_free(page_address(page));
2058 __free_pages(page, page->private);
2059 }
2060}
2061
2062void blk_mq_free_rq_map(struct blk_mq_tags *tags)
2063{
2064 kfree(tags->rqs);
2065 tags->rqs = NULL;
2066 kfree(tags->static_rqs);
2067 tags->static_rqs = NULL;
2068
2069 blk_mq_free_tags(tags);
2070}
2071
2072struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2073 unsigned int hctx_idx,
2074 unsigned int nr_tags,
2075 unsigned int reserved_tags)
2076{
2077 struct blk_mq_tags *tags;
2078 int node;
2079
2080 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2081 if (node == NUMA_NO_NODE)
2082 node = set->numa_node;
2083
2084 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
2085 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
2086 if (!tags)
2087 return NULL;
2088
2089 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2090 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2091 node);
2092 if (!tags->rqs) {
2093 blk_mq_free_tags(tags);
2094 return NULL;
2095 }
2096
2097 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2098 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2099 node);
2100 if (!tags->static_rqs) {
2101 kfree(tags->rqs);
2102 blk_mq_free_tags(tags);
2103 return NULL;
2104 }
2105
2106 return tags;
2107}
2108
static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}
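
/*
 * For example (assuming the common 4KiB PAGE_SIZE): order 0 is 4KiB,
 * order 2 is 16KiB, and the max_order of 4 used by blk_mq_alloc_rqs()
 * below is 64KiB per allocation.
 */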
2113
2114static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2115 unsigned int hctx_idx, int node)
2116{
2117 int ret;
2118
2119 if (set->ops->init_request) {
2120 ret = set->ops->init_request(set, rq, hctx_idx, node);
2121 if (ret)
2122 return ret;
2123 }
2124
2125 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
2126 return 0;
2127}
2128
2129int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2130 unsigned int hctx_idx, unsigned int depth)
2131{
2132 unsigned int i, j, entries_per_page, max_order = 4;
2133 size_t rq_size, left;
2134 int node;
2135
2136 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2137 if (node == NUMA_NO_NODE)
2138 node = set->numa_node;
2139
2140 INIT_LIST_HEAD(&tags->page_list);
2141
2142
2143
2144
2145
2146 rq_size = round_up(sizeof(struct request) + set->cmd_size,
2147 cache_line_size());
2148 left = rq_size * depth;
2149
2150 for (i = 0; i < depth; ) {
2151 int this_order = max_order;
2152 struct page *page;
2153 int to_do;
2154 void *p;
2155
2156 while (this_order && left < order_to_size(this_order - 1))
2157 this_order--;
2158
2159 do {
2160 page = alloc_pages_node(node,
2161 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
2162 this_order);
2163 if (page)
2164 break;
2165 if (!this_order--)
2166 break;
2167 if (order_to_size(this_order) < rq_size)
2168 break;
2169 } while (1);
2170
2171 if (!page)
2172 goto fail;
2173
2174 page->private = this_order;
2175 list_add_tail(&page->lru, &tags->page_list);
2176
2177 p = page_address(page);
2178
2179
2180
2181
2182 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
2183 entries_per_page = order_to_size(this_order) / rq_size;
2184 to_do = min(entries_per_page, depth - i);
2185 left -= to_do * rq_size;
2186 for (j = 0; j < to_do; j++) {
2187 struct request *rq = p;
2188
2189 tags->static_rqs[i] = rq;
2190 if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2191 tags->static_rqs[i] = NULL;
2192 goto fail;
2193 }
2194
2195 p += rq_size;
2196 i++;
2197 }
2198 }
2199 return 0;
2200
2201fail:
2202 blk_mq_free_rqs(set, tags, hctx_idx);
2203 return -ENOMEM;
2204}
2205
2206
2207
2208
2209
2210
2211static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
2212{
2213 struct blk_mq_hw_ctx *hctx;
2214 struct blk_mq_ctx *ctx;
2215 LIST_HEAD(tmp);
2216 enum hctx_type type;
2217
2218 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2219 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2220 type = hctx->type;
2221
2222 spin_lock(&ctx->lock);
2223 if (!list_empty(&ctx->rq_lists[type])) {
2224 list_splice_init(&ctx->rq_lists[type], &tmp);
2225 blk_mq_hctx_clear_pending(hctx, ctx);
2226 }
2227 spin_unlock(&ctx->lock);
2228
2229 if (list_empty(&tmp))
2230 return 0;
2231
2232 spin_lock(&hctx->lock);
2233 list_splice_tail_init(&tmp, &hctx->dispatch);
2234 spin_unlock(&hctx->lock);
2235
2236 blk_mq_run_hw_queue(hctx, true);
2237 return 0;
2238}
2239
2240static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2241{
2242 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2243 &hctx->cpuhp_dead);
2244}
2245
2246
2247static void blk_mq_exit_hctx(struct request_queue *q,
2248 struct blk_mq_tag_set *set,
2249 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2250{
2251 if (blk_mq_hw_queue_mapped(hctx))
2252 blk_mq_tag_idle(hctx);
2253
2254 if (set->ops->exit_request)
2255 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
2256
2257 if (set->ops->exit_hctx)
2258 set->ops->exit_hctx(hctx, hctx_idx);
2259
2260 blk_mq_remove_cpuhp(hctx);
2261
2262 spin_lock(&q->unused_hctx_lock);
2263 list_add(&hctx->hctx_list, &q->unused_hctx_list);
2264 spin_unlock(&q->unused_hctx_lock);
2265}
2266
2267static void blk_mq_exit_hw_queues(struct request_queue *q,
2268 struct blk_mq_tag_set *set, int nr_queue)
2269{
2270 struct blk_mq_hw_ctx *hctx;
2271 unsigned int i;
2272
2273 queue_for_each_hw_ctx(q, hctx, i) {
2274 if (i == nr_queue)
2275 break;
2276 blk_mq_debugfs_unregister_hctx(hctx);
2277 blk_mq_exit_hctx(q, set, hctx, i);
2278 }
2279}
2280
2281static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2282{
2283 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2284
2285 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
2286 __alignof__(struct blk_mq_hw_ctx)) !=
2287 sizeof(struct blk_mq_hw_ctx));
2288
2289 if (tag_set->flags & BLK_MQ_F_BLOCKING)
2290 hw_ctx_size += sizeof(struct srcu_struct);
2291
2292 return hw_ctx_size;
2293}
2294
2295static int blk_mq_init_hctx(struct request_queue *q,
2296 struct blk_mq_tag_set *set,
2297 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2298{
2299 hctx->queue_num = hctx_idx;
2300
2301 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2302
2303 hctx->tags = set->tags[hctx_idx];
2304
2305 if (set->ops->init_hctx &&
2306 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2307 goto unregister_cpu_notifier;
2308
2309 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
2310 hctx->numa_node))
2311 goto exit_hctx;
2312 return 0;
2313
2314 exit_hctx:
2315 if (set->ops->exit_hctx)
2316 set->ops->exit_hctx(hctx, hctx_idx);
2317 unregister_cpu_notifier:
2318 blk_mq_remove_cpuhp(hctx);
2319 return -1;
2320}
2321
2322static struct blk_mq_hw_ctx *
2323blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
2324 int node)
2325{
2326 struct blk_mq_hw_ctx *hctx;
2327 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
2328
2329 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
2330 if (!hctx)
2331 goto fail_alloc_hctx;
2332
2333 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
2334 goto free_hctx;
2335
2336 atomic_set(&hctx->nr_active, 0);
2337 if (node == NUMA_NO_NODE)
2338 node = set->numa_node;
2339 hctx->numa_node = node;
2340
2341 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2342 spin_lock_init(&hctx->lock);
2343 INIT_LIST_HEAD(&hctx->dispatch);
2344 hctx->queue = q;
2345 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
2346
2347 INIT_LIST_HEAD(&hctx->hctx_list);
2348
2349
2350
2351
2352
2353 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2354 gfp, node);
2355 if (!hctx->ctxs)
2356 goto free_cpumask;
2357
2358 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
2359 gfp, node))
2360 goto free_ctxs;
2361 hctx->nr_ctx = 0;
2362
2363 spin_lock_init(&hctx->dispatch_wait_lock);
2364 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2365 INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2366
2367 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
2368 gfp);
2369 if (!hctx->fq)
2370 goto free_bitmap;
2371
2372 if (hctx->flags & BLK_MQ_F_BLOCKING)
2373 init_srcu_struct(hctx->srcu);
2374 blk_mq_hctx_kobj_init(hctx);
2375
2376 return hctx;
2377
2378 free_bitmap:
2379 sbitmap_free(&hctx->ctx_map);
2380 free_ctxs:
2381 kfree(hctx->ctxs);
2382 free_cpumask:
2383 free_cpumask_var(hctx->cpumask);
2384 free_hctx:
2385 kfree(hctx);
2386 fail_alloc_hctx:
2387 return NULL;
2388}
2389
2390static void blk_mq_init_cpu_queues(struct request_queue *q,
2391 unsigned int nr_hw_queues)
2392{
2393 struct blk_mq_tag_set *set = q->tag_set;
2394 unsigned int i, j;
2395
2396 for_each_possible_cpu(i) {
2397 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2398 struct blk_mq_hw_ctx *hctx;
2399 int k;
2400
2401 __ctx->cpu = i;
2402 spin_lock_init(&__ctx->lock);
2403 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
2404 INIT_LIST_HEAD(&__ctx->rq_lists[k]);
2405
2406 __ctx->queue = q;
2407
2408
2409
2410
2411
2412 for (j = 0; j < set->nr_maps; j++) {
2413 hctx = blk_mq_map_queue_type(q, j, i);
2414 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2415 hctx->numa_node = local_memory_node(cpu_to_node(i));
2416 }
2417 }
2418}
2419
2420static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2421{
2422 int ret = 0;
2423
2424 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2425 set->queue_depth, set->reserved_tags);
2426 if (!set->tags[hctx_idx])
2427 return false;
2428
2429 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2430 set->queue_depth);
2431 if (!ret)
2432 return true;
2433
2434 blk_mq_free_rq_map(set->tags[hctx_idx]);
2435 set->tags[hctx_idx] = NULL;
2436 return false;
2437}
2438
2439static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2440 unsigned int hctx_idx)
2441{
2442 if (set->tags && set->tags[hctx_idx]) {
2443 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2444 blk_mq_free_rq_map(set->tags[hctx_idx]);
2445 set->tags[hctx_idx] = NULL;
2446 }
2447}
2448
2449static void blk_mq_map_swqueue(struct request_queue *q)
2450{
2451 unsigned int i, j, hctx_idx;
2452 struct blk_mq_hw_ctx *hctx;
2453 struct blk_mq_ctx *ctx;
2454 struct blk_mq_tag_set *set = q->tag_set;
2455
2456
2457
2458
2459 mutex_lock(&q->sysfs_lock);
2460
2461 queue_for_each_hw_ctx(q, hctx, i) {
2462 cpumask_clear(hctx->cpumask);
2463 hctx->nr_ctx = 0;
2464 hctx->dispatch_from = NULL;
2465 }
2466
2467
2468
2469
2470
2471
2472 for_each_possible_cpu(i) {
2473 hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
2474
2475 if (!set->tags[hctx_idx] &&
2476 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2477
2478
2479
2480
2481
2482
2483 set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
2484 }
2485
2486 ctx = per_cpu_ptr(q->queue_ctx, i);
2487 for (j = 0; j < set->nr_maps; j++) {
2488 if (!set->map[j].nr_queues) {
2489 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2490 HCTX_TYPE_DEFAULT, i);
2491 continue;
2492 }
2493
2494 hctx = blk_mq_map_queue_type(q, j, i);
2495 ctx->hctxs[j] = hctx;
			/*
			 * If the CPU is already set in the mask, then we've
			 * mapped this one already. This can happen if
			 * devices share queues across queue maps.
			 */
2501 if (cpumask_test_cpu(i, hctx->cpumask))
2502 continue;
2503
2504 cpumask_set_cpu(i, hctx->cpumask);
2505 hctx->type = j;
2506 ctx->index_hw[hctx->type] = hctx->nr_ctx;
2507 hctx->ctxs[hctx->nr_ctx++] = ctx;
2508
			/*
			 * If the nr_ctx type overflows, we have exceeded the
			 * amount of sw queues we can support.
			 */
2513 BUG_ON(!hctx->nr_ctx);
2514 }
2515
2516 for (; j < HCTX_MAX_TYPES; j++)
2517 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2518 HCTX_TYPE_DEFAULT, i);
2519 }
2520
2521 mutex_unlock(&q->sysfs_lock);
2522
2523 queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
2528 if (!hctx->nr_ctx) {
			/*
			 * Never unmap queue 0: we need it as a fallback in
			 * case a later remap fails to allocate tags.
			 */
2533 if (i && set->tags[i])
2534 blk_mq_free_map_and_requests(set, i);
2535
2536 hctx->tags = NULL;
2537 continue;
2538 }
2539
2540 hctx->tags = set->tags[i];
2541 WARN_ON(!hctx->tags);
2542
		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
2548 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2549
		/*
		 * Initialize batch roundrobin counts
		 */
2553 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
2554 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2555 }
2556}
2557
/*
 * Caller needs to ensure that we're either frozen/quiesced, or that
 * the queue isn't live yet.
 */
2562static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2563{
2564 struct blk_mq_hw_ctx *hctx;
2565 int i;
2566
2567 queue_for_each_hw_ctx(q, hctx, i) {
2568 if (shared)
2569 hctx->flags |= BLK_MQ_F_TAG_SHARED;
2570 else
2571 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2572 }
2573}
2574
2575static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2576 bool shared)
2577{
2578 struct request_queue *q;
2579
2580 lockdep_assert_held(&set->tag_list_lock);
2581
2582 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2583 blk_mq_freeze_queue(q);
2584 queue_set_hctx_shared(q, shared);
2585 blk_mq_unfreeze_queue(q);
2586 }
2587}
2588
2589static void blk_mq_del_queue_tag_set(struct request_queue *q)
2590{
2591 struct blk_mq_tag_set *set = q->tag_set;
2592
2593 mutex_lock(&set->tag_list_lock);
2594 list_del_rcu(&q->tag_set_list);
2595 if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
2599 blk_mq_update_tag_set_depth(set, false);
2600 }
2601 mutex_unlock(&set->tag_list_lock);
2602 INIT_LIST_HEAD(&q->tag_set_list);
2603}
2604
2605static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2606 struct request_queue *q)
2607{
2608 mutex_lock(&set->tag_list_lock);
2609
	/*
	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
	 */
2613 if (!list_empty(&set->tag_list) &&
2614 !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2615 set->flags |= BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
2617 blk_mq_update_tag_set_depth(set, true);
2618 }
2619 if (set->flags & BLK_MQ_F_TAG_SHARED)
2620 queue_set_hctx_shared(q, true);
2621 list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2622
2623 mutex_unlock(&set->tag_list_lock);
2624}
2625
/* All allocations will be freed in later release_queue callbacks */
2627static int blk_mq_alloc_ctxs(struct request_queue *q)
2628{
2629 struct blk_mq_ctxs *ctxs;
2630 int cpu;
2631
2632 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
2633 if (!ctxs)
2634 return -ENOMEM;
2635
2636 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2637 if (!ctxs->queue_ctx)
2638 goto fail;
2639
2640 for_each_possible_cpu(cpu) {
2641 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
2642 ctx->ctxs = ctxs;
2643 }
2644
2645 q->mq_kobj = &ctxs->kobj;
2646 q->queue_ctx = ctxs->queue_ctx;
2647
2648 return 0;
2649 fail:
2650 kfree(ctxs);
2651 return -ENOMEM;
2652}
2653
/*
 * This is the actual release handler for the blk-mq parts of a request
 * queue, but it is invoked from the request queue's release handler:
 * q->mq_kobj groups the ctx/hctx kobjects, so tearing these down any
 * earlier would risk use-after-free from sysfs readers.
 */
2660void blk_mq_release(struct request_queue *q)
2661{
2662 struct blk_mq_hw_ctx *hctx, *next;
2663 int i;
2664
2665 queue_for_each_hw_ctx(q, hctx, i)
2666 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
2667
	/* all hctx are on the unused_hctx_list now */
2669 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
2670 list_del_init(&hctx->hctx_list);
2671 kobject_put(&hctx->kobj);
2672 }
2673
2674 kfree(q->queue_hw_ctx);
2675
	/*
	 * Release the mq_kobj; the per-ctx kobjects that were set up in
	 * blk_mq_sysfs_init() go away with it.
	 */
2680 blk_mq_sysfs_deinit(q);
2681}
2682
2683struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2684{
2685 struct request_queue *uninit_q, *q;
2686
2687 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2688 if (!uninit_q)
2689 return ERR_PTR(-ENOMEM);
2690
2691 q = blk_mq_init_allocated_queue(set, uninit_q);
2692 if (IS_ERR(q))
2693 blk_cleanup_queue(uninit_q);
2694
2695 return q;
2696}
2697EXPORT_SYMBOL(blk_mq_init_queue);
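
/*
 * Illustrative sketch of the usual calling sequence (hypothetical my_*
 * names, not taken from any particular driver): the driver fills in a tag
 * set, allocates it, and then builds the queue on top of it.
 *
 *	memset(&my_dev->tag_set, 0, sizeof(my_dev->tag_set));
 *	my_dev->tag_set.ops = &my_mq_ops;
 *	my_dev->tag_set.nr_hw_queues = 4;
 *	my_dev->tag_set.queue_depth = 128;
 *	my_dev->tag_set.numa_node = NUMA_NO_NODE;
 *	my_dev->tag_set.cmd_size = sizeof(struct my_cmd);
 *	my_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	err = blk_mq_alloc_tag_set(&my_dev->tag_set);
 *	if (err)
 *		return err;
 *
 *	my_dev->queue = blk_mq_init_queue(&my_dev->tag_set);
 *	if (IS_ERR(my_dev->queue)) {
 *		blk_mq_free_tag_set(&my_dev->tag_set);
 *		return PTR_ERR(my_dev->queue);
 *	}
 */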

/*
 * Helper for setting up a queue with mq ops, given queue depth, and
 * the passed in mq ops flags.
 */
2703struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
2704 const struct blk_mq_ops *ops,
2705 unsigned int queue_depth,
2706 unsigned int set_flags)
2707{
2708 struct request_queue *q;
2709 int ret;
2710
2711 memset(set, 0, sizeof(*set));
2712 set->ops = ops;
2713 set->nr_hw_queues = 1;
2714 set->nr_maps = 1;
2715 set->queue_depth = queue_depth;
2716 set->numa_node = NUMA_NO_NODE;
2717 set->flags = set_flags;
2718
2719 ret = blk_mq_alloc_tag_set(set);
2720 if (ret)
2721 return ERR_PTR(ret);
2722
2723 q = blk_mq_init_queue(set);
2724 if (IS_ERR(q)) {
2725 blk_mq_free_tag_set(set);
2726 return q;
2727 }
2728
2729 return q;
2730}
2731EXPORT_SYMBOL(blk_mq_init_sq_queue);
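
/*
 * Illustrative sketch (hypothetical my_* names): a driver that only wants a
 * single hardware queue can let this helper own the tag set setup:
 *
 *	my_dev->queue = blk_mq_init_sq_queue(&my_dev->tag_set, &my_mq_ops,
 *					     64, BLK_MQ_F_SHOULD_MERGE);
 *	if (IS_ERR(my_dev->queue))
 *		return PTR_ERR(my_dev->queue);
 *
 * On failure the tag set has already been torn down again (see above), so
 * the caller only has to deal with the ERR_PTR().
 */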
2732
2733static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
2734 struct blk_mq_tag_set *set, struct request_queue *q,
2735 int hctx_idx, int node)
2736{
2737 struct blk_mq_hw_ctx *hctx = NULL, *tmp;
2738
	/* reuse a dead hctx first, if one exists on the requested node */
2740 spin_lock(&q->unused_hctx_lock);
2741 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
2742 if (tmp->numa_node == node) {
2743 hctx = tmp;
2744 break;
2745 }
2746 }
2747 if (hctx)
2748 list_del_init(&hctx->hctx_list);
2749 spin_unlock(&q->unused_hctx_lock);
2750
2751 if (!hctx)
2752 hctx = blk_mq_alloc_hctx(q, set, node);
2753 if (!hctx)
2754 goto fail;
2755
2756 if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
2757 goto free_hctx;
2758
2759 return hctx;
2760
2761 free_hctx:
2762 kobject_put(&hctx->kobj);
2763 fail:
2764 return NULL;
2765}
2766
2767static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2768 struct request_queue *q)
2769{
2770 int i, j, end;
2771 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2772
	/* protect against switching io scheduler */
2774 mutex_lock(&q->sysfs_lock);
2775 for (i = 0; i < set->nr_hw_queues; i++) {
2776 int node;
2777 struct blk_mq_hw_ctx *hctx;
2778
2779 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
		/*
		 * If the hw queue has been mapped to another numa node,
		 * we need to realloc the hctx. If allocation fails, fall
		 * back to using the previous one.
		 */
2785 if (hctxs[i] && (hctxs[i]->numa_node == node))
2786 continue;
2787
2788 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
2789 if (hctx) {
2790 if (hctxs[i])
2791 blk_mq_exit_hctx(q, set, hctxs[i], i);
2792 hctxs[i] = hctx;
2793 } else {
2794 if (hctxs[i])
				pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
					node, hctxs[i]->numa_node);
2798 else
2799 break;
2800 }
2801 }
2802
	/*
	 * If increasing nr_hw_queues failed part way, free only the newly
	 * allocated hctxs and keep the previous q->nr_hw_queues; otherwise,
	 * release any hctxs beyond the new count.
	 */
2806 if (i != set->nr_hw_queues) {
2807 j = q->nr_hw_queues;
2808 end = i;
2809 } else {
2810 j = i;
2811 end = q->nr_hw_queues;
2812 q->nr_hw_queues = set->nr_hw_queues;
2813 }
2814
2815 for (; j < end; j++) {
2816 struct blk_mq_hw_ctx *hctx = hctxs[j];
2817
2818 if (hctx) {
2819 if (hctx->tags)
2820 blk_mq_free_map_and_requests(set, j);
2821 blk_mq_exit_hctx(q, set, hctx, j);
2822 hctxs[j] = NULL;
2823 }
2824 }
2825 mutex_unlock(&q->sysfs_lock);
2826}
2827
/*
 * Upper bound on the number of hardware queues this tag set will ever need.
 * With a single map we never use more than one hctx per possible CPU; with
 * multiple maps the set may legitimately ask for more queues than CPUs.
 */
2833static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
2834{
2835 if (set->nr_maps == 1)
2836 return nr_cpu_ids;
2837
2838 return max(set->nr_hw_queues, nr_cpu_ids);
2839}
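
/*
 * Worked example (numbers are illustrative only): a set with nr_maps == 3
 * (default/read/poll) asking for 8 hardware queues on a machine with 4
 * possible CPUs sizes the per-set arrays for max(8, 4) = 8 entries, while a
 * single-map set on the same machine always sizes them for nr_cpu_ids = 4.
 */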
2840
2841struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2842 struct request_queue *q)
2843{
	/* mark the queue as mq asap */
2845 q->mq_ops = set->ops;
2846
2847 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2848 blk_mq_poll_stats_bkt,
2849 BLK_MQ_POLL_STATS_BKTS, q);
2850 if (!q->poll_cb)
2851 goto err_exit;
2852
2853 if (blk_mq_alloc_ctxs(q))
2854 goto err_poll;
2855
	/* init q->mq_kobj and sw queues' kobjects */
2857 blk_mq_sysfs_init(q);
2858
2859 q->nr_queues = nr_hw_queues(set);
2860 q->queue_hw_ctx = kcalloc_node(q->nr_queues, sizeof(*(q->queue_hw_ctx)),
2861 GFP_KERNEL, set->numa_node);
2862 if (!q->queue_hw_ctx)
2863 goto err_sys_init;
2864
2865 INIT_LIST_HEAD(&q->unused_hctx_list);
2866 spin_lock_init(&q->unused_hctx_lock);
2867
2868 blk_mq_realloc_hw_ctxs(set, q);
2869 if (!q->nr_hw_queues)
2870 goto err_hctxs;
2871
2872 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2873 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2874
2875 q->tag_set = set;
2876
2877 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2878 if (set->nr_maps > HCTX_TYPE_POLL &&
2879 set->map[HCTX_TYPE_POLL].nr_queues)
2880 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
2881
2882 q->sg_reserved_size = INT_MAX;
2883
2884 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2885 INIT_LIST_HEAD(&q->requeue_list);
2886 spin_lock_init(&q->requeue_lock);
2887
2888 blk_queue_make_request(q, blk_mq_make_request);
2889
	/*
	 * blk_queue_make_request() sets a default nr_requests; override it
	 * here with the depth the tag set was allocated with.
	 */
2893 q->nr_requests = set->queue_depth;
2894
	/*
	 * Default to classic polling
	 */
2898 q->poll_nsec = BLK_MQ_POLL_CLASSIC;
2899
2900 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2901 blk_mq_add_queue_tag_set(set, q);
2902 blk_mq_map_swqueue(q);
2903
2904 if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2905 int ret;
2906
2907 ret = elevator_init_mq(q);
2908 if (ret)
2909 return ERR_PTR(ret);
2910 }
2911
2912 return q;
2913
2914err_hctxs:
2915 kfree(q->queue_hw_ctx);
2916err_sys_init:
2917 blk_mq_sysfs_deinit(q);
2918err_poll:
2919 blk_stat_free_callback(q->poll_cb);
2920 q->poll_cb = NULL;
2921err_exit:
2922 q->mq_ops = NULL;
2923 return ERR_PTR(-ENOMEM);
2924}
2925EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2926
/* tags can _not_ be used after returning from blk_mq_exit_queue */
2928void blk_mq_exit_queue(struct request_queue *q)
2929{
2930 struct blk_mq_tag_set *set = q->tag_set;
2931
2932 blk_mq_del_queue_tag_set(q);
2933 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2934}
2935
2936static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2937{
2938 int i;
2939
2940 for (i = 0; i < set->nr_hw_queues; i++)
2941 if (!__blk_mq_alloc_rq_map(set, i))
2942 goto out_unwind;
2943
2944 return 0;
2945
2946out_unwind:
2947 while (--i >= 0)
2948 blk_mq_free_rq_map(set->tags[i]);
2949
2950 return -ENOMEM;
2951}
2952
/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
2958static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2959{
2960 unsigned int depth;
2961 int err;
2962
2963 depth = set->queue_depth;
2964 do {
2965 err = __blk_mq_alloc_rq_maps(set);
2966 if (!err)
2967 break;
2968
2969 set->queue_depth >>= 1;
2970 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2971 err = -ENOMEM;
2972 break;
2973 }
2974 } while (set->queue_depth);
2975
2976 if (!set->queue_depth || err) {
2977 pr_err("blk-mq: failed to allocate request map\n");
2978 return -ENOMEM;
2979 }
2980
2981 if (depth != set->queue_depth)
2982 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2983 depth, set->queue_depth);
2984
2985 return 0;
2986}
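
/*
 * Worked example (illustrative): if a depth of 1024 cannot be allocated,
 * the loop above retries at 512, 256, ... and gives up once the depth would
 * drop below set->reserved_tags + BLK_MQ_TAG_MIN. The caller then sees
 * -ENOMEM; otherwise set->queue_depth holds the depth actually allocated.
 */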
2987
2988static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2989{
2990 if (set->ops->map_queues && !is_kdump_kernel()) {
2991 int i;
2992
		/*
		 * A transport's .map_queues callback is usually written
		 * along these lines:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 *	mask = get_cpu_mask(queue)
		 *	for_each_cpu(cpu, mask)
		 *		set->map[x].mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared first
		 * so that no stale mapping is left behind for a CPU that
		 * ends up not mapped to any hw queue.
		 */
3007 for (i = 0; i < set->nr_maps; i++)
3008 blk_mq_clear_mq_map(&set->map[i]);
3009
3010 return set->ops->map_queues(set);
3011 } else {
3012 BUG_ON(set->nr_maps > 1);
3013 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3014 }
3015}
3016
/*
 * Allocate a tag set to be associated with one or more request queues.
 * May fail with -EINVAL for various error conditions. May adjust the
 * requested depth down, if it's too large. In that case, the adjusted
 * value will be stored in set->queue_depth.
 */
3023int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
3024{
3025 int i, ret;
3026
3027 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
3028
3029 if (!set->nr_hw_queues)
3030 return -EINVAL;
3031 if (!set->queue_depth)
3032 return -EINVAL;
3033 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
3034 return -EINVAL;
3035
3036 if (!set->ops->queue_rq)
3037 return -EINVAL;
3038
3039 if (!set->ops->get_budget ^ !set->ops->put_budget)
3040 return -EINVAL;
3041
3042 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
3043 pr_info("blk-mq: reduced tag depth to %u\n",
3044 BLK_MQ_MAX_DEPTH);
3045 set->queue_depth = BLK_MQ_MAX_DEPTH;
3046 }
3047
3048 if (!set->nr_maps)
3049 set->nr_maps = 1;
3050 else if (set->nr_maps > HCTX_MAX_TYPES)
3051 return -EINVAL;
3052
	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
3058 if (is_kdump_kernel()) {
3059 set->nr_hw_queues = 1;
3060 set->nr_maps = 1;
3061 set->queue_depth = min(64U, set->queue_depth);
3062 }
3063
	/*
	 * There is no use for more h/w queues than cpus if we just have
	 * a single map.
	 */
3067 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
3068 set->nr_hw_queues = nr_cpu_ids;
3069
3070 set->tags = kcalloc_node(nr_hw_queues(set), sizeof(struct blk_mq_tags *),
3071 GFP_KERNEL, set->numa_node);
3072 if (!set->tags)
3073 return -ENOMEM;
3074
3075 ret = -ENOMEM;
3076 for (i = 0; i < set->nr_maps; i++) {
3077 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
3078 sizeof(set->map[i].mq_map[0]),
3079 GFP_KERNEL, set->numa_node);
3080 if (!set->map[i].mq_map)
3081 goto out_free_mq_map;
3082 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
3083 }
3084
3085 ret = blk_mq_update_queue_map(set);
3086 if (ret)
3087 goto out_free_mq_map;
3088
3089 ret = blk_mq_alloc_rq_maps(set);
3090 if (ret)
3091 goto out_free_mq_map;
3092
3093 mutex_init(&set->tag_list_lock);
3094 INIT_LIST_HEAD(&set->tag_list);
3095
3096 return 0;
3097
3098out_free_mq_map:
3099 for (i = 0; i < set->nr_maps; i++) {
3100 kfree(set->map[i].mq_map);
3101 set->map[i].mq_map = NULL;
3102 }
3103 kfree(set->tags);
3104 set->tags = NULL;
3105 return ret;
3106}
3107EXPORT_SYMBOL(blk_mq_alloc_tag_set);
3108
3109void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
3110{
3111 int i, j;
3112
3113 for (i = 0; i < nr_hw_queues(set); i++)
3114 blk_mq_free_map_and_requests(set, i);
3115
3116 for (j = 0; j < set->nr_maps; j++) {
3117 kfree(set->map[j].mq_map);
3118 set->map[j].mq_map = NULL;
3119 }
3120
3121 kfree(set->tags);
3122 set->tags = NULL;
3123}
3124EXPORT_SYMBOL(blk_mq_free_tag_set);
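
/*
 * Illustrative teardown order (hypothetical my_* names): request queues
 * built on a tag set must be cleaned up before the set itself is freed,
 * since the hctxs still reference the set's tags:
 *
 *	blk_cleanup_queue(my_dev->queue);
 *	blk_mq_free_tag_set(&my_dev->tag_set);
 */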
3125
3126int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
3127{
3128 struct blk_mq_tag_set *set = q->tag_set;
3129 struct blk_mq_hw_ctx *hctx;
3130 int i, ret;
3131
3132 if (!set)
3133 return -EINVAL;
3134
3135 if (q->nr_requests == nr)
3136 return 0;
3137
3138 blk_mq_freeze_queue(q);
3139 blk_mq_quiesce_queue(q);
3140
3141 ret = 0;
3142 queue_for_each_hw_ctx(q, hctx, i) {
3143 if (!hctx->tags)
3144 continue;
3145
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth; otherwise update the driver tag depth.
		 */
3149 if (!hctx->sched_tags) {
3150 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
3151 false);
3152 } else {
3153 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
3154 nr, true);
3155 }
3156 if (ret)
3157 break;
3158 if (q->elevator && q->elevator->type->ops.depth_updated)
3159 q->elevator->type->ops.depth_updated(hctx);
3160 }
3161
3162 if (!ret)
3163 q->nr_requests = nr;
3164
3165 blk_mq_unquiesce_queue(q);
3166 blk_mq_unfreeze_queue(q);
3167
3168 return ret;
3169}
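
/*
 * A depth change such as the one triggered by writing to the queue's sysfs
 * nr_requests attribute, e.g.
 *
 *	echo 64 > /sys/block/<dev>/queue/nr_requests
 *
 * typically ends up in blk_mq_update_nr_requests() above via the queue
 * sysfs code; this is an illustration, not an exhaustive list of callers.
 */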
3170
/*
 * request_queue and elevator_type pair.
 * It is only used by __blk_mq_update_nr_hw_queues() to remember which
 * scheduler each queue was using while the hardware queues are reallocated.
 */
3176struct blk_mq_qe_pair {
3177 struct list_head node;
3178 struct request_queue *q;
3179 struct elevator_type *type;
3180};
3181
/*
 * Cache the elevator_type on the qe pair list and switch the
 * io scheduler to 'none'.
 */
3186static bool blk_mq_elv_switch_none(struct list_head *head,
3187 struct request_queue *q)
3188{
3189 struct blk_mq_qe_pair *qe;
3190
3191 if (!q->elevator)
3192 return true;
3193
3194 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
3195 if (!qe)
3196 return false;
3197
3198 INIT_LIST_HEAD(&qe->node);
3199 qe->q = q;
3200 qe->type = q->elevator->type;
3201 list_add(&qe->node, head);
3202
3203 mutex_lock(&q->sysfs_lock);
	/*
	 * elevator_switch_mq() releases the current elevator_queue and drops
	 * the module reference that elevator_get() took.  Take an extra
	 * reference here so the scheduler module cannot be unloaded before
	 * we switch back to it.
	 */
3211 __module_get(qe->type->elevator_owner);
3212 elevator_switch_mq(q, NULL);
3213 mutex_unlock(&q->sysfs_lock);
3214
3215 return true;
3216}
3217
3218static void blk_mq_elv_switch_back(struct list_head *head,
3219 struct request_queue *q)
3220{
3221 struct blk_mq_qe_pair *qe;
3222 struct elevator_type *t = NULL;
3223
3224 list_for_each_entry(qe, head, node)
3225 if (qe->q == q) {
3226 t = qe->type;
3227 break;
3228 }
3229
3230 if (!t)
3231 return;
3232
3233 list_del(&qe->node);
3234 kfree(qe);
3235
3236 mutex_lock(&q->sysfs_lock);
3237 elevator_switch_mq(q, t);
3238 mutex_unlock(&q->sysfs_lock);
3239}
3240
3241static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
3242 int nr_hw_queues)
3243{
3244 struct request_queue *q;
3245 LIST_HEAD(head);
3246 int prev_nr_hw_queues;
3247
3248 lockdep_assert_held(&set->tag_list_lock);
3249
3250 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
3251 nr_hw_queues = nr_cpu_ids;
3252 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
3253 return;
3254
3255 list_for_each_entry(q, &set->tag_list, tag_set_list)
3256 blk_mq_freeze_queue(q);
	/*
	 * Sync with blk_mq_queue_tag_busy_iter.
	 */
3260 synchronize_rcu();
	/*
	 * Switch IO scheduler to 'none', cleaning up the data associated
	 * with the previous scheduler. We will switch back once we are done
	 * updating the new sw to hw queue mappings.
	 */
3266 list_for_each_entry(q, &set->tag_list, tag_set_list)
3267 if (!blk_mq_elv_switch_none(&head, q))
3268 goto switch_back;
3269
3270 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3271 blk_mq_debugfs_unregister_hctxs(q);
3272 blk_mq_sysfs_unregister(q);
3273 }
3274
3275 prev_nr_hw_queues = set->nr_hw_queues;
3276 set->nr_hw_queues = nr_hw_queues;
3277 blk_mq_update_queue_map(set);
3278fallback:
3279 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3280 blk_mq_realloc_hw_ctxs(set, q);
3281 if (q->nr_hw_queues != set->nr_hw_queues) {
			pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
				nr_hw_queues, prev_nr_hw_queues);
3284 set->nr_hw_queues = prev_nr_hw_queues;
3285 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3286 goto fallback;
3287 }
3288 blk_mq_map_swqueue(q);
3289 }
3290
3291 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3292 blk_mq_sysfs_register(q);
3293 blk_mq_debugfs_register_hctxs(q);
3294 }
3295
3296switch_back:
3297 list_for_each_entry(q, &set->tag_list, tag_set_list)
3298 blk_mq_elv_switch_back(&head, q);
3299
3300 list_for_each_entry(q, &set->tag_list, tag_set_list)
3301 blk_mq_unfreeze_queue(q);
3302}
3303
3304void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
3305{
3306 mutex_lock(&set->tag_list_lock);
3307 __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
3308 mutex_unlock(&set->tag_list_lock);
3309}
3310EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
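
/*
 * Illustrative sketch (hypothetical my_* names): a driver that ends up with
 * a different number of usable interrupt vectors, e.g. after a controller
 * reset, can simply ask for the set to be resized; every queue sharing the
 * set is frozen, the hctxs are reallocated and the sw -> hw mappings redone:
 *
 *	nr = my_count_usable_vectors(my_dev);
 *	blk_mq_update_nr_hw_queues(&my_dev->tag_set, nr);
 */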
3311
/* Enable polling stats and return whether they were already enabled. */
3313static bool blk_poll_stats_enable(struct request_queue *q)
3314{
3315 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3316 blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
3317 return true;
3318 blk_stat_add_callback(q, q->poll_cb);
3319 return false;
3320}
3321
3322static void blk_mq_poll_stats_start(struct request_queue *q)
3323{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
3328 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3329 blk_stat_is_active(q->poll_cb))
3330 return;
3331
3332 blk_stat_activate_msecs(q->poll_cb, 100);
3333}
3334
3335static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
3336{
3337 struct request_queue *q = cb->data;
3338 int bucket;
3339
3340 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
3341 if (cb->stat[bucket].nr_samples)
3342 q->poll_stat[bucket] = cb->stat[bucket];
3343 }
3344}
3345
3346static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
3347 struct blk_mq_hw_ctx *hctx,
3348 struct request *rq)
3349{
3350 unsigned long ret = 0;
3351 int bucket;
3352
	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users.
	 */
3357 if (!blk_poll_stats_enable(q))
3358 return 0;
3359
	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size
	 * if available, which does lead to better estimates.
	 */
3369 bucket = blk_mq_poll_stats_bkt(rq);
3370 if (bucket < 0)
3371 return ret;
3372
3373 if (q->poll_stat[bucket].nr_samples)
3374 ret = (q->poll_stat[bucket].mean + 1) / 2;
3375
3376 return ret;
3377}
3378
3379static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
3380 struct blk_mq_hw_ctx *hctx,
3381 struct request *rq)
3382{
3383 struct hrtimer_sleeper hs;
3384 enum hrtimer_mode mode;
3385 unsigned int nsecs;
3386 ktime_t kt;
3387
3388 if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
3389 return false;
3390
	/*
	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
	 *
	 *  0:	use half of the previous average completion time
	 * >0:	use this specific value
	 */
3397 if (q->poll_nsec > 0)
3398 nsecs = q->poll_nsec;
3399 else
3400 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
3401
3402 if (!nsecs)
3403 return false;
3404
3405 rq->rq_flags |= RQF_MQ_POLL_SLEPT;
3406
	/*
	 * Sleep for the estimated time before polling: either the value the
	 * user configured, or 'avg_completion_time / 2' from the stats.
	 */
3411 kt = nsecs;
3412
3413 mode = HRTIMER_MODE_REL;
3414 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
3415 hrtimer_set_expires(&hs.timer, kt);
3416
3417 hrtimer_init_sleeper(&hs, current);
3418 do {
3419 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
3420 break;
3421 set_current_state(TASK_UNINTERRUPTIBLE);
3422 hrtimer_start_expires(&hs.timer, mode);
3423 if (hs.task)
3424 io_schedule();
3425 hrtimer_cancel(&hs.timer);
3426 mode = HRTIMER_MODE_ABS;
3427 } while (hs.task && !signal_pending(current));
3428
3429 __set_current_state(TASK_RUNNING);
3430 destroy_hrtimer_on_stack(&hs.timer);
3431 return true;
3432}
3433
3434static bool blk_mq_poll_hybrid(struct request_queue *q,
3435 struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
3436{
3437 struct request *rq;
3438
3439 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
3440 return false;
3441
3442 if (!blk_qc_t_is_internal(cookie))
3443 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3444 else {
3445 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
		/*
		 * With scheduling, if the request has completed, we'll
		 * get a NULL return here, as we clear the sched tag when
		 * that happens. The request still remains valid, like always,
		 * so we should be safe with just the NULL check.
		 */
3452 if (!rq)
3453 return false;
3454 }
3455
3456 return blk_mq_poll_hybrid_sleep(q, hctx, rq);
3457}
3458
/**
 * blk_poll - poll for IO completions
 * @q:  the queue
 * @cookie: cookie passed back at IO submission time
 * @spin: whether to spin for completions
 *
 * Description:
 *    Poll for completions on the passed in queue. Returns number of
 *    completed entries found. If @spin is true, then blk_poll will continue
 *    looping until at least one completion is found, unless the task is
 *    otherwise marked running (or we need to stop).
 */
3471int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
3472{
3473 struct blk_mq_hw_ctx *hctx;
3474 long state;
3475
3476 if (!blk_qc_t_valid(cookie) ||
3477 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
3478 return 0;
3479
3480 if (current->plug)
3481 blk_flush_plug_list(current->plug, false);
3482
3483 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
3484
	/*
	 * If we sleep, have the caller restart the poll loop to reset
	 * the state. Like for the other success return cases, the
	 * caller is responsible for checking if the IO completed. If
	 * the IO isn't complete, we'll get called again and will go
	 * straight to the busy poll loop.
	 */
3492 if (blk_mq_poll_hybrid(q, hctx, cookie))
3493 return 1;
3494
3495 hctx->poll_considered++;
3496
3497 state = current->state;
3498 do {
3499 int ret;
3500
3501 hctx->poll_invoked++;
3502
3503 ret = q->mq_ops->poll(hctx);
3504 if (ret > 0) {
3505 hctx->poll_success++;
3506 __set_current_state(TASK_RUNNING);
3507 return ret;
3508 }
3509
3510 if (signal_pending_state(state, current))
3511 __set_current_state(TASK_RUNNING);
3512
3513 if (current->state == TASK_RUNNING)
3514 return 1;
3515 if (ret < 0 || !spin)
3516 break;
3517 cpu_relax();
3518 } while (!need_resched());
3519
3520 __set_current_state(TASK_RUNNING);
3521 return 0;
3522}
3523EXPORT_SYMBOL_GPL(blk_poll);
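
/*
 * Illustrative sketch of a polling caller, loosely modelled on the direct
 * I/O paths (my_bio_done() is a hypothetical completion check): the
 * submitter keeps the cookie returned at submission time and feeds it back
 * to blk_poll() until the bio completes.
 *
 *	cookie = submit_bio(bio);
 *	while (!my_bio_done(bio)) {
 *		if (!blk_poll(q, cookie, true))
 *			io_schedule();
 *	}
 */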
3524
3525unsigned int blk_mq_rq_cpu(struct request *rq)
3526{
3527 return rq->mq_ctx->cpu;
3528}
3529EXPORT_SYMBOL(blk_mq_rq_cpu);
3530
3531static int __init blk_mq_init(void)
3532{
3533 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
3534 blk_mq_hctx_notify_dead);
3535 return 0;
3536}
3537subsys_initcall(blk_mq_init);
3538