// SPDX-License-Identifier: GPL-2.0
/*
 * Core block layer code: request queue allocation and teardown, bio
 * submission checks and recursion handling, request completion, and
 * per-device I/O accounting.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
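
/*
 * Illustrative sketch (not part of this file): how a block driver might use
 * the flag helpers above while configuring its queue. The driver name and the
 * particular flags chosen here are assumptions made for the example only.
 */
#if 0
static void mydrv_configure_queue(struct request_queue *q)
{
	/* mark the device as non-rotational (e.g. an SSD) */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

	/* do not contribute to entropy accounting */
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);

	/* run one-time setup only for the first caller that gets here */
	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_INIT_DONE, q))
		pr_info("mydrv: first-time queue initialization\n");
}
#endif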

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return string XXX in REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralize block layer function to convert REQ_OP_XXX into
 * string format. Useful in the debugging and tracing bio or request. For
 * invalid REQ_OP_XXX it returns string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
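
/*
 * Illustrative sketch (not part of this file): translating between errno and
 * blk_status_t with the two helpers above, as a driver completing a request
 * might do. Function and variable names are assumptions for the example.
 */
#if 0
static void mydrv_complete(struct request *rq, int err)
{
	/* map a regular errno (e.g. -ENOSPC) onto a blk_status_t */
	blk_status_t status = errno_to_blk_status(err);

	/* errnos with no dedicated entry collapse to BLK_STS_IOERR */
	blk_mq_end_request(rq, status);
}

static int mydrv_errno_from_status(blk_status_t status)
{
	/* e.g. BLK_STS_TIMEOUT becomes -ETIMEDOUT */
	return blk_status_to_errno(status);
}
#endif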

static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use.
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
	 * after draining finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * Free the elevator's scheduler tag requests before the final
	 * teardown, while the tag_set they were allocated from is still
	 * reachable from the queue.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
			    !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * Read pair of the barrier in blk_freeze_queue_start(): we
		 * need to order reading the __PERCPU_REF_DEAD flag of
		 * .q_usage_counter against reading .mq_freeze_depth or the
		 * queue dying flag, otherwise the following wait may never
		 * return if the two reads are reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	int ret;

	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (unlikely(ret)) {
		if (nowait && !blk_queue_dying(q))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
	}

	return ret;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
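
/*
 * Illustrative sketch (not part of this file): the enter/exit pairing that
 * block-layer code uses around direct access to a queue, so the queue cannot
 * be frozen or torn down underneath it. The helper name is an assumption.
 */
#if 0
static int example_peek_queue(struct request_queue *q)
{
	int ret;

	/* take a q_usage_counter reference; fail instead of sleeping */
	ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ret;	/* -EBUSY (frozen/pm_only) or -ENODEV (dying) */

	/* ... safely inspect or use the queue here ... */

	blk_queue_exit(q);
	return 0;
}
#endif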

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc(node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_MAX_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);
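
/*
 * Illustrative sketch (not part of this file): allocating and freeing a
 * driver-private passthrough request with the helpers above. Issuing the
 * request is elided; the function name is an assumption for the example.
 */
#if 0
static int example_alloc_pc_request(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill in the passthrough payload and execute the request ... */

	blk_put_request(rq);
	return 0;
}
#endif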

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev->bd_disk->queue;
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (blk_throtl_bio(bio)) {
		blkcg_bio_issue_init(bio);
		return false;
	}

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/*
		 * Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static blk_qc_t __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (blk_crypto_bio_prep(&bio)) {
		if (!disk->fops->submit_bio)
			return blk_mq_submit_bio(bio);
		ret = disk->fops->submit_bio(bio);
	}
	blk_queue_exit(disk->queue);
	return ret;
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some
 *    more bios through a recursive call to submit_bio_noacct.  If it did, we
 *    find a non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static blk_qc_t __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bio->bi_bdev->bd_disk->queue;
		struct bio_list lower, same;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		ret = __submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bio->bi_bdev->bd_disk->queue)
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
	return ret;
}

static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };
	blk_qc_t ret = BLK_QC_T_NONE;

	current->bio_list = bio_list;

	do {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		if (!blk_crypto_bio_prep(&bio)) {
			blk_queue_exit(disk->queue);
			ret = BLK_QC_T_NONE;
			continue;
		}

		ret = blk_mq_submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
	return ret;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
blk_qc_t submit_bio_noacct(struct bio *bio)
{
	if (!submit_bio_checks(bio))
		return BLK_QC_T_NONE;

	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		return BLK_QC_T_NONE;
	}

	if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		return __submit_bio_noacct_mq(bio);
	return __submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed
 * a fully set up &struct bio that describes the I/O that needs to be done.
 * The bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io()
 * has been called.
 */
blk_qc_t submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bio->bi_bdev->bd_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;
		blk_qc_t ret;

		psi_memstall_enter(&pflags);
		ret = submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);

		return ret;
	}

	return submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
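
/*
 * Illustrative sketch (not part of this file): building a simple read bio and
 * handing it to submit_bio(). The completion handler, function names and the
 * single-page payload are assumptions made for the example.
 */
#if 0
static void example_read_done(struct bio *bio)
{
	if (bio->bi_status)
		pr_err("example: read failed: %d\n",
		       blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}

static void example_read_sector(struct block_device *bdev, struct page *page,
				sector_t sector)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio->bi_end_io = example_read_done;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	/* completion is reported asynchronously via ->bi_end_io */
	submit_bio(bio);
}
#endif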

/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
 */
static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));

	if (blk_rq_sectors(rq) > max_sectors) {
		/*
		 * SCSI device does not have a good way to return if
		 * Write Same/Zero is actually supported. If a device rejects
		 * a non-read/write command (discard, write same, etc.) the
		 * low-level device driver will set the relevant queue limit to
		 * 0 to prevent blk-lib from issuing more of the offending
		 * operations. Commands queued prior to the queue limit being
		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
		 * errors being propagated to upper layers.
		 */
		if (max_sectors == 0)
			return BLK_STS_NOTSUPP;

		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq), max_sectors);
		return BLK_STS_IOERR;
	}

	/*
	 * The queue settings related to segment counting may differ from
	 * those of the queue the request was originally built against.
	 * Recalculate the segment count to check the request correctly
	 * against this queue's limits.
	 */
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return BLK_STS_IOERR;
	}

	return BLK_STS_OK;
}

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	blk_status_t ret;

	ret = blk_cloned_rq_check_limits(q, rq);
	if (ret != BLK_STS_OK)
		return ret;

	if (rq->rq_disk &&
	    should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

	if (blk_queue_io_stat(q))
		blk_account_io_start(rq);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into area which need to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

static void update_io_ticks(struct block_device *part, unsigned long now,
		bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (req->part && blk_do_io_stat(req) &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		update_io_ticks(req->part, jiffies, true);
		part_stat_inc(req->part, ios[sgrp]);
		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
		part_stat_unlock();
	}
}

void blk_account_io_start(struct request *rq)
{
	if (!blk_do_io_stat(rq))
		return;

	/* passthrough requests can hold bios that do not have ->bi_bdev set */
	if (rq->bio && rq->bio->bi_bdev)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->rq_disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static unsigned long __part_start_io_acct(struct block_device *part,
					  unsigned int sectors, unsigned int op)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);

	part_stat_lock();
	update_io_ticks(part, now, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return now;
}

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(disk->part0, sectors, op);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
			       unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	__part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);
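
/*
 * Illustrative sketch (not part of this file): I/O accounting in a bio-based
 * driver using the helpers above. For bios that were not remapped to another
 * device, passing bio->bi_bdev back to bio_end_io_acct_remapped() matches the
 * start-side accounting (drivers commonly use a bio_end_io_acct() wrapper for
 * this case). The driver function name is an assumption.
 */
#if 0
static void example_handle_bio(struct bio *bio)
{
	unsigned long start = bio_start_io_acct(bio);

	/* ... perform the I/O described by @bio ... */

	bio_end_io_acct_remapped(bio, start, bio->bi_bdev);
	bio_endio(bio);
}
#endif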

/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error, __func__);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq.  The pages the original bios point to
 *     are not copied; the cloned bios just point to the same pages.  So the
 *     cloned bios must be completed before the original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else {
			rq->bio = rq->biotail = bio;
		}
		bio = NULL;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
		goto free_and_out;

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug().  This is important from a performance perspective, but
 *   also ensures that we don't deadlock.  For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug.  By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->nowait = false;

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
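
/*
 * Illustrative sketch (not part of this file): the callback pattern used with
 * blk_check_plugged(), similar to what stacking drivers such as md do. The
 * callback owns the blk_plug_cb allocation and frees it when it runs; the
 * structure and function names are assumptions for the example.
 */
#if 0
struct example_plug_cb {
	struct blk_plug_cb cb;
	struct bio_list pending;	/* zeroed bio_list is a valid empty list */
};

static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct example_plug_cb *ecb =
		container_of(cb, struct example_plug_cb, cb);
	struct bio *bio;

	/* issue everything that was batched while the task was plugged */
	while ((bio = bio_list_pop(&ecb->pending)))
		submit_bio_noacct(bio);
	kfree(cb);
}

static bool example_defer_bio(void *owner, struct bio *bio)
{
	struct blk_plug_cb *cb;
	struct example_plug_cb *ecb;

	cb = blk_check_plugged(example_unplug, owner, sizeof(*ecb));
	if (!cb)
		return false;	/* no plug active: caller issues @bio itself */

	ecb = container_of(cb, struct example_plug_cb, cb);
	bio_list_add(&ecb->pending, bio);
	return true;
}
#endif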

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initiating call of blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);
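
/*
 * Illustrative sketch (not part of this file): batching several bio
 * submissions under one plug so the block layer can merge and dispatch them
 * together. The submit helper name is an assumption for the example.
 */
#if 0
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);	/* flushes anything still held in the plug */
}
#endif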

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}