1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/backing-dev.h>
17#include <linux/bio.h>
18#include <linux/blkdev.h>
19#include <linux/blk-mq.h>
20#include <linux/highmem.h>
21#include <linux/mm.h>
22#include <linux/kernel_stat.h>
23#include <linux/string.h>
24#include <linux/init.h>
25#include <linux/completion.h>
26#include <linux/slab.h>
27#include <linux/swap.h>
28#include <linux/writeback.h>
29#include <linux/task_io_accounting_ops.h>
30#include <linux/fault-inject.h>
31#include <linux/list_sort.h>
32#include <linux/delay.h>
33#include <linux/ratelimit.h>
34#include <linux/pm_runtime.h>
35#include <linux/blk-cgroup.h>
36#include <linux/t10-pi.h>
37#include <linux/debugfs.h>
38#include <linux/bpf.h>
39
40#define CREATE_TRACE_POINTS
41#include <trace/events/block.h>
42
43#include "blk.h"
44#include "blk-mq.h"
45#include "blk-mq-sched.h"
46#include "blk-pm.h"
47#include "blk-rq-qos.h"
48
#ifdef CONFIG_DEBUG_FS
/* Root directory of the block layer's debugfs hierarchy. */
struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

/* Allocator for request_queue ids (q->id). */
DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;
70
71
72
73
74
75
/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);
81
82
83
84
85
86
/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
92
93
94
95
96
97
98
99
100
/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
106
/**
 * blk_rq_init - initialize a request
 * @q: queue the request will be issued on (may be %NULL)
 * @rq: request to initialize
 *
 * Zeroes @rq and sets up the fields a fresh request needs: empty list/hash/
 * rb-tree links, an invalid sector and tags, a start timestamp and a
 * reference count of one.
 */
void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;	/* no sector assigned yet */
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	refcount_set(&rq->ref, 1);
}
EXPORT_SYMBOL(blk_rq_init);
123
/* Human-readable names for the REQ_OP_* opcodes, indexed by opcode. */
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME
144
145
146
147
148
149
150
151
152
153inline const char *blk_op_str(unsigned int op)
154{
155 const char *op_str = "UNKNOWN";
156
157 if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
158 op_str = blk_op_name[op];
159
160 return op_str;
161}
162EXPORT_SYMBOL_GPL(blk_op_str);
163
/* Mapping between BLK_STS_* status codes, errnos and log strings. */
static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};
187
188blk_status_t errno_to_blk_status(int errno)
189{
190 int i;
191
192 for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
193 if (blk_errors[i].errno == errno)
194 return (__force blk_status_t)i;
195 }
196
197 return BLK_STS_IOERR;
198}
199EXPORT_SYMBOL_GPL(errno_to_blk_status);
200
201int blk_status_to_errno(blk_status_t status)
202{
203 int idx = (__force int)status;
204
205 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
206 return -EIO;
207 return blk_errors[idx].errno;
208}
209EXPORT_SYMBOL_GPL(blk_status_to_errno);
210
/*
 * Log a (rate-limited) description of a failed request: error name, disk,
 * position, operation, flags, segment count and priority class.
 */
static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}
229
/*
 * Complete @nbytes of @bio on behalf of @rq.  Records @error in the bio
 * (if any), propagates the request's QUIET flag, and ends the bio once it
 * is fully advanced — unless the request is part of a flush sequence, in
 * which case the flush machinery ends the bio itself.
 */
static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}
245
/**
 * blk_dump_rq_flags - print debug information about a request
 * @rq: request to dump
 * @msg: prefix for the log lines
 *
 * Prints the request's flags, position, sector counts and bio chain
 * pointers to the kernel log.
 */
void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Deletes the queue's timeout timer and cancels its timeout work,
 * waiting for any running instance to finish.  The caller is responsible
 * for ensuring no new timeout work is scheduled afterwards.
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);
284
285
286
287
288
/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 *
 * While pm_only is non-zero, only BLK_MQ_REQ_PREEMPT callers may enter the
 * queue (see blk_queue_enter()).
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);
294
295void blk_clear_pm_only(struct request_queue *q)
296{
297 int pm_only;
298
299 pm_only = atomic_dec_return(&q->pm_only);
300 WARN_ON_ONCE(pm_only < 0);
301 if (pm_only == 0)
302 wake_up_all(&q->mq_freeze_wq);
303}
304EXPORT_SYMBOL_GPL(blk_clear_pm_only);
305
306
307
308
309
310
311
312
313
314
315
/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Drops the kobject reference taken at queue allocation/get time; the
 * queue is released when the last reference goes away.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
321
/*
 * Mark @q as dying.  After this, no new blk_queue_enter() call will
 * succeed, and all waiters are woken so they can observe the DYING state.
 */
void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
340
341
342
343
344
345
346
347
348
349
/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 * The ordering of the steps below matters; do not reorder without care.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that q->request_fn() gets invoked after draining finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * Free any leftover scheduler tag requests before the queue itself
	 * goes away: freeing them needs state (the tag_set) that is torn
	 * down with the queue.  Protected by sysfs_lock against concurrent
	 * elevator changes.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
404
/* Allocate a request queue with no NUMA node preference. */
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);
410
411
412
413
414
415
/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
 *
 * Returns 0 on success.  Returns -EBUSY if BLK_MQ_REQ_NOWAIT was given and
 * the queue could not be entered immediately, or -ENODEV once the queue is
 * dying.  Otherwise sleeps until the queue is unfrozen (and, unless
 * BLK_MQ_REQ_PREEMPT was given, no longer pm_only).
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PREEMPT;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
			if (pm || !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need
		 * to order reading __PERCPU_REF_DEAD flag of
		 * .q_usage_counter and reading .mq_freeze_depth or queue
		 * dying flag, otherwise the following wait may never return
		 * if the two reads are reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    (pm || (blk_pm_request_resume(q),
				    !blk_queue_pm_only(q)))) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}
462
/* Release a reference obtained via blk_queue_enter(). */
void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
467
/*
 * Called when q_usage_counter hits zero (queue fully frozen): wake up
 * anyone waiting for the freeze to complete.
 */
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}
475
/* Queue timeout timer fired: punt the timeout handling to kblockd. */
static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}
482
483
484
485
486
487
/**
 * blk_alloc_queue_node - allocate a request queue
 * @gfp_mask: memory allocation flags
 * @node_id: NUMA node to allocate memory from
 *
 * Allocates and initializes a request_queue: id, split bioset, backing_dev
 * info, stats, timers, locks, usage counter and blkcg state.  On any
 * failure the partially constructed queue is unwound in reverse order and
 * %NULL is returned.
 */
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info->name = "block";
	q->node = node_id;

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, NULL);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

#ifdef CONFIG_BLK_DEV_IO_TRACE
	mutex_init(&q->blk_trace_mutex);
#endif
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

	/* error unwinding: each label frees everything set up after it */
fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);
570EXPORT_SYMBOL(blk_alloc_queue_node);
571
572
573
574
575
576
577
578
579
580bool blk_get_queue(struct request_queue *q)
581{
582 if (likely(!blk_queue_dying(q))) {
583 __blk_get_queue(q);
584 return true;
585 }
586
587 return false;
588}
589EXPORT_SYMBOL(blk_get_queue);
590
591
592
593
594
595
596
/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT
 *
 * Allocates a request via blk_mq_alloc_request() and gives the driver a
 * chance to initialize it through ->initialize_rq_fn.  Returns an
 * ERR_PTR() on failure.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);
612
/* Free a request obtained with blk_get_request(). */
void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);
618
/*
 * Try to append @bio to the back of @req.  Returns %true on success and
 * %false if the merge is not allowed by the lower-level limits.  On
 * success the request's data length and biotail are updated and the merge
 * is accounted.
 */
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
		struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;

	trace_block_bio_backmerge(q, req, bio);
	rq_qos_merge(req->q, req, bio);

	/* failfast attributes differ -> request becomes a mixed merge */
	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	blk_account_io_start(req, false);
	return true;
}
640
/*
 * Try to prepend @bio to the front of @req.  Returns %true on success and
 * %false if the merge is not allowed by the lower-level limits.  On
 * success the request's start sector, data length and bio head are
 * updated and the merge is accounted.
 */
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
		struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;

	trace_block_bio_frontmerge(q, req, bio);
	rq_qos_merge(req->q, req, bio);

	/* failfast attributes differ -> request becomes a mixed merge */
	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	blk_account_io_start(req, false);
	return true;
}
664
/*
 * Try to append discard @bio to discard request @req as an extra range.
 * Fails (and marks the request no-merge) when the queue's discard segment
 * or max-sectors limits would be exceeded.
 */
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;	/* one more discard range */

	blk_account_io_start(req, false);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @same_queue_rq: optional output; set to a request from @q found on the
 *	plug list (used by callers that want to issue to the same queue)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugged requests are not yet on any elevator or hash structures, so
 * they can only be found by walking the plug list like this.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = blk_mq_plug(q, bio);
	if (!plug)
		return false;

	plug_list = &plug->mq_list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		bool merged = false;

		if (rq->q == q && same_queue_rq) {
			/*
			 * Only blk-mq multiple hardware queues case checks the
			 * rq in the same queue, there should be only one such
			 * rq in a queue
			 */
			*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			break;
		}

		if (merged)
			return true;
	}

	return false;
}
760
/* Log an access that extends beyond the end of the device. */
static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
			bio_devname(bio, b), bio->bi_opf,
			(unsigned long long)bio_end_sector(bio),
			(long long)maxsector);
}
771
#ifdef CONFIG_FAIL_MAKE_REQUEST
/* Fault-injection support: make bios fail on demand. */
static DECLARE_FAULT_ATTR(fail_make_request);

/* Parse the "fail_make_request=" kernel command line option. */
static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

/* Should this request (of @bytes) fail?  Only if the partition opted in. */
static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

/* Expose the fault-injection attributes in debugfs. */
static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */
806
/*
 * Warn (once) about writes to a read-only partition.  Note that this
 * function returns %false on every path, so the write is still allowed to
 * proceed — the warning is informational only.  NOTE(review): presumably
 * this permissiveness exists because some legacy user-space tools issue
 * such writes; confirm against the git history before tightening it.
 */
static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
	const int op = bio_op(bio);

	if (part->policy && op_is_write(op)) {
		char b[BDEVNAME_SIZE];

		/* zero-length flushes carry no data, nothing to warn about */
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "generic_make_request: Trying to write "
			"to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), part->partno);
		/* deliberately do not fail the bio */
		return false;
	}

	return false;
}
827
/*
 * Fault-injection hook for whole-disk bios; also an error-injection point
 * (see ALLOW_ERROR_INJECTION below).  Returns -EIO to fail the bio.
 */
static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
835
836
837
838
839
840
841static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
842{
843 unsigned int nr_sectors = bio_sectors(bio);
844
845 if (nr_sectors && maxsector &&
846 (nr_sectors > maxsector ||
847 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
848 handle_bad_sector(bio, maxsector);
849 return -EIO;
850 }
851 return 0;
852}
853
854
855
856
/*
 * Remap block n of partition p to block n+start(p) of the disk.  Also runs
 * the fault-injection, read-only and end-of-device checks against the
 * partition.  Returns 0 on success, -EIO if any check fails or the
 * partition no longer exists.
 */
static inline int blk_partition_remap(struct bio *bio)
{
	struct hd_struct *p;
	int ret = -EIO;

	rcu_read_lock();
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (unlikely(!p))
		goto out;
	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		goto out;
	if (unlikely(bio_check_ro(bio, p)))
		goto out;

	if (bio_sectors(bio)) {
		if (bio_check_eod(bio, part_nr_sects_read(p)))
			goto out;
		bio->bi_iter.bi_sector += p->start_sect;
		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
				      bio->bi_iter.bi_sector - p->start_sect);
	}
	bio->bi_partno = 0;	/* now addressed relative to the whole disk */
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
884
/*
 * Validate and normalize @bio before it is handed to a driver: check the
 * queue exists, apply fault injection, remap partitions, filter
 * unsupported flush/poll hints, and reject operations the queue does not
 * implement.  Returns %true when the bio may be submitted; on %false the
 * bio has already been completed with an appropriate status.
 */
static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	blk_status_t status = BLK_STS_IOERR;
	char b[BDEVNAME_SIZE];

	might_sleep();

	q = bio->bi_disk->queue;
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
			"nonexistent block-device %s (%Lu)\n",
			bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	/*
	 * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
	 * with BLK_STS_NOTSUPP if it is posted for such a queue.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;

	if (bio->bi_partno) {
		if (unlikely(blk_partition_remap(bio)))
			goto end_io;
	} else {
		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
			goto end_io;
		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!nr_sectors) {
			/* flush with no data on a non-WC queue is a no-op */
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	/* drop the polling hint when the queue cannot poll */
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	/* reject operations this queue does not implement */
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	create_io_context(GFP_ATOMIC, q->node);

	if (!blkcg_bio_issue_check(q, bio))
		return false;

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(q, bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio: the bio describing the location in memory and on the device
 *
 * Queues @bio for the driver of bio->bi_disk.  Recursive submissions made
 * by a driver's ->make_request_fn are not processed recursively: they are
 * collected on current->bio_list and iterated here, keeping kernel stack
 * usage bounded for arbitrarily deep stacks of stacked drivers.
 *
 * Returns a queue cookie (%BLK_QC_T_NONE if nothing was started).
 */
blk_qc_t generic_make_request(struct bio *bio)
{
	/*
	 * bio_list_on_stack[0] contains bios submitted by the current
	 * make_request_fn.
	 * bio_list_on_stack[1] contains bios that were submitted before
	 * the current make_request_fn, but that haven't been processed
	 * yet.
	 */
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	if (!generic_make_request_checks(bio))
		goto out;

	/*
	 * We only want one ->make_request_fn to be active at a time, else
	 * stack usage with stacked devices could be a problem.  So use
	 * current->bio_list to keep a list of requests submitted by a
	 * make_request_fn function.  current->bio_list is also used as a
	 * flag to say if generic_make_request is currently active in this
	 * task or not.  If it is NULL, then no make_request is active.  If
	 * it is non-NULL, then a make_request is active, and new requests
	 * should be added at the tail.
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		goto out;
	}

	/*
	 * Following loop may be a bit non-obvious, and so deserves some
	 * explanation.  Before entering the loop, bio->bi_next is NULL (as
	 * all bios always have) and we are processing a single bio.  A
	 * ->make_request_fn may submit further bios; rather than recursing
	 * they are added to bio_list_on_stack[0], then popped and processed
	 * one at a time by this loop.  Bios for lower-level devices are
	 * sorted ahead of bios for the same device so that the stack is
	 * drained bottom-up.
	 */
	BUG_ON(bio->bi_next);
	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;
	do {
		struct request_queue *q = bio->bi_disk->queue;
		blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
			BLK_MQ_REQ_NOWAIT : 0;

		if (likely(blk_queue_enter(q, flags) == 0)) {
			struct bio_list lower, same;

			/* Create a fresh bio_list for all subordinate requests */
			bio_list_on_stack[1] = bio_list_on_stack[0];
			bio_list_init(&bio_list_on_stack[0]);
			ret = q->make_request_fn(q, bio);

			blk_queue_exit(q);

			/* sort new bios into those for a lower level
			 * and those for the same level
			 */
			bio_list_init(&lower);
			bio_list_init(&same);
			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
				if (q == bio->bi_disk->queue)
					bio_list_add(&same, bio);
				else
					bio_list_add(&lower, bio);
			/* now assemble so we handle the lowest level first */
			bio_list_merge(&bio_list_on_stack[0], &lower);
			bio_list_merge(&bio_list_on_stack[0], &same);
			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
		} else {
			if (unlikely(!blk_queue_dying(q) &&
					(bio->bi_opf & REQ_NOWAIT)))
				bio_wouldblock_error(bio);
			else
				bio_io_error(bio);
		}
		bio = bio_list_pop(&bio_list_on_stack[0]);
	} while (bio);
	current->bio_list = NULL; /* deactivate */

out:
	return ret;
}
EXPORT_SYMBOL(generic_make_request);
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
/**
 * direct_make_request - hand a buffer directly to its device driver for I/O
 * @bio: the bio describing the location in memory and on the device
 *
 * Like generic_make_request() but without the current->bio_list recursion
 * protection: the driver's ->make_request_fn is invoked directly.  Only
 * safe for callers that know the target driver will not resubmit bios
 * recursively.  On failure to enter the queue the bio is completed with
 * %BLK_STS_AGAIN (for REQ_NOWAIT on a live queue) or %BLK_STS_IOERR.
 */
blk_qc_t direct_make_request(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	blk_qc_t ret;

	if (!generic_make_request_checks(bio))
		return BLK_QC_T_NONE;

	if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
		if (nowait && !blk_queue_dying(q))
			bio->bi_status = BLK_STS_AGAIN;
		else
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	ret = q->make_request_fn(q, bio);
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(direct_make_request);
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: the &struct bio which describes the I/O
 *
 * Accounts vm events and per-task read I/O for data-carrying bios, then
 * hands the bio to generic_make_request().  Returns the queue cookie from
 * generic_make_request() (or %BLK_QC_T_NONE if the bio was punted to the
 * blkcg helper).
 */
blk_qc_t submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		/* WRITE_SAME carries a single block regardless of bi_size */
		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
			current->comm, task_pid_nr(current),
				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_iter.bi_sector,
				bio_devname(bio, b), count);
		}
	}

	return generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level
 *    queues in request stacking drivers, and it may violate the limitation
 *    of @q.  Since the block layer and the underlying device driver trust
 *    @rq after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 */
static int blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq),
			blk_queue_get_max_sectors(q, req_op(rq)));
		return -EIO;
	}

	/*
	 * The queue settings related to segment counting may differ from the
	 * original queue, so recalculate the segment count and check it
	 * against this queue's limit.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return -EIO;
	}

	return 0;
}
1240
1241
1242
1243
1244
1245
/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 *
 * Checks @rq against @q's limits, applies fault injection and accounting,
 * then issues the request directly to the hardware queue, bypassing the
 * scheduler (stacking drivers have already scheduled it at the top level).
 */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	if (blk_cloned_rq_check_limits(q, rq))
		return BLK_STS_IOERR;

	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_queue_io_stat(q))
		blk_account_io_start(rq, true);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of mixed segments and only the failfast
 *     prefix should be failed immediately.  This function determines the
 *     number of bytes which can be failed from the beginning of @rq without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1307
/* Account @bytes of completed data transfer against @req's partition. */
void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;
		part_stat_add(part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}
1320
/* Account the final completion of @req (ios, time, in-flight) at @now. */
void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (req->part && blk_do_io_stat(req) &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
		const int sgrp = op_stat_group(req_op(req));
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;

		update_io_ticks(part, jiffies, true);
		part_stat_inc(part, ios[sgrp]);
		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
		part_stat_add(part, time_in_queue, nsecs_to_jiffies64(now - req->start_time_ns));
		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);	/* drop the ref taken at io_start */
		part_stat_unlock();
	}
}
1346
/*
 * Account the start of I/O for @rq.  For a merge (@new_io false) only the
 * merge counter is bumped; for new I/O the target partition is looked up,
 * marked in-flight and remembered in rq->part for completion accounting.
 */
void blk_account_io_start(struct request *rq, bool new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);

	if (!blk_do_io_stat(rq))
		return;

	part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		part_inc_in_flight(rq->q, part, rw);
		rq->part = part;
	}

	update_io_ticks(part, jiffies, false);

	part_stat_unlock();
}
1370
1371
1372
1373
1374
/**
 * blk_steal_bios - take over @rq's bios
 * @list: the bio list to append the stolen bios to
 * @rq: the request to steal bios from
 *
 * Appends @rq's entire bio chain to the tail of @list and leaves @rq
 * empty (no bios, zero data length).
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->ext_ops->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error, __func__);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
1501
/*
 * Initialize @rq from @bio: segment count, data length, bio chain,
 * priority and disk.
 */
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		struct bio *bio)
{
	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);
	else if (bio_op(bio) == REQ_OP_DISCARD)
		rq->nr_phys_segments = 1;	/* discard is a single range */

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_disk)
		rq->rq_disk = bio->bi_disk;
}
1517
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush the dcache of all pages attached to @rq, for architectures
 *     where that is needed for cache coherency.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556int blk_lld_busy(struct request_queue *q)
1557{
1558 if (queue_is_mq(q) && q->mq_ops->busy)
1559 return q->mq_ops->busy(q);
1560
1561 return 0;
1562}
1563EXPORT_SYMBOL_GPL(blk_lld_busy);
1564
1565
1566
1567
1568
1569
1570
1571
/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
1583
1584
1585
1586
1587
/*
 * Copy the scalar attributes (position, length, special payload, segment
 * count, priority) from @src to @dst.  The bio chain itself is cloned by
 * blk_rq_prep_clone().
 */
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
	dst->__sector = blk_rq_pos(src);
	dst->__data_len = blk_rq_bytes(src);
	if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
		dst->special_vec = src->special_vec;
	}
	dst->nr_phys_segments = src->nr_phys_segments;
	dst->ioprio = src->ioprio;
	dst->extra_len = src->extra_len;
}
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
1621 struct bio_set *bs, gfp_t gfp_mask,
1622 int (*bio_ctr)(struct bio *, struct bio *, void *),
1623 void *data)
1624{
1625 struct bio *bio, *bio_src;
1626
1627 if (!bs)
1628 bs = &fs_bio_set;
1629
1630 __rq_for_each_bio(bio_src, rq_src) {
1631 bio = bio_clone_fast(bio_src, gfp_mask, bs);
1632 if (!bio)
1633 goto free_and_out;
1634
1635 if (bio_ctr && bio_ctr(bio, bio_src, data))
1636 goto free_and_out;
1637
1638 if (rq->bio) {
1639 rq->biotail->bi_next = bio;
1640 rq->biotail = bio;
1641 } else
1642 rq->bio = rq->biotail = bio;
1643 }
1644
1645 __blk_rq_prep_clone(rq, rq_src);
1646
1647 return 0;
1648
1649free_and_out:
1650 if (bio)
1651 bio_put(bio);
1652 blk_rq_unprep_clone(rq);
1653
1654 return -ENOMEM;
1655}
1656EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
1657
/*
 * kblockd_schedule_work - queue @work on the block layer's private
 * workqueue (kblockd_workqueue, created WQ_MEM_RECLAIM | WQ_HIGHPRI
 * in blk_dev_init()).
 *
 * Return value follows queue_work(): nonzero if @work was newly
 * queued, 0 if it was already pending.
 */
int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);
1663
/*
 * kblockd_schedule_work_on - like kblockd_schedule_work(), but request
 * that @work run on CPU @cpu (queue_work_on() semantics).
 */
int kblockd_schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work_on);
1669
/*
 * kblockd_mod_delayed_work_on - (re)arm delayed work on the kblockd
 * workqueue to run on CPU @cpu after @delay.  Per
 * mod_delayed_work_on(), an already-pending timer is modified rather
 * than queued a second time.
 */
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691void blk_start_plug(struct blk_plug *plug)
1692{
1693 struct task_struct *tsk = current;
1694
1695
1696
1697
1698 if (tsk->plug)
1699 return;
1700
1701 INIT_LIST_HEAD(&plug->mq_list);
1702 INIT_LIST_HEAD(&plug->cb_list);
1703 plug->rq_count = 0;
1704 plug->multiple_queues = false;
1705
1706
1707
1708
1709
1710 tsk->plug = plug;
1711}
1712EXPORT_SYMBOL(blk_start_plug);
1713
1714static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1715{
1716 LIST_HEAD(callbacks);
1717
1718 while (!list_empty(&plug->cb_list)) {
1719 list_splice_init(&plug->cb_list, &callbacks);
1720
1721 while (!list_empty(&callbacks)) {
1722 struct blk_plug_cb *cb = list_first_entry(&callbacks,
1723 struct blk_plug_cb,
1724 list);
1725 list_del(&cb->list);
1726 cb->callback(cb, from_schedule);
1727 }
1728 }
1729}
1730
1731struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1732 int size)
1733{
1734 struct blk_plug *plug = current->plug;
1735 struct blk_plug_cb *cb;
1736
1737 if (!plug)
1738 return NULL;
1739
1740 list_for_each_entry(cb, &plug->cb_list, list)
1741 if (cb->callback == unplug && cb->data == data)
1742 return cb;
1743
1744
1745 BUG_ON(size < sizeof(*cb));
1746 cb = kzalloc(size, GFP_ATOMIC);
1747 if (cb) {
1748 cb->data = data;
1749 cb->callback = unplug;
1750 list_add(&cb->list, &plug->cb_list);
1751 }
1752 return cb;
1753}
1754EXPORT_SYMBOL(blk_check_plugged);
1755
1756void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1757{
1758 flush_plug_callbacks(plug, from_schedule);
1759
1760 if (!list_empty(&plug->mq_list))
1761 blk_mq_flush_plug_list(plug, from_schedule);
1762}
1763
1764void blk_finish_plug(struct blk_plug *plug)
1765{
1766 if (plug != current->plug)
1767 return;
1768 blk_flush_plug_list(plug, false);
1769
1770 current->plug = NULL;
1771}
1772EXPORT_SYMBOL(blk_finish_plug);
1773
1774
1775
1776
1777struct request_aux *blk_rq_aux(const struct request *rq)
1778{
1779 return (struct request_aux *)((void *)rq - sizeof(struct request_aux));
1780}
1781EXPORT_SYMBOL(blk_rq_aux);
1782
/*
 * Boot-time initialization of the core block layer: sanity-check that
 * the op/flag bits fit their fields, create the kblockd workqueue and
 * the request_queue slab cache, and set up the debugfs root.
 */
int __init blk_dev_init(void)
{
	/* Every REQ_OP_* value must fit in REQ_OP_BITS... */
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	/* ...and op+flag bits must fit in cmd_flags and bi_opf. */
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			FIELD_SIZEOF(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			FIELD_SIZEOF(struct bio, bi_opf));

	/*
	 * Shared workqueue for deferred block-layer work.  WQ_MEM_RECLAIM
	 * because I/O may be needed to make forward progress under memory
	 * pressure; WQ_HIGHPRI since queued work sits on the I/O path.
	 */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	/* SLAB_PANIC: cache creation failure is fatal at this point. */
	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

#ifdef CONFIG_DEBUG_FS
	blk_debugfs_root = debugfs_create_dir("block", NULL);
#endif

	return 0;
}
1806