/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
                       DEFAULT_RATELIMIT_INTERVAL,
                       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
        struct mapped_device *md;
        int error;
        atomic_t io_count;
        struct bio *bio;
        unsigned long start_time;
        spinlock_t endio_lock;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
        struct mapped_device *md;
        struct dm_target *ti;
        struct request *orig, clone;
        int error;
        union map_info info;
};

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
        struct bio *orig;
        struct dm_rq_target_io *tio;
        struct bio clone;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
        if (bio && bio->bi_private)
                return &((struct dm_target_io *)bio->bi_private)->info;
        return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
        if (rq && rq->end_io_data)
                return &((struct dm_rq_target_io *)rq->end_io_data)->info;
        return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6

/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
        int undefined__;
};

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
        struct srcu_struct io_barrier;
        struct mutex suspend_lock;
        atomic_t holders;
        atomic_t open_count;

        /*
         * The current mapping.
         * Use dm_get_live_table{_fast} or take suspend_lock for
         * dereference.
         */
        struct dm_table *map;

        unsigned long flags;

        struct request_queue *queue;
        unsigned type;
        /* Protect queue and type against concurrent access. */
        struct mutex type_lock;

        struct target_type *immutable_target_type;

        struct gendisk *disk;
        char name[16];

        void *interface_ptr;

        /*
         * A list of ios that arrived while we were suspended.
         */
        atomic_t pending[2];
        wait_queue_head_t wait;
        struct work_struct work;
        struct bio_list deferred;
        spinlock_t deferred_lock;

        /*
         * Processing queue (flush)
         */
        struct workqueue_struct *wq;

        /*
         * io objects are allocated from here.
         */
        mempool_t *io_pool;

        struct bio_set *bs;

        /*
         * Event handling.
         */
        atomic_t event_nr;
        wait_queue_head_t eventq;
        atomic_t uevent_seq;
        struct list_head uevent_list;
        spinlock_t uevent_lock; /* Protect access to uevent_list */

        /*
         * freeze/thaw support require holding onto a super block
         */
        struct super_block *frozen_sb;
        struct block_device *bdev;

        /* forced geometry settings */
        struct hd_geometry geometry;

        /* sysfs handle */
        struct kobject kobj;

        /* zero-length flush that will be cloned and submitted to targets */
        struct bio flush_bio;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
        mempool_t *io_pool;
        struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;

static int __init local_init(void)
{
        int r = -ENOMEM;

        _io_cache = KMEM_CACHE(dm_io, 0);
        if (!_io_cache)
                return r;

        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
        if (!_rq_tio_cache)
                goto out_free_io_cache;

        r = dm_uevent_init();
        if (r)
                goto out_free_rq_tio_cache;

        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0)
                goto out_uevent_exit;

        if (!_major)
                _major = r;

        return 0;

out_uevent_exit:
        dm_uevent_exit();
out_free_rq_tio_cache:
        kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
        kmem_cache_destroy(_io_cache);

        return r;
}

static void local_exit(void)
{
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_io_cache);
        unregister_blkdev(_major, _name);
        dm_uevent_exit();

        _major = 0;

        DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
        local_init,
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
        dm_io_init,
        dm_kcopyd_init,
        dm_interface_init,
};

static void (*_exits[])(void) = {
        local_exit,
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
        dm_io_exit,
        dm_kcopyd_exit,
        dm_interface_exit,
};

static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);

        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

      bad:
        while (i--)
                _exits[i]();

        return r;
}

static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();

        idr_destroy(&_minor_idr);
}

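/*
 * Block device functions.
 */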
321int dm_deleting_md(struct mapped_device *md)
322{
323 return test_bit(DMF_DELETING, &md->flags);
324}
325
326static int dm_blk_open(struct block_device *bdev, fmode_t mode)
327{
328 struct mapped_device *md;
329
330 spin_lock(&_minor_lock);
331
332 md = bdev->bd_disk->private_data;
333 if (!md)
334 goto out;
335
336 if (test_bit(DMF_FREEING, &md->flags) ||
337 dm_deleting_md(md)) {
338 md = NULL;
339 goto out;
340 }
341
342 dm_get(md);
343 atomic_inc(&md->open_count);
344
345out:
346 spin_unlock(&_minor_lock);
347
348 return md ? 0 : -ENXIO;
349}
350
351static void dm_blk_close(struct gendisk *disk, fmode_t mode)
352{
353 struct mapped_device *md = disk->private_data;
354
355 spin_lock(&_minor_lock);
356
357 atomic_dec(&md->open_count);
358 dm_put(md);
359
360 spin_unlock(&_minor_lock);
361}
362
363int dm_open_count(struct mapped_device *md)
364{
365 return atomic_read(&md->open_count);
366}
367
368
369
370
371int dm_lock_for_deletion(struct mapped_device *md)
372{
373 int r = 0;
374
375 spin_lock(&_minor_lock);
376
377 if (dm_open_count(md))
378 r = -EBUSY;
379 else
380 set_bit(DMF_DELETING, &md->flags);
381
382 spin_unlock(&_minor_lock);
383
384 return r;
385}
386
387static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
388{
389 struct mapped_device *md = bdev->bd_disk->private_data;
390
391 return dm_get_geometry(md, geo);
392}
393
394static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
395 unsigned int cmd, unsigned long arg)
396{
397 struct mapped_device *md = bdev->bd_disk->private_data;
398 int srcu_idx;
399 struct dm_table *map;
400 struct dm_target *tgt;
401 int r = -ENOTTY;
402
403retry:
404 map = dm_get_live_table(md, &srcu_idx);
405
406 if (!map || !dm_table_get_size(map))
407 goto out;
408
409
410 if (dm_table_get_num_targets(map) != 1)
411 goto out;
412
413 tgt = dm_table_get_target(map, 0);
414
415 if (dm_suspended_md(md)) {
416 r = -EAGAIN;
417 goto out;
418 }
419
420 if (tgt->type->ioctl)
421 r = tgt->type->ioctl(tgt, cmd, arg);
422
423out:
424 dm_put_live_table(md, srcu_idx);
425
426 if (r == -ENOTCONN) {
427 msleep(10);
428 goto retry;
429 }
430
431 return r;
432}
433
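/*
 * Front-end allocators: struct dm_io (bio-based) and struct dm_rq_target_io
 * (request-based) come from the per-device io_pool; the bios cloned for
 * targets are embedded in objects allocated from md->bs, so free_tio() only
 * has to drop the embedded bio's reference.
 */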
434static struct dm_io *alloc_io(struct mapped_device *md)
435{
436 return mempool_alloc(md->io_pool, GFP_NOIO);
437}
438
439static void free_io(struct mapped_device *md, struct dm_io *io)
440{
441 mempool_free(io, md->io_pool);
442}
443
444static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
445{
446 bio_put(&tio->clone);
447}
448
449static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
450 gfp_t gfp_mask)
451{
452 return mempool_alloc(md->io_pool, gfp_mask);
453}
454
455static void free_rq_tio(struct dm_rq_target_io *tio)
456{
457 mempool_free(tio, tio->md->io_pool);
458}
459
460static int md_in_flight(struct mapped_device *md)
461{
462 return atomic_read(&md->pending[READ]) +
463 atomic_read(&md->pending[WRITE]);
464}
465
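/*
 * I/O accounting: every original bio bumps md->pending[rw] and the gendisk's
 * in_flight counter on entry; end_io_acct() adds the elapsed ticks and wakes
 * md->wait once the device has gone idle (used by suspend).
 */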
466static void start_io_acct(struct dm_io *io)
467{
468 struct mapped_device *md = io->md;
469 int cpu;
470 int rw = bio_data_dir(io->bio);
471
472 io->start_time = jiffies;
473
474 cpu = part_stat_lock();
475 part_round_stats(cpu, &dm_disk(md)->part0);
476 part_stat_unlock();
477 atomic_set(&dm_disk(md)->part0.in_flight[rw],
478 atomic_inc_return(&md->pending[rw]));
479}
480
481static void end_io_acct(struct dm_io *io)
482{
483 struct mapped_device *md = io->md;
484 struct bio *bio = io->bio;
485 unsigned long duration = jiffies - io->start_time;
486 int pending, cpu;
487 int rw = bio_data_dir(bio);
488
489 cpu = part_stat_lock();
490 part_round_stats(cpu, &dm_disk(md)->part0);
491 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
492 part_stat_unlock();
493
494
495
496
497
498 pending = atomic_dec_return(&md->pending[rw]);
499 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
500 pending += atomic_read(&md->pending[rw^0x1]);
501
502
503 if (!pending)
504 wake_up(&md->wait);
505}
506
507
508
509
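/*
 * Add the bio to the list of deferred io and kick the per-device workqueue.
 */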
510static void queue_io(struct mapped_device *md, struct bio *bio)
511{
512 unsigned long flags;
513
514 spin_lock_irqsave(&md->deferred_lock, flags);
515 bio_list_add(&md->deferred, bio);
516 spin_unlock_irqrestore(&md->deferred_lock, flags);
517 queue_work(md->wq, &md->work);
518}
519
520
521
522
523
524
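/*
 * Everyone (including functions in this file) should use this function to
 * access md->map: it takes an SRCU read lock on md->io_barrier and must be
 * paired with dm_put_live_table().  The returned table may be NULL.
 */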
525struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
526{
527 *srcu_idx = srcu_read_lock(&md->io_barrier);
528
529 return srcu_dereference(md->map, &md->io_barrier);
530}
531
532void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
533{
534 srcu_read_unlock(&md->io_barrier, srcu_idx);
535}
536
537void dm_sync_table(struct mapped_device *md)
538{
539 synchronize_srcu(&md->io_barrier);
540 synchronize_rcu_expedited();
541}
542
543
544
545
546
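/*
 * A faster, RCU-based alternative to dm_get_live_table()/dm_put_live_table()
 * for short sections that must not block.
 */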
547static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
548{
549 rcu_read_lock();
550 return rcu_dereference(md->map);
551}
552
553static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
554{
555 rcu_read_unlock();
556}
557
558
559
560
561int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
562{
563 *geo = md->geometry;
564
565 return 0;
566}
567
568
569
570
571int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
572{
573 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
574
575 if (geo->start > sz) {
576 DMWARN("Start sector is beyond the geometry limits.");
577 return -EINVAL;
578 }
579
580 md->geometry = *geo;
581
582 return 0;
583}
584
585
586
587
588
589
590
591
592
593
594static int __noflush_suspending(struct mapped_device *md)
595{
596 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
597}
598
599
600
601
602
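/*
 * Drop one reference on a dm_io.  When the last clone has completed: put the
 * original bio back on the deferred list if a target requested a requeue
 * during a noflush suspend, queue the data part of a flush+data bio once its
 * flush has finished, and otherwise signal completion of the original bio.
 */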
603static void dec_pending(struct dm_io *io, int error)
604{
605 unsigned long flags;
606 int io_error;
607 struct bio *bio;
608 struct mapped_device *md = io->md;
609
610
611 if (unlikely(error)) {
612 spin_lock_irqsave(&io->endio_lock, flags);
613 if (!(io->error > 0 && __noflush_suspending(md)))
614 io->error = error;
615 spin_unlock_irqrestore(&io->endio_lock, flags);
616 }
617
618 if (atomic_dec_and_test(&io->io_count)) {
619 if (io->error == DM_ENDIO_REQUEUE) {
620
621
622
623 spin_lock_irqsave(&md->deferred_lock, flags);
624 if (__noflush_suspending(md))
625 bio_list_add_head(&md->deferred, io->bio);
626 else
627
628 io->error = -EIO;
629 spin_unlock_irqrestore(&md->deferred_lock, flags);
630 }
631
632 io_error = io->error;
633 bio = io->bio;
634 end_io_acct(io);
635 free_io(md, io);
636
637 if (io_error == DM_ENDIO_REQUEUE)
638 return;
639
640 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
641
642
643
644
645 bio->bi_rw &= ~REQ_FLUSH;
646 queue_io(md, bio);
647 } else {
648
649 trace_block_bio_complete(md->queue, bio, io_error);
650 bio_endio(bio, io_error);
651 }
652 }
653}
654
655static void clone_endio(struct bio *bio, int error)
656{
657 int r = 0;
658 struct dm_target_io *tio = bio->bi_private;
659 struct dm_io *io = tio->io;
660 struct mapped_device *md = tio->io->md;
661 dm_endio_fn endio = tio->ti->type->end_io;
662
663 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
664 error = -EIO;
665
666 if (endio) {
667 r = endio(tio->ti, bio, error);
668 if (r < 0 || r == DM_ENDIO_REQUEUE)
669
670
671
672
673 error = r;
674 else if (r == DM_ENDIO_INCOMPLETE)
675
676 return;
677 else if (r) {
678 DMWARN("unimplemented target endio return value: %d", r);
679 BUG();
680 }
681 }
682
683 free_tio(md, tio);
684 dec_pending(io, error);
685}
686
687
688
689
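/*
 * Completion handler for the bios embedded in a request clone (request-based
 * dm): an error is stashed in the tio and reported when the whole request
 * completes, while successful bios advance the original request with
 * blk_update_request() so partial completion is passed up immediately.
 */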
690static void end_clone_bio(struct bio *clone, int error)
691{
692 struct dm_rq_clone_bio_info *info = clone->bi_private;
693 struct dm_rq_target_io *tio = info->tio;
694 struct bio *bio = info->orig;
695 unsigned int nr_bytes = info->orig->bi_size;
696
697 bio_put(clone);
698
699 if (tio->error)
700
701
702
703
704
705 return;
706 else if (error) {
707
708
709
710
711
712 tio->error = error;
713 return;
714 }
715
716
717
718
719
720
721
722
723
724
725
726 if (tio->orig->bio != bio)
727 DMERR("bio completion is going in the middle of the request");
728
729
730
731
732
733
734 blk_update_request(tio->orig, 0, nr_bytes);
735}
736
737
738
739
740
741
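/*
 * Per-request teardown: drop the in-flight count, wake suspend waiters,
 * optionally re-run the queue and release the md reference taken in
 * dm_start_request().  The md must not be touched after this returns.
 */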
742static void rq_completed(struct mapped_device *md, int rw, int run_queue)
743{
744 atomic_dec(&md->pending[rw]);
745
746
747 if (!md_in_flight(md))
748 wake_up(&md->wait);
749
750
751
752
753
754
755
756 if (run_queue)
757 blk_run_queue_async(md->queue);
758
759
760
761
762 dm_put(md);
763}
764
765static void free_rq_clone(struct request *clone)
766{
767 struct dm_rq_target_io *tio = clone->end_io_data;
768
769 blk_rq_unprep_clone(clone);
770 free_rq_tio(tio);
771}
772
773
774
775
776
777static void dm_end_request(struct request *clone, int error)
778{
779 int rw = rq_data_dir(clone);
780 struct dm_rq_target_io *tio = clone->end_io_data;
781 struct mapped_device *md = tio->md;
782 struct request *rq = tio->orig;
783
784 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
785 rq->errors = clone->errors;
786 rq->resid_len = clone->resid_len;
787
788 if (rq->sense)
789
790
791
792
793
794 rq->sense_len = clone->sense_len;
795 }
796
797 free_rq_clone(clone);
798 blk_end_request_all(rq, error);
799 rq_completed(md, rw, true);
800}
801
802static void dm_unprep_request(struct request *rq)
803{
804 struct request *clone = rq->special;
805
806 rq->special = NULL;
807 rq->cmd_flags &= ~REQ_DONTPREP;
808
809 free_rq_clone(clone);
810}
811
812
813
814
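/*
 * Requeue the original request of a clone onto its request_queue; the clone
 * and its tio are freed first.
 */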
815void dm_requeue_unmapped_request(struct request *clone)
816{
817 int rw = rq_data_dir(clone);
818 struct dm_rq_target_io *tio = clone->end_io_data;
819 struct mapped_device *md = tio->md;
820 struct request *rq = tio->orig;
821 struct request_queue *q = rq->q;
822 unsigned long flags;
823
824 dm_unprep_request(rq);
825
826 spin_lock_irqsave(q->queue_lock, flags);
827 blk_requeue_request(q, rq);
828 spin_unlock_irqrestore(q->queue_lock, flags);
829
830 rq_completed(md, rw, 0);
831}
832EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
833
834static void __stop_queue(struct request_queue *q)
835{
836 blk_stop_queue(q);
837}
838
839static void stop_queue(struct request_queue *q)
840{
841 unsigned long flags;
842
843 spin_lock_irqsave(q->queue_lock, flags);
844 __stop_queue(q);
845 spin_unlock_irqrestore(q->queue_lock, flags);
846}
847
848static void __start_queue(struct request_queue *q)
849{
850 if (blk_queue_stopped(q))
851 blk_start_queue(q);
852}
853
854static void start_queue(struct request_queue *q)
855{
856 unsigned long flags;
857
858 spin_lock_irqsave(q->queue_lock, flags);
859 __start_queue(q);
860 spin_unlock_irqrestore(q->queue_lock, flags);
861}
862
863static void dm_done(struct request *clone, int error, bool mapped)
864{
865 int r = error;
866 struct dm_rq_target_io *tio = clone->end_io_data;
867 dm_request_endio_fn rq_end_io = NULL;
868
869 if (tio->ti) {
870 rq_end_io = tio->ti->type->rq_end_io;
871
872 if (mapped && rq_end_io)
873 r = rq_end_io(tio->ti, clone, error, &tio->info);
874 }
875
876 if (r <= 0)
877
878 dm_end_request(clone, r);
879 else if (r == DM_ENDIO_INCOMPLETE)
880
881 return;
882 else if (r == DM_ENDIO_REQUEUE)
883
884 dm_requeue_unmapped_request(clone);
885 else {
886 DMWARN("unimplemented target endio return value: %d", r);
887 BUG();
888 }
889}
890
891
892
893
894static void dm_softirq_done(struct request *rq)
895{
896 bool mapped = true;
897 struct request *clone = rq->completion_data;
898 struct dm_rq_target_io *tio = clone->end_io_data;
899
900 if (rq->cmd_flags & REQ_FAILED)
901 mapped = false;
902
903 dm_done(clone, tio->error, mapped);
904}
905
906
907
908
909
910static void dm_complete_request(struct request *clone, int error)
911{
912 struct dm_rq_target_io *tio = clone->end_io_data;
913 struct request *rq = tio->orig;
914
915 tio->error = error;
916 rq->completion_data = clone;
917 blk_complete_request(rq);
918}
919
920
921
922
923
924
925
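/*
 * Complete both the clone and the original request with an error, without
 * calling the target's rq_end_io(): REQ_FAILED makes dm_softirq_done() treat
 * the request as never having been mapped.
 */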
926void dm_kill_unmapped_request(struct request *clone, int error)
927{
928 struct dm_rq_target_io *tio = clone->end_io_data;
929 struct request *rq = tio->orig;
930
931 rq->cmd_flags |= REQ_FAILED;
932 dm_complete_request(clone, error);
933}
934EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
935
936
937
938
939static void end_clone_request(struct request *clone, int error)
940{
941
942
943
944
945
946
947 __blk_put_request(clone->q, clone);
948
949
950
951
952
953
954
955
956
957 dm_complete_request(clone, error);
958}
959
960
961
962
963
964static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
965{
966 sector_t target_offset = dm_target_offset(ti, sector);
967
968 return ti->len - target_offset;
969}
970
971static sector_t max_io_len(sector_t sector, struct dm_target *ti)
972{
973 sector_t len = max_io_len_target_boundary(sector, ti);
974 sector_t offset, max_len;
975
976
977
978
979 if (ti->max_io_len) {
980 offset = dm_target_offset(ti, sector);
981 if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
982 max_len = sector_div(offset, ti->max_io_len);
983 else
984 max_len = offset & (ti->max_io_len - 1);
985 max_len = ti->max_io_len - max_len;
986
987 if (len > max_len)
988 len = max_len;
989 }
990
991 return len;
992}
993
994int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
995{
996 if (len > UINT_MAX) {
997 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
998 (unsigned long long)len, UINT_MAX);
999 ti->error = "Maximum size of target IO is too large";
1000 return -EINVAL;
1001 }
1002
1003 ti->max_io_len = (uint32_t) len;
1004
1005 return 0;
1006}
1007EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
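/*
 * Usage sketch (hypothetical target constructor, not part of this file): a
 * target that must not see I/O crossing a chunk boundary can cap the per-bio
 * length from its ctr, e.g.
 *
 *	r = dm_set_target_max_io_len(ti, chunk_sectors);
 *	if (r)
 *		return r;
 *
 * where "chunk_sectors" is an assumed local variable of the example.
 */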
1008
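/*
 * Hand a cloned bio to the target's map function and submit it, or undo the
 * accounting if the target returned an error or asked for a requeue.
 */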
1009static void __map_bio(struct dm_target_io *tio)
1010{
1011 int r;
1012 sector_t sector;
1013 struct mapped_device *md;
1014 struct bio *clone = &tio->clone;
1015 struct dm_target *ti = tio->ti;
1016
1017 clone->bi_end_io = clone_endio;
1018 clone->bi_private = tio;
1019
1020
1021
1022
1023
1024
1025 atomic_inc(&tio->io->io_count);
1026 sector = clone->bi_sector;
1027 r = ti->type->map(ti, clone);
1028 if (r == DM_MAPIO_REMAPPED) {
1029
1030
1031 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1032 tio->io->bio->bi_bdev->bd_dev, sector);
1033
1034 generic_make_request(clone);
1035 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1036
1037 md = tio->io->md;
1038 dec_pending(tio->io, r);
1039 free_tio(md, tio);
1040 } else if (r) {
1041 DMWARN("unimplemented target map return value: %d", r);
1042 BUG();
1043 }
1044}
1045
1046struct clone_info {
1047 struct mapped_device *md;
1048 struct dm_table *map;
1049 struct bio *bio;
1050 struct dm_io *io;
1051 sector_t sector;
1052 sector_t sector_count;
1053 unsigned short idx;
1054};
1055
1056static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
1057{
1058 bio->bi_sector = sector;
1059 bio->bi_size = to_bytes(len);
1060}
1061
1062static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
1063{
1064 bio->bi_idx = idx;
1065 bio->bi_vcnt = idx + bv_count;
1066 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
1067}
1068
1069static void clone_bio_integrity(struct bio *bio, struct bio *clone,
1070 unsigned short idx, unsigned len, unsigned offset,
1071 unsigned trim)
1072{
1073 if (!bio_integrity(bio))
1074 return;
1075
1076 bio_integrity_clone(clone, bio, GFP_NOIO);
1077
1078 if (trim)
1079 bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
1080}
1081
1082
1083
1084
1085static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
1086 sector_t sector, unsigned short idx,
1087 unsigned offset, unsigned len)
1088{
1089 struct bio *clone = &tio->clone;
1090 struct bio_vec *bv = bio->bi_io_vec + idx;
1091
1092 *clone->bi_io_vec = *bv;
1093
1094 bio_setup_sector(clone, sector, len);
1095
1096 clone->bi_bdev = bio->bi_bdev;
1097 clone->bi_rw = bio->bi_rw;
1098 clone->bi_vcnt = 1;
1099 clone->bi_io_vec->bv_offset = offset;
1100 clone->bi_io_vec->bv_len = clone->bi_size;
1101 clone->bi_flags |= 1 << BIO_CLONED;
1102
1103 clone_bio_integrity(bio, clone, idx, len, offset, 1);
1104}
1105
1106
1107
1108
1109static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1110 sector_t sector, unsigned short idx,
1111 unsigned short bv_count, unsigned len)
1112{
1113 struct bio *clone = &tio->clone;
1114 unsigned trim = 0;
1115
1116 __bio_clone(clone, bio);
1117 bio_setup_sector(clone, sector, len);
1118 bio_setup_bv(clone, idx, bv_count);
1119
1120 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
1121 trim = 1;
1122 clone_bio_integrity(bio, clone, idx, len, 0, trim);
1123}
1124
1125static struct dm_target_io *alloc_tio(struct clone_info *ci,
1126 struct dm_target *ti, int nr_iovecs,
1127 unsigned target_bio_nr)
1128{
1129 struct dm_target_io *tio;
1130 struct bio *clone;
1131
1132 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
1133 tio = container_of(clone, struct dm_target_io, clone);
1134
1135 tio->io = ci->io;
1136 tio->ti = ti;
1137 memset(&tio->info, 0, sizeof(tio->info));
1138 tio->target_bio_nr = target_bio_nr;
1139
1140 return tio;
1141}
1142
1143static void __clone_and_map_simple_bio(struct clone_info *ci,
1144 struct dm_target *ti,
1145 unsigned target_bio_nr, sector_t len)
1146{
1147 struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
1148 struct bio *clone = &tio->clone;
1149
1150
1151
1152
1153
1154
1155 __bio_clone(clone, ci->bio);
1156 if (len)
1157 bio_setup_sector(clone, ci->sector, len);
1158
1159 __map_bio(tio);
1160}
1161
1162static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1163 unsigned num_bios, sector_t len)
1164{
1165 unsigned target_bio_nr;
1166
1167 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
1168 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
1169}
1170
1171static int __send_empty_flush(struct clone_info *ci)
1172{
1173 unsigned target_nr = 0;
1174 struct dm_target *ti;
1175
1176 BUG_ON(bio_has_data(ci->bio));
1177 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1178 __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
1179
1180 return 0;
1181}
1182
1183static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1184 sector_t sector, int nr_iovecs,
1185 unsigned short idx, unsigned short bv_count,
1186 unsigned offset, unsigned len,
1187 unsigned split_bvec)
1188{
1189 struct bio *bio = ci->bio;
1190 struct dm_target_io *tio;
1191 unsigned target_bio_nr;
1192 unsigned num_target_bios = 1;
1193
1194
1195
1196
1197 if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1198 num_target_bios = ti->num_write_bios(ti, bio);
1199
1200 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1201 tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
1202 if (split_bvec)
1203 clone_split_bio(tio, bio, sector, idx, offset, len);
1204 else
1205 clone_bio(tio, bio, sector, idx, bv_count, len);
1206 __map_bio(tio);
1207 }
1208}
1209
1210typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
1211
1212static unsigned get_num_discard_bios(struct dm_target *ti)
1213{
1214 return ti->num_discard_bios;
1215}
1216
1217static unsigned get_num_write_same_bios(struct dm_target *ti)
1218{
1219 return ti->num_write_same_bios;
1220}
1221
1222typedef bool (*is_split_required_fn)(struct dm_target *ti);
1223
1224static bool is_split_required_for_discard(struct dm_target *ti)
1225{
1226 return ti->split_discard_bios;
1227}
1228
1229static int __send_changing_extent_only(struct clone_info *ci,
1230 get_num_bios_fn get_num_bios,
1231 is_split_required_fn is_split_required)
1232{
1233 struct dm_target *ti;
1234 sector_t len;
1235 unsigned num_bios;
1236
1237 do {
1238 ti = dm_table_find_target(ci->map, ci->sector);
1239 if (!dm_target_is_valid(ti))
1240 return -EIO;
1241
1242
1243
1244
1245
1246
1247
1248 num_bios = get_num_bios ? get_num_bios(ti) : 0;
1249 if (!num_bios)
1250 return -EOPNOTSUPP;
1251
1252 if (is_split_required && !is_split_required(ti))
1253 len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1254 else
1255 len = min(ci->sector_count, max_io_len(ci->sector, ti));
1256
1257 __send_duplicate_bios(ci, ti, num_bios, len);
1258
1259 ci->sector += len;
1260 } while (ci->sector_count -= len);
1261
1262 return 0;
1263}
1264
1265static int __send_discard(struct clone_info *ci)
1266{
1267 return __send_changing_extent_only(ci, get_num_discard_bios,
1268 is_split_required_for_discard);
1269}
1270
1271static int __send_write_same(struct clone_info *ci)
1272{
1273 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
1274}
1275
1276
1277
1278
1279static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
1280{
1281 struct bio *bio = ci->bio;
1282 sector_t bv_len, total_len = 0;
1283
1284 for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
1285 bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
1286
1287 if (bv_len > max)
1288 break;
1289
1290 max -= bv_len;
1291 total_len += bv_len;
1292 }
1293
1294 return total_len;
1295}
1296
1297static int __split_bvec_across_targets(struct clone_info *ci,
1298 struct dm_target *ti, sector_t max)
1299{
1300 struct bio *bio = ci->bio;
1301 struct bio_vec *bv = bio->bi_io_vec + ci->idx;
1302 sector_t remaining = to_sector(bv->bv_len);
1303 unsigned offset = 0;
1304 sector_t len;
1305
1306 do {
1307 if (offset) {
1308 ti = dm_table_find_target(ci->map, ci->sector);
1309 if (!dm_target_is_valid(ti))
1310 return -EIO;
1311
1312 max = max_io_len(ci->sector, ti);
1313 }
1314
1315 len = min(remaining, max);
1316
1317 __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
1318 bv->bv_offset + offset, len, 1);
1319
1320 ci->sector += len;
1321 ci->sector_count -= len;
1322 offset += to_bytes(len);
1323 } while (remaining -= len);
1324
1325 ci->idx++;
1326
1327 return 0;
1328}
1329
1330
1331
1332
1333static int __split_and_process_non_flush(struct clone_info *ci)
1334{
1335 struct bio *bio = ci->bio;
1336 struct dm_target *ti;
1337 sector_t len, max;
1338 int idx;
1339
1340 if (unlikely(bio->bi_rw & REQ_DISCARD))
1341 return __send_discard(ci);
1342 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
1343 return __send_write_same(ci);
1344
1345 ti = dm_table_find_target(ci->map, ci->sector);
1346 if (!dm_target_is_valid(ti))
1347 return -EIO;
1348
1349 max = max_io_len(ci->sector, ti);
1350
1351
1352
1353
1354
1355 if (ci->sector_count <= max) {
1356 __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
1357 ci->idx, bio->bi_vcnt - ci->idx, 0,
1358 ci->sector_count, 0);
1359 ci->sector_count = 0;
1360 return 0;
1361 }
1362
1363
1364
1365
1366
1367 if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
1368 len = __len_within_target(ci, max, &idx);
1369
1370 __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
1371 ci->idx, idx - ci->idx, 0, len, 0);
1372
1373 ci->sector += len;
1374 ci->sector_count -= len;
1375 ci->idx = idx;
1376
1377 return 0;
1378 }
1379
1380
1381
1382
1383 return __split_bvec_across_targets(ci, ti, max);
1384}
1385
1386
1387
1388
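/*
 * Entry point for bio-based I/O: split the original bio into clones sized for
 * each target and map them.  An empty REQ_FLUSH bio is turned into per-target
 * flush bios via md->flush_bio.
 */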
1389static void __split_and_process_bio(struct mapped_device *md,
1390 struct dm_table *map, struct bio *bio)
1391{
1392 struct clone_info ci;
1393 int error = 0;
1394
1395 if (unlikely(!map)) {
1396 bio_io_error(bio);
1397 return;
1398 }
1399
1400 ci.map = map;
1401 ci.md = md;
1402 ci.io = alloc_io(md);
1403 ci.io->error = 0;
1404 atomic_set(&ci.io->io_count, 1);
1405 ci.io->bio = bio;
1406 ci.io->md = md;
1407 spin_lock_init(&ci.io->endio_lock);
1408 ci.sector = bio->bi_sector;
1409 ci.idx = bio->bi_idx;
1410
1411 start_io_acct(ci.io);
1412
1413 if (bio->bi_rw & REQ_FLUSH) {
1414 ci.bio = &ci.md->flush_bio;
1415 ci.sector_count = 0;
1416 error = __send_empty_flush(&ci);
1417
1418 } else {
1419 ci.bio = bio;
1420 ci.sector_count = bio_sectors(bio);
1421 while (ci.sector_count && !error)
1422 error = __split_and_process_non_flush(&ci);
1423 }
1424
1425
1426 dec_pending(ci.io, error);
1427}
1428
1429
1430
1431
1432static int dm_merge_bvec(struct request_queue *q,
1433 struct bvec_merge_data *bvm,
1434 struct bio_vec *biovec)
1435{
1436 struct mapped_device *md = q->queuedata;
1437 struct dm_table *map = dm_get_live_table_fast(md);
1438 struct dm_target *ti;
1439 sector_t max_sectors;
1440 int max_size = 0;
1441
1442 if (unlikely(!map))
1443 goto out;
1444
1445 ti = dm_table_find_target(map, bvm->bi_sector);
1446 if (!dm_target_is_valid(ti))
1447 goto out;
1448
1449
1450
1451
1452 max_sectors = min(max_io_len(bvm->bi_sector, ti),
1453 (sector_t) BIO_MAX_SECTORS);
1454 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1455 if (max_size < 0)
1456 max_size = 0;
1457
1458
1459
1460
1461
1462
1463 if (max_size && ti->type->merge)
1464 max_size = ti->type->merge(ti, bvm, biovec, max_size);
1465
1466
1467
1468
1469
1470
1471
1472 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1473
1474 max_size = 0;
1475
1476out:
1477 dm_put_live_table_fast(md);
1478
1479
1480
1481 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1482 max_size = biovec->bv_len;
1483
1484 return max_size;
1485}
1486
1487
1488
1489
1490
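/*
 * Request function for bio-based dm: account the bio, defer it while the
 * device is suspended (readahead is simply failed), otherwise split it and
 * map the pieces to targets.
 */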
1491static void _dm_request(struct request_queue *q, struct bio *bio)
1492{
1493 int rw = bio_data_dir(bio);
1494 struct mapped_device *md = q->queuedata;
1495 int cpu;
1496 int srcu_idx;
1497 struct dm_table *map;
1498
1499 map = dm_get_live_table(md, &srcu_idx);
1500
1501 cpu = part_stat_lock();
1502 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1503 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1504 part_stat_unlock();
1505
1506
1507 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1508 dm_put_live_table(md, srcu_idx);
1509
1510 if (bio_rw(bio) != READA)
1511 queue_io(md, bio);
1512 else
1513 bio_io_error(bio);
1514 return;
1515 }
1516
1517 __split_and_process_bio(md, map, bio);
1518 dm_put_live_table(md, srcu_idx);
1519 return;
1520}
1521
1522static int dm_request_based(struct mapped_device *md)
1523{
1524 return blk_queue_stackable(md->queue);
1525}
1526
1527static void dm_request(struct request_queue *q, struct bio *bio)
1528{
1529 struct mapped_device *md = q->queuedata;
1530
1531 if (dm_request_based(md))
1532 blk_queue_bio(q, bio);
1533 else
1534 _dm_request(q, bio);
1535}
1536
1537void dm_dispatch_request(struct request *rq)
1538{
1539 int r;
1540
1541 if (blk_queue_io_stat(rq->q))
1542 rq->cmd_flags |= REQ_IO_STAT;
1543
1544 rq->start_time = jiffies;
1545 r = blk_insert_cloned_request(rq->q, rq);
1546 if (r)
1547 dm_complete_request(rq, r);
1548}
1549EXPORT_SYMBOL_GPL(dm_dispatch_request);
1550
1551static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1552 void *data)
1553{
1554 struct dm_rq_target_io *tio = data;
1555 struct dm_rq_clone_bio_info *info =
1556 container_of(bio, struct dm_rq_clone_bio_info, clone);
1557
1558 info->orig = bio_orig;
1559 info->tio = tio;
1560 bio->bi_end_io = end_clone_bio;
1561 bio->bi_private = info;
1562
1563 return 0;
1564}
1565
1566static int setup_clone(struct request *clone, struct request *rq,
1567 struct dm_rq_target_io *tio)
1568{
1569 int r;
1570
1571 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1572 dm_rq_bio_constructor, tio);
1573 if (r)
1574 return r;
1575
1576 clone->cmd = rq->cmd;
1577 clone->cmd_len = rq->cmd_len;
1578 clone->sense = rq->sense;
1579 clone->buffer = rq->buffer;
1580 clone->end_io = end_clone_request;
1581 clone->end_io_data = tio;
1582
1583 return 0;
1584}
1585
1586static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1587 gfp_t gfp_mask)
1588{
1589 struct request *clone;
1590 struct dm_rq_target_io *tio;
1591
1592 tio = alloc_rq_tio(md, gfp_mask);
1593 if (!tio)
1594 return NULL;
1595
1596 tio->md = md;
1597 tio->ti = NULL;
1598 tio->orig = rq;
1599 tio->error = 0;
1600 memset(&tio->info, 0, sizeof(tio->info));
1601
1602 clone = &tio->clone;
1603 if (setup_clone(clone, rq, tio)) {
1604
1605 free_rq_tio(tio);
1606 return NULL;
1607 }
1608
1609 return clone;
1610}
1611
1612
1613
1614
1615static int dm_prep_fn(struct request_queue *q, struct request *rq)
1616{
1617 struct mapped_device *md = q->queuedata;
1618 struct request *clone;
1619
1620 if (unlikely(rq->special)) {
1621 DMWARN("Already has something in rq->special.");
1622 return BLKPREP_KILL;
1623 }
1624
1625 clone = clone_rq(rq, md, GFP_ATOMIC);
1626 if (!clone)
1627 return BLKPREP_DEFER;
1628
1629 rq->special = clone;
1630 rq->cmd_flags |= REQ_DONTPREP;
1631
1632 return BLKPREP_OK;
1633}
1634
1635
1636
1637
1638
1639
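/*
 * Map a request clone to a target.  Returns 0 if the clone was dispatched
 * (or otherwise consumed), non-zero if it was requeued.
 */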
1640static int map_request(struct dm_target *ti, struct request *clone,
1641 struct mapped_device *md)
1642{
1643 int r, requeued = 0;
1644 struct dm_rq_target_io *tio = clone->end_io_data;
1645
1646 tio->ti = ti;
1647 r = ti->type->map_rq(ti, clone, &tio->info);
1648 switch (r) {
1649 case DM_MAPIO_SUBMITTED:
1650
1651 break;
1652 case DM_MAPIO_REMAPPED:
1653
1654 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1655 blk_rq_pos(tio->orig));
1656 dm_dispatch_request(clone);
1657 break;
1658 case DM_MAPIO_REQUEUE:
1659
1660 dm_requeue_unmapped_request(clone);
1661 requeued = 1;
1662 break;
1663 default:
1664 if (r > 0) {
1665 DMWARN("unimplemented target map return value: %d", r);
1666 BUG();
1667 }
1668
1669
1670 dm_kill_unmapped_request(clone, r);
1671 break;
1672 }
1673
1674 return requeued;
1675}
1676
1677static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1678{
1679 struct request *clone;
1680
1681 blk_start_request(orig);
1682 clone = orig->special;
1683 atomic_inc(&md->pending[rq_data_dir(clone)]);
1684
1685
1686
1687
1688
1689
1690
1691
1692 dm_get(md);
1693
1694 return clone;
1695}
1696
1697
1698
1699
1700
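/*
 * q->request_fn for request-based dm, called with the queue lock held: peek
 * requests, map their pre-made clones to targets and dispatch them, backing
 * off briefly when a target or the queue is busy.
 */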
1701static void dm_request_fn(struct request_queue *q)
1702{
1703 struct mapped_device *md = q->queuedata;
1704 int srcu_idx;
1705 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
1706 struct dm_target *ti;
1707 struct request *rq, *clone;
1708 sector_t pos;
1709
1710
1711
1712
1713
1714
1715
1716 while (!blk_queue_stopped(q)) {
1717 rq = blk_peek_request(q);
1718 if (!rq)
1719 goto delay_and_out;
1720
1721
1722 pos = 0;
1723 if (!(rq->cmd_flags & REQ_FLUSH))
1724 pos = blk_rq_pos(rq);
1725
1726 ti = dm_table_find_target(map, pos);
1727 if (!dm_target_is_valid(ti)) {
1728
1729
1730
1731
1732 DMERR_LIMIT("request attempted access beyond the end of device");
1733 clone = dm_start_request(md, rq);
1734 dm_kill_unmapped_request(clone, -EIO);
1735 continue;
1736 }
1737
1738 if (ti->type->busy && ti->type->busy(ti))
1739 goto delay_and_out;
1740
1741 clone = dm_start_request(md, rq);
1742
1743 spin_unlock(q->queue_lock);
1744 if (map_request(ti, clone, md))
1745 goto requeued;
1746
1747 BUG_ON(!irqs_disabled());
1748 spin_lock(q->queue_lock);
1749 }
1750
1751 goto out;
1752
1753requeued:
1754 BUG_ON(!irqs_disabled());
1755 spin_lock(q->queue_lock);
1756
1757delay_and_out:
1758 blk_delay_queue(q, HZ / 10);
1759out:
1760 dm_put_live_table(md, srcu_idx);
1761}
1762
1763int dm_underlying_device_busy(struct request_queue *q)
1764{
1765 return blk_lld_busy(q);
1766}
1767EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1768
1769static int dm_lld_busy(struct request_queue *q)
1770{
1771 int r;
1772 struct mapped_device *md = q->queuedata;
1773 struct dm_table *map = dm_get_live_table_fast(md);
1774
1775 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1776 r = 1;
1777 else
1778 r = dm_table_any_busy_target(map);
1779
1780 dm_put_live_table_fast(md);
1781
1782 return r;
1783}
1784
1785static int dm_any_congested(void *congested_data, int bdi_bits)
1786{
1787 int r = bdi_bits;
1788 struct mapped_device *md = congested_data;
1789 struct dm_table *map;
1790
1791 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1792 map = dm_get_live_table_fast(md);
1793 if (map) {
1794
1795
1796
1797
1798 if (dm_request_based(md))
1799 r = md->queue->backing_dev_info.state &
1800 bdi_bits;
1801 else
1802 r = dm_table_any_congested(map, bdi_bits);
1803 }
1804 dm_put_live_table_fast(md);
1805 }
1806
1807 return r;
1808}
1809
1810
1811
1812
1813static void free_minor(int minor)
1814{
1815 spin_lock(&_minor_lock);
1816 idr_remove(&_minor_idr, minor);
1817 spin_unlock(&_minor_lock);
1818}
1819
1820
1821
1822
1823static int specific_minor(int minor)
1824{
1825 int r;
1826
1827 if (minor >= (1 << MINORBITS))
1828 return -EINVAL;
1829
1830 idr_preload(GFP_KERNEL);
1831 spin_lock(&_minor_lock);
1832
1833 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1834
1835 spin_unlock(&_minor_lock);
1836 idr_preload_end();
1837 if (r < 0)
1838 return r == -ENOSPC ? -EBUSY : r;
1839 return 0;
1840}
1841
1842static int next_free_minor(int *minor)
1843{
1844 int r;
1845
1846 idr_preload(GFP_KERNEL);
1847 spin_lock(&_minor_lock);
1848
1849 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1850
1851 spin_unlock(&_minor_lock);
1852 idr_preload_end();
1853 if (r < 0)
1854 return r;
1855 *minor = r;
1856 return 0;
1857}
1858
1859static const struct block_device_operations dm_blk_dops;
1860
1861static void dm_wq_work(struct work_struct *work);
1862
1863static void dm_init_md_queue(struct mapped_device *md)
1864{
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1875
1876 md->queue->queuedata = md;
1877 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1878 md->queue->backing_dev_info.congested_data = md;
1879 blk_queue_make_request(md->queue, dm_request);
1880 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1881 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1882}
1883
1884
1885
1886
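/*
 * Allocate and initialise a mapped_device with the requested minor (or the
 * next free one), including its queue, gendisk, workqueue and flush bio.
 */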
1887static struct mapped_device *alloc_dev(int minor)
1888{
1889 int r;
1890 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
1891 void *old_md;
1892
1893 if (!md) {
1894 DMWARN("unable to allocate device, out of memory.");
1895 return NULL;
1896 }
1897
1898 if (!try_module_get(THIS_MODULE))
1899 goto bad_module_get;
1900
1901
1902 if (minor == DM_ANY_MINOR)
1903 r = next_free_minor(&minor);
1904 else
1905 r = specific_minor(minor);
1906 if (r < 0)
1907 goto bad_minor;
1908
1909 r = init_srcu_struct(&md->io_barrier);
1910 if (r < 0)
1911 goto bad_io_barrier;
1912
1913 md->type = DM_TYPE_NONE;
1914 mutex_init(&md->suspend_lock);
1915 mutex_init(&md->type_lock);
1916 spin_lock_init(&md->deferred_lock);
1917 atomic_set(&md->holders, 1);
1918 atomic_set(&md->open_count, 0);
1919 atomic_set(&md->event_nr, 0);
1920 atomic_set(&md->uevent_seq, 0);
1921 INIT_LIST_HEAD(&md->uevent_list);
1922 spin_lock_init(&md->uevent_lock);
1923
1924 md->queue = blk_alloc_queue(GFP_KERNEL);
1925 if (!md->queue)
1926 goto bad_queue;
1927
1928 dm_init_md_queue(md);
1929
1930 md->disk = alloc_disk(1);
1931 if (!md->disk)
1932 goto bad_disk;
1933
1934 atomic_set(&md->pending[0], 0);
1935 atomic_set(&md->pending[1], 0);
1936 init_waitqueue_head(&md->wait);
1937 INIT_WORK(&md->work, dm_wq_work);
1938 init_waitqueue_head(&md->eventq);
1939
1940 md->disk->major = _major;
1941 md->disk->first_minor = minor;
1942 md->disk->fops = &dm_blk_dops;
1943 md->disk->queue = md->queue;
1944 md->disk->private_data = md;
1945 sprintf(md->disk->disk_name, "dm-%d", minor);
1946 add_disk(md->disk);
1947 format_dev_t(md->name, MKDEV(_major, minor));
1948
1949 md->wq = alloc_workqueue("kdmflush",
1950 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
1951 if (!md->wq)
1952 goto bad_thread;
1953
1954 md->bdev = bdget_disk(md->disk, 0);
1955 if (!md->bdev)
1956 goto bad_bdev;
1957
1958 bio_init(&md->flush_bio);
1959 md->flush_bio.bi_bdev = md->bdev;
1960 md->flush_bio.bi_rw = WRITE_FLUSH;
1961
1962
1963 spin_lock(&_minor_lock);
1964 old_md = idr_replace(&_minor_idr, md, minor);
1965 spin_unlock(&_minor_lock);
1966
1967 BUG_ON(old_md != MINOR_ALLOCED);
1968
1969 return md;
1970
1971bad_bdev:
1972 destroy_workqueue(md->wq);
1973bad_thread:
1974 del_gendisk(md->disk);
1975 put_disk(md->disk);
1976bad_disk:
1977 blk_cleanup_queue(md->queue);
1978bad_queue:
1979 cleanup_srcu_struct(&md->io_barrier);
1980bad_io_barrier:
1981 free_minor(minor);
1982bad_minor:
1983 module_put(THIS_MODULE);
1984bad_module_get:
1985 kfree(md);
1986 return NULL;
1987}
1988
1989static void unlock_fs(struct mapped_device *md);
1990
1991static void free_dev(struct mapped_device *md)
1992{
1993 int minor = MINOR(disk_devt(md->disk));
1994
1995 unlock_fs(md);
1996 bdput(md->bdev);
1997 destroy_workqueue(md->wq);
1998 if (md->io_pool)
1999 mempool_destroy(md->io_pool);
2000 if (md->bs)
2001 bioset_free(md->bs);
2002 blk_integrity_unregister(md->disk);
2003 del_gendisk(md->disk);
2004 cleanup_srcu_struct(&md->io_barrier);
2005 free_minor(minor);
2006
2007 spin_lock(&_minor_lock);
2008 md->disk->private_data = NULL;
2009 spin_unlock(&_minor_lock);
2010
2011 put_disk(md->disk);
2012 blk_cleanup_queue(md->queue);
2013 module_put(THIS_MODULE);
2014 kfree(md);
2015}
2016
2017static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2018{
2019 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
2020
2021 if (md->io_pool && md->bs) {
2022
2023 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
2024
2025
2026
2027
2028 bioset_free(md->bs);
2029 md->bs = p->bs;
2030 p->bs = NULL;
2031 } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
2032
2033
2034
2035
2036
2037
2038
2039
2040 }
2041 goto out;
2042 }
2043
2044 BUG_ON(!p || md->io_pool || md->bs);
2045
2046 md->io_pool = p->io_pool;
2047 p->io_pool = NULL;
2048 md->bs = p->bs;
2049 p->bs = NULL;
2050
2051out:
2052
2053 dm_table_free_md_mempools(t);
2054}
2055
2056
2057
2058
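/*
 * Called back by the table's event handling: forward any queued uevents to
 * sysfs, bump the event counter and wake waiters in dm_wait_event().
 */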
2059static void event_callback(void *context)
2060{
2061 unsigned long flags;
2062 LIST_HEAD(uevents);
2063 struct mapped_device *md = (struct mapped_device *) context;
2064
2065 spin_lock_irqsave(&md->uevent_lock, flags);
2066 list_splice_init(&md->uevent_list, &uevents);
2067 spin_unlock_irqrestore(&md->uevent_lock, flags);
2068
2069 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2070
2071 atomic_inc(&md->event_nr);
2072 wake_up(&md->eventq);
2073}
2074
2075
2076
2077
2078static void __set_size(struct mapped_device *md, sector_t size)
2079{
2080 set_capacity(md->disk, size);
2081
2082 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2083}
2084
2085
2086
2087
2088
2089
2090
2091
2092int dm_queue_merge_is_compulsory(struct request_queue *q)
2093{
2094 struct mapped_device *dev_md;
2095
2096 if (!q->merge_bvec_fn)
2097 return 0;
2098
2099 if (q->make_request_fn == dm_request) {
2100 dev_md = q->queuedata;
2101 if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
2102 return 0;
2103 }
2104
2105 return 1;
2106}
2107
2108static int dm_device_merge_is_compulsory(struct dm_target *ti,
2109 struct dm_dev *dev, sector_t start,
2110 sector_t len, void *data)
2111{
2112 struct block_device *bdev = dev->bdev;
2113 struct request_queue *q = bdev_get_queue(bdev);
2114
2115 return dm_queue_merge_is_compulsory(q);
2116}
2117
2118
2119
2120
2121
2122static int dm_table_merge_is_optional(struct dm_table *table)
2123{
2124 unsigned i = 0;
2125 struct dm_target *ti;
2126
2127 while (i < dm_table_get_num_targets(table)) {
2128 ti = dm_table_get_target(table, i++);
2129
2130 if (ti->type->iterate_devices &&
2131 ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2132 return 0;
2133 }
2134
2135 return 1;
2136}
2137
2138
2139
2140
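/*
 * Bind a table to the device and apply its queue limits.  Returns the old
 * map, which the caller must destroy once no I/O can still be using it.
 */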
2141static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2142 struct queue_limits *limits)
2143{
2144 struct dm_table *old_map;
2145 struct request_queue *q = md->queue;
2146 sector_t size;
2147 int merge_is_optional;
2148
2149 size = dm_table_get_size(t);
2150
2151
2152
2153
2154 if (size != get_capacity(md->disk))
2155 memset(&md->geometry, 0, sizeof(md->geometry));
2156
2157 __set_size(md, size);
2158
2159 dm_table_event_callback(t, event_callback, md);
2160
2161
2162
2163
2164
2165
2166
2167
2168 if (dm_table_request_based(t) && !blk_queue_stopped(q))
2169 stop_queue(q);
2170
2171 __bind_mempools(md, t);
2172
2173 merge_is_optional = dm_table_merge_is_optional(t);
2174
2175 old_map = md->map;
2176 rcu_assign_pointer(md->map, t);
2177 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2178
2179 dm_table_set_restrictions(t, q, limits);
2180 if (merge_is_optional)
2181 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2182 else
2183 clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2184 dm_sync_table(md);
2185
2186 return old_map;
2187}
2188
2189
2190
2191
2192static struct dm_table *__unbind(struct mapped_device *md)
2193{
2194 struct dm_table *map = md->map;
2195
2196 if (!map)
2197 return NULL;
2198
2199 dm_table_event_callback(map, NULL, NULL);
2200 rcu_assign_pointer(md->map, NULL);
2201 dm_sync_table(md);
2202
2203 return map;
2204}
2205
2206
2207
2208
2209int dm_create(int minor, struct mapped_device **result)
2210{
2211 struct mapped_device *md;
2212
2213 md = alloc_dev(minor);
2214 if (!md)
2215 return -ENXIO;
2216
2217 dm_sysfs_init(md);
2218
2219 *result = md;
2220 return 0;
2221}
2222
2223
2224
2225
2226
2227void dm_lock_md_type(struct mapped_device *md)
2228{
2229 mutex_lock(&md->type_lock);
2230}
2231
2232void dm_unlock_md_type(struct mapped_device *md)
2233{
2234 mutex_unlock(&md->type_lock);
2235}
2236
2237void dm_set_md_type(struct mapped_device *md, unsigned type)
2238{
2239 md->type = type;
2240}
2241
2242unsigned dm_get_md_type(struct mapped_device *md)
2243{
2244 return md->type;
2245}
2246
2247struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2248{
2249 return md->immutable_target_type;
2250}
2251
2252
2253
2254
2255static int dm_init_request_based_queue(struct mapped_device *md)
2256{
2257 struct request_queue *q = NULL;
2258
2259 if (md->queue->elevator)
2260 return 1;
2261
2262
2263 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2264 if (!q)
2265 return 0;
2266
2267 md->queue = q;
2268 dm_init_md_queue(md);
2269 blk_queue_softirq_done(md->queue, dm_softirq_done);
2270 blk_queue_prep_rq(md->queue, dm_prep_fn);
2271 blk_queue_lld_busy(md->queue, dm_lld_busy);
2272
2273 elv_register_queue(md->queue);
2274
2275 return 1;
2276}
2277
2278
2279
2280
2281int dm_setup_md_queue(struct mapped_device *md)
2282{
2283 if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2284 !dm_init_request_based_queue(md)) {
2285 DMWARN("Cannot initialize queue for request-based mapped device");
2286 return -EINVAL;
2287 }
2288
2289 return 0;
2290}
2291
2292static struct mapped_device *dm_find_md(dev_t dev)
2293{
2294 struct mapped_device *md;
2295 unsigned minor = MINOR(dev);
2296
2297 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2298 return NULL;
2299
2300 spin_lock(&_minor_lock);
2301
2302 md = idr_find(&_minor_idr, minor);
2303 if (md && (md == MINOR_ALLOCED ||
2304 (MINOR(disk_devt(dm_disk(md))) != minor) ||
2305 dm_deleting_md(md) ||
2306 test_bit(DMF_FREEING, &md->flags))) {
2307 md = NULL;
2308 goto out;
2309 }
2310
2311out:
2312 spin_unlock(&_minor_lock);
2313
2314 return md;
2315}
2316
2317struct mapped_device *dm_get_md(dev_t dev)
2318{
2319 struct mapped_device *md = dm_find_md(dev);
2320
2321 if (md)
2322 dm_get(md);
2323
2324 return md;
2325}
2326EXPORT_SYMBOL_GPL(dm_get_md);
2327
2328void *dm_get_mdptr(struct mapped_device *md)
2329{
2330 return md->interface_ptr;
2331}
2332
2333void dm_set_mdptr(struct mapped_device *md, void *ptr)
2334{
2335 md->interface_ptr = ptr;
2336}
2337
2338void dm_get(struct mapped_device *md)
2339{
2340 atomic_inc(&md->holders);
2341 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2342}
2343
2344const char *dm_device_name(struct mapped_device *md)
2345{
2346 return md->name;
2347}
2348EXPORT_SYMBOL_GPL(dm_device_name);
2349
2350static void __dm_destroy(struct mapped_device *md, bool wait)
2351{
2352 struct dm_table *map;
2353 int srcu_idx;
2354
2355 might_sleep();
2356
2357 spin_lock(&_minor_lock);
2358 map = dm_get_live_table(md, &srcu_idx);
2359 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2360 set_bit(DMF_FREEING, &md->flags);
2361 spin_unlock(&_minor_lock);
2362
2363 if (!dm_suspended_md(md)) {
2364 dm_table_presuspend_targets(map);
2365 dm_table_postsuspend_targets(map);
2366 }
2367
2368
2369 dm_put_live_table(md, srcu_idx);
2370
2371
2372
2373
2374
2375
2376
2377 if (wait)
2378 while (atomic_read(&md->holders))
2379 msleep(1);
2380 else if (atomic_read(&md->holders))
2381 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2382 dm_device_name(md), atomic_read(&md->holders));
2383
2384 dm_sysfs_exit(md);
2385 dm_table_destroy(__unbind(md));
2386 free_dev(md);
2387}
2388
2389void dm_destroy(struct mapped_device *md)
2390{
2391 __dm_destroy(md, true);
2392}
2393
2394void dm_destroy_immediate(struct mapped_device *md)
2395{
2396 __dm_destroy(md, false);
2397}
2398
2399void dm_put(struct mapped_device *md)
2400{
2401 atomic_dec(&md->holders);
2402}
2403EXPORT_SYMBOL_GPL(dm_put);
2404
2405static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2406{
2407 int r = 0;
2408 DECLARE_WAITQUEUE(wait, current);
2409
2410 add_wait_queue(&md->wait, &wait);
2411
2412 while (1) {
2413 set_current_state(interruptible);
2414
2415 if (!md_in_flight(md))
2416 break;
2417
2418 if (interruptible == TASK_INTERRUPTIBLE &&
2419 signal_pending(current)) {
2420 r = -EINTR;
2421 break;
2422 }
2423
2424 io_schedule();
2425 }
2426 set_current_state(TASK_RUNNING);
2427
2428 remove_wait_queue(&md->wait, &wait);
2429
2430 return r;
2431}
2432
2433
2434
2435
2436static void dm_wq_work(struct work_struct *work)
2437{
2438 struct mapped_device *md = container_of(work, struct mapped_device,
2439 work);
2440 struct bio *c;
2441 int srcu_idx;
2442 struct dm_table *map;
2443
2444 map = dm_get_live_table(md, &srcu_idx);
2445
2446 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2447 spin_lock_irq(&md->deferred_lock);
2448 c = bio_list_pop(&md->deferred);
2449 spin_unlock_irq(&md->deferred_lock);
2450
2451 if (!c)
2452 break;
2453
2454 if (dm_request_based(md))
2455 generic_make_request(c);
2456 else
2457 __split_and_process_bio(md, map, c);
2458 }
2459
2460 dm_put_live_table(md, srcu_idx);
2461}
2462
2463static void dm_queue_flush(struct mapped_device *md)
2464{
2465 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2466 smp_mb__after_clear_bit();
2467 queue_work(md->wq, &md->work);
2468}
2469
2470
2471
2472
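/*
 * Swap in a new table and return the old one for the caller to destroy.
 * Only valid while the device is suspended; returns an ERR_PTR on failure.
 */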
2473struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2474{
2475 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2476 struct queue_limits limits;
2477 int r;
2478
2479 mutex_lock(&md->suspend_lock);
2480
2481
2482 if (!dm_suspended_md(md))
2483 goto out;
2484
2485
2486
2487
2488
2489
2490
2491 if (dm_table_has_no_data_devices(table)) {
2492 live_map = dm_get_live_table_fast(md);
2493 if (live_map)
2494 limits = md->queue->limits;
2495 dm_put_live_table_fast(md);
2496 }
2497
2498 if (!live_map) {
2499 r = dm_calculate_queue_limits(table, &limits);
2500 if (r) {
2501 map = ERR_PTR(r);
2502 goto out;
2503 }
2504 }
2505
2506 map = __bind(md, table, &limits);
2507
2508out:
2509 mutex_unlock(&md->suspend_lock);
2510 return map;
2511}
2512
2513
2514
2515
2516
2517static int lock_fs(struct mapped_device *md)
2518{
2519 int r;
2520
2521 WARN_ON(md->frozen_sb);
2522
2523 md->frozen_sb = freeze_bdev(md->bdev);
2524 if (IS_ERR(md->frozen_sb)) {
2525 r = PTR_ERR(md->frozen_sb);
2526 md->frozen_sb = NULL;
2527 return r;
2528 }
2529
2530 set_bit(DMF_FROZEN, &md->flags);
2531
2532 return 0;
2533}
2534
2535static void unlock_fs(struct mapped_device *md)
2536{
2537 if (!test_bit(DMF_FROZEN, &md->flags))
2538 return;
2539
2540 thaw_bdev(md->bdev, md->frozen_sb);
2541 md->frozen_sb = NULL;
2542 clear_bit(DMF_FROZEN, &md->flags);
2543}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
2561int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2562{
2563 struct dm_table *map = NULL;
2564 int r = 0;
2565 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
2566 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
2567
2568 mutex_lock(&md->suspend_lock);
2569
2570 if (dm_suspended_md(md)) {
2571 r = -EINVAL;
2572 goto out_unlock;
2573 }
2574
2575 map = md->map;
2576
2577
2578
2579
2580
2581 if (noflush)
2582 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2583
2584
2585 dm_table_presuspend_targets(map);
2586
2587
2588
2589
2590
2591
2592
2593 if (!noflush && do_lockfs) {
2594 r = lock_fs(md);
2595 if (r)
2596 goto out_unlock;
2597 }
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2612 synchronize_srcu(&md->io_barrier);
2613
2614
2615
2616
2617
2618 if (dm_request_based(md))
2619 stop_queue(md->queue);
2620
2621 flush_workqueue(md->wq);
2622
2623
2624
2625
2626
2627
2628 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
2629
2630 if (noflush)
2631 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2632 synchronize_srcu(&md->io_barrier);
2633
2634
2635 if (r < 0) {
2636 dm_queue_flush(md);
2637
2638 if (dm_request_based(md))
2639 start_queue(md->queue);
2640
2641 unlock_fs(md);
2642 goto out_unlock;
2643 }
2644
2645
2646
2647
2648
2649
2650
2651 set_bit(DMF_SUSPENDED, &md->flags);
2652
2653 dm_table_postsuspend_targets(map);
2654
2655out_unlock:
2656 mutex_unlock(&md->suspend_lock);
2657 return r;
2658}
2659
2660int dm_resume(struct mapped_device *md)
2661{
2662 int r = -EINVAL;
2663 struct dm_table *map = NULL;
2664
2665 mutex_lock(&md->suspend_lock);
2666 if (!dm_suspended_md(md))
2667 goto out;
2668
2669 map = md->map;
2670 if (!map || !dm_table_get_size(map))
2671 goto out;
2672
2673 r = dm_table_resume_targets(map);
2674 if (r)
2675 goto out;
2676
2677 dm_queue_flush(md);
2678
2679
2680
2681
2682
2683
2684 if (dm_request_based(md))
2685 start_queue(md->queue);
2686
2687 unlock_fs(md);
2688
2689 clear_bit(DMF_SUSPENDED, &md->flags);
2690
2691 r = 0;
2692out:
2693 mutex_unlock(&md->suspend_lock);
2694
2695 return r;
2696}
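/*
 * Usage sketch (hypothetical caller; the real one is the dm ioctl interface,
 * which adds locking and error handling not shown): a table reload is
 * expected to look roughly like
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR_OR_NULL(old_map))
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 *
 * "new_table" is an assumed, already-populated struct dm_table.
 */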
2697
2698
2699
2700
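/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/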
2701int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2702 unsigned cookie)
2703{
2704 char udev_cookie[DM_COOKIE_LENGTH];
2705 char *envp[] = { udev_cookie, NULL };
2706
2707 if (!cookie)
2708 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2709 else {
2710 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2711 DM_COOKIE_ENV_VAR_NAME, cookie);
2712 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2713 action, envp);
2714 }
2715}
2716
2717uint32_t dm_next_uevent_seq(struct mapped_device *md)
2718{
2719 return atomic_add_return(1, &md->uevent_seq);
2720}
2721
2722uint32_t dm_get_event_nr(struct mapped_device *md)
2723{
2724 return atomic_read(&md->event_nr);
2725}
2726
2727int dm_wait_event(struct mapped_device *md, int event_nr)
2728{
2729 return wait_event_interruptible(md->eventq,
2730 (event_nr != atomic_read(&md->event_nr)));
2731}
2732
2733void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2734{
2735 unsigned long flags;
2736
2737 spin_lock_irqsave(&md->uevent_lock, flags);
2738 list_add(elist, &md->uevent_list);
2739 spin_unlock_irqrestore(&md->uevent_lock, flags);
2740}
2741
2742
2743
2744
2745
2746struct gendisk *dm_disk(struct mapped_device *md)
2747{
2748 return md->disk;
2749}
2750
2751struct kobject *dm_kobject(struct mapped_device *md)
2752{
2753 return &md->kobj;
2754}
2755
2756
2757
2758
2759
2760struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2761{
2762 struct mapped_device *md;
2763
2764 md = container_of(kobj, struct mapped_device, kobj);
2765 if (&md->kobj != kobj)
2766 return NULL;
2767
2768 if (test_bit(DMF_FREEING, &md->flags) ||
2769 dm_deleting_md(md))
2770 return NULL;
2771
2772 dm_get(md);
2773 return md;
2774}
2775
2776int dm_suspended_md(struct mapped_device *md)
2777{
2778 return test_bit(DMF_SUSPENDED, &md->flags);
2779}
2780
2781int dm_suspended(struct dm_target *ti)
2782{
2783 return dm_suspended_md(dm_table_get_md(ti->table));
2784}
2785EXPORT_SYMBOL_GPL(dm_suspended);
2786
2787int dm_noflush_suspending(struct dm_target *ti)
2788{
2789 return __noflush_suspending(dm_table_get_md(ti->table));
2790}
2791EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2792
2793struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
2794{
2795 struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
2796 struct kmem_cache *cachep;
2797 unsigned int pool_size;
2798 unsigned int front_pad;
2799
2800 if (!pools)
2801 return NULL;
2802
2803 if (type == DM_TYPE_BIO_BASED) {
2804 cachep = _io_cache;
2805 pool_size = 16;
2806 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2807 } else if (type == DM_TYPE_REQUEST_BASED) {
2808 cachep = _rq_tio_cache;
2809 pool_size = MIN_IOS;
2810 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2811
2812 WARN_ON(per_bio_data_size != 0);
2813 } else
2814 goto out;
2815
2816 pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
2817 if (!pools->io_pool)
2818 goto out;
2819
2820 pools->bs = bioset_create(pool_size, front_pad);
2821 if (!pools->bs)
2822 goto out;
2823
2824 if (integrity && bioset_integrity_create(pools->bs, pool_size))
2825 goto out;
2826
2827 return pools;
2828
2829out:
2830 dm_free_md_mempools(pools);
2831
2832 return NULL;
2833}
2834
2835void dm_free_md_mempools(struct dm_md_mempools *pools)
2836{
2837 if (!pools)
2838 return;
2839
2840 if (pools->io_pool)
2841 mempool_destroy(pools->io_pool);
2842
2843 if (pools->bs)
2844 bioset_free(pools->bs);
2845
2846 kfree(pools);
2847}
2848
2849static const struct block_device_operations dm_blk_dops = {
2850 .open = dm_blk_open,
2851 .release = dm_blk_close,
2852 .ioctl = dm_blk_ioctl,
2853 .getgeo = dm_blk_getgeo,
2854 .owner = THIS_MODULE
2855};
2856
2857EXPORT_SYMBOL(dm_get_mapinfo);
2858
2859
2860
2861
2862module_init(dm_init);
2863module_exit(dm_exit);
2864
2865module_param(major, uint, 0);
2866MODULE_PARM_DESC(major, "The major number of the device mapper");
2867MODULE_DESCRIPTION(DM_NAME " driver");
2868MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2869MODULE_LICENSE("GPL");
2870