/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
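
/*
 * Worked example of the node geometry (illustrative, assuming a 64-byte
 * L1 cache line and an 8-byte sector_t):
 *
 *	NODE_SIZE         = 64 bytes
 *	KEYS_PER_NODE     = 64 / 8 = 8 keys per node
 *	CHILDREN_PER_NODE = 8 + 1 = 9 children per internal node
 *
 * Each btree node then fills exactly one cache line, so a lookup reads
 * at most one line per level of the tree.
 */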

/*
 * The table has always exactly one reference from either mapped_device->map
 * or hash_cell->new_map.  This reference is not counted in table->holders.
 * A pair of dm_create_table/dm_destroy_table functions is used for table
 * creation/destruction.
 *
 * Temporary references from the other code increase table->holders.  A pair
 * of dm_table_get/dm_table_put functions is used to manipulate it.
 *
 * When the table is about to be destroyed, we wait for table->holders to
 * drop to zero.
 */
struct dm_table {
	struct mapped_device *md;
	atomic_t holders;
	unsigned type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;
	unsigned integrity_supported:1;
	unsigned singleton:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

	struct list_head target_callbacks;
};

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
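
/*
 * Illustrative values (base 9, matching CHILDREN_PER_NODE in the
 * example above):
 *
 *	int_log(1, 9)  == 0
 *	int_log(9, 9)  == 1	(9 -> 1)
 *	int_log(13, 9) == 2	(13 -> 2 -> 1)
 *
 * i.e. how many times n must be divided by base, rounding up, to get
 * down to a single node.
 */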

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vzalloc(size);

	return addr;
}
EXPORT_SYMBOL(dm_vcalloc);
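
/*
 * Usage sketch (hypothetical numbers): dm_vcalloc() behaves like a
 * vzalloc()-backed calloc().  On a 32-bit machine,
 *
 *	dm_vcalloc(0x20000000, 16)	-> NULL (0x20000000 > ULONG_MAX / 16)
 *	dm_vcalloc(1024, 16)		-> 16KiB of zeroed memory
 */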

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	INIT_LIST_HEAD(&t->target_callbacks);
	atomic_set(&t->holders, 0);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
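
/*
 * Lifecycle sketch (illustrative only; the authoritative callers live in
 * dm-ioctl.c, and error handling is omitted here):
 *
 *	struct dm_table *t;
 *
 *	dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	dm_table_add_target(t, "linear", 0, len, "/dev/sdb1 0");
 *	dm_table_complete(t);
 *	...
 *	dm_table_destroy(t);
 */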

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("dm_table_destroy: dm_put_device call missing for %s",
		       dd->dm_dev.name);
		kfree(dd);
	}
}

void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	if (!t)
		return;

	while (atomic_read(&t->holders))
		msleep(1);
	smp_mb();

	/* free the indexes */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	free_devices(&t->devices);

	dm_free_md_mempools(t->mempools);

	kfree(t);
}

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}
EXPORT_SYMBOL(dm_table_get);

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	smp_mb__before_atomic_dec();
	atomic_dec(&t->holders);
}
EXPORT_SYMBOL(dm_table_put);

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev.bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
		    struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	d->dm_dev.bdev = bdev;
	return 0;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
	if (!d->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
	d->dm_dev.bdev = NULL;
}

/*
 * If possible, this checks whether an area of a destination device
 * is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q;
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];

	/*
	 * Some devices exist without request functions,
	 * such as loop devices not yet bound to backing files.
	 * Forbid the use of such devices.
	 */
	q = bdev_get_queue(bdev);
	if (!q || !q->make_request_fn) {
		DMWARN("%s: %s is not yet initialised: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev_internal dd_new, dd_old;

	dd_new = dd_old = *dd;

	dd_new.dm_dev.mode |= new_mode;
	dd_new.dm_dev.bdev = NULL;

	r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
	if (r)
		return r;

	dd->dm_dev.mode |= new_mode;
	close_dev(&dd_old, md);

	return 0;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result)
{
	int r;
	dev_t uninitialized_var(dev);
	struct dm_dev_internal *dd;
	unsigned int major, minor;
	struct dm_table *t = ti->table;
	char dummy;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		struct block_device *bdev = lookup_bdev(path);

		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->dm_dev.mode = mode;
		dd->dm_dev.bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->dm_dev.name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	*result = &dd->dm_dev;
	return 0;
}
EXPORT_SYMBOL(dm_get_device);
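
/*
 * Usage sketch (hypothetical target constructor; not from this file):
 *
 *	struct dm_dev *dev;
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev))
 *		return -EINVAL;
 *
 * argv[0] may be either a "major:minor" pair or a path name.  Each
 * successful call must be balanced by dm_put_device(ti, dev), normally
 * in the target's dtr method.
 */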

int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
			 sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}

	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);

	/*
	 * Check if merge fn is supported.
	 * If not we'll force DM to use PAGE_SIZE or
	 * smaller I/O, just to be safe.
	 */
	if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
		blk_limits_max_hw_sectors(limits,
					  (unsigned int) (PAGE_SIZE >> 9));
	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
						  dm_dev);

	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
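
/*
 * Example (illustrative): the split is destructive and honours backslash
 * escapes, so a writable copy of
 *
 *	"0 409600 linear /dev/hda 0"
 *
 * yields *argc == 5 and argv[] == { "0", "409600", "linear", "/dev/hda",
 * "0" }, while the input "a\ b" becomes the single argument "a b".
 */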

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	/*
	 * Check each entry in the table in turn.
	 */
	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		blk_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
		       "not aligned to h/w logical block size %u",
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
		       limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}
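
/*
 * Worked example (hypothetical geometry): with limits->logical_block_size
 * of 4096 bytes, device_logical_block_size_sects is 8.  A first target of
 * 300 sectors leaves next_target_start == 300 % 8 == 4, so remaining == 4:
 * the next target starts mid-block and must absorb those 4 sectors.  That
 * is only acceptable if 4 is a multiple of that target's own logical block
 * size in sectors (512, 1024 or 2048 bytes, but not 4096); otherwise the
 * loop above breaks early and -EINVAL is returned.
 */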

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(tgt->type)) {
		if (t->num_targets) {
			DMERR("%s: target type %s must appear alone in table",
			      dm_device_name(t->md), type);
			return -EINVAL;
		}
		t->singleton = 1;
	}

	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
		DMERR("%s: target type %s may not be included in read-only tables",
		      dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != tgt->type) {
			DMERR("%s: immutable target type %s cannot be mixed with other target types",
			      dm_device_name(t->md), t->immutable_target_type->name);
			return -EINVAL;
		}
	} else if (dm_target_is_immutable(tgt->type)) {
		if (t->num_targets) {
			DMERR("%s: immutable target type %s cannot be mixed with other target types",
			      dm_device_name(t->md), tgt->type->name);
			return -EINVAL;
		}
		t->immutable_target_type = tgt->type;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	if (!tgt->num_discard_bios && tgt->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

/*
 * Target argument parsing.
 */
static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
			     unsigned *value, char **error, unsigned grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
	char dummy;

	if (!arg_str ||
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);
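
/*
 * Sketch of the argument helpers in a hypothetical target constructor
 * (the names and bounds are illustrative, not from this file):
 *
 *	static struct dm_arg _args[] = {
 *		{0, 4, "Invalid number of feature args"},
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned num_features;
 *
 *	if (dm_read_arg_group(&_args[0], &as, &num_features, &ti->error))
 *		return -EINVAL;
 *
 * dm_read_arg() validates and consumes exactly one argument;
 * dm_read_arg_group() additionally checks that at least that many
 * arguments remain in the set.
 */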

static int dm_table_set_type(struct dm_table *t)
{
	unsigned i;
	unsigned bio_based = 0, request_based = 0;
	struct dm_target *tgt;
	struct dm_dev_internal *dd;
	struct list_head *devices;

	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
		if (dm_target_request_based(tgt))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMWARN("Inconsistent table: different target types"
			       " can't be mixed up");
			return -EINVAL;
		}
	}

	if (bio_based) {
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	/* Non-request-stackable devices can't be used for request-based dm */
	devices = dm_table_get_devices(t);
	list_for_each_entry(dd, devices, list) {
		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
			DMWARN("table load rejected: including"
			       " non-request-stackable devices");
			return -EINVAL;
		}
	}

	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMWARN("Request-based dm doesn't support multiple targets yet");
		return -EINVAL;
	}

	t->type = DM_TYPE_REQUEST_BASED;

	return 0;
}

unsigned dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}

bool dm_table_request_based(struct dm_table *t)
{
	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
}

int dm_table_alloc_md_mempools(struct dm_table *t)
{
	unsigned type = dm_table_get_type(t);
	unsigned per_bio_data_size = 0;
	struct dm_target *tgt;
	unsigned i;

	if (unlikely(type == DM_TYPE_NONE)) {
		DMWARN("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	if (type == DM_TYPE_BIO_BASED)
		for (i = 0; i < t->num_targets; i++) {
			tgt = t->targets + i;
			per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
		}

	t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
	if (!t->mempools)
		return -ENOMEM;

	return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
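
/*
 * Illustrative sizing (with KEYS_PER_NODE == 8, CHILDREN_PER_NODE == 9):
 * a table of 100 targets gives leaf_nodes = dm_div_up(100, 8) = 13 and
 * depth = 1 + int_log(13, 9) = 3, i.e. 13 leaf nodes, dm_div_up(13, 9)
 * = 2 internal nodes above them, and a single root node.
 */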

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * If %match_all is true, all devices' profiles must match.
 * If %match_all is false, all devices must at least have an
 * allocated integrity profile; but uninitialized is ok.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
						    bool match_all)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev.bdev->bd_disk;
		if (!blk_get_integrity(template_disk))
			goto no_integrity;
		if (!match_all && !blk_integrity_is_initialized(template_disk))
			continue; /* skip uninitialized profiles */
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}

/*
 * Register the mapped device for blk_integrity support if
 * the underlying devices have an integrity profile.  But all devices
 * may not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Stacked DM devices force a 2 stage integrity profile
 * validation:
 * 1 - during load, validate all initialized integrity profiles match
 * 2 - during resume, validate all integrity profiles match
 */
static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
{
	struct gendisk *template_disk = NULL;

	template_disk = dm_table_get_integrity_disk(t, false);
	if (!template_disk)
		return 0;

	if (!blk_integrity_is_initialized(dm_disk(md))) {
		t->integrity_supported = 1;
		return blk_integrity_register(dm_disk(md), NULL);
	}

	/*
	 * If DM device already has an initialized integrity
	 * profile the new profile should not conflict.
	 */
	if (blk_integrity_is_initialized(template_disk) &&
	    blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMWARN("%s: conflict with existing integrity profile: "
		       "%s profile mismatch",
		       dm_device_name(t->md),
		       template_disk->disk_name);
		return 1;
	}

	/* Preserve existing initialized integrity profile */
	t->integrity_supported = 1;
	return 0;
}

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_set_type(t);
	if (r) {
		DMERR("unable to set table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_prealloc_integrity(t, t->md);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
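
/*
 * Lookup sketch: at each level, k selects the first key >= sector in the
 * current node and (n, k) index the child visited at the next level.
 * With the hypothetical depth-3 tree above, a lookup touches at most
 * three nodes (three cache lines) regardless of how many targets the
 * table holds.  The result may be the empty sentinel entry appended by
 * alloc_targets(), which callers trap with dm_target_is_valid().
 */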

static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned *num_devices = data;

	(*num_devices)++;

	return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
	struct dm_target *uninitialized_var(ti);
	unsigned i = 0, num_devices = 0;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			return false;

		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	blk_set_stacking_limits(limits);

	while (i < dm_table_get_num_targets(table)) {
		blk_set_stacking_limits(&ti_limits);

		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			goto combine_limits;

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device "
			       "(start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(table->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);
	}

	return validate_hardware_logical_block_alignment(table, limits);
}

/*
 * Set the integrity profile for this device if all devices used have
 * matching profiles.  We're quite deep in the resume path but still
 * don't know if all devices (particularly DM devices this device
 * may be stacked on) have matching profiles.  Even if the profiles
 * don't match we have no way to fail (to resume) at this point.
 */
static void dm_table_set_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	if (!blk_get_integrity(dm_disk(t->md)))
		return;

	template_disk = dm_table_get_integrity_disk(t, true);
	if (template_disk)
		blk_integrity_register(dm_disk(t->md),
				       blk_get_integrity(template_disk));
	else if (blk_integrity_is_initialized(dm_disk(t->md)))
		DMWARN("%s: device no longer has a valid integrity profile",
		       dm_device_name(t->md));
	else
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
}

static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned flush = (*(unsigned *)data);
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && (q->flush_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
{
	struct dm_target *ti;
	unsigned i = 0;

	/*
	 * Require at least one underlying device to support flushes.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting flushes must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_flush_bios)
			continue;

		if (ti->flush_supported)
			return 1;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, &flush))
			return 1;
	}

	return 0;
}

static bool dm_table_discard_zeroes_data(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/* Ensure that all targets support discard_zeroes_data. */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (ti->discard_zeroes_data_unsupported)
			return 0;
	}

	return 1;
}

static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_nonrot(q);
}

static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_add_random(q);
}

/*
 * Require every underlying device of every target to satisfy func,
 * as reported by each target's iterate_devices method.
 */
static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, func, NULL))
			return 0;
	}

	return 1;
}

static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
					 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_same_sectors;
}

static bool dm_table_supports_write_same(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_write_same_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
			return false;
	}

	return true;
}

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
{
	unsigned flush = 0;

	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (!dm_table_supports_discards(t))
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

	if (dm_table_supports_flush(t, REQ_FLUSH)) {
		flush |= REQ_FLUSH;
		if (dm_table_supports_flush(t, REQ_FUA))
			flush |= REQ_FUA;
	}
	blk_queue_flush(q, flush);

	if (!dm_table_discard_zeroes_data(t))
		q->limits.discard_zeroes_data = 0;

	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_all_devices_attribute(t, device_is_nonrot))
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);

	if (!dm_table_supports_write_same(t))
		q->limits.max_write_same_sectors = 0;

	dm_table_set_integrity(t);

	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool; not all target types support this.
	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
	 * support entropy contribution.
	 */
	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

	/*
	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
	 * visible to other CPUs because, once the flag is set, incoming bios
	 * are processed by request-based dm, which refers to the queue
	 * settings.
	 * Until the flag is set, bios are passed to bio-based dm and queued
	 * to md->deferred where queue settings are not needed yet.
	 * Those bios are passed to request-based dm at the resume time.
	 */
	smp_mb();
	if (dm_table_request_based(t))
		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
EXPORT_SYMBOL(dm_table_get_mode);

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
	list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_target_callbacks *cb;
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}

	list_for_each_entry(cb, &t->target_callbacks, list)
		if (cb->congested_fn)
			r |= cb->congested_fn(cb, bdi_bits);

	return r;
}

int dm_table_any_busy_target(struct dm_table *t)
{
	unsigned i;
	struct dm_target *ti;

	for (i = 0; i < t->num_targets; i++) {
		ti = t->targets + i;
		if (ti->type->busy && ti->type->busy(ti))
			return 1;
	}

	return 0;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);

static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_discard(q);
}

bool dm_table_supports_discards(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/*
	 * Unless any target used by the table set discards_supported,
	 * require at least one underlying device to support discards.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting discard selectively must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_discard_bios)
			continue;

		if (ti->discards_supported)
			return 1;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
			return 1;
	}

	return 0;
}