/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

	struct list_head target_callbacks;
};

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vzalloc(size);

	return addr;
}
EXPORT_SYMBOL(dm_vcalloc);

/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

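/*
 * Allocate and initialise an empty table with room for at least
 * num_targets entries, rounded up to a whole btree leaf node.
 */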
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	INIT_LIST_HEAD(&t->target_callbacks);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (!num_targets) {
		kfree(t);
		return -ENOMEM;
	}

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->type = DM_TYPE_NONE;
	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}

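/*
 * Tear down a table: run every target's destructor, drop target type
 * references, free the btree index and any devices still on the list.
 */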
void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	if (!t)
		return;

	/* free the indexes */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	free_devices(&t->devices, t->md);

	dm_free_md_mempools(t->mempools);

	kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * If possible, this checks an area of a destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q;
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];

	/*
	 * Some devices exist without request functions,
	 * such as loop devices not yet bound to backing files.
	 * Forbid the use of such devices.
	 */
	q = bdev_get_queue(bdev);
	if (!q || !q->make_request_fn) {
		DMWARN("%s: %s is not yet initialised: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	/*
	 * If the target is mapped to zoned block device(s), check
	 * that the zones are not partially mapped.
	 */
	if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
		unsigned int zone_sectors = bdev_zone_sectors(bdev);

		if (start & (zone_sectors - 1)) {
			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
			       dm_device_name(ti->table->md),
			       (unsigned long long)start,
			       zone_sectors, bdevname(bdev, b));
			return 1;
		}

		/*
		 * Note: The last zone of a zoned block device may be smaller
		 * than other zones. So for a target mapping the end of a
		 * zoned block device with such a zone, len would not be zone
		 * aligned. We do not allow such last smaller zone to be part
		 * of the mapping here to ensure that mappings with multiple
		 * devices do not end up with a smaller zone in the middle of
		 * the sector range.
		 */
		if (len & (zone_sectors - 1)) {
			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
			       dm_device_name(ti->table->md),
			       (unsigned long long)len,
			       zone_sectors, bdevname(bdev, b));
			return 1;
		}
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently.
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;

	old_dev = dd->dm_dev;

	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}

/*
 * Convert the path to a device
 */
dev_t dm_get_dev_t(const char *path)
{
	dev_t dev;
	struct block_device *bdev;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		dev = name_to_dev_t(path);
	else {
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev_internal *dd;
	struct dm_table *t = ti->table;

	BUG_ON(!t);

	dev = dm_get_dev_t(path);
	if (!dev)
		return -ENODEV;

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
			kfree(dd);
			return r;
		}

		refcount_set(&dd->count, 1);
		list_add(&dd->list, &t->devices);
		goto out;

	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	refcount_inc(&dd->count);
out:
	*result = dd->dm_dev;
	return 0;
}
EXPORT_SYMBOL(dm_get_device);

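/*
 * iterate_devices callback: stack one underlying device's limits into the
 * queue_limits passed via data, and note its zoned model.
 */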
static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}

	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);

	limits->zoned = blk_queue_zoned_model(q);

	return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct list_head *devices = &ti->table->devices;
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMWARN("%s: device %s not in table devices list",
		       dm_device_name(ti->table->md), d->name);
		return;
	}
	if (refcount_dec_and_test(&dd->count)) {
		dm_put_table_device(ti->table->md, d);
		list_del(&dd->list);
		kfree(dd);
	}
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *size, char **old_argv)
{
	char **argv;
	unsigned new_size;
	gfp_t gfp;

	if (*size) {
		new_size = *size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
	if (argv && old_argv) {
		memcpy(argv, old_argv, *size * sizeof(*argv));
		*size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

/*
 * Impose necessary and sufficient conditions on a devices's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i;

	/*
	 * Check each entry in the table in turn.
	 */
	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		ti = dm_table_get_target(table, i);

		blk_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
		       "not aligned to h/w logical block size %u",
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
		       limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}

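/*
 * Add a target to the end of the table: resolve the target type, check
 * table-wide constraints (singleton, immutable, read-only, adjacency),
 * split the parameter string and run the target constructor.
 */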
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "singleton target type must appear alone in table";
			goto bad;
		}
		t->singleton = true;
	}

	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
		tgt->error = "target type may not be included in a read-only table";
		goto bad;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != tgt->type) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
	} else if (dm_target_is_immutable(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
		t->immutable_target_type = tgt->type;
	}

	if (dm_target_has_integrity(tgt->type))
		t->integrity_added = 1;

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	if (!tgt->num_discard_bios && tgt->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

/*
 * Target argument parsing.
 */
static int validate_next_arg(const struct dm_arg *arg,
			     struct dm_arg_set *arg_set,
			     unsigned *value, char **error, unsigned grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
	char dummy;

	if (!arg_str ||
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);

static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
	return (table_type == DM_TYPE_BIO_BASED ||
		table_type == DM_TYPE_DAX_BIO_BASED ||
		table_type == DM_TYPE_NVME_BIO_BASED);
}

static bool __table_type_request_based(enum dm_queue_mode table_type)
{
	return table_type == DM_TYPE_REQUEST_BASED;
}

void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
	t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

/* validate the dax capability of the target device span */
static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
			       sector_t start, sector_t len, void *data)
{
	int blocksize = *(int *) data;

	return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
				       start, len);
}

bool dm_table_supports_dax(struct dm_table *t, int blocksize)
{
	struct dm_target *ti;
	unsigned i;

	/* Ensure that all targets support DAX. */
	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->direct_access)
			return false;

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_supports_dax,
					       &blocksize))
			return false;
	}

	return true;
}

static bool dm_table_does_not_support_partial_completion(struct dm_table *t);

struct verify_rq_based_data {
	unsigned sq_count;
	unsigned mq_count;
};

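/*
 * iterate_devices callback: count blk-mq vs. legacy request queues and
 * report whether this device is usable for request-based dm.
 */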
static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
			      sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	struct verify_rq_based_data *v = data;

	if (queue_is_mq(q))
		v->mq_count++;
	else
		v->sq_count++;

	return queue_is_mq(q);
}

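/*
 * Decide whether this table is bio-based (possibly DAX or NVMe bio-based)
 * or request-based, and verify the choice against the targets and devices.
 */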
static int dm_table_determine_type(struct dm_table *t)
{
	unsigned i;
	unsigned bio_based = 0, request_based = 0, hybrid = 0;
	struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
	struct dm_target *tgt;
	struct list_head *devices = dm_table_get_devices(t);
	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);

	if (t->type != DM_TYPE_NONE) {
		/* target already set the table's type */
		if (t->type == DM_TYPE_BIO_BASED) {
			/* possibly upgrade to a variant of bio-based */
			goto verify_bio_based;
		}
		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
		goto verify_rq_based;
	}

	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
		if (dm_target_hybrid(tgt))
			hybrid = 1;
		else if (dm_target_request_based(tgt))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMERR("Inconsistent table: different target types"
			      " can't be mixed up");
			return -EINVAL;
		}
	}

	if (hybrid && !bio_based && !request_based) {
		/*
		 * The targets can work either way.
		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
		if (__table_type_request_based(live_md_type))
			request_based = 1;
		else
			bio_based = 1;
	}

	if (bio_based) {
verify_bio_based:
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		if (dm_table_supports_dax(t, PAGE_SIZE) ||
		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
			t->type = DM_TYPE_DAX_BIO_BASED;
		} else {
			/* Check if upgrading to NVMe bio-based is valid or required */
			tgt = dm_table_get_immutable_target(t);
			if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
				t->type = DM_TYPE_NVME_BIO_BASED;
				goto verify_rq_based;
			} else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
				t->type = DM_TYPE_NVME_BIO_BASED;
			}
		}
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMERR("%s DM doesn't support multiple targets",
		      t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
		return -EINVAL;
	}

	if (list_empty(devices)) {
		int srcu_idx;
		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

		/* inherit live table's type */
		if (live_table)
			t->type = live_table->type;
		dm_put_live_table(t->md, srcu_idx);
		return 0;
	}

	tgt = dm_table_get_immutable_target(t);
	if (!tgt) {
		DMERR("table load rejected: immutable target is required");
		return -EINVAL;
	} else if (tgt->max_io_len) {
		DMERR("table load rejected: immutable target that splits IO is not supported");
		return -EINVAL;
	}

	/* Non-request-stackable devices can't be used for request-based dm */
	if (!tgt->type->iterate_devices ||
	    !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
		DMERR("table load rejected: including non-request-stackable devices");
		return -EINVAL;
	}
	if (v.sq_count > 0) {
		DMERR("table load rejected: not all devices are blk-mq request-stackable");
		return -EINVAL;
	}

	return 0;
}

enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
	/* Immutable target is implicitly a singleton */
	if (t->num_targets > 1 ||
	    !dm_target_is_immutable(t->targets[0].type))
		return NULL;

	return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);
		if (dm_target_is_wildcard(ti->type))
			return ti;
	}

	return NULL;
}

bool dm_table_bio_based(struct dm_table *t)
{
	return __table_type_bio_based(dm_table_get_type(t));
}

bool dm_table_request_based(struct dm_table *t)
{
	return __table_type_request_based(dm_table_get_type(t));
}

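/*
 * Size and allocate the per-table mempools based on the table type and
 * the largest per_io_data_size any target requests.
 */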
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
	enum dm_queue_mode type = dm_table_get_type(t);
	unsigned per_io_data_size = 0;
	unsigned min_pool_size = 0;
	struct dm_target *ti;
	unsigned i;

	if (unlikely(type == DM_TYPE_NONE)) {
		DMWARN("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	if (__table_type_bio_based(type))
		for (i = 0; i < t->num_targets; i++) {
			ti = t->targets + i;
			per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
			min_pool_size = max(min_pool_size, ti->num_flush_bios);
		}

	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
					   per_io_data_size, min_pool_size);
	if (!t->mempools)
		return -ENOMEM;

	return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}

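/*
 * Allocate and fill the interior nodes of the btree, bottom-up, from the
 * leaf level that already holds the targets' high sectors.
 */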
static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* first calculate the size of the index */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

static bool integrity_profile_exists(struct gendisk *disk)
{
	return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		struct dm_target *ti = dm_table_get_target(t, i);
		if (!dm_target_passes_integrity(ti->type))
			goto no_integrity;
	}

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev->bdev->bd_disk;
		if (!integrity_profile_exists(template_disk))
			goto no_integrity;
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a 2 stage integrity
 * profile validation: First pass during table load, final pass during
 * resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
	struct mapped_device *md = t->md;
	struct gendisk *template_disk = NULL;

	/* If target handles integrity itself do not register it here. */
	if (t->integrity_added)
		return 0;

	template_disk = dm_table_get_integrity_disk(t);
	if (!template_disk)
		return 0;

	if (!integrity_profile_exists(dm_disk(md))) {
		t->integrity_supported = true;
		/*
		 * Register integrity profile during table load; we can do
		 * this because the final profile must match during resume.
		 */
		blk_integrity_register(dm_disk(md),
				       blk_get_integrity(template_disk));
		return 0;
	}

	/*
	 * If DM device already has an initialized integrity
	 * profile the new profile should not conflict.
	 */
	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMWARN("%s: conflict with existing integrity profile: "
		       "%s profile mismatch",
		       dm_device_name(t->md),
		       template_disk->disk_name);
		return 1;
	}

	/* Preserve existing integrity profile */
	t->integrity_supported = true;
	return 0;
}

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_determine_type(t);
	if (r) {
		DMERR("unable to determine table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_register_integrity(t);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller must check that the returned target is valid (i.e. within the
 * table) to trap I/O beyond the end of the device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned *num_devices = data;

	(*num_devices)++;

	return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
	struct dm_target *ti;
	unsigned i, num_devices;

	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		ti = dm_table_get_target(table, i);

		if (!ti->type->iterate_devices)
			return false;

		num_devices = 0;
		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}

static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
				 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	enum blk_zoned_model *zoned_model = data;

	return q && blk_queue_zoned_model(q) == *zoned_model;
}

static bool dm_table_supports_zoned_model(struct dm_table *t,
					  enum blk_zoned_model zoned_model)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (zoned_model == BLK_ZONED_HM &&
		    !dm_target_supports_zoned_hm(ti->type))
			return false;

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
			return false;
	}

	return true;
}

static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
				       sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	unsigned int *zone_sectors = data;

	return q && blk_queue_zone_sectors(q) == *zone_sectors;
}

static bool dm_table_matches_zone_sectors(struct dm_table *t,
					  unsigned int zone_sectors)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
			return false;
	}

	return true;
}

static int validate_hardware_zoned_model(struct dm_table *table,
					 enum blk_zoned_model zoned_model,
					 unsigned int zone_sectors)
{
	if (zoned_model == BLK_ZONED_NONE)
		return 0;

	if (!dm_table_supports_zoned_model(table, zoned_model)) {
		DMERR("%s: zoned model is not consistent across all devices",
		      dm_device_name(table->md));
		return -EINVAL;
	}

	/* Check zone size validity and compatibility */
	if (!zone_sectors || !is_power_of_2(zone_sectors))
		return -EINVAL;

	if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
		DMERR("%s: zone sectors is not consistent across all devices",
		      dm_device_name(table->md));
		return -EINVAL;
	}

	return 0;
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct dm_target *ti;
	struct queue_limits ti_limits;
	unsigned i;
	enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
	unsigned int zone_sectors = 0;

	blk_set_stacking_limits(limits);

	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		blk_set_stacking_limits(&ti_limits);

		ti = dm_table_get_target(table, i);

		if (!ti->type->iterate_devices)
			goto combine_limits;

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
			/*
			 * Remember the zoned model and zone sectors reported
			 * by the devices so they can be validated below.
			 */
			zoned_model = ti_limits.zoned;
			zone_sectors = ti_limits.chunk_sectors;
		}

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device "
			       "(start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(table->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);

		/*
		 * Stack the zoned model: keep the first model reported that
		 * is not BLK_ZONED_NONE.
		 */
		if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
			/*
			 * By default the stacked limits zoned model is
			 * BLK_ZONED_NONE (from blk_set_stacking_limits()).
			 * Update it with the first non-BLK_ZONED_NONE model
			 * seen, which is either the first target device's
			 * zoned model or the model set by the target's
			 * .io_hints.
			 */
			limits->zoned = ti_limits.zoned;
		}
	}

	/*
	 * Verify that the zoned model and zone sectors are consistent
	 * across all devices in the table.
	 */
	if (limits->zoned != BLK_ZONED_NONE) {
		/*
		 * A target may set a zoned model through .io_hints; in that
		 * case validate against the final stacked limits.
		 */
		zoned_model = limits->zoned;
		zone_sectors = limits->chunk_sectors;
	}
	if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
		return -EINVAL;

	return validate_hardware_logical_block_alignment(table, limits);
}

/*
 * Verify that all devices have an integrity profile that matches the
 * DM device's registered integrity profile.  If the profiles don't
 * match then unregister the DM device's integrity profile.
 */
static void dm_table_verify_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	if (t->integrity_added)
		return;

	if (t->integrity_supported) {
		/*
		 * Verify that the original integrity profile
		 * matches all the devices in this table.
		 */
		template_disk = dm_table_get_integrity_disk(t);
		if (template_disk &&
		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
			return;
	}

	if (integrity_profile_exists(dm_disk(t->md))) {
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
		blk_integrity_unregister(dm_disk(t->md));
	}
}

static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned long flush = (unsigned long) data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && (q->queue_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
	struct dm_target *ti;
	unsigned i;

	/*
	 * Require at least one underlying device to support flushes.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting flushes must provide.
	 */
	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_flush_bios)
			continue;

		if (ti->flush_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
			return true;
	}

	return false;
}

static int device_dax_write_cache_enabled(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{
	struct dax_device *dax_dev = dev->dax_dev;

	if (!dax_dev)
		return false;

	if (dax_write_cache_enabled(dax_dev))
		return true;
	return false;
}

static int dm_table_supports_dax_write_cache(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti,
				device_dax_write_cache_enabled, NULL))
			return true;
	}

	return false;
}

static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_nonrot(q);
}

static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_add_random(q);
}

static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, func, NULL))
			return false;
	}

	return true;
}

static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
					sector_t start, sector_t len, void *data)
{
	char b[BDEVNAME_SIZE];

	/* For now, NVMe devices are the only devices of this class */
	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
}

static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
{
	return dm_table_all_devices_attribute(t, device_no_partial_completion);
}

static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
					 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_same_sectors;
}

static bool dm_table_supports_write_same(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_write_same_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
			return false;
	}

	return true;
}

static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_zeroes_sectors;
}

static bool dm_table_supports_write_zeroes(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_write_zeroes_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
			return false;
	}

	return true;
}

static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_discard(q);
}

static bool dm_table_supports_discards(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_discard_bios)
			return false;

		/*
		 * Either the target provides discard support (as implied by setting
		 * 'discards_supported') or it relies on _all_ data devices having
		 * discard support.
		 */
		if (!ti->discards_supported &&
		    (!ti->type->iterate_devices ||
		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
			return false;
	}

	return true;
}

static int device_not_secure_erase_capable(struct dm_target *ti,
					   struct dm_dev *dev, sector_t start,
					   sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_secure_erase(q);
}

static bool dm_table_supports_secure_erase(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned int i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_secure_erase_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
			return false;
	}

	return true;
}

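/*
 * Apply the calculated queue_limits to the mapped device's request_queue
 * and set or clear the queue flags the table supports.
 */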
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
{
	bool wc = false, fua = false;

	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (!dm_table_supports_discards(t)) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
		/* Must also clear discard limits... */
		q->limits.max_discard_sectors = 0;
		q->limits.max_hw_discard_sectors = 0;
		q->limits.discard_granularity = 0;
		q->limits.discard_alignment = 0;
		q->limits.discard_misaligned = 0;
	} else
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);

	if (dm_table_supports_secure_erase(t))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);

	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
		wc = true;
		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
			fua = true;
	}
	blk_queue_write_cache(q, wc, fua);

	if (dm_table_supports_dax(t, PAGE_SIZE))
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);

	if (dm_table_supports_dax_write_cache(t))
		dax_write_cache(t->md->dax_dev, true);

	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_all_devices_attribute(t, device_is_nonrot))
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);

	if (!dm_table_supports_write_same(t))
		q->limits.max_write_same_sectors = 0;
	if (!dm_table_supports_write_zeroes(t))
		q->limits.max_write_zeroes_sectors = 0;

	dm_table_verify_integrity(t);

	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool.  Only request-based targets use this.
	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
	 * have it set.
	 */
	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);

	/*
	 * For a zoned target, the number of zones should be updated for the
	 * correct value to be exposed in sysfs queue/nr_zones.
	 */
	if (blk_queue_is_zoned(q))
		blk_revalidate_disk_zones(t->md->disk);

	/* Allow reads to exceed readahead limits */
	q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
}
1926
1927unsigned int dm_table_get_num_targets(struct dm_table *t)
1928{
1929 return t->num_targets;
1930}
1931
1932struct list_head *dm_table_get_devices(struct dm_table *t)
1933{
1934 return &t->devices;
1935}
1936
1937fmode_t dm_table_get_mode(struct dm_table *t)
1938{
1939 return t->mode;
1940}
1941EXPORT_SYMBOL(dm_table_get_mode);
1942
1943enum suspend_mode {
1944 PRESUSPEND,
1945 PRESUSPEND_UNDO,
1946 POSTSUSPEND,
1947};
1948
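/*
 * Call the appropriate suspend hook (presuspend, presuspend_undo or
 * postsuspend) on every target in the table.
 */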
static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	lockdep_assert_held(&t->md->suspend_lock);

	while (i--) {
		switch (mode) {
		case PRESUSPEND:
			if (ti->type->presuspend)
				ti->type->presuspend(ti);
			break;
		case PRESUSPEND_UNDO:
			if (ti->type->presuspend_undo)
				ti->type->presuspend_undo(ti);
			break;
		case POSTSUSPEND:
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
			break;
		}
		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND);
}

void dm_table_presuspend_undo_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND_UNDO);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, POSTSUSPEND);
}

int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	lockdep_assert_held(&t->md->suspend_lock);

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r) {
			DMERR("%s: %s: preresume failed, error = %d",
			      dm_device_name(t->md), ti->type->name, r);
			return r;
		}
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
	list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);

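/*
 * Report whether any underlying device (or registered target callback)
 * is congested for the given bdi bits.
 */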
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_target_callbacks *cb;
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev->bdev, b));
	}

	list_for_each_entry(cb, &t->target_callbacks, list)
		if (cb->congested_fn)
			r |= cb->congested_fn(cb, bdi_bits);

	return r;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);

const char *dm_table_device_name(struct dm_table *t)
{
	return dm_device_name(t->md);
}
EXPORT_SYMBOL_GPL(dm_table_device_name);

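/*
 * Kick the mapped device's blk-mq hardware queues; only meaningful for
 * request-based tables.
 */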
void dm_table_run_md_queue_async(struct dm_table *t)
{
	struct mapped_device *md;
	struct request_queue *queue;

	if (!dm_table_request_based(t))
		return;

	md = dm_table_get_md(t);
	queue = dm_get_md_queue(md);
	if (queue)
		blk_mq_run_hw_queues(queue, true);
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);