linux/drivers/md/dm-table.c
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

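/*
 * Worked example (assuming a common configuration, not a guarantee):
 * with L1_CACHE_BYTES == 64 and an 8-byte sector_t, each btree node
 * holds KEYS_PER_NODE = 64 / 8 = 8 keys and fans out to
 * CHILDREN_PER_NODE = 9 children, so each level of a lookup touches
 * exactly one cache line.
 */
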
struct dm_table {
        struct mapped_device *md;
        unsigned type;

        /* btree table */
        unsigned int depth;
        unsigned int counts[MAX_DEPTH]; /* in nodes */
        sector_t *index[MAX_DEPTH];

        unsigned int num_targets;
        unsigned int num_allocated;
        sector_t *highs;
        struct dm_target *targets;

        struct target_type *immutable_target_type;

        bool integrity_supported:1;
        bool singleton:1;
        bool all_blk_mq:1;

        /*
         * Indicates the rw permissions for the new logical
         * device.  This should be a combination of FMODE_READ
         * and FMODE_WRITE.
         */
        fmode_t mode;

        /* a list of devices used by this table */
        struct list_head devices;

        /* events get handed up using this callback */
        void (*event_fn)(void *);
        void *event_context;

        struct dm_md_mempools *mempools;

        struct list_head target_callbacks;
};

/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        int result = 0;

        while (n > 1) {
                n = dm_div_up(n, base);
                result++;
        }

        return result;
}

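/*
 * Example: int_log(1000, 9) == 4, since dm_div_up() walks
 * 1000 -> 112 -> 13 -> 2 -> 1 in four steps, matching
 * ceiling(log9(1000)).
 */
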
/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
        return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
                                 unsigned int l, unsigned int n)
{
        return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])
                return (sector_t) -1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
        unsigned int n, k;
        sector_t *node;

        for (n = 0U; n < t->counts[l]; n++) {
                node = get_node(t, l, n);

                for (k = 0U; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }

        return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
        unsigned long size;
        void *addr;

        /*
         * Check that we're not going to overflow.
         */
        if (nmemb > (ULONG_MAX / elem_size))
                return NULL;

        size = nmemb * elem_size;
        addr = vzalloc(size);

        return addr;
}
EXPORT_SYMBOL(dm_vcalloc);

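/*
 * Example: on 64-bit, dm_vcalloc(100, 8) returns 800 zeroed bytes,
 * while dm_vcalloc(ULONG_MAX / 8 + 1, 8) returns NULL because the
 * multiplication would wrap.
 */
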
/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
        sector_t *n_highs;
        struct dm_target *n_targets;

        /*
         * Allocate both the target array and offset array at once.
         * Append an empty entry to catch sectors beyond the end of
         * the device.
         */
        n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
                                          sizeof(sector_t));
        if (!n_highs)
                return -ENOMEM;

        n_targets = (struct dm_target *) (n_highs + num);

        memset(n_highs, -1, sizeof(*n_highs) * num);
        vfree(t->highs);

        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;

        return 0;
}

int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md)
{
        struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;

        INIT_LIST_HEAD(&t->devices);
        INIT_LIST_HEAD(&t->target_callbacks);

        if (!num_targets)
                num_targets = KEYS_PER_NODE;

        num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

        if (!num_targets) {
                kfree(t);
                return -ENOMEM;
        }

        if (alloc_targets(t, num_targets)) {
                kfree(t);
                return -ENOMEM;
        }

        t->type = DM_TYPE_NONE;
        t->mode = mode;
        t->md = md;
        *result = t;
        return 0;
}

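/*
 * Sketch of a typical table life cycle, for orientation only (error
 * handling elided; the "linear" target and its "<dev> <offset>"
 * parameters are just an illustration):
 *
 *        struct dm_table *t;
 *
 *        dm_table_create(&t, FMODE_READ | FMODE_WRITE, 0, md);
 *        dm_table_add_target(t, "linear", 0, len, "/dev/sdX 0");
 *        dm_table_complete(t);
 *        ...
 *        dm_table_destroy(t);
 */
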
static void free_devices(struct list_head *devices, struct mapped_device *md)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, devices) {
                struct dm_dev_internal *dd =
                    list_entry(tmp, struct dm_dev_internal, list);
                DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
                       dm_device_name(md), dd->dm_dev->name);
                dm_put_table_device(md, dd->dm_dev);
                kfree(dd);
        }
}

void dm_table_destroy(struct dm_table *t)
{
        unsigned int i;

        if (!t)
                return;

        /* free the indexes */
        if (t->depth >= 2)
                vfree(t->index[t->depth - 2]);

        /* free the targets */
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *tgt = t->targets + i;

                if (tgt->type->dtr)
                        tgt->type->dtr(tgt);

                dm_put_target_type(tgt->type);
        }

        vfree(t->highs);

        /* free the device list */
        free_devices(&t->devices, t->md);

        dm_free_md_mempools(t->mempools);

        kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
        struct dm_dev_internal *dd;

        list_for_each_entry(dd, l, list)
                if (dd->dm_dev->bdev->bd_dev == dev)
                        return dd;

        return NULL;
}

/*
 * If possible, this checks whether an area of a destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct request_queue *q;
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        sector_t dev_size =
                i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
        unsigned short logical_block_size_sectors =
                limits->logical_block_size >> SECTOR_SHIFT;
        char b[BDEVNAME_SIZE];

        /*
         * Some devices exist without request functions,
         * such as loop devices not yet bound to backing files.
         * Forbid the use of such devices.
         */
        q = bdev_get_queue(bdev);
        if (!q || !q->make_request_fn) {
                DMWARN("%s: %s is not yet initialised: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        if (!dev_size)
                return 0;

        if ((start >= dev_size) || (start + len > dev_size)) {
                DMWARN("%s: %s too small for target: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        if (logical_block_size_sectors <= 1)
                return 0;

        if (start & (logical_block_size_sectors - 1)) {
                DMWARN("%s: start=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)start,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        if (len & (logical_block_size_sectors - 1)) {
                DMWARN("%s: len=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)len,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        return 0;
}

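/*
 * Example of the alignment checks above: with a 4096-byte
 * logical_block_size, logical_block_size_sectors is 8, so a target
 * area starting at sector 7 or spanning 1001 sectors is rejected
 * because 7 & 7 and 1001 & 7 are non-zero.
 */
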
/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
                        struct mapped_device *md)
{
        int r;
        struct dm_dev *old_dev, *new_dev;

        old_dev = dd->dm_dev;

        r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
                                dd->dm_dev->mode | new_mode, &new_dev);
        if (r)
                return r;

        dd->dm_dev = new_dev;
        dm_put_table_device(md, old_dev);

        return 0;
}

/*
 * Convert the path to a device
 */
dev_t dm_get_dev_t(const char *path)
{
        dev_t uninitialized_var(dev);
        struct block_device *bdev;

        bdev = lookup_bdev(path);
        if (IS_ERR(bdev))
                dev = name_to_dev_t(path);
        else {
                dev = bdev->bd_dev;
                bdput(bdev);
        }

        return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);

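/*
 * Example: both dm_get_dev_t("/dev/sda1") and dm_get_dev_t("8:1")
 * resolve to the same dev_t, the first via lookup_bdev() and the
 * second via the name_to_dev_t() fallback.  (The device names here
 * are purely illustrative.)
 */
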
/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                  struct dm_dev **result)
{
        int r;
        dev_t dev;
        struct dm_dev_internal *dd;
        struct dm_table *t = ti->table;

        BUG_ON(!t);

        dev = dm_get_dev_t(path);
        if (!dev)
                return -ENODEV;

        dd = find_device(&t->devices, dev);
        if (!dd) {
                dd = kmalloc(sizeof(*dd), GFP_KERNEL);
                if (!dd)
                        return -ENOMEM;

                r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
                if (r) {
                        kfree(dd);
                        return r;
                }

                atomic_set(&dd->count, 0);
                list_add(&dd->list, &t->devices);

        } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
        }
        atomic_inc(&dd->count);

        *result = dd->dm_dev;
        return 0;
}
EXPORT_SYMBOL(dm_get_device);

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        char b[BDEVNAME_SIZE];

        if (unlikely(!q)) {
                DMWARN("%s: Cannot set limits for nonexistent device %s",
                       dm_device_name(ti->table->md), bdevname(bdev, b));
                return 0;
        }

        if (bdev_stack_limits(limits, bdev, start) < 0)
                DMWARN("%s: adding target device %s caused an alignment inconsistency: "
                       "physical_block_size=%u, logical_block_size=%u, "
                       "alignment_offset=%u, start=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       q->limits.physical_block_size,
                       q->limits.logical_block_size,
                       q->limits.alignment_offset,
                       (unsigned long long) start << SECTOR_SHIFT);

        return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
        int found = 0;
        struct list_head *devices = &ti->table->devices;
        struct dm_dev_internal *dd;

        list_for_each_entry(dd, devices, list) {
                if (dd->dm_dev == d) {
                        found = 1;
                        break;
                }
        }
        if (!found) {
                DMWARN("%s: device %s not in table devices list",
                       dm_device_name(ti->table->md), d->name);
                return;
        }
        if (atomic_dec_and_test(&dd->count)) {
                dm_put_table_device(ti->table->md, d);
                list_del(&dd->list);
                kfree(dd);
        }
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
        struct dm_target *prev;

        if (!table->num_targets)
                return !ti->begin;

        prev = &table->targets[table->num_targets - 1];
        return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
        char **argv;
        unsigned new_size;
        gfp_t gfp;

        if (*array_size) {
                new_size = *array_size * 2;
                gfp = GFP_KERNEL;
        } else {
                new_size = 8;
                gfp = GFP_NOIO;
        }
        argv = kmalloc(new_size * sizeof(*argv), gfp);
        if (argv) {
                memcpy(argv, old_argv, *array_size * sizeof(*argv));
                *array_size = new_size;
        }

        kfree(old_argv);
        return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
        char *start, *end = input, *out, **argv = NULL;
        unsigned array_size = 0;

        *argc = 0;

        if (!input) {
                *argvp = NULL;
                return 0;
        }

        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;

        while (1) {
                /* Skip whitespace */
                start = skip_spaces(end);

                if (!*start)
                        break;  /* success, we hit the end */

                /* 'out' is used to strip any backslash escapes */
                end = out = start;
                while (*end) {
                        /* Everything apart from '\0' can be quoted */
                        if (*end == '\\' && *(end + 1)) {
                                *out++ = *(end + 1);
                                end += 2;
                                continue;
                        }

                        if (isspace(*end))
                                break;  /* end of token */

                        *out++ = *end++;
                }

                /* have we already filled the array ? */
                if ((*argc + 1) > array_size) {
                        argv = realloc_argv(&array_size, argv);
                        if (!argv)
                                return -ENOMEM;
                }

                /* we know this is whitespace */
                if (*end)
                        end++;

                /* terminate the string and put it in the array */
                *out = '\0';
                argv[*argc] = start;
                (*argc)++;
        }

        *argvp = argv;
        return 0;
}

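/*
 * Example: dm_split_args() on the writable string "foo bar\ baz"
 * yields argc == 2 and argv == {"foo", "bar baz"}: tokens split on
 * whitespace, and a backslash quotes the character that follows.
 * The input buffer itself is modified in place.
 */
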
/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
                                                 struct queue_limits *limits)
{
        /*
         * This function uses arithmetic modulo the logical_block_size
         * (in units of 512-byte sectors).
         */
        unsigned short device_logical_block_size_sects =
                limits->logical_block_size >> SECTOR_SHIFT;

        /*
         * Offset of the start of the next table entry, mod logical_block_size.
         */
        unsigned short next_target_start = 0;

        /*
         * Given an aligned bio that extends beyond the end of a
         * target, how many sectors must the next target handle?
         */
        unsigned short remaining = 0;

        struct dm_target *uninitialized_var(ti);
        struct queue_limits ti_limits;
        unsigned i = 0;

        /*
         * Check each entry in the table in turn.
         */
        while (i < dm_table_get_num_targets(table)) {
                ti = dm_table_get_target(table, i++);

                blk_set_stacking_limits(&ti_limits);

                /* combine all target devices' limits */
                if (ti->type->iterate_devices)
                        ti->type->iterate_devices(ti, dm_set_device_limits,
                                                  &ti_limits);

                /*
                 * If the remaining sectors fall entirely within this
                 * table entry, are they compatible with its logical_block_size?
                 */
                if (remaining < ti->len &&
                    remaining & ((ti_limits.logical_block_size >>
                                  SECTOR_SHIFT) - 1))
                        break;  /* Error */

                next_target_start =
                    (unsigned short) ((next_target_start + ti->len) &
                                      (device_logical_block_size_sects - 1));
                remaining = next_target_start ?
                    device_logical_block_size_sects - next_target_start : 0;
        }

        if (remaining) {
                DMWARN("%s: table line %u (start sect %llu len %llu) "
                       "not aligned to h/w logical block size %u",
                       dm_device_name(table->md), i,
                       (unsigned long long) ti->begin,
                       (unsigned long long) ti->len,
                       limits->logical_block_size);
                return -EINVAL;
        }

        return 0;
}

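/*
 * Worked example: with a 4096-byte logical_block_size (8 sectors),
 * a first target of 1001 sectors leaves next_target_start == 1 and
 * remaining == 7; if the next target also has a 4096-byte
 * logical_block_size, 7 & 7 is non-zero and the table is rejected.
 */
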
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params)
{
        int r = -EINVAL, argc;
        char **argv;
        struct dm_target *tgt;

        if (t->singleton) {
                DMERR("%s: target type %s must appear alone in table",
                      dm_device_name(t->md), t->targets->type->name);
                return -EINVAL;
        }

        BUG_ON(t->num_targets >= t->num_allocated);

        tgt = t->targets + t->num_targets;
        memset(tgt, 0, sizeof(*tgt));

        if (!len) {
                DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }

        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
                DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
                return -EINVAL;
        }

        if (dm_target_needs_singleton(tgt->type)) {
                if (t->num_targets) {
                        tgt->error = "singleton target type must appear alone in table";
                        goto bad;
                }
                t->singleton = true;
        }

        if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
                tgt->error = "target type may not be included in a read-only table";
                goto bad;
        }

        if (t->immutable_target_type) {
                if (t->immutable_target_type != tgt->type) {
                        tgt->error = "immutable target type cannot be mixed with other target types";
                        goto bad;
                }
        } else if (dm_target_is_immutable(tgt->type)) {
                if (t->num_targets) {
                        tgt->error = "immutable target type cannot be mixed with other target types";
                        goto bad;
                }
                t->immutable_target_type = tgt->type;
        }

        tgt->table = t;
        tgt->begin = start;
        tgt->len = len;
        tgt->error = "Unknown error";

        /*
         * Does this target adjoin the previous one ?
         */
        if (!adjoin(t, tgt)) {
                tgt->error = "Gap in table";
                goto bad;
        }

        r = dm_split_args(&argc, &argv, params);
        if (r) {
                tgt->error = "couldn't split parameters (insufficient memory)";
                goto bad;
        }

        r = tgt->type->ctr(tgt, argc, argv);
        kfree(argv);
        if (r)
                goto bad;

        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

        if (!tgt->num_discard_bios && tgt->discards_supported)
                DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
                       dm_device_name(t->md), type);

        return 0;

 bad:
        DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
        dm_put_target_type(tgt->type);
        return r;
}

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
                             unsigned *value, char **error, unsigned grouped)
{
        const char *arg_str = dm_shift_arg(arg_set);
        char dummy;

        if (!arg_str ||
            (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
            (*value < arg->min) ||
            (*value > arg->max) ||
            (grouped && arg_set->argc < *value)) {
                *error = arg->error;
                return -EINVAL;
        }

        return 0;
}

int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
                unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
                      unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
        char *r;

        if (as->argc) {
                as->argc--;
                r = *as->argv;
                as->argv++;
                return r;
        }

        return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
        BUG_ON(as->argc < num_args);
        as->argc -= num_args;
        as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);

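/*
 * Sketch of how a target ctr might use these helpers (hypothetical
 * values; the pattern mirrors callers such as dm-mpath):
 *
 *        static struct dm_arg _args[] = {
 *                { 0, 16, "invalid number of feature args" },
 *        };
 *        unsigned num_features;
 *
 *        r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *        if (r)
 *                return r;
 *        dm_consume_args(&as, num_features);
 */
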
static bool __table_type_bio_based(unsigned table_type)
{
        return (table_type == DM_TYPE_BIO_BASED ||
                table_type == DM_TYPE_DAX_BIO_BASED);
}

static bool __table_type_request_based(unsigned table_type)
{
        return (table_type == DM_TYPE_REQUEST_BASED ||
                table_type == DM_TYPE_MQ_REQUEST_BASED);
}

void dm_table_set_type(struct dm_table *t, unsigned type)
{
        t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
                               sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && blk_queue_dax(q);
}

static bool dm_table_supports_dax(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i = 0;

        /* Ensure that all targets support DAX. */
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (!ti->type->direct_access)
                        return false;

                if (!ti->type->iterate_devices ||
                    !ti->type->iterate_devices(ti, device_supports_dax, NULL))
                        return false;
        }

        return true;
}

static int dm_table_determine_type(struct dm_table *t)
{
        unsigned i;
        unsigned bio_based = 0, request_based = 0, hybrid = 0;
        unsigned sq_count = 0, mq_count = 0;
        struct dm_target *tgt;
        struct dm_dev_internal *dd;
        struct list_head *devices = dm_table_get_devices(t);
        unsigned live_md_type = dm_get_md_type(t->md);

        if (t->type != DM_TYPE_NONE) {
                /* target already set the table's type */
                if (t->type == DM_TYPE_BIO_BASED)
                        return 0;
                BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
                goto verify_rq_based;
        }

        for (i = 0; i < t->num_targets; i++) {
                tgt = t->targets + i;
                if (dm_target_hybrid(tgt))
                        hybrid = 1;
                else if (dm_target_request_based(tgt))
                        request_based = 1;
                else
                        bio_based = 1;

                if (bio_based && request_based) {
                        DMWARN("Inconsistent table: different target types"
                               " can't be mixed up");
                        return -EINVAL;
                }
        }

        if (hybrid && !bio_based && !request_based) {
                /*
                 * The targets can work either way.
                 * Determine the type from the live device.
                 * Default to bio-based if device is new.
                 */
                if (__table_type_request_based(live_md_type))
                        request_based = 1;
                else
                        bio_based = 1;
        }

        if (bio_based) {
                /* We must use this table as bio-based */
                t->type = DM_TYPE_BIO_BASED;
                if (dm_table_supports_dax(t) ||
                    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED))
                        t->type = DM_TYPE_DAX_BIO_BASED;
                return 0;
        }

        BUG_ON(!request_based); /* No targets in this table */

        /*
         * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
         * having a compatible target use dm_table_set_type.
         */
        t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
        /*
         * Request-based dm supports only tables that have a single target now.
         * To support multiple targets, request splitting support is needed,
         * and that needs lots of changes in the block-layer.
         * (e.g. request completion process for partial completion.)
         */
        if (t->num_targets > 1) {
                DMWARN("Request-based dm doesn't support multiple targets yet");
                return -EINVAL;
        }

        if (list_empty(devices)) {
                int srcu_idx;
                struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

                /* inherit live table's type and all_blk_mq */
                if (live_table) {
                        t->type = live_table->type;
                        t->all_blk_mq = live_table->all_blk_mq;
                }
                dm_put_live_table(t->md, srcu_idx);
                return 0;
        }

        /* Non-request-stackable devices can't be used for request-based dm */
        list_for_each_entry(dd, devices, list) {
                struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);

                if (!blk_queue_stackable(q)) {
                        DMERR("table load rejected: including"
                              " non-request-stackable devices");
                        return -EINVAL;
                }

                if (q->mq_ops)
                        mq_count++;
                else
                        sq_count++;
        }
        if (sq_count && mq_count) {
                DMERR("table load rejected: not all devices are blk-mq request-stackable");
                return -EINVAL;
        }
        t->all_blk_mq = mq_count > 0;

        if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
                DMERR("table load rejected: all devices are not blk-mq request-stackable");
                return -EINVAL;
        }

        return 0;
}

unsigned dm_table_get_type(struct dm_table *t)
{
        return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
        return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
        /* Immutable target is implicitly a singleton */
        if (t->num_targets > 1 ||
            !dm_target_is_immutable(t->targets[0].type))
                return NULL;

        return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
        struct dm_target *uninitialized_var(ti);
        unsigned i = 0;

        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);
                if (dm_target_is_wildcard(ti->type))
                        return ti;
        }

        return NULL;
}

bool dm_table_bio_based(struct dm_table *t)
{
        return __table_type_bio_based(dm_table_get_type(t));
}

bool dm_table_request_based(struct dm_table *t)
{
        return __table_type_request_based(dm_table_get_type(t));
}

bool dm_table_all_blk_mq_devices(struct dm_table *t)
{
        return t->all_blk_mq;
}

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
        unsigned type = dm_table_get_type(t);
        unsigned per_io_data_size = 0;
        struct dm_target *tgt;
        unsigned i;

        if (unlikely(type == DM_TYPE_NONE)) {
                DMWARN("no table type is set, can't allocate mempools");
                return -EINVAL;
        }

        if (__table_type_bio_based(type))
                for (i = 0; i < t->num_targets; i++) {
                        tgt = t->targets + i;
                        per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
                }

        t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
        if (!t->mempools)
                return -ENOMEM;

        return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
        dm_free_md_mempools(t->mempools);
        t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
        return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
        int i;
        unsigned int total = 0;
        sector_t *indexes;

        /* allocate the space for *all* the indexes */
        for (i = t->depth - 2; i >= 0; i--) {
                t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
                total += t->counts[i];
        }

        indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
        if (!indexes)
                return -ENOMEM;

        /* set up internal nodes, bottom-up */
        for (i = t->depth - 2; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
        }

        return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
        int r = 0;
        unsigned int leaf_nodes;

        /* how many indexes will the btree have ? */
        leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
        t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

        /* leaf layer has already been set up */
        t->counts[t->depth - 1] = leaf_nodes;
        t->index[t->depth - 1] = t->highs;

        if (t->depth >= 2)
                r = setup_indexes(t);

        return r;
}

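/*
 * Worked example: 100 targets need leaf_nodes = dm_div_up(100, 8) = 13
 * leaves, so depth = 1 + int_log(13, 9) = 3.  setup_indexes() then
 * sizes the internal levels as counts[1] = dm_div_up(13, 9) = 2 and
 * counts[0] = 1 (assuming the 8-key/9-child geometry noted above).
 */
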
static bool integrity_profile_exists(struct gendisk *disk)
{
        return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd = NULL;
        struct gendisk *prev_disk = NULL, *template_disk = NULL;

        list_for_each_entry(dd, devices, list) {
                template_disk = dd->dm_dev->bdev->bd_disk;
                if (!integrity_profile_exists(template_disk))
                        goto no_integrity;
                else if (prev_disk &&
                         blk_integrity_compare(prev_disk, template_disk) < 0)
                        goto no_integrity;
                prev_disk = template_disk;
        }

        return template_disk;

no_integrity:
        if (prev_disk)
                DMWARN("%s: integrity not set: %s and %s profile mismatch",
                       dm_device_name(t->md),
                       prev_disk->disk_name,
                       template_disk->disk_name);
        return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a two-stage
 * integrity profile validation: first pass during table load, final
 * pass during resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
        struct mapped_device *md = t->md;
        struct gendisk *template_disk = NULL;

        template_disk = dm_table_get_integrity_disk(t);
        if (!template_disk)
                return 0;

        if (!integrity_profile_exists(dm_disk(md))) {
                t->integrity_supported = true;
                /*
                 * Register integrity profile during table load; we can do
                 * this because the final profile must match during resume.
                 */
                blk_integrity_register(dm_disk(md),
                                       blk_get_integrity(template_disk));
                return 0;
        }

        /*
         * If DM device already has an initialized integrity
         * profile the new profile should not conflict.
         */
        if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
                DMWARN("%s: conflict with existing integrity profile: "
                       "%s profile mismatch",
                       dm_device_name(t->md),
                       template_disk->disk_name);
                return 1;
        }

        /* Preserve existing integrity profile */
        t->integrity_supported = true;
        return 0;
}

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
        int r;

        r = dm_table_determine_type(t);
        if (r) {
                DMERR("unable to determine table type");
                return r;
        }

        r = dm_table_build_index(t);
        if (r) {
                DMERR("unable to build btrees");
                return r;
        }

        r = dm_table_register_integrity(t);
        if (r) {
                DMERR("could not register integrity profile.");
                return r;
        }

        r = dm_table_alloc_md_mempools(t, t->md);
        if (r)
                DMERR("unable to allocate mempools");

        return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context)
{
        mutex_lock(&_event_lock);
        t->event_fn = fn;
        t->event_context = context;
        mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
        /*
         * You can no longer call dm_table_event() from interrupt
         * context; use a bottom half instead.
         */
        BUG_ON(in_interrupt());

        mutex_lock(&_event_lock);
        if (t->event_fn)
                t->event_fn(t->event_context);
        mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

sector_t dm_table_get_size(struct dm_table *t)
{
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
        if (index >= t->num_targets)
                return NULL;

        return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
        unsigned int l, n = 0, k = 0;
        sector_t *node;

        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);

                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        return &t->targets[(KEYS_PER_NODE * n) + k];
}

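/*
 * Example: for a two-target table covering sectors [0, 1000) and
 * [1000, 2000), t->highs holds {999, 1999}, so
 * dm_table_find_target(t, 1500) stops at the key 1999 and returns
 * the second target.
 */
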
static int count_device(struct dm_target *ti, struct dm_dev *dev,
                        sector_t start, sector_t len, void *data)
{
        unsigned *num_devices = data;

        (*num_devices)++;

        return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
        struct dm_target *uninitialized_var(ti);
        unsigned i = 0, num_devices = 0;

        while (i < dm_table_get_num_targets(table)) {
                ti = dm_table_get_target(table, i++);

                if (!ti->type->iterate_devices)
                        return false;

                ti->type->iterate_devices(ti, count_device, &num_devices);
                if (num_devices)
                        return false;
        }

        return true;
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
                              struct queue_limits *limits)
{
        struct dm_target *uninitialized_var(ti);
        struct queue_limits ti_limits;
        unsigned i = 0;

        blk_set_stacking_limits(limits);

        while (i < dm_table_get_num_targets(table)) {
                blk_set_stacking_limits(&ti_limits);

                ti = dm_table_get_target(table, i++);

                if (!ti->type->iterate_devices)
                        goto combine_limits;

                /*
                 * Combine queue limits of all the devices this target uses.
                 */
                ti->type->iterate_devices(ti, dm_set_device_limits,
                                          &ti_limits);

                /* Set I/O hints portion of queue limits */
                if (ti->type->io_hints)
                        ti->type->io_hints(ti, &ti_limits);

                /*
                 * Check each device area is consistent with the target's
                 * overall queue limits.
                 */
                if (ti->type->iterate_devices(ti, device_area_is_invalid,
                                              &ti_limits))
                        return -EINVAL;

combine_limits:
                /*
                 * Merge this target's queue limits into the overall limits
                 * for the table.
                 */
                if (blk_stack_limits(limits, &ti_limits, 0) < 0)
                        DMWARN("%s: adding target device "
                               "(start sect %llu len %llu) "
                               "caused an alignment inconsistency",
                               dm_device_name(table->md),
                               (unsigned long long) ti->begin,
                               (unsigned long long) ti->len);
        }

        return validate_hardware_logical_block_alignment(table, limits);
}

/*
 * Verify that all devices have an integrity profile that matches the
 * DM device's registered integrity profile.  If the profiles don't
 * match then unregister the DM device's integrity profile.
 */
static void dm_table_verify_integrity(struct dm_table *t)
{
        struct gendisk *template_disk = NULL;

        if (t->integrity_supported) {
                /*
                 * Verify that the original integrity profile
                 * matches all the devices in this table.
                 */
                template_disk = dm_table_get_integrity_disk(t);
                if (template_disk &&
                    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
                        return;
        }

        if (integrity_profile_exists(dm_disk(t->md))) {
                DMWARN("%s: unable to establish an integrity profile",
                       dm_device_name(t->md));
                blk_integrity_unregister(dm_disk(t->md));
        }
}

static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
{
        unsigned long flush = (unsigned long) data;
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && (q->queue_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
        struct dm_target *ti;
        unsigned i = 0;

        /*
         * Require at least one underlying device to support flushes.
         * t->devices includes internal dm devices such as mirror logs
         * so we need to use iterate_devices here, which targets
         * supporting flushes must provide.
         */
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (!ti->num_flush_bios)
                        continue;

                if (ti->flush_supported)
                        return true;

                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
                        return true;
        }

        return false;
}

static bool dm_table_discard_zeroes_data(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i = 0;

        /* Ensure that all targets support discard_zeroes_data. */
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (ti->discard_zeroes_data_unsupported)
                        return false;
        }

        return true;
}

static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
                            sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && blk_queue_nonrot(q);
}

static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
                             sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && !blk_queue_add_random(q);
}

static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
                                   sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}

static bool dm_table_all_devices_attribute(struct dm_table *t,
                                           iterate_devices_callout_fn func)
{
        struct dm_target *ti;
        unsigned i = 0;

        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (!ti->type->iterate_devices ||
                    !ti->type->iterate_devices(ti, func, NULL))
                        return false;
        }

        return true;
}

static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
                                         sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && !q->limits.max_write_same_sectors;
}

static bool dm_table_supports_write_same(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i = 0;

        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (!ti->num_write_same_bios)
                        return false;

                if (!ti->type->iterate_devices ||
                    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
                        return false;
        }

        return true;
}

static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && blk_queue_discard(q);
}

static bool dm_table_supports_discards(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i = 0;

        /*
         * Unless any target used by the table set discards_supported,
         * require at least one underlying device to support discards.
         * t->devices includes internal dm devices such as mirror logs
         * so we need to use iterate_devices here, which targets
         * supporting discard selectively must provide.
         */
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (!ti->num_discard_bios)
                        continue;

                if (ti->discards_supported)
                        return true;

                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, device_discard_capable, NULL))
                        return true;
        }

        return false;
}

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                               struct queue_limits *limits)
{
        bool wc = false, fua = false;

        /*
         * Copy table's limits to the DM device's request_queue
         */
        q->limits = *limits;

        if (!dm_table_supports_discards(t))
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
        else
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

        if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
                wc = true;
                if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
                        fua = true;
        }
        blk_queue_write_cache(q, wc, fua);

        if (!dm_table_discard_zeroes_data(t))
                q->limits.discard_zeroes_data = 0;

        /* Ensure that all underlying devices are non-rotational. */
        if (dm_table_all_devices_attribute(t, device_is_nonrot))
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
        else
                queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);

        if (!dm_table_supports_write_same(t))
                q->limits.max_write_same_sectors = 0;

        if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
                queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
        else
                queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

        dm_table_verify_integrity(t);

        /*
         * Determine whether or not this queue's I/O timings contribute
         * to the entropy pool.  Only request-based targets use this.
         * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
         * have it set.
         */
        if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

        /*
         * QUEUE_FLAG_STACKABLE must be set after all queue settings are
         * visible to other CPUs because, once the flag is set, incoming bios
         * are processed by request-based dm, which refers to the queue
         * settings.
         * Until the flag is set, bios are passed to bio-based dm and queued
         * to md->deferred, where queue settings are not needed yet.
         * Those bios are passed to request-based dm at resume time.
         */
        smp_mb();
        if (dm_table_request_based(t))
                queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
        return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
        return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
        return t->mode;
}
EXPORT_SYMBOL(dm_table_get_mode);

enum suspend_mode {
        PRESUSPEND,
        PRESUSPEND_UNDO,
        POSTSUSPEND,
};

static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
        int i = t->num_targets;
        struct dm_target *ti = t->targets;

        while (i--) {
                switch (mode) {
                case PRESUSPEND:
                        if (ti->type->presuspend)
                                ti->type->presuspend(ti);
                        break;
                case PRESUSPEND_UNDO:
                        if (ti->type->presuspend_undo)
                                ti->type->presuspend_undo(ti);
                        break;
                case POSTSUSPEND:
                        if (ti->type->postsuspend)
                                ti->type->postsuspend(ti);
                        break;
                }
                ti++;
        }
}

void dm_table_presuspend_targets(struct dm_table *t)
{
        if (!t)
                return;

        suspend_targets(t, PRESUSPEND);
}

void dm_table_presuspend_undo_targets(struct dm_table *t)
{
        if (!t)
                return;

        suspend_targets(t, PRESUSPEND_UNDO);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
        if (!t)
                return;

        suspend_targets(t, POSTSUSPEND);
}

int dm_table_resume_targets(struct dm_table *t)
{
        int i, r = 0;

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (!ti->type->preresume)
                        continue;

                r = ti->type->preresume(ti);
                if (r) {
                        DMERR("%s: %s: preresume failed, error = %d",
                              dm_device_name(t->md), ti->type->name, r);
                        return r;
                }
        }

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (ti->type->resume)
                        ti->type->resume(ti);
        }

        return 0;
}

void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
        list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
        struct dm_dev_internal *dd;
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_target_callbacks *cb;
        int r = 0;

        list_for_each_entry(dd, devices, list) {
                struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
                char b[BDEVNAME_SIZE];

                if (likely(q))
                        r |= bdi_congested(&q->backing_dev_info, bdi_bits);
                else
                        DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
                                     dm_device_name(t->md),
                                     bdevname(dd->dm_dev->bdev, b));
        }

        list_for_each_entry(cb, &t->target_callbacks, list)
                if (cb->congested_fn)
                        r |= cb->congested_fn(cb, bdi_bits);

        return r;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
        return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);

void dm_table_run_md_queue_async(struct dm_table *t)
{
        struct mapped_device *md;
        struct request_queue *queue;
        unsigned long flags;

        if (!dm_table_request_based(t))
                return;

        md = dm_table_get_md(t);
        queue = dm_get_md_queue(md);
        if (queue) {
                if (queue->mq_ops)
                        blk_mq_run_hw_queues(queue, true);
                else {
                        spin_lock_irqsave(queue->queue_lock, flags);
                        blk_run_queue_async(queue);
                        spin_unlock_irqrestore(queue->queue_lock, flags);
                }
        }
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);

1796