linux/drivers/md/dm.c
   1/*
   2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
   3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm-core.h"
   9#include "dm-rq.h"
  10#include "dm-uevent.h"
  11
  12#include <linux/init.h>
  13#include <linux/module.h>
  14#include <linux/mutex.h>
  15#include <linux/blkpg.h>
  16#include <linux/bio.h>
  17#include <linux/mempool.h>
  18#include <linux/dax.h>
  19#include <linux/slab.h>
  20#include <linux/idr.h>
  21#include <linux/socket.h>
  22#include <linux/hdreg.h>
  23#include <linux/delay.h>
  24#include <linux/wait.h>
  25#include <linux/pr.h>
  26
  27#define DM_MSG_PREFIX "core"
  28
  29/*
  30 * Cookies are numeric values sent with CHANGE and REMOVE
  31 * uevents while resuming, removing or renaming the device.
  32 */
  33#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
  34#define DM_COOKIE_LENGTH 24
  35
  36static const char *_name = DM_NAME;
  37
  38static unsigned int major = 0;
  39static unsigned int _major = 0;
  40
  41static DEFINE_IDR(_minor_idr);
  42
  43static DEFINE_SPINLOCK(_minor_lock);
  44
  45static void do_deferred_remove(struct work_struct *w);
  46
  47static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
  48
  49static struct workqueue_struct *deferred_remove_workqueue;
  50
  51atomic_t dm_global_event_nr = ATOMIC_INIT(0);
  52DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
  53
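/*
 * Note: this signals "some mapped device changed"; anything sleeping on
 * dm_global_eventq (in practice the dm ioctl layer's poll support) is
 * woken and can re-read dm_global_event_nr to see that an event happened.
 */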
  54void dm_issue_global_event(void)
  55{
  56        atomic_inc(&dm_global_event_nr);
  57        wake_up(&dm_global_eventq);
  58}
  59
  60/*
  61 * One of these is allocated per bio.
  62 */
  63struct dm_io {
  64        struct mapped_device *md;
  65        int error;
  66        atomic_t io_count;
  67        struct bio *bio;
  68        unsigned long start_time;
  69        spinlock_t endio_lock;
  70        struct dm_stats_aux stats_aux;
  71};
  72
  73#define MINOR_ALLOCED ((void *)-1)
  74
  75/*
  76 * Bits for the md->flags field.
  77 */
  78#define DMF_BLOCK_IO_FOR_SUSPEND 0
  79#define DMF_SUSPENDED 1
  80#define DMF_FROZEN 2
  81#define DMF_FREEING 3
  82#define DMF_DELETING 4
  83#define DMF_NOFLUSH_SUSPENDING 5
  84#define DMF_MERGE_IS_OPTIONAL 6
  85#define DMF_DEFERRED_REMOVE 7
  86#define DMF_SUSPENDED_INTERNALLY 8
  87
  88#define DM_NUMA_NODE NUMA_NO_NODE
  89static int dm_numa_node = DM_NUMA_NODE;
  90
  91/*
   92 * For mempool pre-allocation at table load time.
  93 */
  94struct dm_md_mempools {
  95        mempool_t *io_pool;
  96        mempool_t *rq_pool;
  97        struct bio_set *bs;
  98};
  99
 100struct table_device {
 101        struct list_head list;
 102        atomic_t count;
 103        struct dm_dev dm_dev;
 104};
 105
 106static struct kmem_cache *_io_cache;
 107static struct kmem_cache *_rq_tio_cache;
 108static struct kmem_cache *_rq_cache;
 109
 110/*
 111 * Bio-based DM's mempools' reserved IOs set by the user.
 112 */
 113#define RESERVED_BIO_BASED_IOS          16
 114static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
 115
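/*
 * Helpers for module parameters that may be rewritten at any time via
 * sysfs: read the current value once, clamp it into the allowed range
 * (or substitute a default when it is zero), and try to store the
 * clamped value back with cmpxchg() so a concurrent update is not
 * silently overwritten.
 */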
 116static int __dm_get_module_param_int(int *module_param, int min, int max)
 117{
 118        int param = ACCESS_ONCE(*module_param);
 119        int modified_param = 0;
 120        bool modified = true;
 121
 122        if (param < min)
 123                modified_param = min;
 124        else if (param > max)
 125                modified_param = max;
 126        else
 127                modified = false;
 128
 129        if (modified) {
 130                (void)cmpxchg(module_param, param, modified_param);
 131                param = modified_param;
 132        }
 133
 134        return param;
 135}
 136
 137unsigned __dm_get_module_param(unsigned *module_param,
 138                               unsigned def, unsigned max)
 139{
 140        unsigned param = ACCESS_ONCE(*module_param);
 141        unsigned modified_param = 0;
 142
 143        if (!param)
 144                modified_param = def;
 145        else if (param > max)
 146                modified_param = max;
 147
 148        if (modified_param) {
 149                (void)cmpxchg(module_param, param, modified_param);
 150                param = modified_param;
 151        }
 152
 153        return param;
 154}
 155
 156unsigned dm_get_reserved_bio_based_ios(void)
 157{
 158        return __dm_get_module_param(&reserved_bio_based_ios,
 159                                     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
 160}
 161EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
 162
 163static unsigned dm_get_numa_node(void)
 164{
 165        return __dm_get_module_param_int(&dm_numa_node,
 166                                         DM_NUMA_NODE, num_online_nodes() - 1);
 167}
 168
 169static int __init local_init(void)
 170{
 171        int r = -ENOMEM;
 172
 173        /* allocate a slab for the dm_ios */
 174        _io_cache = KMEM_CACHE(dm_io, 0);
 175        if (!_io_cache)
 176                return r;
 177
 178        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
 179        if (!_rq_tio_cache)
 180                goto out_free_io_cache;
 181
 182        _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
 183                                      __alignof__(struct request), 0, NULL);
 184        if (!_rq_cache)
 185                goto out_free_rq_tio_cache;
 186
 187        r = dm_uevent_init();
 188        if (r)
 189                goto out_free_rq_cache;
 190
 191        deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
 192        if (!deferred_remove_workqueue) {
 193                r = -ENOMEM;
 194                goto out_uevent_exit;
 195        }
 196
 197        _major = major;
 198        r = register_blkdev(_major, _name);
 199        if (r < 0)
 200                goto out_free_workqueue;
 201
 202        if (!_major)
 203                _major = r;
 204
 205        return 0;
 206
 207out_free_workqueue:
 208        destroy_workqueue(deferred_remove_workqueue);
 209out_uevent_exit:
 210        dm_uevent_exit();
 211out_free_rq_cache:
 212        kmem_cache_destroy(_rq_cache);
 213out_free_rq_tio_cache:
 214        kmem_cache_destroy(_rq_tio_cache);
 215out_free_io_cache:
 216        kmem_cache_destroy(_io_cache);
 217
 218        return r;
 219}
 220
 221static void local_exit(void)
 222{
 223        flush_scheduled_work();
 224        destroy_workqueue(deferred_remove_workqueue);
 225
 226        kmem_cache_destroy(_rq_cache);
 227        kmem_cache_destroy(_rq_tio_cache);
 228        kmem_cache_destroy(_io_cache);
 229        unregister_blkdev(_major, _name);
 230        dm_uevent_exit();
 231
 232        _major = 0;
 233
 234        DMINFO("cleaned up");
 235}
 236
 237static int (*_inits[])(void) __initdata = {
 238        local_init,
 239        dm_target_init,
 240        dm_linear_init,
 241        dm_stripe_init,
 242        dm_io_init,
 243        dm_kcopyd_init,
 244        dm_interface_init,
 245        dm_statistics_init,
 246};
 247
 248static void (*_exits[])(void) = {
 249        local_exit,
 250        dm_target_exit,
 251        dm_linear_exit,
 252        dm_stripe_exit,
 253        dm_io_exit,
 254        dm_kcopyd_exit,
 255        dm_interface_exit,
 256        dm_statistics_exit,
 257};
 258
 259static int __init dm_init(void)
 260{
 261        const int count = ARRAY_SIZE(_inits);
 262
 263        int r, i;
 264
 265        for (i = 0; i < count; i++) {
 266                r = _inits[i]();
 267                if (r)
 268                        goto bad;
 269        }
 270
 271        return 0;
 272
 273      bad:
 274        while (i--)
 275                _exits[i]();
 276
 277        return r;
 278}
 279
 280static void __exit dm_exit(void)
 281{
 282        int i = ARRAY_SIZE(_exits);
 283
 284        while (i--)
 285                _exits[i]();
 286
 287        /*
 288         * Should be empty by this point.
 289         */
 290        idr_destroy(&_minor_idr);
 291}
 292
 293/*
 294 * Block device functions
 295 */
 296int dm_deleting_md(struct mapped_device *md)
 297{
 298        return test_bit(DMF_DELETING, &md->flags);
 299}
 300
 301static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 302{
 303        struct mapped_device *md;
 304
 305        spin_lock(&_minor_lock);
 306
 307        md = bdev->bd_disk->private_data;
 308        if (!md)
 309                goto out;
 310
 311        if (test_bit(DMF_FREEING, &md->flags) ||
 312            dm_deleting_md(md)) {
 313                md = NULL;
 314                goto out;
 315        }
 316
 317        dm_get(md);
 318        atomic_inc(&md->open_count);
 319out:
 320        spin_unlock(&_minor_lock);
 321
 322        return md ? 0 : -ENXIO;
 323}
 324
 325static void dm_blk_close(struct gendisk *disk, fmode_t mode)
 326{
 327        struct mapped_device *md;
 328
 329        spin_lock(&_minor_lock);
 330
 331        md = disk->private_data;
 332        if (WARN_ON(!md))
 333                goto out;
 334
 335        if (atomic_dec_and_test(&md->open_count) &&
 336            (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
 337                queue_work(deferred_remove_workqueue, &deferred_remove_work);
 338
 339        dm_put(md);
 340out:
 341        spin_unlock(&_minor_lock);
 342}
 343
 344int dm_open_count(struct mapped_device *md)
 345{
 346        return atomic_read(&md->open_count);
 347}
 348
 349/*
 350 * Guarantees nothing is using the device before it's deleted.
 351 */
 352int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
 353{
 354        int r = 0;
 355
 356        spin_lock(&_minor_lock);
 357
 358        if (dm_open_count(md)) {
 359                r = -EBUSY;
 360                if (mark_deferred)
 361                        set_bit(DMF_DEFERRED_REMOVE, &md->flags);
 362        } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
 363                r = -EEXIST;
 364        else
 365                set_bit(DMF_DELETING, &md->flags);
 366
 367        spin_unlock(&_minor_lock);
 368
 369        return r;
 370}
 371
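/*
 * Back out a deferred remove request, unless deletion of the device has
 * already been committed (in which case -EBUSY is returned).
 */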
 372int dm_cancel_deferred_remove(struct mapped_device *md)
 373{
 374        int r = 0;
 375
 376        spin_lock(&_minor_lock);
 377
 378        if (test_bit(DMF_DELETING, &md->flags))
 379                r = -EBUSY;
 380        else
 381                clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
 382
 383        spin_unlock(&_minor_lock);
 384
 385        return r;
 386}
 387
 388static void do_deferred_remove(struct work_struct *w)
 389{
 390        dm_deferred_remove();
 391}
 392
 393sector_t dm_get_size(struct mapped_device *md)
 394{
 395        return get_capacity(md->disk);
 396}
 397
 398struct request_queue *dm_get_md_queue(struct mapped_device *md)
 399{
 400        return md->queue;
 401}
 402
 403struct dm_stats *dm_get_stats(struct mapped_device *md)
 404{
 405        return &md->stats;
 406}
 407
 408static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 409{
 410        struct mapped_device *md = bdev->bd_disk->private_data;
 411
 412        return dm_get_geometry(md, geo);
 413}
 414
 415static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
 416                            struct block_device **bdev)
 417        __acquires(md->io_barrier)
 418{
 419        struct dm_target *tgt;
 420        struct dm_table *map;
 421        int r;
 422
 423retry:
 424        r = -ENOTTY;
 425        map = dm_get_live_table(md, srcu_idx);
 426        if (!map || !dm_table_get_size(map))
 427                return r;
 428
 429        /* We only support devices that have a single target */
 430        if (dm_table_get_num_targets(map) != 1)
 431                return r;
 432
 433        tgt = dm_table_get_target(map, 0);
 434        if (!tgt->type->prepare_ioctl)
 435                return r;
 436
 437        if (dm_suspended_md(md))
 438                return -EAGAIN;
 439
 440        r = tgt->type->prepare_ioctl(tgt, bdev);
 441        if (r == -ENOTCONN && !fatal_signal_pending(current)) {
 442                dm_put_live_table(md, *srcu_idx);
 443                msleep(10);
 444                goto retry;
 445        }
 446
 447        return r;
 448}
 449
 450static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
 451        __releases(md->io_barrier)
 452{
 453        dm_put_live_table(md, srcu_idx);
 454}
 455
 456static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 457                        unsigned int cmd, unsigned long arg)
 458{
 459        struct mapped_device *md = bdev->bd_disk->private_data;
 460        int r, srcu_idx;
 461
 462        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
 463        if (r < 0)
 464                goto out;
 465
 466        if (r > 0) {
 467                /*
 468                 * Target determined this ioctl is being issued against
 469                 * a logical partition of the parent bdev; so extra
 470                 * validation is needed.
 471                 */
 472                r = scsi_verify_blk_ioctl(NULL, cmd);
 473                if (r)
 474                        goto out;
 475        }
 476
  477        r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 478out:
 479        dm_unprepare_ioctl(md, srcu_idx);
 480        return r;
 481}
 482
 483static struct dm_io *alloc_io(struct mapped_device *md)
 484{
 485        return mempool_alloc(md->io_pool, GFP_NOIO);
 486}
 487
 488static void free_io(struct mapped_device *md, struct dm_io *io)
 489{
 490        mempool_free(io, md->io_pool);
 491}
 492
 493static void free_tio(struct dm_target_io *tio)
 494{
 495        bio_put(&tio->clone);
 496}
 497
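/*
 * Number of I/Os the core is tracking as in flight on this device
 * (reads plus writes); consulted when waiting for outstanding I/O
 * to drain.
 */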
 498int md_in_flight(struct mapped_device *md)
 499{
 500        return atomic_read(&md->pending[READ]) +
 501               atomic_read(&md->pending[WRITE]);
 502}
 503
 504static void start_io_acct(struct dm_io *io)
 505{
 506        struct mapped_device *md = io->md;
 507        struct bio *bio = io->bio;
 508        int cpu;
 509        int rw = bio_data_dir(bio);
 510
 511        io->start_time = jiffies;
 512
 513        cpu = part_stat_lock();
 514        part_round_stats(md->queue, cpu, &dm_disk(md)->part0);
 515        part_stat_unlock();
 516        atomic_set(&dm_disk(md)->part0.in_flight[rw],
 517                atomic_inc_return(&md->pending[rw]));
 518
 519        if (unlikely(dm_stats_used(&md->stats)))
 520                dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
 521                                    bio_sectors(bio), false, 0, &io->stats_aux);
 522}
 523
 524static void end_io_acct(struct dm_io *io)
 525{
 526        struct mapped_device *md = io->md;
 527        struct bio *bio = io->bio;
 528        unsigned long duration = jiffies - io->start_time;
 529        int pending, cpu;
 530        int rw = bio_data_dir(bio);
 531
 532        cpu = part_stat_lock();
 533        part_round_stats(md->queue, cpu, &dm_disk(md)->part0);
 534        part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
 535        part_stat_unlock();
 536
 537        if (unlikely(dm_stats_used(&md->stats)))
 538                dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
 539                                    bio_sectors(bio), true, duration, &io->stats_aux);
 540
 541        /*
 542         * After this is decremented the bio must not be touched if it is
 543         * a flush.
 544         */
 545        pending = atomic_dec_return(&md->pending[rw]);
 546        atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
 547        pending += atomic_read(&md->pending[rw^0x1]);
 548
 549        /* nudge anyone waiting on suspend queue */
 550        if (!pending)
 551                wake_up(&md->wait);
 552}
 553
 554/*
 555 * Add the bio to the list of deferred io.
 556 */
 557static void queue_io(struct mapped_device *md, struct bio *bio)
 558{
 559        unsigned long flags;
 560
 561        spin_lock_irqsave(&md->deferred_lock, flags);
 562        bio_list_add(&md->deferred, bio);
 563        spin_unlock_irqrestore(&md->deferred_lock, flags);
 564        queue_work(md->wq, &md->work);
 565}
 566
 567/*
  568 * Everyone (including functions in this file) should use this
 569 * function to access the md->map field, and make sure they call
 570 * dm_put_live_table() when finished.
 571 */
 572struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
 573{
 574        *srcu_idx = srcu_read_lock(&md->io_barrier);
 575
 576        return srcu_dereference(md->map, &md->io_barrier);
 577}
 578
 579void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
 580{
 581        srcu_read_unlock(&md->io_barrier, srcu_idx);
 582}
 583
 584void dm_sync_table(struct mapped_device *md)
 585{
 586        synchronize_srcu(&md->io_barrier);
 587        synchronize_rcu_expedited();
 588}
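
/*
 * Typical caller pattern for the accessors above (illustrative sketch,
 * as in dm_make_request() later in this file):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... inspect or map against the live table ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */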
 589
 590/*
 591 * A fast alternative to dm_get_live_table/dm_put_live_table.
 592 * The caller must not block between these two functions.
 593 */
 594static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
 595{
 596        rcu_read_lock();
 597        return rcu_dereference(md->map);
 598}
 599
 600static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 601{
 602        rcu_read_unlock();
 603}
 604
 605static char *_dm_claim_ptr = "I belong to device-mapper";
 606
 607/*
 608 * Open a table device so we can use it as a map destination.
 609 */
 610static int open_table_device(struct table_device *td, dev_t dev,
 611                             struct mapped_device *md)
 612{
 613        struct block_device *bdev;
 614
 615        int r;
 616
 617        BUG_ON(td->dm_dev.bdev);
 618
 619        bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
 620        if (IS_ERR(bdev))
 621                return PTR_ERR(bdev);
 622
 623        r = bd_link_disk_holder(bdev, dm_disk(md));
 624        if (r) {
 625                blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
 626                return r;
 627        }
 628
 629        td->dm_dev.bdev = bdev;
 630        td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
 631        return 0;
 632}
 633
 634/*
 635 * Close a table device that we've been using.
 636 */
 637static void close_table_device(struct table_device *td, struct mapped_device *md)
 638{
 639        if (!td->dm_dev.bdev)
 640                return;
 641
 642        bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
 643        blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
 644        put_dax(td->dm_dev.dax_dev);
 645        td->dm_dev.bdev = NULL;
 646        td->dm_dev.dax_dev = NULL;
 647}
 648
 649static struct table_device *find_table_device(struct list_head *l, dev_t dev,
 650                                              fmode_t mode) {
 651        struct table_device *td;
 652
 653        list_for_each_entry(td, l, list)
 654                if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
 655                        return td;
 656
 657        return NULL;
 658}
 659
 660int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
 661                        struct dm_dev **result) {
 662        int r;
 663        struct table_device *td;
 664
 665        mutex_lock(&md->table_devices_lock);
 666        td = find_table_device(&md->table_devices, dev, mode);
 667        if (!td) {
 668                td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
 669                if (!td) {
 670                        mutex_unlock(&md->table_devices_lock);
 671                        return -ENOMEM;
 672                }
 673
 674                td->dm_dev.mode = mode;
 675                td->dm_dev.bdev = NULL;
 676
 677                if ((r = open_table_device(td, dev, md))) {
 678                        mutex_unlock(&md->table_devices_lock);
 679                        kfree(td);
 680                        return r;
 681                }
 682
 683                format_dev_t(td->dm_dev.name, dev);
 684
 685                atomic_set(&td->count, 0);
 686                list_add(&td->list, &md->table_devices);
 687        }
 688        atomic_inc(&td->count);
 689        mutex_unlock(&md->table_devices_lock);
 690
 691        *result = &td->dm_dev;
 692        return 0;
 693}
 694EXPORT_SYMBOL_GPL(dm_get_table_device);
 695
 696void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
 697{
 698        struct table_device *td = container_of(d, struct table_device, dm_dev);
 699
 700        mutex_lock(&md->table_devices_lock);
 701        if (atomic_dec_and_test(&td->count)) {
 702                close_table_device(td, md);
 703                list_del(&td->list);
 704                kfree(td);
 705        }
 706        mutex_unlock(&md->table_devices_lock);
 707}
 708EXPORT_SYMBOL(dm_put_table_device);
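
/*
 * Minimal sketch of the expected pairing (in the tree these are normally
 * reached via dm_get_device()/dm_put_device() in dm-table.c):
 *
 *	struct dm_dev *ddev;
 *	int r = dm_get_table_device(md, dev, mode, &ddev);
 *
 *	if (r)
 *		return r;
 *	... use ddev->bdev / ddev->dax_dev ...
 *	dm_put_table_device(md, ddev);
 */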
 709
 710static void free_table_devices(struct list_head *devices)
 711{
 712        struct list_head *tmp, *next;
 713
 714        list_for_each_safe(tmp, next, devices) {
 715                struct table_device *td = list_entry(tmp, struct table_device, list);
 716
 717                DMWARN("dm_destroy: %s still exists with %d references",
 718                       td->dm_dev.name, atomic_read(&td->count));
 719                kfree(td);
 720        }
 721}
 722
 723/*
 724 * Get the geometry associated with a dm device
 725 */
 726int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
 727{
 728        *geo = md->geometry;
 729
 730        return 0;
 731}
 732
 733/*
 734 * Set the geometry of a device.
 735 */
 736int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
 737{
 738        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
 739
 740        if (geo->start > sz) {
 741                DMWARN("Start sector is beyond the geometry limits.");
 742                return -EINVAL;
 743        }
 744
 745        md->geometry = *geo;
 746
 747        return 0;
 748}
 749
 750/*-----------------------------------------------------------------
 751 * CRUD START:
 752 *   A more elegant soln is in the works that uses the queue
 753 *   merge fn, unfortunately there are a couple of changes to
 754 *   the block layer that I want to make for this.  So in the
 755 *   interests of getting something for people to use I give
 756 *   you this clearly demarcated crap.
 757 *---------------------------------------------------------------*/
 758
 759static int __noflush_suspending(struct mapped_device *md)
 760{
 761        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
 762}
 763
 764/*
 765 * Decrements the number of outstanding ios that a bio has been
  766 * cloned into, completing the original io if necessary.
 767 */
 768static void dec_pending(struct dm_io *io, int error)
 769{
 770        unsigned long flags;
 771        int io_error;
 772        struct bio *bio;
 773        struct mapped_device *md = io->md;
 774
 775        /* Push-back supersedes any I/O errors */
 776        if (unlikely(error)) {
 777                spin_lock_irqsave(&io->endio_lock, flags);
 778                if (!(io->error > 0 && __noflush_suspending(md)))
 779                        io->error = error;
 780                spin_unlock_irqrestore(&io->endio_lock, flags);
 781        }
 782
 783        if (atomic_dec_and_test(&io->io_count)) {
 784                if (io->error == DM_ENDIO_REQUEUE) {
 785                        /*
 786                         * Target requested pushing back the I/O.
 787                         */
 788                        spin_lock_irqsave(&md->deferred_lock, flags);
 789                        if (__noflush_suspending(md))
 790                                bio_list_add_head(&md->deferred, io->bio);
 791                        else
 792                                /* noflush suspend was interrupted. */
 793                                io->error = -EIO;
 794                        spin_unlock_irqrestore(&md->deferred_lock, flags);
 795                }
 796
 797                io_error = io->error;
 798                bio = io->bio;
 799                end_io_acct(io);
 800                free_io(md, io);
 801
 802                if (io_error == DM_ENDIO_REQUEUE)
 803                        return;
 804
 805                if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
 806                        /*
 807                         * Preflush done for flush with data, reissue
 808                         * without REQ_FLUSH.
 809                         */
 810                        bio->bi_rw &= ~REQ_FLUSH;
 811                        queue_io(md, bio);
 812                } else {
 813                        /* done with normal IO or empty flush */
 814                        trace_block_bio_complete(md->queue, bio, io_error);
 815                        bio_endio(bio, io_error);
 816                }
 817        }
 818}
 819
 820void disable_write_same(struct mapped_device *md)
 821{
 822        struct queue_limits *limits = dm_get_queue_limits(md);
 823
 824        /* device doesn't really support WRITE SAME, disable it */
 825        limits->max_write_same_sectors = 0;
 826}
 827
 828static void clone_endio(struct bio *bio, int error)
 829{
 830        int r = error;
 831        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 832        struct dm_io *io = tio->io;
 833        struct mapped_device *md = tio->io->md;
 834        dm_endio_fn endio = tio->ti->type->end_io;
 835
 836        if (!bio_flagged(bio, BIO_UPTODATE) && !error)
 837                error = -EIO;
 838
 839        if (endio) {
 840                r = endio(tio->ti, bio, error);
 841                if (r < 0 || r == DM_ENDIO_REQUEUE)
 842                        /*
 843                         * error and requeue request are handled
 844                         * in dec_pending().
 845                         */
 846                        error = r;
 847                else if (r == DM_ENDIO_INCOMPLETE)
 848                        /* The target will handle the io */
 849                        return;
 850                else if (r) {
 851                        DMWARN("unimplemented target endio return value: %d", r);
 852                        BUG();
 853                }
 854        }
 855
 856        if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
 857                     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
 858                disable_write_same(md);
 859
 860        free_tio(tio);
 861        dec_pending(io, error);
 862}
 863
 864/*
 865 * Return maximum size of I/O possible at the supplied sector up to the current
 866 * target boundary.
 867 */
 868static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
 869{
 870        sector_t target_offset = dm_target_offset(ti, sector);
 871
 872        return ti->len - target_offset;
 873}
 874
 875static sector_t max_io_len(sector_t sector, struct dm_target *ti)
 876{
 877        sector_t len = max_io_len_target_boundary(sector, ti);
 878        sector_t offset, max_len;
 879
 880        /*
 881         * Does the target need to split even further?
 882         */
 883        if (ti->max_io_len) {
 884                offset = dm_target_offset(ti, sector);
 885                if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
 886                        max_len = sector_div(offset, ti->max_io_len);
 887                else
 888                        max_len = offset & (ti->max_io_len - 1);
 889                max_len = ti->max_io_len - max_len;
 890
 891                if (len > max_len)
 892                        len = max_len;
 893        }
 894
 895        return len;
 896}
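
/*
 * Worked example for the power-of-two branch above: with
 * ti->max_io_len == 8 and a target-relative offset of 13,
 * 13 & 7 == 5, so 8 - 5 == 3 sectors remain before the next
 * ti->max_io_len boundary and len is capped at 3.
 */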
 897
 898int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 899{
 900        if (len > UINT_MAX) {
 901                DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
 902                      (unsigned long long)len, UINT_MAX);
 903                ti->error = "Maximum size of target IO is too large";
 904                return -EINVAL;
 905        }
 906
 907        ti->max_io_len = (uint32_t) len;
 908
 909        return 0;
 910}
 911EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 912
 913static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
 914                sector_t sector, int *srcu_idx)
 915{
 916        struct dm_table *map;
 917        struct dm_target *ti;
 918
 919        map = dm_get_live_table(md, srcu_idx);
 920        if (!map)
 921                return NULL;
 922
 923        ti = dm_table_find_target(map, sector);
 924        if (!dm_target_is_valid(ti))
 925                return NULL;
 926
 927        return ti;
 928}
 929
 930static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 931                long nr_pages, void **kaddr, pfn_t *pfn)
 932{
 933        struct mapped_device *md = dax_get_private(dax_dev);
 934        sector_t sector = pgoff * PAGE_SECTORS;
 935        struct dm_target *ti;
 936        long len, ret = -EIO;
 937        int srcu_idx;
 938
 939        ti = dm_dax_get_live_target(md, sector, &srcu_idx);
 940
 941        if (!ti)
 942                goto out;
 943        if (!ti->type->direct_access)
 944                goto out;
 945        len = max_io_len(sector, ti) / PAGE_SECTORS;
 946        if (len < 1)
 947                goto out;
 948        nr_pages = min(len, nr_pages);
 949        ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
 950
 951 out:
 952        dm_put_live_table(md, srcu_idx);
 953
 954        return ret;
 955}
 956
 957static int dm_dax_memcpy_fromiovecend(struct dax_device *dax_dev, pgoff_t pgoff,
 958                                      void *addr, const struct iovec *iov,
 959                                      int offset, int len)
 960{
 961        struct mapped_device *md = dax_get_private(dax_dev);
 962        sector_t sector = pgoff * PAGE_SECTORS;
 963        struct dm_target *ti;
 964        long ret = 0;
 965        int srcu_idx;
 966
 967        ti = dm_dax_get_live_target(md, sector, &srcu_idx);
 968
 969        if (!ti)
 970                goto out;
 971        if (!ti->type->dax_memcpy_fromiovecend) {
 972                ret = memcpy_fromiovecend_partial_flushcache(addr, iov,
 973                                                             offset, len);
 974                goto out;
 975        }
 976        ret = ti->type->dax_memcpy_fromiovecend(ti, pgoff, addr,
 977                                                iov, offset, len);
 978 out:
 979        dm_put_live_table(md, srcu_idx);
 980
 981        return ret;
 982}
 983
 984static int dm_dax_memcpy_toiovecend(struct dax_device *dax_dev, pgoff_t pgoff,
 985                const struct iovec *iov, void *addr, int offset, int len)
 986{
 987        struct mapped_device *md = dax_get_private(dax_dev);
 988        sector_t sector = pgoff * PAGE_SECTORS;
 989        struct dm_target *ti;
 990        long ret = 0;
 991        int srcu_idx;
 992
 993        ti = dm_dax_get_live_target(md, sector, &srcu_idx);
 994
 995        if (!ti)
 996                goto out;
 997        if (!ti->type->dax_memcpy_toiovecend) {
 998                ret = memcpy_toiovecend_partial(iov, addr, offset, len);
 999                goto out;
1000        }
1001        ret = ti->type->dax_memcpy_toiovecend(ti, pgoff,
1002                                              iov, addr, offset, len);
1003 out:
1004        dm_put_live_table(md, srcu_idx);
1005
1006        return ret;
1007}
1008
1009/*
1010 * Flush current->bio_list when the target map method blocks.
1011 * This fixes deadlocks in snapshot and possibly in other targets.
1012 */
1013struct dm_offload {
1014        struct blk_plug plug;
1015        struct blk_plug_cb cb;
1016};
1017
1018static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
1019{
1020        struct dm_offload *o = container_of(cb, struct dm_offload, cb);
1021        struct bio_list list;
1022        struct bio *bio;
1023        int i;
1024
1025        INIT_LIST_HEAD(&o->cb.list);
1026
1027        if (unlikely(!current->bio_list))
1028                return;
1029
1030        for (i = 0; i < 2; i++) {
1031                list = current->bio_list[i];
1032                bio_list_init(&current->bio_list[i]);
1033
1034                while ((bio = bio_list_pop(&list))) {
1035                        struct bio_set *bs = bio->bi_pool;
1036                        if (unlikely(!bs) || bs == fs_bio_set) {
1037                                bio_list_add(&current->bio_list[i], bio);
1038                                continue;
1039                        }
1040
1041                        spin_lock(&bs->rescue_lock);
1042                        bio_list_add(&bs->rescue_list, bio);
1043                        queue_work(bs->rescue_workqueue, &bs->rescue_work);
1044                        spin_unlock(&bs->rescue_lock);
1045                }
1046        }
1047}
1048
1049static void dm_offload_start(struct dm_offload *o)
1050{
1051        blk_start_plug(&o->plug);
1052        o->cb.callback = flush_current_bio_list;
1053        list_add(&o->cb.list, &current->plug->cb_list);
1054}
1055
1056static void dm_offload_end(struct dm_offload *o)
1057{
1058        list_del(&o->cb.list);
1059        blk_finish_plug(&o->plug);
1060}
1061
1062static void __map_bio(struct dm_target_io *tio)
1063{
1064        int r;
1065        sector_t sector;
1066        struct dm_offload o;
1067        struct bio *clone = &tio->clone;
1068        struct dm_target *ti = tio->ti;
1069
1070        clone->bi_end_io = clone_endio;
1071
1072        /*
1073         * Map the clone.  If r == 0 we don't need to do
1074         * anything, the target has assumed ownership of
1075         * this io.
1076         */
1077        atomic_inc(&tio->io->io_count);
1078        sector = clone->bi_sector;
1079
1080        dm_offload_start(&o);
1081        r = ti->type->map(ti, clone);
1082        dm_offload_end(&o);
1083
1084        if (r == DM_MAPIO_REMAPPED) {
1085                /* the bio has been remapped so dispatch it */
1086
1087                trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1088                                      tio->io->bio->bi_bdev->bd_dev, sector);
1089
1090                generic_make_request(clone);
1091        } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1092                /* error the io and bail out, or requeue it if needed */
1093                dec_pending(tio->io, r);
1094                free_tio(tio);
1095        } else if (r != DM_MAPIO_SUBMITTED) {
1096                DMWARN("unimplemented target map return value: %d", r);
1097                BUG();
1098        }
1099}
1100
1101struct clone_info {
1102        struct mapped_device *md;
1103        struct dm_table *map;
1104        struct bio *bio;
1105        struct dm_io *io;
1106        sector_t sector;
1107        sector_t sector_count;
1108        unsigned short idx;
1109};
1110
1111static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
1112{
1113        bio->bi_sector = sector;
1114        bio->bi_size = to_bytes(len);
1115}
1116
1117static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
1118{
1119        bio->bi_idx = idx;
1120        bio->bi_vcnt = idx + bv_count;
1121        bio->bi_flags &= ~(1 << BIO_SEG_VALID);
1122}
1123
1124static int clone_bio_integrity(struct bio *bio, struct bio *clone,
1125                               unsigned short idx, unsigned len, unsigned offset,
1126                               bool trim)
1127{
1128        int r;
1129
1130        r = bio_integrity_clone(clone, bio, GFP_NOIO);
1131        if (r < 0)
1132                return r;
1133
1134        if (trim)
1135                bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
1136
1137        return 0;
1138}
1139
1140/*
1141 * Creates a little bio that just does part of a bvec.
1142 */
1143static int clone_split_bio(struct dm_target_io *tio, struct bio *bio,
1144                           sector_t sector, unsigned short idx,
1145                           unsigned offset, unsigned len)
1146{
1147        struct bio *clone = &tio->clone;
1148        struct bio_vec *bv = bio->bi_io_vec + idx;
1149
1150        *clone->bi_io_vec = *bv;
1151
1152        bio_setup_sector(clone, sector, len);
1153
1154        clone->bi_bdev = bio->bi_bdev;
1155        clone->bi_rw = bio->bi_rw;
1156        clone->bi_vcnt = 1;
1157        clone->bi_io_vec->bv_offset = offset;
1158        clone->bi_io_vec->bv_len = clone->bi_size;
1159        clone->bi_flags |= 1 << BIO_CLONED;
1160
1161        if (bio_integrity(bio)) {
1162                int r = clone_bio_integrity(bio, clone, idx, len, offset, true);
1163                if (r < 0)
1164                        return r;
1165        }
1166
1167        return 0;
1168}
1169
1170/*
 1171 * Creates a bio that consists of a range of complete bvecs.
1172 */
1173static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1174                     sector_t sector, unsigned short idx,
1175                     unsigned short bv_count, unsigned len)
1176{
1177        struct bio *clone = &tio->clone;
1178
1179        __bio_clone(clone, bio);
1180        bio_setup_sector(clone, sector, len);
1181        bio_setup_bv(clone, idx, bv_count);
1182
1183        if (bio_integrity(bio)) {
1184                int r;
1185                bool trim = false;
1186
1187                if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
1188                        trim = true;
1189                r = clone_bio_integrity(bio, clone, idx, len, 0, trim);
1190                if (r < 0)
1191                        return r;
1192        }
1193
1194        return 0;
1195}
1196
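/*
 * Allocate a clone bio from the md's bio_set and recover the enclosing
 * dm_target_io with container_of(); the tio is initialised with the
 * owning dm_io, the target and the target bio number.
 */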
1197static struct dm_target_io *alloc_tio(struct clone_info *ci,
1198                                      struct dm_target *ti, int nr_iovecs,
1199                                      unsigned target_bio_nr)
1200{
1201        struct dm_target_io *tio;
1202        struct bio *clone;
1203
1204        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
1205        tio = container_of(clone, struct dm_target_io, clone);
1206
1207        tio->io = ci->io;
1208        tio->ti = ti;
1209        tio->target_bio_nr = target_bio_nr;
1210
1211        return tio;
1212}
1213
1214static void __clone_and_map_simple_bio(struct clone_info *ci,
1215                                       struct dm_target *ti,
1216                                       unsigned target_bio_nr, sector_t len)
1217{
1218        struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
1219        struct bio *clone = &tio->clone;
1220
1221        /*
1222         * Discard requests require the bio's inline iovecs be initialized.
1223         * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1224         * and discard, so no need for concern about wasted bvec allocations.
1225         */
 1226        __bio_clone(clone, ci->bio);
1227        if (len)
1228                bio_setup_sector(clone, ci->sector, len);
1229
1230        __map_bio(tio);
1231}
1232
1233static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1234                                  unsigned num_bios, sector_t len)
1235{
1236        unsigned target_bio_nr;
1237
1238        for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
1239                __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
1240}
1241
1242static int __send_empty_flush(struct clone_info *ci)
1243{
1244        unsigned target_nr = 0;
1245        struct dm_target *ti;
1246
1247        BUG_ON(bio_has_data(ci->bio));
1248        while ((ti = dm_table_get_target(ci->map, target_nr++)))
1249                __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
1250
1251        return 0;
1252}
1253
1254static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1255                                    sector_t sector, int nr_iovecs,
1256                                    unsigned short idx, unsigned short bv_count,
1257                                    unsigned offset, unsigned len,
1258                                    bool split_bvec)
1259{
1260        struct bio *bio = ci->bio;
1261        struct dm_target_io *tio;
1262        unsigned target_bio_nr;
1263        unsigned num_target_bios = 1;
1264        int r = 0;
1265
1266        /*
1267         * Does the target want to receive duplicate copies of the bio?
1268         */
1269        if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1270                num_target_bios = ti->num_write_bios(ti, bio);
1271
1272        for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1273                tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
1274                if (split_bvec)
1275                        r = clone_split_bio(tio, bio, sector, idx, offset, len);
1276                else
1277                        r = clone_bio(tio, bio, sector, idx, bv_count, len);
1278                if (r < 0) {
1279                        free_tio(tio);
1280                        break;
1281                }
1282                __map_bio(tio);
1283        }
1284
1285        return r;
1286}
1287
1288typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
1289
1290static unsigned get_num_discard_bios(struct dm_target *ti)
1291{
1292        return ti->num_discard_bios;
1293}
1294
1295static unsigned get_num_write_same_bios(struct dm_target *ti)
1296{
1297        return ti->num_write_same_bios;
1298}
1299
1300typedef bool (*is_split_required_fn)(struct dm_target *ti);
1301
1302static bool is_split_required_for_discard(struct dm_target *ti)
1303{
1304        return ti->split_discard_bios;
1305}
1306
1307static int __send_changing_extent_only(struct clone_info *ci,
1308                                       get_num_bios_fn get_num_bios,
1309                                       is_split_required_fn is_split_required)
1310{
1311        struct dm_target *ti;
1312        sector_t len;
1313        unsigned num_bios;
1314
1315        do {
1316                ti = dm_table_find_target(ci->map, ci->sector);
1317                if (!dm_target_is_valid(ti))
1318                        return -EIO;
1319
1320                /*
1321                 * Even though the device advertised support for this type of
1322                 * request, that does not mean every target supports it, and
1323                 * reconfiguration might also have changed that since the
1324                 * check was performed.
1325                 */
1326                num_bios = get_num_bios ? get_num_bios(ti) : 0;
1327                if (!num_bios)
1328                        return -EOPNOTSUPP;
1329
1330                if (is_split_required && !is_split_required(ti))
1331                        len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1332                else
1333                        len = min(ci->sector_count, max_io_len(ci->sector, ti));
1334
1335                __send_duplicate_bios(ci, ti, num_bios, len);
1336
1337                ci->sector += len;
1338        } while (ci->sector_count -= len);
1339
1340        return 0;
1341}
1342
1343static int __send_discard(struct clone_info *ci)
1344{
1345        return __send_changing_extent_only(ci, get_num_discard_bios,
1346                                           is_split_required_for_discard);
1347}
1348
1349static int __send_write_same(struct clone_info *ci)
1350{
1351        return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
1352}
1353
1354/*
1355 * Find maximum number of sectors / bvecs we can process with a single bio.
1356 */
1357static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
1358{
1359        struct bio *bio = ci->bio;
1360        sector_t bv_len, total_len = 0;
1361
1362        for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
1363                bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
1364
1365                if (bv_len > max)
1366                        break;
1367
1368                max -= bv_len;
1369                total_len += bv_len;
1370        }
1371
1372        return total_len;
1373}
1374
1375static int __split_bvec_across_targets(struct clone_info *ci,
1376                                       struct dm_target *ti, sector_t max)
1377{
1378        struct bio *bio = ci->bio;
1379        struct bio_vec *bv = bio->bi_io_vec + ci->idx;
1380        sector_t remaining = to_sector(bv->bv_len);
1381        unsigned offset = 0;
1382        sector_t len;
1383        int r;
1384
1385        do {
1386                if (offset) {
1387                        ti = dm_table_find_target(ci->map, ci->sector);
1388                        if (!dm_target_is_valid(ti))
1389                                return -EIO;
1390
1391                        max = max_io_len(ci->sector, ti);
1392                }
1393
1394                len = min(remaining, max);
1395
1396                r = __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
1397                                             bv->bv_offset + offset, len, true);
1398                if (r < 0)
1399                        return r;
1400
1401                ci->sector += len;
1402                ci->sector_count -= len;
1403                offset += to_bytes(len);
1404        } while (remaining -= len);
1405
1406        ci->idx++;
1407
1408        return 0;
1409}
1410
1411/*
1412 * Select the correct strategy for processing a non-flush bio.
1413 */
1414static int __split_and_process_non_flush(struct clone_info *ci)
1415{
1416        struct bio *bio = ci->bio;
1417        struct dm_target *ti;
1418        sector_t len, max;
1419        int idx;
1420        int r;
1421
1422        if (unlikely(bio->bi_rw & REQ_DISCARD))
1423                return __send_discard(ci);
1424        else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
1425                return __send_write_same(ci);
1426
1427        ti = dm_table_find_target(ci->map, ci->sector);
1428        if (!dm_target_is_valid(ti))
1429                return -EIO;
1430
1431        max = max_io_len(ci->sector, ti);
1432
1433        /*
1434         * Optimise for the simple case where we can do all of
1435         * the remaining io with a single clone.
1436         */
1437        if (ci->sector_count <= max) {
1438                r = __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
1439                                             ci->idx, bio->bi_vcnt - ci->idx, 0,
1440                                             ci->sector_count, false);
1441                if (r < 0)
1442                        return r;
1443
1444                ci->sector_count = 0;
1445                return 0;
1446        }
1447
1448        /*
1449         * There are some bvecs that don't span targets.
1450         * Do as many of these as possible.
1451         */
1452        if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
1453                len = __len_within_target(ci, max, &idx);
1454
1455                r = __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
1456                                             ci->idx, idx - ci->idx, 0, len, false);
1457                if (r < 0)
1458                        return r;
1459
1460                ci->sector += len;
1461                ci->sector_count -= len;
1462                ci->idx = idx;
1463
1464                return 0;
1465        }
1466
1467        /*
1468         * Handle a bvec that must be split between two or more targets.
1469         */
1470        return __split_bvec_across_targets(ci, ti, max);
1471}
1472
1473/*
1474 * Entry point to split a bio into clones and submit them to the targets.
1475 */
1476static void __split_and_process_bio(struct mapped_device *md,
1477                                    struct dm_table *map, struct bio *bio)
1478{
1479        struct clone_info ci;
1480        int error = 0;
1481
1482        if (unlikely(!map)) {
1483                bio_io_error(bio);
1484                return;
1485        }
1486
1487        ci.map = map;
1488        ci.md = md;
1489        ci.io = alloc_io(md);
1490        ci.io->error = 0;
1491        atomic_set(&ci.io->io_count, 1);
1492        ci.io->bio = bio;
1493        ci.io->md = md;
1494        spin_lock_init(&ci.io->endio_lock);
1495        ci.sector = bio->bi_sector;
1496        ci.idx = bio->bi_idx;
1497
1498        start_io_acct(ci.io);
1499
1500        if (bio->bi_rw & REQ_FLUSH) {
1501                ci.bio = &ci.md->flush_bio;
1502                ci.sector_count = 0;
1503                error = __send_empty_flush(&ci);
1504                /* dec_pending submits any data associated with flush */
1505        } else {
1506                ci.bio = bio;
1507                ci.sector_count = bio_sectors(bio);
1508                while (ci.sector_count && !error)
1509                        error = __split_and_process_non_flush(&ci);
1510        }
1511
1512        /* drop the extra reference count */
1513        dec_pending(ci.io, error);
1514}
1515/*-----------------------------------------------------------------
1516 * CRUD END
1517 *---------------------------------------------------------------*/
1518
1519static int dm_merge_bvec(struct request_queue *q,
1520                         struct bvec_merge_data *bvm,
1521                         struct bio_vec *biovec)
1522{
1523        struct mapped_device *md = q->queuedata;
1524        struct dm_table *map = dm_get_live_table_fast(md);
1525        struct dm_target *ti;
1526        sector_t max_sectors;
1527        int max_size = 0;
1528
1529        if (unlikely(!map))
1530                goto out;
1531
1532        ti = dm_table_find_target(map, bvm->bi_sector);
1533        if (!dm_target_is_valid(ti))
1534                goto out;
1535
1536        /*
1537         * Find maximum amount of I/O that won't need splitting
1538         */
1539        max_sectors = min(max_io_len(bvm->bi_sector, ti),
1540                          (sector_t) BIO_MAX_SECTORS);
1541        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1542        if (max_size < 0)
1543                max_size = 0;
1544
1545        /*
1546         * merge_bvec_fn() returns number of bytes
1547         * it can accept at this offset
1548         * max is precomputed maximal io size
1549         */
1550        if (max_size && ti->type->merge)
1551                max_size = ti->type->merge(ti, bvm, biovec, max_size);
1552        /*
 1553 * If the target doesn't support the merge method and some of the devices
1554         * provided their merge_bvec method (we know this by looking at
1555         * queue_max_hw_sectors), then we can't allow bios with multiple vector
1556         * entries.  So always set max_size to 0, and the code below allows
1557         * just one page.
1558         */
1559        else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1560                max_size = 0;
1561
1562out:
1563        dm_put_live_table_fast(md);
1564        /*
1565         * Always allow an entire first page
1566         */
1567        if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1568                max_size = biovec->bv_len;
1569
1570        return max_size;
1571}
1572
1573/*
1574 * The request function that just remaps the bio built up by
1575 * dm_merge_bvec.
1576 */
1577static void dm_make_request(struct request_queue *q, struct bio *bio)
1578{
1579        int rw = bio_data_dir(bio);
1580        struct mapped_device *md = q->queuedata;
1581        int cpu;
1582        int srcu_idx;
1583        struct dm_table *map;
1584
1585        map = dm_get_live_table(md, &srcu_idx);
1586
1587        cpu = part_stat_lock();
1588        part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1589        part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1590        part_stat_unlock();
1591
1592        /* if we're suspended, we have to queue this io for later */
1593        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1594                dm_put_live_table(md, srcu_idx);
1595
1596                if (bio_rw(bio) != READA)
1597                        queue_io(md, bio);
1598                else
1599                        bio_io_error(bio);
1600                return;
1601        }
1602
1603        __split_and_process_bio(md, map, bio);
1604        dm_put_live_table(md, srcu_idx);
1605        return;
1606}
1607
1608static int dm_any_congested(void *congested_data, int bdi_bits)
1609{
1610        int r = bdi_bits;
1611        struct mapped_device *md = congested_data;
1612        struct dm_table *map;
1613
1614        if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1615                if (dm_request_based(md)) {
1616                        /*
1617                         * With request-based DM we only need to check the
1618                         * top-level queue for congestion.
1619                         */
1620                        r = md->queue->backing_dev_info.state & bdi_bits;
1621                } else {
1622                        map = dm_get_live_table_fast(md);
1623                        if (map)
1624                                r = dm_table_any_congested(map, bdi_bits);
1625                        dm_put_live_table_fast(md);
1626                }
1627        }
1628
1629        return r;
1630}
1631
1632/*-----------------------------------------------------------------
1633 * An IDR is used to keep track of allocated minor numbers.
1634 *---------------------------------------------------------------*/
1635static void free_minor(int minor)
1636{
1637        spin_lock(&_minor_lock);
1638        idr_remove(&_minor_idr, minor);
1639        spin_unlock(&_minor_lock);
1640}
1641
1642/*
1643 * See if the device with a specific minor # is free.
1644 */
1645static int specific_minor(int minor)
1646{
1647        int r;
1648
1649        if (minor >= (1 << MINORBITS))
1650                return -EINVAL;
1651
1652        idr_preload(GFP_KERNEL);
1653        spin_lock(&_minor_lock);
1654
1655        r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1656
1657        spin_unlock(&_minor_lock);
1658        idr_preload_end();
1659        if (r < 0)
1660                return r == -ENOSPC ? -EBUSY : r;
1661        return 0;
1662}
1663
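/*
 * Allocate the lowest free minor number for a new mapped device.
 */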
1664static int next_free_minor(int *minor)
1665{
1666        int r;
1667
1668        idr_preload(GFP_KERNEL);
1669        spin_lock(&_minor_lock);
1670
1671        r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1672
1673        spin_unlock(&_minor_lock);
1674        idr_preload_end();
1675        if (r < 0)
1676                return r;
1677        *minor = r;
1678        return 0;
1679}
1680
1681static const struct block_device_operations dm_blk_dops;
1682static const struct dax_operations dm_dax_ops;
1683
1684static void dm_wq_work(struct work_struct *work);
1685
1686void dm_init_md_queue(struct mapped_device *md)
1687{
1688        /*
1689         * Request-based dm devices cannot be stacked on top of bio-based dm
1690         * devices.  The type of this dm device may not have been decided yet.
1691         * The type is decided at the first table loading time.
1692         * To prevent problematic device stacking, clear the queue flag
1693         * for request stacking support until then.
1694         *
1695         * This queue is new, so no concurrency on the queue_flags.
1696         */
1697        queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1698
1699        /*
1700         * Initialize data that will only be used by a non-blk-mq DM queue
1701         * - must do so here (in alloc_dev callchain) before queue is used
1702         */
1703        md->queue->queuedata = md;
1704        md->queue->backing_dev_info.congested_data = md;
1705}
1706
1707void dm_init_normal_md_queue(struct mapped_device *md)
1708{
1709        md->use_blk_mq = false;
1710        dm_init_md_queue(md);
1711
1712        /*
1713         * Initialize aspects of queue that aren't relevant for blk-mq
1714         */
1715        md->queue->backing_dev_info.congested_fn = dm_any_congested;
1716        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1717}
1718
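/*
 * Release whatever alloc_dev() managed to set up; members are checked
 * (or the destructors tolerate NULL), so this is safe to call on a
 * partially constructed mapped_device.
 */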
1719static void cleanup_mapped_device(struct mapped_device *md)
1720{
1721        if (md->wq)
1722                destroy_workqueue(md->wq);
1723        if (md->kworker_task)
1724                kthread_stop(md->kworker_task);
1725        mempool_destroy(md->io_pool);
1726        mempool_destroy(md->rq_pool);
1727        if (md->bs)
1728                bioset_free(md->bs);
1729
1730        if (md->dax_dev) {
1731                kill_dax(md->dax_dev);
1732                put_dax(md->dax_dev);
1733                md->dax_dev = NULL;
1734        }
1735
1736        if (md->disk) {
1737                spin_lock(&_minor_lock);
1738                md->disk->private_data = NULL;
1739                spin_unlock(&_minor_lock);
1740                if (blk_get_integrity(md->disk))
1741                        blk_integrity_unregister(md->disk);
1742                del_gendisk(md->disk);
1743                put_disk(md->disk);
1744        }
1745
1746        if (md->queue)
1747                blk_cleanup_queue(md->queue);
1748
1749        cleanup_srcu_struct(&md->io_barrier);
1750
1751        if (md->bdev) {
1752                bdput(md->bdev);
1753                md->bdev = NULL;
1754        }
1755
1756        dm_mq_cleanup_mapped_device(md);
1757}
1758
1759/*
1760 * Allocate and initialise a blank device with a given minor.
1761 */
1762static struct mapped_device *alloc_dev(int minor)
1763{
1764        int r, numa_node_id = dm_get_numa_node();
1765        struct dax_device *dax_dev = NULL;
1766        struct mapped_device *md;
1767        void *old_md;
1768
1769        md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1770        if (!md) {
1771                DMWARN("unable to allocate device, out of memory.");
1772                return NULL;
1773        }
1774
1775        if (!try_module_get(THIS_MODULE))
1776                goto bad_module_get;
1777
1778        /* get a minor number for the dev */
1779        if (minor == DM_ANY_MINOR)
1780                r = next_free_minor(&minor);
1781        else
1782                r = specific_minor(minor);
1783        if (r < 0)
1784                goto bad_minor;
1785
1786        r = init_srcu_struct(&md->io_barrier);
1787        if (r < 0)
1788                goto bad_io_barrier;
1789
1790        md->numa_node_id = numa_node_id;
1791        md->use_blk_mq = dm_use_blk_mq_default();
1792        md->init_tio_pdu = false;
1793        md->type = DM_TYPE_NONE;
1794        mutex_init(&md->suspend_lock);
1795        mutex_init(&md->type_lock);
1796        mutex_init(&md->table_devices_lock);
1797        spin_lock_init(&md->deferred_lock);
1798        atomic_set(&md->holders, 1);
1799        atomic_set(&md->open_count, 0);
1800        atomic_set(&md->event_nr, 0);
1801        atomic_set(&md->uevent_seq, 0);
1802        INIT_LIST_HEAD(&md->uevent_list);
1803        INIT_LIST_HEAD(&md->table_devices);
1804        spin_lock_init(&md->uevent_lock);
1805
1806        md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL);
1807        if (!md->queue)
1808                goto bad;
1809
1810        dm_init_md_queue(md);
1811
1812        md->disk = alloc_disk_node(1, numa_node_id);
1813        if (!md->disk)
1814                goto bad;
1815
1816        atomic_set(&md->pending[0], 0);
1817        atomic_set(&md->pending[1], 0);
1818        init_waitqueue_head(&md->wait);
1819        INIT_WORK(&md->work, dm_wq_work);
1820        init_waitqueue_head(&md->eventq);
1821        init_completion(&md->kobj_holder.completion);
1822        md->kworker_task = NULL;
1823
1824        md->disk->major = _major;
1825        md->disk->first_minor = minor;
1826        md->disk->fops = &dm_blk_dops;
1827        md->disk->queue = md->queue;
1828        md->disk->private_data = md;
1829        sprintf(md->disk->disk_name, "dm-%d", minor);
1830
1831        if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
1832                dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
1833                if (!dax_dev)
1834                        goto bad;
1835        }
1836        md->dax_dev = dax_dev;
1837
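        /*
         * Register the disk now but leave request_queue registration to
         * dm_setup_md_queue(), which calls blk_register_queue() once the
         * device type is known.
         */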
1838        add_disk_no_queue_reg(md->disk);
1839        format_dev_t(md->name, MKDEV(_major, minor));
1840
1841        md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
1842        if (!md->wq)
1843                goto bad;
1844
1845        md->bdev = bdget_disk(md->disk, 0);
1846        if (!md->bdev)
1847                goto bad;
1848
1849        bio_init(&md->flush_bio);
1850        md->flush_bio.bi_bdev = md->bdev;
1851        md->flush_bio.bi_rw = WRITE_FLUSH;
1852
1853        dm_stats_init(&md->stats);
1854
1855        /* Populate the mapping, nobody knows we exist yet */
1856        spin_lock(&_minor_lock);
1857        old_md = idr_replace(&_minor_idr, md, minor);
1858        spin_unlock(&_minor_lock);
1859
1860        BUG_ON(old_md != MINOR_ALLOCED);
1861
1862        return md;
1863
1864bad:
1865        cleanup_mapped_device(md);
1866bad_io_barrier:
1867        free_minor(minor);
1868bad_minor:
1869        module_put(THIS_MODULE);
1870bad_module_get:
1871        kvfree(md);
1872        return NULL;
1873}
1874
1875static void unlock_fs(struct mapped_device *md);
1876
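/*
 * Called from __dm_destroy() to release all md resources and return the
 * minor number to the IDR.
 */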
1877static void free_dev(struct mapped_device *md)
1878{
1879        int minor = MINOR(disk_devt(md->disk));
1880
1881        unlock_fs(md);
1882
1883        cleanup_mapped_device(md);
1884
1885        free_table_devices(&md->table_devices);
1886        dm_stats_cleanup(&md->stats);
1887        free_minor(minor);
1888
1889        module_put(THIS_MODULE);
1890        kvfree(md);
1891}
1892
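/*
 * Take ownership of the mempools/bioset that were pre-allocated for the
 * table and attach them to the mapped_device; whatever remains in the
 * table's dm_md_mempools is freed at the end.
 */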
1893static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1894{
1895        struct dm_md_mempools *p = dm_table_get_md_mempools(t);
1896
1897        if (md->bs) {
1898                /* The md already has necessary mempools. */
1899                if (dm_table_bio_based(t)) {
1900                        /*
1901                         * Reload the bioset because front_pad may have
1902                         * changed with the newly loaded table.
1903                         */
1904                        bioset_free(md->bs);
1905                        md->bs = p->bs;
1906                        p->bs = NULL;
1907                }
1908                /*
1909                 * There's no need to reload with request-based dm
1910                 * because the size of front_pad doesn't change.
1911                 * Note for future: If you are to reload bioset,
1912                 * prep-ed requests in the queue may refer
1913                 * to bio from the old bioset, so you must walk
1914                 * through the queue to unprep.
1915                 */
1916                goto out;
1917        }
1918
1919        BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
1920
1921        md->io_pool = p->io_pool;
1922        p->io_pool = NULL;
1923        md->rq_pool = p->rq_pool;
1924        p->rq_pool = NULL;
1925        md->bs = p->bs;
1926        p->bs = NULL;
1927
1928out:
1929        /* mempool bind completed, no longer need any mempools in the table */
1930        dm_table_free_md_mempools(t);
1931}
1932
1933/*
1934 * Called on table events: send queued uevents and wake event waiters.
1935 */
1936static void event_callback(void *context)
1937{
1938        unsigned long flags;
1939        LIST_HEAD(uevents);
1940        struct mapped_device *md = (struct mapped_device *) context;
1941
1942        spin_lock_irqsave(&md->uevent_lock, flags);
1943        list_splice_init(&md->uevent_list, &uevents);
1944        spin_unlock_irqrestore(&md->uevent_lock, flags);
1945
1946        dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
1947
1948        atomic_inc(&md->event_nr);
1949        wake_up(&md->eventq);
1950        dm_issue_global_event();
1951}
1952
1953/*
1954 * Protected by md->suspend_lock obtained by dm_swap_table().
1955 */
1956static void __set_size(struct mapped_device *md, sector_t size)
1957{
1958        lockdep_assert_held(&md->suspend_lock);
1959
1960        set_capacity(md->disk, size);
1961
1962        i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
1963}
1964
1965/*
1966 * Return 1 if the queue has a compulsory merge_bvec_fn function.
1967 *
1968 * If this function returns 0, then the device is either a non-dm
1969 * device without a merge_bvec_fn, or it is a dm device that is
1970 * able to split any bios it receives that are too big.
1971 */
1972int dm_queue_merge_is_compulsory(struct request_queue *q)
1973{
1974        struct mapped_device *dev_md;
1975
1976        if (!q->merge_bvec_fn)
1977                return 0;
1978
1979        if (q->make_request_fn == dm_make_request) {
1980                dev_md = q->queuedata;
1981                if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
1982                        return 0;
1983        }
1984
1985        return 1;
1986}
1987
1988static int dm_device_merge_is_compulsory(struct dm_target *ti,
1989                                         struct dm_dev *dev, sector_t start,
1990                                         sector_t len, void *data)
1991{
1992        struct block_device *bdev = dev->bdev;
1993        struct request_queue *q = bdev_get_queue(bdev);
1994
1995        return dm_queue_merge_is_compulsory(q);
1996}
1997
1998/*
1999 * Return 1 if it is acceptable to ignore merge_bvec_fn based
2000 * on the properties of the underlying devices.
2001 */
2002static int dm_table_merge_is_optional(struct dm_table *table)
2003{
2004        unsigned i = 0;
2005        struct dm_target *ti;
2006
2007        while (i < dm_table_get_num_targets(table)) {
2008                ti = dm_table_get_target(table, i++);
2009
2010                if (ti->type->iterate_devices &&
2011                    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2012                        return 0;
2013        }
2014
2015        return 1;
2016}
2017
2018/*
2019 * Returns old map, which caller must destroy.
2020 */
2021static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2022                               struct queue_limits *limits)
2023{
2024        struct dm_table *old_map;
2025        struct request_queue *q = md->queue;
2026        sector_t size;
2027        int merge_is_optional;
2028
2029        lockdep_assert_held(&md->suspend_lock);
2030
2031        size = dm_table_get_size(t);
2032
2033        /*
2034         * Wipe any geometry if the size of the table changed.
2035         */
2036        if (size != dm_get_size(md))
2037                memset(&md->geometry, 0, sizeof(md->geometry));
2038
2039        __set_size(md, size);
2040
2041        dm_table_event_callback(t, event_callback, md);
2042
2043        /*
2044         * The queue hasn't been stopped yet if the old table wasn't
2045         * request-based during suspension, so stop it now to prevent
2046         * I/O from being mapped before resume.  This must be done
2047         * before setting the queue restrictions, because request-based
2048         * dm may start running as soon as they are set.
2049         */
2050        if (dm_table_request_based(t)) {
2051                dm_stop_queue(q);
2052                /*
2053                 * Leverage the fact that request-based DM targets are
2054                 * immutable singletons and establish md->immutable_target
2055                 * - used to optimize both dm_request_fn and dm_mq_queue_rq
2056                 */
2057                md->immutable_target = dm_table_get_immutable_target(t);
2058        }
2059
2060        __bind_mempools(md, t);
2061
2062        merge_is_optional = dm_table_merge_is_optional(t);
2063
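        /*
         * Publish the new map.  Readers access it via dm_get_live_table()
         * under SRCU, so dm_sync_table() below waits for users of the old
         * map to drain before it is handed back for destruction.
         */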
2064        old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2065        rcu_assign_pointer(md->map, (void *)t);
2066        md->immutable_target_type = dm_table_get_immutable_target_type(t);
2067
2068        dm_table_set_restrictions(t, q, limits);
2069        if (merge_is_optional)
2070                set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2071        else
2072                clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2073        if (old_map)
2074                dm_sync_table(md);
2075
2076        return old_map;
2077}
2078
2079/*
2080 * Returns unbound table for the caller to free.
2081 */
2082static struct dm_table *__unbind(struct mapped_device *md)
2083{
2084        struct dm_table *map = rcu_dereference_protected(md->map, 1);
2085
2086        if (!map)
2087                return NULL;
2088
2089        dm_table_event_callback(map, NULL, NULL);
2090        RCU_INIT_POINTER(md->map, NULL);
2091        dm_sync_table(md);
2092
2093        return map;
2094}
2095
2096/*
2097 * Constructor for a new device.
2098 */
2099int dm_create(int minor, struct mapped_device **result)
2100{
2101        struct mapped_device *md;
2102
2103        md = alloc_dev(minor);
2104        if (!md)
2105                return -ENXIO;
2106
2107        dm_sysfs_init(md);
2108
2109        *result = md;
2110        return 0;
2111}
2112
2113/*
2114 * Functions to manage md->type.
2115 * All are required to hold md->type_lock.
2116 */
2117void dm_lock_md_type(struct mapped_device *md)
2118{
2119        mutex_lock(&md->type_lock);
2120}
2121
2122void dm_unlock_md_type(struct mapped_device *md)
2123{
2124        mutex_unlock(&md->type_lock);
2125}
2126
2127void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2128{
2129        BUG_ON(!mutex_is_locked(&md->type_lock));
2130        md->type = type;
2131}
2132
2133enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2134{
2135        return md->type;
2136}
2137
2138struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2139{
2140        return md->immutable_target_type;
2141}
2142
2143/*
2144 * The queue_limits are only valid as long as you have a reference
2145 * count on 'md'.
2146 */
2147struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2148{
2149        BUG_ON(!atomic_read(&md->holders));
2150        return &md->queue->limits;
2151}
2152EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2153
2154/*
2155 * Set up the DM device's queue based on md's type
2156 */
2157int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2158{
2159        int r;
2160        struct queue_limits limits;
2161        struct queue_limits_aux limits_aux;
2162        enum dm_queue_mode type = dm_get_md_type(md);
2163
2164        switch (type) {
2165        case DM_TYPE_REQUEST_BASED:
2166                r = dm_old_init_request_queue(md);
2167                if (r) {
2168                        DMERR("Cannot initialize queue for request-based mapped device");
2169                        return r;
2170                }
2171                break;
2172        case DM_TYPE_MQ_REQUEST_BASED:
2173                r = dm_mq_init_request_queue(md, t);
2174                if (r) {
2175                        DMERR("Cannot initialize queue for request-based dm-mq mapped device");
2176                        return r;
2177                }
2178                break;
2179        case DM_TYPE_BIO_BASED:
2180        case DM_TYPE_DAX_BIO_BASED:
2181                dm_init_normal_md_queue(md);
2182                blk_queue_make_request(md->queue, dm_make_request);
2183                blk_queue_merge_bvec(md->queue, dm_merge_bvec);
2184                break;
2185        case DM_TYPE_NONE:
2186                WARN_ON_ONCE(true);
2187                break;
2188        }
2189
2190        limits.limits_aux = &limits_aux;
2191        r = dm_calculate_queue_limits(t, &limits);
2192        if (r) {
2193                DMERR("Cannot calculate initial queue limits");
2194                return r;
2195        }
2196        dm_table_set_restrictions(t, md->queue, &limits);
2197        blk_register_queue(md->disk);
2198
2199        return 0;
2200}
2201
2202struct mapped_device *dm_get_md(dev_t dev)
2203{
2204        struct mapped_device *md;
2205        unsigned minor = MINOR(dev);
2206
2207        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2208                return NULL;
2209
2210        spin_lock(&_minor_lock);
2211
2212        md = idr_find(&_minor_idr, minor);
2213        if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2214            test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2215                md = NULL;
2216                goto out;
2217        }
2218        dm_get(md);
2219out:
2220        spin_unlock(&_minor_lock);
2221
2222        return md;
2223}
2224EXPORT_SYMBOL_GPL(dm_get_md);
2225
2226void *dm_get_mdptr(struct mapped_device *md)
2227{
2228        return md->interface_ptr;
2229}
2230
2231void dm_set_mdptr(struct mapped_device *md, void *ptr)
2232{
2233        md->interface_ptr = ptr;
2234}
2235
2236void dm_get(struct mapped_device *md)
2237{
2238        atomic_inc(&md->holders);
2239        BUG_ON(test_bit(DMF_FREEING, &md->flags));
2240}
2241
2242int dm_hold(struct mapped_device *md)
2243{
2244        spin_lock(&_minor_lock);
2245        if (test_bit(DMF_FREEING, &md->flags)) {
2246                spin_unlock(&_minor_lock);
2247                return -EBUSY;
2248        }
2249        dm_get(md);
2250        spin_unlock(&_minor_lock);
2251        return 0;
2252}
2253EXPORT_SYMBOL_GPL(dm_hold);
2254
2255const char *dm_device_name(struct mapped_device *md)
2256{
2257        return md->name;
2258}
2259EXPORT_SYMBOL_GPL(dm_device_name);
2260
2261static void __dm_destroy(struct mapped_device *md, bool wait)
2262{
2263        struct request_queue *q = dm_get_md_queue(md);
2264        struct dm_table *map;
2265        int srcu_idx;
2266
2267        might_sleep();
2268
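        /*
         * Put the MINOR_ALLOCED placeholder back into the IDR and mark the
         * device DMF_FREEING so dm_get_md() and dm_hold() can no longer
         * take new references.
         */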
2269        spin_lock(&_minor_lock);
2270        idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2271        set_bit(DMF_FREEING, &md->flags);
2272        spin_unlock(&_minor_lock);
2273
2274        blk_set_queue_dying(q);
2275
2276        if (dm_request_based(md) && md->kworker_task)
2277                flush_kthread_worker(&md->kworker);
2278
2279        /*
2280         * Take suspend_lock so that presuspend and postsuspend methods
2281         * do not race with internal suspend.
2282         */
2283        mutex_lock(&md->suspend_lock);
2284        map = dm_get_live_table(md, &srcu_idx);
2285        if (!dm_suspended_md(md)) {
2286                dm_table_presuspend_targets(map);
2287                dm_table_postsuspend_targets(map);
2288        }
2289        /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2290        dm_put_live_table(md, srcu_idx);
2291        mutex_unlock(&md->suspend_lock);
2292
2293        /*
2294         * Rare, but there may still be in-flight I/O waiting to
2295         * complete.  Wait for all references to disappear.
2296         * No one should increment the reference count of the
2297         * mapped_device after its state becomes DMF_FREEING.
2298         */
2299        if (wait)
2300                while (atomic_read(&md->holders))
2301                        msleep(1);
2302        else if (atomic_read(&md->holders))
2303                DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2304                       dm_device_name(md), atomic_read(&md->holders));
2305
2306        dm_sysfs_exit(md);
2307        dm_table_destroy(__unbind(md));
2308        free_dev(md);
2309}
2310
2311void dm_destroy(struct mapped_device *md)
2312{
2313        __dm_destroy(md, true);
2314}
2315
2316void dm_destroy_immediate(struct mapped_device *md)
2317{
2318        __dm_destroy(md, false);
2319}
2320
2321void dm_put(struct mapped_device *md)
2322{
2323        atomic_dec(&md->holders);
2324}
2325EXPORT_SYMBOL_GPL(dm_put);
2326
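/*
 * Wait until all I/O accounted in md->pending has completed.  With an
 * interruptible @task_state a pending signal aborts the wait and -EINTR
 * is returned.
 */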
2327static int dm_wait_for_completion(struct mapped_device *md, long task_state)
2328{
2329        int r = 0;
2330        DEFINE_WAIT(wait);
2331
2332        while (1) {
2333                prepare_to_wait(&md->wait, &wait, task_state);
2334
2335                if (!md_in_flight(md))
2336                        break;
2337
2338                if (signal_pending_state(task_state, current)) {
2339                        r = -EINTR;
2340                        break;
2341                }
2342
2343                io_schedule();
2344        }
2345        finish_wait(&md->wait, &wait);
2346
2347        return r;
2348}
2349
2350/*
2351 * Process the deferred bios
2352 */
2353static void dm_wq_work(struct work_struct *work)
2354{
2355        struct mapped_device *md = container_of(work, struct mapped_device,
2356                                                work);
2357        struct bio *c;
2358        int srcu_idx;
2359        struct dm_table *map;
2360
2361        map = dm_get_live_table(md, &srcu_idx);
2362
2363        while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2364                spin_lock_irq(&md->deferred_lock);
2365                c = bio_list_pop(&md->deferred);
2366                spin_unlock_irq(&md->deferred_lock);
2367
2368                if (!c)
2369                        break;
2370
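                /*
                 * Request-based DM re-submits the deferred bio to the block
                 * layer; bio-based DM maps it against the live table here.
                 */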
2371                if (dm_request_based(md))
2372                        generic_make_request(c);
2373                else
2374                        __split_and_process_bio(md, map, c);
2375        }
2376
2377        dm_put_live_table(md, srcu_idx);
2378}
2379
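/*
 * Clear the suspend block and kick dm_wq_work() so that bios deferred on
 * md->deferred while suspended are processed again.
 */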
2380static void dm_queue_flush(struct mapped_device *md)
2381{
2382        clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2383        smp_mb__after_clear_bit();
2384        queue_work(md->wq, &md->work);
2385}
2386
2387/*
2388 * Swap in a new table, returning the old one for the caller to destroy.
2389 */
2390struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2391{
2392        struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2393        struct queue_limits limits;
2394        struct queue_limits_aux limits_aux;
2395        int r;
2396
2397        mutex_lock(&md->suspend_lock);
2398
2399        /* device must be suspended */
2400        if (!dm_suspended_md(md))
2401                goto out;
2402
2403        /*
2404         * Point limits.limits_aux at the queue_limits_aux allocated on
2405         * the stack above.
2406         */
2407        limits.limits_aux = &limits_aux;
2408
2409        /*
2410         * If the new table has no data devices, retain the existing limits.
2411         * This helps multipath with queue_if_no_path if all paths disappear,
2412         * then new I/O is queued based on these limits, and then some paths
2413         * reappear.
2414         */
2415        if (dm_table_has_no_data_devices(table)) {
2416                live_map = dm_get_live_table_fast(md);
2417                if (live_map)
2418                        limits = md->queue->limits;
2419                dm_put_live_table_fast(md);
2420        }
2421
2422        if (!live_map) {
2423                r = dm_calculate_queue_limits(table, &limits);
2424                if (r) {
2425                        map = ERR_PTR(r);
2426                        goto out;
2427                }
2428        }
2429
2430        map = __bind(md, table, &limits);
2431        dm_issue_global_event();
2432
2433out:
2434        mutex_unlock(&md->suspend_lock);
2435        return map;
2436}
2437
2438/*
2439 * Functions to lock and unlock any filesystem running on the
2440 * device.
2441 */
2442static int lock_fs(struct mapped_device *md)
2443{
2444        int r;
2445
2446        WARN_ON(md->frozen_sb);
2447
2448        md->frozen_sb = freeze_bdev(md->bdev);
2449        if (IS_ERR(md->frozen_sb)) {
2450                r = PTR_ERR(md->frozen_sb);
2451                md->frozen_sb = NULL;
2452                return r;
2453        }
2454
2455        set_bit(DMF_FROZEN, &md->flags);
2456
2457        return 0;
2458}
2459
2460static void unlock_fs(struct mapped_device *md)
2461{
2462        if (!test_bit(DMF_FROZEN, &md->flags))
2463                return;
2464
2465        thaw_bdev(md->bdev, md->frozen_sb);
2466        md->frozen_sb = NULL;
2467        clear_bit(DMF_FROZEN, &md->flags);
2468}
2469
2470/*
2471 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2472 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2473 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2474 *
2475 * If __dm_suspend returns 0, the device is completely quiescent
2476 * now. There is no request-processing activity. All new requests
2477 * are being added to md->deferred list.
2478 */
2479static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2480                        unsigned suspend_flags, long task_state,
2481                        int dmf_suspended_flag)
2482{
2483        bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2484        bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2485        int r;
2486
2487        lockdep_assert_held(&md->suspend_lock);
2488
2489        /*
2490         * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2491         * This flag is cleared before dm_suspend returns.
2492         */
2493        if (noflush)
2494                set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2495        else
2496                pr_debug("%s: suspending with flush\n", dm_device_name(md));
2497
2498        /*
2499         * This gets reverted if there's an error later and the targets
2500         * provide the .presuspend_undo hook.
2501         */
2502        dm_table_presuspend_targets(map);
2503
2504        /*
2505         * Flush I/O to the device.
2506         * Any I/O submitted after lock_fs() may not be flushed.
2507         * noflush takes precedence over do_lockfs.
2508         * (lock_fs() flushes I/Os and waits for them to complete.)
2509         */
2510        if (!noflush && do_lockfs) {
2511                r = lock_fs(md);
2512                if (r) {
2513                        dm_table_presuspend_undo_targets(map);
2514                        return r;
2515                }
2516        }
2517
2518        /*
2519         * Here we must make sure that no processes are submitting requests
2520         * to target drivers i.e. no one may be executing
2521         * __split_and_process_bio. This is called from dm_request and
2522         * dm_wq_work.
2523         *
2524         * To get all processes out of __split_and_process_bio in dm_request,
2525         * we take the write lock. To prevent any process from reentering
2526         * __split_and_process_bio from dm_request and quiesce the thread
2527         * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2528         * flush_workqueue(md->wq).
2529         */
2530        set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2531        if (map)
2532                synchronize_srcu(&md->io_barrier);
2533
2534        /*
2535         * Stop md->queue before flushing md->wq in case request-based
2536         * dm defers requests to md->wq from md->queue.
2537         */
2538        if (dm_request_based(md)) {
2539                dm_stop_queue(md->queue);
2540                if (md->kworker_task)
2541                        flush_kthread_worker(&md->kworker);
2542        }
2543
2544        flush_workqueue(md->wq);
2545
2546        /*
2547         * At this point no more requests are entering target request routines.
2548         * We call dm_wait_for_completion to wait for all existing requests
2549         * to finish.
2550         */
2551        r = dm_wait_for_completion(md, task_state);
2552        if (!r)
2553                set_bit(dmf_suspended_flag, &md->flags);
2554
2555        if (noflush)
2556                clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2557        if (map)
2558                synchronize_srcu(&md->io_barrier);
2559
2560        /* were we interrupted? */
2561        if (r < 0) {
2562                dm_queue_flush(md);
2563
2564                if (dm_request_based(md))
2565                        dm_start_queue(md->queue);
2566
2567                unlock_fs(md);
2568                dm_table_presuspend_undo_targets(map);
2569                /* pushback list is already flushed, so skip flush */
2570        }
2571
2572        return r;
2573}
2574
2575/*
2576 * We need to be able to change a mapping table under a mounted
2577 * filesystem.  For example we might want to move some data in
2578 * the background.  Before the table can be swapped with
2579 * dm_bind_table, dm_suspend must be called to flush any in
2580 * flight bios and ensure that any further io gets deferred.
2581 */
2582/*
2583 * Suspend mechanism in request-based dm.
2584 *
2585 * 1. Flush all I/Os by lock_fs() if needed.
2586 * 2. Stop dispatching any I/O by stopping the request_queue.
2587 * 3. Wait for all in-flight I/Os to be completed or requeued.
2588 *
2589 * To abort suspend, start the request_queue.
2590 */
2591int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2592{
2593        struct dm_table *map = NULL;
2594        int r = 0;
2595
2596retry:
2597        mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2598
2599        if (dm_suspended_md(md)) {
2600                r = -EINVAL;
2601                goto out_unlock;
2602        }
2603
2604        if (dm_suspended_internally_md(md)) {
2605                /* already internally suspended, wait for internal resume */
2606                mutex_unlock(&md->suspend_lock);
2607                r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2608                if (r)
2609                        return r;
2610                goto retry;
2611        }
2612
2613        map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2614
2615        r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2616        if (r)
2617                goto out_unlock;
2618
2619        dm_table_postsuspend_targets(map);
2620
2621out_unlock:
2622        mutex_unlock(&md->suspend_lock);
2623        return r;
2624}
2625
2626static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2627{
2628        if (map) {
2629                int r = dm_table_resume_targets(map);
2630                if (r)
2631                        return r;
2632        }
2633
2634        dm_queue_flush(md);
2635
2636        /*
2637         * Flushing deferred I/Os must be done after targets are resumed
2638         * so that mapping of targets can work correctly.
2639         * Request-based dm is queueing the deferred I/Os in its request_queue.
2640         */
2641        if (dm_request_based(md))
2642                dm_start_queue(md->queue);
2643
2644        unlock_fs(md);
2645
2646        return 0;
2647}
2648
2649int dm_resume(struct mapped_device *md)
2650{
2651        int r;
2652        struct dm_table *map = NULL;
2653
2654retry:
2655        r = -EINVAL;
2656        mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2657
2658        if (!dm_suspended_md(md))
2659                goto out;
2660
2661        if (dm_suspended_internally_md(md)) {
2662                /* already internally suspended, wait for internal resume */
2663                mutex_unlock(&md->suspend_lock);
2664                r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2665                if (r)
2666                        return r;
2667                goto retry;
2668        }
2669
2670        map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2671        if (!map || !dm_table_get_size(map))
2672                goto out;
2673
2674        r = __dm_resume(md, map);
2675        if (r)
2676                goto out;
2677
2678        clear_bit(DMF_SUSPENDED, &md->flags);
2679out:
2680        mutex_unlock(&md->suspend_lock);
2681
2682        return r;
2683}
2684
2685/*
2686 * Internal suspend/resume works like userspace-driven suspend. It waits
2687 * until all bios finish and prevents issuing new bios to the target drivers.
2688 * It may be used only from the kernel.
2689 */
2690
2691static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2692{
2693        struct dm_table *map = NULL;
2694
2695        lockdep_assert_held(&md->suspend_lock);
2696
2697        if (md->internal_suspend_count++)
2698                return; /* nested internal suspend */
2699
2700        if (dm_suspended_md(md)) {
2701                set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2702                return; /* nested suspend */
2703        }
2704
2705        map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2706
2707        /*
2708         * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2709         * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
2710         * would require changing .presuspend to return an error -- avoid this
2711         * until there is a need for more elaborate variants of internal suspend.
2712         */
2713        (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2714                            DMF_SUSPENDED_INTERNALLY);
2715
2716        dm_table_postsuspend_targets(map);
2717}
2718
2719static void __dm_internal_resume(struct mapped_device *md)
2720{
2721        BUG_ON(!md->internal_suspend_count);
2722
2723        if (--md->internal_suspend_count)
2724                return; /* resume from nested internal suspend */
2725
2726        if (dm_suspended_md(md))
2727                goto done; /* resume from nested suspend */
2728
2729        /*
2730         * NOTE: existing callers don't need to call dm_table_resume_targets
2731         * (which may fail -- so best to avoid it for now by passing NULL map)
2732         */
2733        (void) __dm_resume(md, NULL);
2734
2735done:
2736        clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2737        smp_mb__after_atomic();
2738        wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2739}
2740
2741void dm_internal_suspend_noflush(struct mapped_device *md)
2742{
2743        mutex_lock(&md->suspend_lock);
2744        __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2745        mutex_unlock(&md->suspend_lock);
2746}
2747EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2748
2749void dm_internal_resume(struct mapped_device *md)
2750{
2751        mutex_lock(&md->suspend_lock);
2752        __dm_internal_resume(md);
2753        mutex_unlock(&md->suspend_lock);
2754}
2755EXPORT_SYMBOL_GPL(dm_internal_resume);
2756
2757/*
2758 * Fast variants of internal suspend/resume hold md->suspend_lock,
2759 * which prevents interaction with userspace-driven suspend.
2760 */
2761
2762void dm_internal_suspend_fast(struct mapped_device *md)
2763{
2764        mutex_lock(&md->suspend_lock);
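        /* suspend_lock stays held until dm_internal_resume_fast() drops it. */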
2765        if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2766                return;
2767
2768        set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2769        synchronize_srcu(&md->io_barrier);
2770        flush_workqueue(md->wq);
2771        dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2772}
2773EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2774
2775void dm_internal_resume_fast(struct mapped_device *md)
2776{
2777        if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2778                goto done;
2779
2780        dm_queue_flush(md);
2781
2782done:
2783        mutex_unlock(&md->suspend_lock);
2784}
2785EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
2786
2787/*-----------------------------------------------------------------
2788 * Event notification.
2789 *---------------------------------------------------------------*/
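/*
 * Example (values are illustrative): dm_kobject_uevent(md, KOBJ_CHANGE, 1234)
 * emits a CHANGE uevent carrying "DM_COOKIE=1234" in its environment, while a
 * cookie of 0 falls back to a plain uevent without the variable.
 */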
2790int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2791                       unsigned cookie)
2792{
2793        char udev_cookie[DM_COOKIE_LENGTH];
2794        char *envp[] = { udev_cookie, NULL };
2795
2796        if (!cookie)
2797                return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2798        else {
2799                snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2800                         DM_COOKIE_ENV_VAR_NAME, cookie);
2801                return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2802                                          action, envp);
2803        }
2804}
2805
2806uint32_t dm_next_uevent_seq(struct mapped_device *md)
2807{
2808        return atomic_add_return(1, &md->uevent_seq);
2809}
2810
2811uint32_t dm_get_event_nr(struct mapped_device *md)
2812{
2813        return atomic_read(&md->event_nr);
2814}
2815
2816int dm_wait_event(struct mapped_device *md, int event_nr)
2817{
2818        return wait_event_interruptible(md->eventq,
2819                        (event_nr != atomic_read(&md->event_nr)));
2820}
2821
2822void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2823{
2824        unsigned long flags;
2825
2826        spin_lock_irqsave(&md->uevent_lock, flags);
2827        list_add(elist, &md->uevent_list);
2828        spin_unlock_irqrestore(&md->uevent_lock, flags);
2829}
2830
2831/*
2832 * The gendisk is only valid as long as you have a reference
2833 * count on 'md'.
2834 */
2835struct gendisk *dm_disk(struct mapped_device *md)
2836{
2837        return md->disk;
2838}
2839EXPORT_SYMBOL_GPL(dm_disk);
2840
2841struct kobject *dm_kobject(struct mapped_device *md)
2842{
2843        return &md->kobj_holder.kobj;
2844}
2845
2846struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2847{
2848        struct mapped_device *md;
2849
2850        md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2851
2852        spin_lock(&_minor_lock);
2853        if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2854                md = NULL;
2855                goto out;
2856        }
2857        dm_get(md);
2858out:
2859        spin_unlock(&_minor_lock);
2860
2861        return md;
2862}
2863
2864int dm_suspended_md(struct mapped_device *md)
2865{
2866        return test_bit(DMF_SUSPENDED, &md->flags);
2867}
2868
2869int dm_suspended_internally_md(struct mapped_device *md)
2870{
2871        return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2872}
2873
2874int dm_test_deferred_remove_flag(struct mapped_device *md)
2875{
2876        return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2877}
2878
2879int dm_suspended(struct dm_target *ti)
2880{
2881        return dm_suspended_md(dm_table_get_md(ti->table));
2882}
2883EXPORT_SYMBOL_GPL(dm_suspended);
2884
2885int dm_noflush_suspending(struct dm_target *ti)
2886{
2887        return __noflush_suspending(dm_table_get_md(ti->table));
2888}
2889EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2890
2891struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
2892                                            unsigned integrity, unsigned per_io_data_size)
2893{
2894        struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
2895        struct kmem_cache *cachep = NULL;
2896        unsigned int pool_size = 0;
2897        unsigned int front_pad;
2898
2899        if (!pools)
2900                return NULL;
2901
2902        switch (type) {
2903        case DM_TYPE_BIO_BASED:
2904        case DM_TYPE_DAX_BIO_BASED:
2905                cachep = _io_cache;
2906                pool_size = dm_get_reserved_bio_based_ios();
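                /*
                 * Each clone bio gets front padding for the per-bio
                 * dm_target_io plus the target's per_io_data_size.
                 */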
2907                front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2908                break;
2909        case DM_TYPE_REQUEST_BASED:
2910                cachep = _rq_tio_cache;
2911                pool_size = dm_get_reserved_rq_based_ios();
2912                pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
2913                if (!pools->rq_pool)
2914                        goto out;
2915                /* fall through to set up remaining rq-based pools */
2916        case DM_TYPE_MQ_REQUEST_BASED:
2917                if (!pool_size)
2918                        pool_size = dm_get_reserved_rq_based_ios();
2919                front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2920                /* per_io_data_size is used for blk-mq pdu at queue allocation */
2921                break;
2922        default:
2923                BUG();
2924        }
2925
2926        if (cachep) {
2927                pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
2928                if (!pools->io_pool)
2929                        goto out;
2930        }
2931
2932        pools->bs = bioset_create(pool_size, front_pad);
2933        if (!pools->bs)
2934                goto out;
2935
2936        if (integrity && bioset_integrity_create(pools->bs, pool_size))
2937                goto out;
2938
2939        return pools;
2940
2941out:
2942        dm_free_md_mempools(pools);
2943
2944        return NULL;
2945}
2946
2947void dm_free_md_mempools(struct dm_md_mempools *pools)
2948{
2949        if (!pools)
2950                return;
2951
2952        mempool_destroy(pools->io_pool);
2953        mempool_destroy(pools->rq_pool);
2954
2955        if (pools->bs)
2956                bioset_free(pools->bs);
2957
2958        kfree(pools);
2959}
2960
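/*
 * Persistent-reservation arguments carried through dm_call_pr() to the
 * per-device callout (see __dm_pr_register()).
 */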
2961struct dm_pr {
2962        u64     old_key;
2963        u64     new_key;
2964        u32     flags;
2965        bool    fail_early;
2966};
2967
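/*
 * Invoke @fn on the single target of the live table.  Persistent
 * reservations are only supported on single-target devices; anything else
 * fails with -ENOTTY or -EINVAL.
 */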
2968static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
2969                      void *data)
2970{
2971        struct mapped_device *md = bdev->bd_disk->private_data;
2972        struct dm_table *table;
2973        struct dm_target *ti;
2974        int ret = -ENOTTY, srcu_idx;
2975
2976        table = dm_get_live_table(md, &srcu_idx);
2977        if (!table || !dm_table_get_size(table))
2978                goto out;
2979
2980        /* We only support devices that have a single target */
2981        if (dm_table_get_num_targets(table) != 1)
2982                goto out;
2983        ti = dm_table_get_target(table, 0);
2984
2985        ret = -EINVAL;
2986        if (!ti->type->iterate_devices)
2987                goto out;
2988
2989        ret = ti->type->iterate_devices(ti, fn, data);
2990out:
2991        dm_put_live_table(md, srcu_idx);
2992        return ret;
2993}
2994
2995/*
2996 * For register / unregister we need to manually call out to every path.
2997 */
2998static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
2999                            sector_t start, sector_t len, void *data)
3000{
3001        struct dm_pr *pr = data;
3002        const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3003
3004        if (!ops || !ops->pr_register)
3005                return -EOPNOTSUPP;
3006        return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3007}
3008
3009static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3010                          u32 flags)
3011{
3012        struct dm_pr pr = {
3013                .old_key        = old_key,
3014                .new_key        = new_key,
3015                .flags          = flags,
3016                .fail_early     = true,
3017        };
3018        int ret;
3019
3020        ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3021        if (ret && new_key) {
3022                /* unregister all paths if we failed to register any path */
3023                pr.old_key = new_key;
3024                pr.new_key = 0;
3025                pr.flags = 0;
3026                pr.fail_early = false;
3027                dm_call_pr(bdev, __dm_pr_register, &pr);
3028        }
3029
3030        return ret;
3031}
3032
3033static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3034                         u32 flags)
3035{
3036        struct mapped_device *md = bdev->bd_disk->private_data;
3037        const struct pr_ops *ops;
3038        int r, srcu_idx;
3039
3040        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3041        if (r < 0)
3042                goto out;
3043
3044        ops = bdev->bd_disk->fops->pr_ops;
3045        if (ops && ops->pr_reserve)
3046                r = ops->pr_reserve(bdev, key, type, flags);
3047        else
3048                r = -EOPNOTSUPP;
3049out:
3050        dm_unprepare_ioctl(md, srcu_idx);
3051        return r;
3052}
3053
3054static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3055{
3056        struct mapped_device *md = bdev->bd_disk->private_data;
3057        const struct pr_ops *ops;
3058        int r, srcu_idx;
3059
3060        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3061        if (r < 0)
3062                goto out;
3063
3064        ops = bdev->bd_disk->fops->pr_ops;
3065        if (ops && ops->pr_release)
3066                r = ops->pr_release(bdev, key, type);
3067        else
3068                r = -EOPNOTSUPP;
3069out:
3070        dm_unprepare_ioctl(md, srcu_idx);
3071        return r;
3072}
3073
3074static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3075                         enum pr_type type, bool abort)
3076{
3077        struct mapped_device *md = bdev->bd_disk->private_data;
3078        const struct pr_ops *ops;
3079        int r, srcu_idx;
3080
3081        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3082        if (r < 0)
3083                goto out;
3084
3085        ops = bdev->bd_disk->fops->pr_ops;
3086        if (ops && ops->pr_preempt)
3087                r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
3088        else
3089                r = -EOPNOTSUPP;
3090out:
3091        dm_unprepare_ioctl(md, srcu_idx);
3092        return r;
3093}
3094
3095static int dm_pr_clear(struct block_device *bdev, u64 key)
3096{
3097        struct mapped_device *md = bdev->bd_disk->private_data;
3098        const struct pr_ops *ops;
3099        int r, srcu_idx;
3100
3101        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3102        if (r < 0)
3103                goto out;
3104
3105        ops = bdev->bd_disk->fops->pr_ops;
3106        if (ops && ops->pr_clear)
3107                r = ops->pr_clear(bdev, key);
3108        else
3109                r = -EOPNOTSUPP;
3110out:
3111        dm_unprepare_ioctl(md, srcu_idx);
3112        return r;
3113}
3114
3115static const struct pr_ops dm_pr_ops = {
3116        .pr_register    = dm_pr_register,
3117        .pr_reserve     = dm_pr_reserve,
3118        .pr_release     = dm_pr_release,
3119        .pr_preempt     = dm_pr_preempt,
3120        .pr_clear       = dm_pr_clear,
3121};
3122
3123static const struct block_device_operations dm_blk_dops = {
3124        .open = dm_blk_open,
3125        .release = dm_blk_close,
3126        .ioctl = dm_blk_ioctl,
3127        .getgeo = dm_blk_getgeo,
3128        .pr_ops = &dm_pr_ops,
3129        .owner = THIS_MODULE
3130};
3131
3132static const struct dax_operations dm_dax_ops = {
3133        .direct_access = dm_dax_direct_access,
3134        .memcpy_fromiovecend = dm_dax_memcpy_fromiovecend,
3135        .memcpy_toiovecend = dm_dax_memcpy_toiovecend,
3136};
3137
3138/*
3139 * module hooks
3140 */
3141module_init(dm_init);
3142module_exit(dm_exit);
3143
3144module_param(major, uint, 0);
3145MODULE_PARM_DESC(major, "The major number of the device mapper");
3146
3147module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3148MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3149
3150module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3151MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3152
3153MODULE_DESCRIPTION(DM_NAME " driver");
3154MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3155MODULE_LICENSE("GPL");
3156