linux/drivers/md/dm-snap.c
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE      16
#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))
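
/*
 * Illustrative only: with DM_TRACKED_CHUNK_HASH_SIZE = 16 the macro keeps
 * the low four bits of the chunk number, so chunk 0x1234 hashes to bucket
 * 0x4 and chunks 16 apart always collide.  (The chunk values here are
 * invented for the example.)
 */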

struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        atomic_t pending_exceptions_count;

        mempool_t *pending_pool;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* Chunks with outstanding reads */
        spinlock_t tracked_chunk_lock;
        mempool_t *tracked_chunk_pool;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *      => don't load the target; abort.
         * - Header does not have "valid" flag set
         *      => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *      => don't load the target; abort.
         *         (We can't use the intermediate origin state.)
         * - I/O error while merging
         *      => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
        return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;

        /*
         * For writing a complete chunk, bypassing the copy.
         */
        struct bio *full_bio;
        bio_end_io_t *full_bio_end_io;
        void *full_bio_private;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
                                                 chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
                                                        GFP_NOIO);
        unsigned long flags;

        c->chunk = chunk;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
                                struct dm_snap_tracked_chunk *c)
{
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        struct hlist_node *hn;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c, hn,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}
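
/*
 * Background, as a reading aid: the map paths record chunks with in-flight
 * I/O via track_chunk() and release them in snapshot_end_io(), so the
 * polling loop above is how a pending exception (or the merge process)
 * waits for I/O on a chunk to drain before replacing its contents.
 */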

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                down_read(&s->lock);
                active = s->active;
                up_read(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);

        return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
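
/*
 * Worked example with invented values: for hash_shift = 3 and size = 64,
 * exception_hash() maps chunk 0x2a to ((0x2a >> 3) & 63) = 5, and chunks
 * 0x28-0x2f all land in bucket 5 together, which keeps a run of
 * consecutive chunks within a single hash slot where dm_insert_exception()
 * can coalesce them.
 */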
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

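/*
 * Allocation note (editorial interpretation): GFP_NOIO may sleep but must
 * not recurse into the I/O path; if it still fails under memory pressure,
 * the GFP_ATOMIC retry below can dip into emergency reserves as a last
 * resort before the caller gives up.
 */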
static struct dm_exception *alloc_completed_exception(void)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
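
/*
 * Illustrative trace of the coalescing above (values invented): if a slot
 * holds e = {old 100, new 200, count 2}, covering old chunks 100-102, then
 * inserting new_e = {old 103, new 203} matches the "insert after" test and
 * simply bumps the count, while new_e = {old 99, new 199} matches the
 * "insert before" test and shifts e down to {old 99, new 199, count 3}.
 */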

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}
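
/*
 * Back-of-the-envelope, assuming a 64-bit build where
 * sizeof(struct list_head) is 16 bytes: 2MB / 16 = 131072, so the
 * completed-exception table is capped at 128K buckets regardless of
 * device size.
 */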

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}
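
/*
 * Worked sizing example (device sizes invented): with a 64GB origin, a
 * 16GB COW and 8-sector (4KB) chunks, min(origin, cow) >> chunk_shift
 * gives 33554432 >> 3 = 4194304 candidate buckets, which is clamped to
 * max_buckets (128K on 64-bit) and is already a power of two; the pending
 * table then gets 128K >> 3 = 16K buckets.
 */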

static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_clear_bit();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle.  We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}
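
/*
 * Example of the reverse-order invariant (values invented): merging chunks
 * 60-63 backed by e = {old 60, count 3} removes 63 first (the count drops
 * to 2), then 62 and 61, and finally 60 via the
 * !dm_consecutive_chunk_count(e) branch, which frees the exception itself.
 */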

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        down_write(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        up_write(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        down_write(&s->lock);
                        s->merge_failed = 1;
                        up_write(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed.  While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        down_write(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}

static int wait_schedule(void *ptr)
{
        schedule();

        return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
                    TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
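
/*
 * The shutdown handshake, spelled out: stop_merge() raises SHUTDOWN_MERGE
 * and sleeps on RUNNING_MERGE; snapshot_merge_next_chunks() notices the
 * bit before starting another copy and calls merge_shutdown(), which
 * clears RUNNING_MERGE and wakes the waiter.  SHUTDOWN_MERGE is then
 * cleared so that a later start_merge() can run again.
 */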

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
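
/*
 * For reference, a typical table line as fed to dmsetup (device names and
 * sizes below are only an example):
 *
 *   echo "0 2097152 snapshot /dev/vg0/base /dev/vg0/cow P 8" | \
 *       dmsetup create snap0
 *
 * maps a 1GB origin with a persistent ("P") exception store and 8-sector
 * chunks; "N" would select the transient store instead.
 */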
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used, num_flush_requests = 1;
        fmode_t origin_mode = FMODE_READ;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        if (dm_target_is_snapshot_merge(ti)) {
                num_flush_requests = 2;
                origin_mode = FMODE_WRITE;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate private snapshot structure";
                r = -ENOMEM;
                goto bad;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
        s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        s->kcopyd_client = dm_kcopyd_client_create();
        if (IS_ERR(s->kcopyd_client)) {
                r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                r = -ENOMEM;
                goto bad_pending_pool;
        }

        s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
                                                         tracked_chunk_cache);
        if (!s->tracked_chunk_pool) {
                ti->error = "Could not allocate tracked_chunk mempool for "
                            "tracking reads";
                r = -ENOMEM;
                goto bad_tracked_chunk_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        ti->private = s;
        ti->num_flush_requests = num_flush_requests;

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        r = register_snapshot(s);
        if (r == -ENOMEM) {
                ti->error = "Snapshot origin struct allocation failed";
                goto bad_load_and_register;
        } else if (r < 0) {
                /* invalid handover, register_snapshot has set ti->error */
                goto bad_load_and_register;
        }

        /*
         * Metadata must only be loaded into one table at once, so skip this
         * if metadata will be handed over during resume.
         * Chunk size will be set during the handover - set it to zero to
         * ensure it's ignored.
         */
        if (r > 0) {
                s->store->chunk_size = 0;
                return 0;
        }

        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_read_metadata;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                r = -EINVAL;
                goto bad_read_metadata;
        }
        ti->split_io = s->store->chunk_size;

        return 0;

bad_read_metadata:
        unregister_snapshot(s);

bad_load_and_register:
        mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
        mempool_destroy(s->pending_pool);

bad_pending_pool:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
        dm_exception_store_destroy(s->store);

bad_store:
        dm_put_device(ti, s->cow);

bad_cow:
        dm_put_device(ti, s->origin);

bad_origin:
        kfree(s);

bad:
        return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
                                  struct dm_snapshot *snap_dest)
{
        union {
                struct dm_exception_table table_swap;
                struct dm_exception_store *store_swap;
        } u;

        /*
         * Swap all snapshot context information between the two instances.
         */
        u.table_swap = snap_dest->complete;
        snap_dest->complete = snap_src->complete;
        snap_src->complete = u.table_swap;

        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
        snap_src->store = u.store_swap;

        snap_dest->store->snap = snap_dest;
        snap_src->store->snap = snap_src;

        snap_dest->ti->split_io = snap_dest->store->chunk_size;
        snap_dest->valid = snap_src->valid;

        /*
         * Set source invalid to ensure it receives no further I/O.
         */
        snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest && (s == snap_src)) {
                down_write(&snap_dest->lock);
                snap_dest->valid = 0;
                up_write(&snap_dest->lock);
                DMERR("Cancelling snapshot handover.");
        }
        up_read(&_origins_lock);

        if (dm_target_is_snapshot_merge(ti))
                stop_merge(s);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        mempool_destroy(s->tracked_chunk_pool);

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_exception_store_destroy(s->store);

        dm_put_device(ti, s->cow);

        dm_put_device(ti, s->origin);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
        struct bio *n;
        int r;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                r = do_origin(s->origin, bio);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
        }
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->ti->table);
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        struct bio *full_bio = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_completed_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_completed_exception(e);
                error = 1;
                goto out;
        }

        /* Check for conflicting reads */
        __check_for_conflicting_io(s, pe->e.old_chunk);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        dm_insert_exception(&s->complete, e);

out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
        full_bio = pe->full_bio;
        if (full_bio) {
                full_bio->bi_end_io = pe->full_bio_end_io;
                full_bio->bi_private = pe->full_bio_private;
        }
        free_pending_exception(pe);

        increment_pending_exceptions_done_count();

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error) {
                if (full_bio)
                        bio_io_error(full_bio);
                error_bios(snapshot_bios);
        } else {
                if (full_bio)
                        bio_endio(full_bio, 0);
                flush_bios(snapshot_bios);
        }

        retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);

        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}

static void full_bio_end_io(struct bio *bio, int error)
{
        void *callback_data = bio->bi_private;

        dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}

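/*
 * Optimisation note: when a write covers an entire chunk (checked in
 * snapshot_map() against chunk_size << SECTOR_SHIFT), the bio itself is
 * sent to the COW device and wired into kcopyd's callback machinery
 * below, so the data is written once instead of being copied from the
 * origin and then overwritten.
 */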
static void start_full_bio(struct dm_snap_pending_exception *pe,
                           struct bio *bio)
{
        struct dm_snapshot *s = pe->snap;
        void *callback_data;

        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
        pe->full_bio_private = bio->bi_private;

        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);

        bio->bi_end_io = full_bio_end_io;
        bio->bi_private = callback_data;

        generic_make_request(bio);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->started = 0;
        pe->full_bio = NULL;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        dm_insert_exception(&s->pending, &pe->e);

        return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s->store,
                                         dm_chunk_number(e->new_chunk) +
                                         (chunk - e->old_chunk)) +
                                         (bio->bi_sector &
                                          s->store->chunk_mask);
}
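
/*
 * Remap arithmetic with invented numbers: for e = {old 100, new 200}, a
 * bio at chunk 102, offset 5 sectors within a consecutive run is sent to
 * chunk_to_sector(store, 200 + (102 - 100)) + 5, i.e. the matching offset
 * inside COW chunk 202.
 */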
1576
1577static int snapshot_map(struct dm_target *ti, struct bio *bio,
1578                        union map_info *map_context)
1579{
1580        struct dm_exception *e;
1581        struct dm_snapshot *s = ti->private;
1582        int r = DM_MAPIO_REMAPPED;
1583        chunk_t chunk;
1584        struct dm_snap_pending_exception *pe = NULL;
1585
1586        if (bio->bi_rw & REQ_FLUSH) {
1587                bio->bi_bdev = s->cow->bdev;
1588                return DM_MAPIO_REMAPPED;
1589        }
1590
1591        chunk = sector_to_chunk(s->store, bio->bi_sector);
1592
1593        /* Full snapshots are not usable */
1594        /* To get here the table must be live so s->active is always set. */
1595        if (!s->valid)
1596                return -EIO;
1597
1598        /* FIXME: should only take write lock if we need
1599         * to copy an exception */
1600        down_write(&s->lock);
1601
1602        if (!s->valid) {
1603                r = -EIO;
1604                goto out_unlock;
1605        }
1606
1607        /* If the block is already remapped - use that, else remap it */
1608        e = dm_lookup_exception(&s->complete, chunk);
1609        if (e) {
1610                remap_exception(s, e, bio, chunk);
1611                goto out_unlock;
1612        }
1613
1614        /*
1615         * Write to snapshot - higher level takes care of RW/RO
1616         * flags so we should only get this if we are
1617         * writeable.
1618         */
1619        if (bio_rw(bio) == WRITE) {
1620                pe = __lookup_pending_exception(s, chunk);
1621                if (!pe) {
1622                        up_write(&s->lock);
1623                        pe = alloc_pending_exception(s);
1624                        down_write(&s->lock);
1625
1626                        if (!s->valid) {
1627                                free_pending_exception(pe);
1628                                r = -EIO;
1629                                goto out_unlock;
1630                        }
1631
1632                        e = dm_lookup_exception(&s->complete, chunk);
1633                        if (e) {
1634                                free_pending_exception(pe);
1635                                remap_exception(s, e, bio, chunk);
1636                                goto out_unlock;
1637                        }
1638
1639                        pe = __find_pending_exception(s, pe, chunk);
1640                        if (!pe) {
1641                                __invalidate_snapshot(s, -ENOMEM);
1642                                r = -EIO;
1643                                goto out_unlock;
1644                        }
1645                }
1646
1647                remap_exception(s, &pe->e, bio, chunk);
1648
1649                r = DM_MAPIO_SUBMITTED;
1650
1651                if (!pe->started &&
1652                    bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
1653                        pe->started = 1;
1654                        up_write(&s->lock);
1655                        start_full_bio(pe, bio);
1656                        goto out;
1657                }
1658
1659                bio_list_add(&pe->snapshot_bios, bio);
1660
1661                if (!pe->started) {
1662                        /* this is protected by snap->lock */
1663                        pe->started = 1;
1664                        up_write(&s->lock);
1665                        start_copy(pe);
1666                        goto out;
1667                }
1668        } else {
1669                bio->bi_bdev = s->origin->bdev;
1670                map_context->ptr = track_chunk(s, chunk);
1671        }
1672
1673out_unlock:
1674        up_write(&s->lock);
1675out:
1676        return r;
1677}
1678
1679/*
1680 * A snapshot-merge target behaves like a combination of a snapshot
1681 * target and a snapshot-origin target.  It only generates new
1682 * exceptions in other snapshots and not in the one that is being
1683 * merged.
1684 *
1685 * For each chunk, if there is an existing exception, it is used to
1686 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1687 * which in turn might generate exceptions in other snapshots.
1688 * If merging is currently taking place on the chunk in question, the
1689 * I/O is deferred by adding it to s->bios_queued_during_merge.
1690 */
1691static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
1692                              union map_info *map_context)
1693{
1694        struct dm_exception *e;
1695        struct dm_snapshot *s = ti->private;
1696        int r = DM_MAPIO_REMAPPED;
1697        chunk_t chunk;
1698
1699        if (bio->bi_rw & REQ_FLUSH) {
1700                if (!map_context->target_request_nr)
1701                        bio->bi_bdev = s->origin->bdev;
1702                else
1703                        bio->bi_bdev = s->cow->bdev;
1704                map_context->ptr = NULL;
1705                return DM_MAPIO_REMAPPED;
1706        }
1707
1708        chunk = sector_to_chunk(s->store, bio->bi_sector);
1709
1710        down_write(&s->lock);
1711
1712        /* Invalid (e.g. full) merging snapshots are redirected to the origin */
1713        if (!s->valid)
1714                goto redirect_to_origin;
1715
1716        /* If the block is already remapped - use that */
1717        e = dm_lookup_exception(&s->complete, chunk);
1718        if (e) {
1719                /* Queue writes overlapping with chunks being merged */
1720                if (bio_rw(bio) == WRITE &&
1721                    chunk >= s->first_merging_chunk &&
1722                    chunk < (s->first_merging_chunk +
1723                             s->num_merging_chunks)) {
1724                        bio->bi_bdev = s->origin->bdev;
1725                        bio_list_add(&s->bios_queued_during_merge, bio);
1726                        r = DM_MAPIO_SUBMITTED;
1727                        goto out_unlock;
1728                }
1729
1730                remap_exception(s, e, bio, chunk);
1731
1732                if (bio_rw(bio) == WRITE)
1733                        map_context->ptr = track_chunk(s, chunk);
1734                goto out_unlock;
1735        }
1736
1737redirect_to_origin:
1738        bio->bi_bdev = s->origin->bdev;
1739
1740        if (bio_rw(bio) == WRITE) {
1741                up_write(&s->lock);
1742                return do_origin(s->origin, bio);
1743        }
1744
1745out_unlock:
1746        up_write(&s->lock);
1747
1748        return r;
1749}
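
/*
 * Worked example of the deferral window above (numbers illustrative):
 * with first_merging_chunk = 100 and num_merging_chunks = 8, a write
 * to a remapped chunk in [100, 108) is queued on
 * bios_queued_during_merge and resubmitted once that merge pass
 * finishes; remapped chunks outside the window go straight to the COW
 * device as usual.
 */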
1750
1751static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1752                           int error, union map_info *map_context)
1753{
1754        struct dm_snapshot *s = ti->private;
1755        struct dm_snap_tracked_chunk *c = map_context->ptr;
1756
1757        if (c)
1758                stop_tracking_chunk(s, c);
1759
1760        return 0;
1761}
1762
1763static void snapshot_merge_presuspend(struct dm_target *ti)
1764{
1765        struct dm_snapshot *s = ti->private;
1766
1767        stop_merge(s);
1768}
1769
1770static int snapshot_preresume(struct dm_target *ti)
1771{
1772        int r = 0;
1773        struct dm_snapshot *s = ti->private;
1774        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1775
1776        down_read(&_origins_lock);
1777        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1778        if (snap_src && snap_dest) {
1779                down_read(&snap_src->lock);
1780                if (s == snap_src) {
1781                        DMERR("Unable to resume snapshot source until "
1782                              "handover completes.");
1783                        r = -EINVAL;
1784                } else if (!dm_suspended(snap_src->ti)) {
1785                        DMERR("Unable to perform snapshot handover until "
1786                              "source is suspended.");
1787                        r = -EINVAL;
1788                }
1789                up_read(&snap_src->lock);
1790        }
1791        up_read(&_origins_lock);
1792
1793        return r;
1794}
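
/*
 * The checks above enforce the handover ordering.  A typical sequence,
 * sketched with hypothetical device names:
 *
 *	dmsetup suspend vg-snap		(source: dm_suspended() is true)
 *	dmsetup resume vg-snap-merge	(destination passes preresume;
 *					 snapshot_resume() then performs
 *					 __handover_exceptions())
 *
 * Resuming the source itself, or resuming the destination while the
 * source is still live, fails here with -EINVAL.
 */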
1795
1796static void snapshot_resume(struct dm_target *ti)
1797{
1798        struct dm_snapshot *s = ti->private;
1799        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1800
1801        down_read(&_origins_lock);
1802        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1803        if (snap_src && snap_dest) {
1804                down_write(&snap_src->lock);
1805                down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1806                __handover_exceptions(snap_src, snap_dest);
1807                up_write(&snap_dest->lock);
1808                up_write(&snap_src->lock);
1809        }
1810        up_read(&_origins_lock);
1811
1812        /* Now that we have the correct chunk size, reregister with the origin */
1813        reregister_snapshot(s);
1814
1815        down_write(&s->lock);
1816        s->active = 1;
1817        up_write(&s->lock);
1818}
1819
1820static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
1821{
1822        sector_t min_chunksize;
1823
1824        down_read(&_origins_lock);
1825        min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1826        up_read(&_origins_lock);
1827
1828        return min_chunksize;
1829}
1830
1831static void snapshot_merge_resume(struct dm_target *ti)
1832{
1833        struct dm_snapshot *s = ti->private;
1834
1835        /*
1836         * Handover exceptions from existing snapshot.
1837         */
1838        snapshot_resume(ti);
1839
1840        /*
1841         * snapshot-merge acts as an origin, so set ti->split_io
1842         */
1843        ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
1844
1845        start_merge(s);
1846}
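
/*
 * Worked example for split_io (sizes illustrative): if the origin has
 * snapshots with chunk sizes of 16, 32 and 64 sectors,
 * get_origin_minimum_chunksize() returns 16, so no bio handed to this
 * target crosses a 16-sector boundary - and therefore none crosses a
 * 32- or 64-sector chunk boundary either.
 */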
1847
1848static int snapshot_status(struct dm_target *ti, status_type_t type,
1849                           char *result, unsigned int maxlen)
1850{
1851        unsigned sz = 0;
1852        struct dm_snapshot *snap = ti->private;
1853
1854        switch (type) {
1855        case STATUSTYPE_INFO:
1856
1857                down_write(&snap->lock);
1858
1859                if (!snap->valid)
1860                        DMEMIT("Invalid");
1861                else if (snap->merge_failed)
1862                        DMEMIT("Merge failed");
1863                else {
1864                        if (snap->store->type->usage) {
1865                                sector_t total_sectors, sectors_allocated,
1866                                         metadata_sectors;
1867                                snap->store->type->usage(snap->store,
1868                                                         &total_sectors,
1869                                                         &sectors_allocated,
1870                                                         &metadata_sectors);
1871                                DMEMIT("%llu/%llu %llu",
1872                                       (unsigned long long)sectors_allocated,
1873                                       (unsigned long long)total_sectors,
1874                                       (unsigned long long)metadata_sectors);
1875                        } else
1876                                DMEMIT("Unknown");
1878                }
1879
1880                up_write(&snap->lock);
1881
1882                break;
1883
1884        case STATUSTYPE_TABLE:
1885                /*
1886                 * The origin and COW device names are private copies
1887                 * held in each struct dm_dev, so they can be emitted
1888                 * directly.
1889                 */
1890                DMEMIT("%s %s", snap->origin->name, snap->cow->name);
1891                snap->store->type->status(snap->store, type, result + sz,
1892                                          maxlen - sz);
1893                break;
1894        }
1895
1896        return 0;
1897}
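
/*
 * Example STATUSTYPE_INFO output (the target-specific tail of a
 * "dmsetup status" line; numbers illustrative): "10240/409600 16"
 * means 10240 of 409600 COW sectors are allocated and 16 sectors hold
 * exception-store metadata.
 */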
1898
1899static int snapshot_iterate_devices(struct dm_target *ti,
1900                                    iterate_devices_callout_fn fn, void *data)
1901{
1902        struct dm_snapshot *snap = ti->private;
1903        int r;
1904
1905        r = fn(ti, snap->origin, 0, ti->len, data);
1906
1907        if (!r)
1908                r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
1909
1910        return r;
1911}
1912
1914/*-----------------------------------------------------------------
1915 * Origin methods
1916 *---------------------------------------------------------------*/
1917
1918/*
1919 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
1920 * supplied bio is ignored; the caller may submit it immediately.
1921 * (No remapping actually occurs as the origin is always a direct linear
1922 * map.)
1923 *
1924 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
1925 * and any supplied bio is added to a list to be submitted once all
1926 * the necessary exceptions exist.
1927 */
1928static int __origin_write(struct list_head *snapshots, sector_t sector,
1929                          struct bio *bio)
1930{
1931        int r = DM_MAPIO_REMAPPED;
1932        struct dm_snapshot *snap;
1933        struct dm_exception *e;
1934        struct dm_snap_pending_exception *pe;
1935        struct dm_snap_pending_exception *pe_to_start_now = NULL;
1936        struct dm_snap_pending_exception *pe_to_start_last = NULL;
1937        chunk_t chunk;
1938
1939        /* Do all the snapshots on this origin */
1940        list_for_each_entry(snap, snapshots, list) {
1941                /*
1942                 * Don't make new exceptions in a merging snapshot
1943                 * because it has effectively been deleted
1944                 */
1945                if (dm_target_is_snapshot_merge(snap->ti))
1946                        continue;
1947
1948                down_write(&snap->lock);
1949
1950                /* Only deal with valid and active snapshots */
1951                if (!snap->valid || !snap->active)
1952                        goto next_snapshot;
1953
1954                /* Nothing to do if writing beyond end of snapshot */
1955                if (sector >= dm_table_get_size(snap->ti->table))
1956                        goto next_snapshot;
1957
1958                /*
1959                 * Remember, different snapshots can have
1960                 * different chunk sizes.
1961                 */
1962                chunk = sector_to_chunk(snap->store, sector);
1963
1964                /*
1965                 * Check exception table to see if block
1966                 * is already remapped in this snapshot
1967                 * and trigger an exception if not.
1968                 */
1969                e = dm_lookup_exception(&snap->complete, chunk);
1970                if (e)
1971                        goto next_snapshot;
1972
1973                pe = __lookup_pending_exception(snap, chunk);
1974                if (!pe) {
1975                        up_write(&snap->lock);
1976                        pe = alloc_pending_exception(snap);
1977                        down_write(&snap->lock);
1978
1979                        if (!snap->valid) {
1980                                free_pending_exception(pe);
1981                                goto next_snapshot;
1982                        }
1983
1984                        e = dm_lookup_exception(&snap->complete, chunk);
1985                        if (e) {
1986                                free_pending_exception(pe);
1987                                goto next_snapshot;
1988                        }
1989
1990                        pe = __find_pending_exception(snap, pe, chunk);
1991                        if (!pe) {
1992                                __invalidate_snapshot(snap, -ENOMEM);
1993                                goto next_snapshot;
1994                        }
1995                }
1996
1997                r = DM_MAPIO_SUBMITTED;
1998
1999                /*
2000                 * If an origin bio was supplied, queue it to wait for the
2001                 * completion of this exception, and start this one last,
2002                 * at the end of the function.
2003                 */
2004                if (bio) {
2005                        bio_list_add(&pe->origin_bios, bio);
2006                        bio = NULL;
2007
2008                        if (!pe->started) {
2009                                pe->started = 1;
2010                                pe_to_start_last = pe;
2011                        }
2012                }
2013
2014                if (!pe->started) {
2015                        pe->started = 1;
2016                        pe_to_start_now = pe;
2017                }
2018
2019next_snapshot:
2020                up_write(&snap->lock);
2021
2022                if (pe_to_start_now) {
2023                        start_copy(pe_to_start_now);
2024                        pe_to_start_now = NULL;
2025                }
2026        }
2027
2028        /*
2029         * Submit the exception against which the bio is queued last,
2030         * to give the other exceptions a head start.
2031         */
2032        if (pe_to_start_last)
2033                start_copy(pe_to_start_last);
2034
2035        return r;
2036}
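
/*
 * Ordering example (illustrative): with snapshots A, B and C all
 * needing a new exception for the written chunk and the origin bio
 * queued on A's pending exception, the copies for B and C are kicked
 * off inside the loop (pe_to_start_now) while A's copy is started
 * last (pe_to_start_last), so the copies that are not gating the bio
 * get a head start.
 */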
2037
2038/*
2039 * Called on a write from the origin driver.
2040 */
2041static int do_origin(struct dm_dev *origin, struct bio *bio)
2042{
2043        struct origin *o;
2044        int r = DM_MAPIO_REMAPPED;
2045
2046        down_read(&_origins_lock);
2047        o = __lookup_origin(origin->bdev);
2048        if (o)
2049                r = __origin_write(&o->snapshots, bio->bi_sector, bio);
2050        up_read(&_origins_lock);
2051
2052        return r;
2053}
2054
2055/*
2056 * Trigger exceptions in all non-merging snapshots.
2057 *
2058 * The chunk size of the merging snapshot may be larger than the chunk
2059 * size of some other snapshot so we may need to reallocate multiple
2060 * chunks in other snapshots.
2061 *
2062 * We scan all the overlapping exceptions in the other snapshots.
2063 * Returns 1 if anything was reallocated and must be waited for,
2064 * otherwise returns 0.
2065 *
2066 * size must be a multiple of merging_snap's chunk_size.
2067 */
2068static int origin_write_extent(struct dm_snapshot *merging_snap,
2069                               sector_t sector, unsigned size)
2070{
2071        int must_wait = 0;
2072        sector_t n;
2073        struct origin *o;
2074
2075        /*
2076         * The origin's __minimum_chunk_size() got stored in split_io
2077         * by snapshot_merge_resume().
2078         */
2079        down_read(&_origins_lock);
2080        o = __lookup_origin(merging_snap->origin->bdev);
2081        for (n = 0; n < size; n += merging_snap->ti->split_io)
2082                if (__origin_write(&o->snapshots, sector + n, NULL) ==
2083                    DM_MAPIO_SUBMITTED)
2084                        must_wait = 1;
2085        up_read(&_origins_lock);
2086
2087        return must_wait;
2088}
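
/*
 * Worked example (sizes illustrative): if merging_snap uses 64-sector
 * chunks and another snapshot of the same origin uses 16-sector
 * chunks, ti->split_io is 16, so for size = 64 the loop calls
 * __origin_write() at offsets 0, 16, 32 and 48 and may reallocate up
 * to four chunks in the smaller-chunked snapshot; must_wait is set if
 * any of those started a copy.
 */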
2089
2090/*
2091 * Origin: maps a linear range of a device, with hooks for snapshotting.
2092 */
2093
2094/*
2095 * Construct an origin mapping: <dev_path>
2096 * The context for an origin is merely a 'struct dm_dev *'
2097 * pointing to the real device.
2098 */
2099static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2100{
2101        int r;
2102        struct dm_dev *dev;
2103
2104        if (argc != 1) {
2105                ti->error = "origin: incorrect number of arguments";
2106                return -EINVAL;
2107        }
2108
2109        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
2110        if (r) {
2111                ti->error = "Cannot get target device";
2112                return r;
2113        }
2114
2115        ti->private = dev;
2116        ti->num_flush_requests = 1;
2117
2118        return 0;
2119}
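
/*
 * Example table lines (hypothetical devices and sizes) showing an
 * origin target and the companion snapshot target stacked over the
 * same base device:
 *
 *	0 409600 snapshot-origin /dev/vg/base
 *	0 409600 snapshot /dev/vg/base /dev/vg/cow P 16
 */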
2120
2121static void origin_dtr(struct dm_target *ti)
2122{
2123        struct dm_dev *dev = ti->private;
2124        dm_put_device(ti, dev);
2125}
2126
2127static int origin_map(struct dm_target *ti, struct bio *bio,
2128                      union map_info *map_context)
2129{
2130        struct dm_dev *dev = ti->private;
2131        bio->bi_bdev = dev->bdev;
2132
2133        if (bio->bi_rw & REQ_FLUSH)
2134                return DM_MAPIO_REMAPPED;
2135
2136        /* Only tell snapshots if this is a write */
2137        return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
2138}
2139
2140/*
2141 * Set the target "split_io" field to the minimum of all the snapshots'
2142 * chunk sizes.
2143 */
2144static void origin_resume(struct dm_target *ti)
2145{
2146        struct dm_dev *dev = ti->private;
2147
2148        ti->split_io = get_origin_minimum_chunksize(dev->bdev);
2149}
2150
2151static int origin_status(struct dm_target *ti, status_type_t type, char *result,
2152                         unsigned int maxlen)
2153{
2154        struct dm_dev *dev = ti->private;
2155
2156        switch (type) {
2157        case STATUSTYPE_INFO:
2158                result[0] = '\0';
2159                break;
2160
2161        case STATUSTYPE_TABLE:
2162                snprintf(result, maxlen, "%s", dev->name);
2163                break;
2164        }
2165
2166        return 0;
2167}
2168
2169static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2170                        struct bio_vec *biovec, int max_size)
2171{
2172        struct dm_dev *dev = ti->private;
2173        struct request_queue *q = bdev_get_queue(dev->bdev);
2174
2175        if (!q->merge_bvec_fn)
2176                return max_size;
2177
2178        bvm->bi_bdev = dev->bdev;
2179        /* bvm->bi_sector is unchanged: the origin is an identity map */
2180
2181        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2182}
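
/*
 * Example (illustrative): if the caller could accept max_size = 4096
 * bytes but the underlying queue's merge_bvec_fn only allows 1024 at
 * this sector, the min() above returns 1024, so bios built over the
 * origin never exceed what the lower device accepts in one request.
 */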
2183
2184static int origin_iterate_devices(struct dm_target *ti,
2185                                  iterate_devices_callout_fn fn, void *data)
2186{
2187        struct dm_dev *dev = ti->private;
2188
2189        return fn(ti, dev, 0, ti->len, data);
2190}
2191
2192static struct target_type origin_target = {
2193        .name    = "snapshot-origin",
2194        .version = {1, 7, 1},
2195        .module  = THIS_MODULE,
2196        .ctr     = origin_ctr,
2197        .dtr     = origin_dtr,
2198        .map     = origin_map,
2199        .resume  = origin_resume,
2200        .status  = origin_status,
2201        .merge   = origin_merge,
2202        .iterate_devices = origin_iterate_devices,
2203};
2204
2205static struct target_type snapshot_target = {
2206        .name    = "snapshot",
2207        .version = {1, 10, 0},
2208        .module  = THIS_MODULE,
2209        .ctr     = snapshot_ctr,
2210        .dtr     = snapshot_dtr,
2211        .map     = snapshot_map,
2212        .end_io  = snapshot_end_io,
2213        .preresume  = snapshot_preresume,
2214        .resume  = snapshot_resume,
2215        .status  = snapshot_status,
2216        .iterate_devices = snapshot_iterate_devices,
2217};
2218
2219static struct target_type merge_target = {
2220        .name    = dm_snapshot_merge_target_name,
2221        .version = {1, 1, 0},
2222        .module  = THIS_MODULE,
2223        .ctr     = snapshot_ctr,
2224        .dtr     = snapshot_dtr,
2225        .map     = snapshot_merge_map,
2226        .end_io  = snapshot_end_io,
2227        .presuspend = snapshot_merge_presuspend,
2228        .preresume  = snapshot_preresume,
2229        .resume  = snapshot_merge_resume,
2230        .status  = snapshot_status,
2231        .iterate_devices = snapshot_iterate_devices,
2232};
2233
2234static int __init dm_snapshot_init(void)
2235{
2236        int r;
2237
2238        r = dm_exception_store_init();
2239        if (r) {
2240                DMERR("Failed to initialize exception stores");
2241                return r;
2242        }
2243
2244        r = dm_register_target(&snapshot_target);
2245        if (r < 0) {
2246                DMERR("snapshot target register failed %d", r);
2247                goto bad_register_snapshot_target;
2248        }
2249
2250        r = dm_register_target(&origin_target);
2251        if (r < 0) {
2252                DMERR("Origin target register failed %d", r);
2253                goto bad_register_origin_target;
2254        }
2255
2256        r = dm_register_target(&merge_target);
2257        if (r < 0) {
2258                DMERR("Merge target register failed %d", r);
2259                goto bad_register_merge_target;
2260        }
2261
2262        r = init_origin_hash();
2263        if (r) {
2264                DMERR("init_origin_hash failed.");
2265                goto bad_origin_hash;
2266        }
2267
2268        exception_cache = KMEM_CACHE(dm_exception, 0);
2269        if (!exception_cache) {
2270                DMERR("Couldn't create exception cache.");
2271                r = -ENOMEM;
2272                goto bad_exception_cache;
2273        }
2274
2275        pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
2276        if (!pending_cache) {
2277                DMERR("Couldn't create pending cache.");
2278                r = -ENOMEM;
2279                goto bad_pending_cache;
2280        }
2281
2282        tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
2283        if (!tracked_chunk_cache) {
2284                DMERR("Couldn't create cache to track chunks in use.");
2285                r = -ENOMEM;
2286                goto bad_tracked_chunk_cache;
2287        }
2288
2289        return 0;
2290
2291bad_tracked_chunk_cache:
2292        kmem_cache_destroy(pending_cache);
2293bad_pending_cache:
2294        kmem_cache_destroy(exception_cache);
2295bad_exception_cache:
2296        exit_origin_hash();
2297bad_origin_hash:
2298        dm_unregister_target(&merge_target);
2299bad_register_merge_target:
2300        dm_unregister_target(&origin_target);
2301bad_register_origin_target:
2302        dm_unregister_target(&snapshot_target);
2303bad_register_snapshot_target:
2304        dm_exception_store_exit();
2305
2306        return r;
2307}
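
/*
 * The bad_* labels above unwind in strict reverse order of
 * initialization, so a failure at any step releases exactly what was
 * set up before it.  For example, if the pending_cache allocation
 * fails, control enters at bad_pending_cache and destroys
 * exception_cache, tears down the origin hash, unregisters the three
 * targets and exits the exception store - everything except the cache
 * that failed.
 */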
2308
2309static void __exit dm_snapshot_exit(void)
2310{
2311        dm_unregister_target(&snapshot_target);
2312        dm_unregister_target(&origin_target);
2313        dm_unregister_target(&merge_target);
2314
2315        exit_origin_hash();
2316        kmem_cache_destroy(pending_cache);
2317        kmem_cache_destroy(exception_cache);
2318        kmem_cache_destroy(tracked_chunk_cache);
2319
2320        dm_exception_store_exit();
2321}
2322
2323/* Module hooks */
2324module_init(dm_snapshot_init);
2325module_exit(dm_snapshot_exit);
2326
2327MODULE_DESCRIPTION(DM_NAME " snapshot target");
2328MODULE_AUTHOR("Joe Thornber");
2329MODULE_LICENSE("GPL");
2330