linux/drivers/md/dm-snap.c
<<
>>
Prefs
   1/*
   2 * dm-snapshot.c
   3 *
   4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include <linux/blkdev.h>
  10#include <linux/device-mapper.h>
  11#include <linux/delay.h>
  12#include <linux/fs.h>
  13#include <linux/init.h>
  14#include <linux/kdev_t.h>
  15#include <linux/list.h>
  16#include <linux/mempool.h>
  17#include <linux/module.h>
  18#include <linux/slab.h>
  19#include <linux/vmalloc.h>
  20#include <linux/log2.h>
  21#include <linux/dm-kcopyd.h>
  22
  23#include "dm.h"
  24
  25#include "dm-exception-store.h"
  26
  27#define DM_MSG_PREFIX "snapshots"
  28
  29static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
  30
  31#define dm_target_is_snapshot_merge(ti) \
  32        ((ti)->type->name == dm_snapshot_merge_target_name)
  33
  34/*
  35 * The size of the mempool used to track chunks in use.
  36 */
  37#define MIN_IOS 256
  38
  39#define DM_TRACKED_CHUNK_HASH_SIZE      16
  40#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
  41                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))
  42
  43struct dm_exception_table {
  44        uint32_t hash_mask;
  45        unsigned hash_shift;
  46        struct list_head *table;
  47};
  48
  49struct dm_snapshot {
  50        struct mutex lock;
  51
  52        struct dm_dev *origin;
  53        struct dm_dev *cow;
  54
  55        struct dm_target *ti;
  56
  57        /* List of snapshots per Origin */
  58        struct list_head list;
  59
  60        /*
  61         * You can't use a snapshot if this is 0 (e.g. if full).
  62         * A snapshot-merge target never clears this.
  63         */
  64        int valid;
  65
  66        /*
  67         * The snapshot overflowed because of a write to the snapshot device.
  68         * We don't have to invalidate the snapshot in this case, but we need
  69         * to prevent further writes.
  70         */
  71        int snapshot_overflowed;
  72
  73        /* Origin writes don't trigger exceptions until this is set */
  74        int active;
  75
  76        atomic_t pending_exceptions_count;
  77
  78        /* Protected by "lock" */
  79        sector_t exception_start_sequence;
  80
  81        /* Protected by kcopyd single-threaded callback */
  82        sector_t exception_complete_sequence;
  83
  84        /*
  85         * A list of pending exceptions that completed out of order.
  86         * Protected by kcopyd single-threaded callback.
  87         */
  88        struct rb_root out_of_order_tree;
  89
  90        mempool_t pending_pool;
  91
  92        struct dm_exception_table pending;
  93        struct dm_exception_table complete;
  94
  95        /*
  96         * pe_lock protects all pending_exception operations and access
  97         * as well as the snapshot_bios list.
  98         */
  99        spinlock_t pe_lock;
 100
 101        /* Chunks with outstanding reads */
 102        spinlock_t tracked_chunk_lock;
 103        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
 104
 105        /* The on disk metadata handler */
 106        struct dm_exception_store *store;
 107
 108        struct dm_kcopyd_client *kcopyd_client;
 109
 110        /* Wait for events based on state_bits */
 111        unsigned long state_bits;
 112
 113        /* Range of chunks currently being merged. */
 114        chunk_t first_merging_chunk;
 115        int num_merging_chunks;
 116
 117        /*
 118         * The merge operation failed if this flag is set.
 119         * Failure modes are handled as follows:
 120         * - I/O error reading the header
 121         *      => don't load the target; abort.
 122         * - Header does not have "valid" flag set
 123         *      => use the origin; forget about the snapshot.
 124         * - I/O error when reading exceptions
 125         *      => don't load the target; abort.
 126         *         (We can't use the intermediate origin state.)
 127         * - I/O error while merging
 128         *      => stop merging; set merge_failed; process I/O normally.
 129         */
 130        int merge_failed;
 131
 132        /*
 133         * Incoming bios that overlap with chunks being merged must wait
 134         * for them to be committed.
 135         */
 136        struct bio_list bios_queued_during_merge;
 137};
 138
 139/*
 140 * state_bits:
 141 *   RUNNING_MERGE  - Merge operation is in progress.
 142 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 143 *                    cleared afterwards.
 144 */
 145#define RUNNING_MERGE          0
 146#define SHUTDOWN_MERGE         1
 147
 148DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
 149                "A percentage of time allocated for copy on write");
 150
 151struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
 152{
 153        return s->origin;
 154}
 155EXPORT_SYMBOL(dm_snap_origin);
 156
 157struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
 158{
 159        return s->cow;
 160}
 161EXPORT_SYMBOL(dm_snap_cow);
 162
 163static sector_t chunk_to_sector(struct dm_exception_store *store,
 164                                chunk_t chunk)
 165{
 166        return chunk << store->chunk_shift;
 167}
 168
 169static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
 170{
 171        /*
 172         * There is only ever one instance of a particular block
 173         * device so we can compare pointers safely.
 174         */
 175        return lhs == rhs;
 176}
 177
 178struct dm_snap_pending_exception {
 179        struct dm_exception e;
 180
 181        /*
 182         * Origin buffers waiting for this to complete are held
 183         * in a bio list
 184         */
 185        struct bio_list origin_bios;
 186        struct bio_list snapshot_bios;
 187
 188        /* Pointer back to snapshot context */
 189        struct dm_snapshot *snap;
 190
 191        /*
 192         * 1 indicates the exception has already been sent to
 193         * kcopyd.
 194         */
 195        int started;
 196
 197        /* There was copying error. */
 198        int copy_error;
 199
 200        /* A sequence number, it is used for in-order completion. */
 201        sector_t exception_sequence;
 202
 203        struct rb_node out_of_order_node;
 204
 205        /*
 206         * For writing a complete chunk, bypassing the copy.
 207         */
 208        struct bio *full_bio;
 209        bio_end_io_t *full_bio_end_io;
 210};
 211
 212/*
 213 * Hash table mapping origin volumes to lists of snapshots and
 214 * a lock to protect it
 215 */
 216static struct kmem_cache *exception_cache;
 217static struct kmem_cache *pending_cache;
 218
 219struct dm_snap_tracked_chunk {
 220        struct hlist_node node;
 221        chunk_t chunk;
 222};
 223
 224static void init_tracked_chunk(struct bio *bio)
 225{
 226        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
 227        INIT_HLIST_NODE(&c->node);
 228}
 229
 230static bool is_bio_tracked(struct bio *bio)
 231{
 232        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
 233        return !hlist_unhashed(&c->node);
 234}
 235
 236static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
 237{
 238        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
 239
 240        c->chunk = chunk;
 241
 242        spin_lock_irq(&s->tracked_chunk_lock);
 243        hlist_add_head(&c->node,
 244                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
 245        spin_unlock_irq(&s->tracked_chunk_lock);
 246}
 247
 248static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
 249{
 250        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
 251        unsigned long flags;
 252
 253        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
 254        hlist_del(&c->node);
 255        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
 256}
 257
 258static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
 259{
 260        struct dm_snap_tracked_chunk *c;
 261        int found = 0;
 262
 263        spin_lock_irq(&s->tracked_chunk_lock);
 264
 265        hlist_for_each_entry(c,
 266            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
 267                if (c->chunk == chunk) {
 268                        found = 1;
 269                        break;
 270                }
 271        }
 272
 273        spin_unlock_irq(&s->tracked_chunk_lock);
 274
 275        return found;
 276}
 277
 278/*
 279 * This conflicting I/O is extremely improbable in the caller,
 280 * so msleep(1) is sufficient and there is no need for a wait queue.
 281 */
 282static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
 283{
 284        while (__chunk_is_tracked(s, chunk))
 285                msleep(1);
 286}
 287
 288/*
 289 * One of these per registered origin, held in the snapshot_origins hash
 290 */
 291struct origin {
 292        /* The origin device */
 293        struct block_device *bdev;
 294
 295        struct list_head hash_list;
 296
 297        /* List of snapshots for this origin */
 298        struct list_head snapshots;
 299};
 300
 301/*
 302 * This structure is allocated for each origin target
 303 */
 304struct dm_origin {
 305        struct dm_dev *dev;
 306        struct dm_target *ti;
 307        unsigned split_boundary;
 308        struct list_head hash_list;
 309};
 310
 311/*
 312 * Size of the hash table for origin volumes. If we make this
 313 * the size of the minors list then it should be nearly perfect
 314 */
 315#define ORIGIN_HASH_SIZE 256
 316#define ORIGIN_MASK      0xFF
 317static struct list_head *_origins;
 318static struct list_head *_dm_origins;
 319static struct rw_semaphore _origins_lock;
 320
 321static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
 322static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
 323static uint64_t _pending_exceptions_done_count;
 324
 325static int init_origin_hash(void)
 326{
 327        int i;
 328
 329        _origins = kmalloc_array(ORIGIN_HASH_SIZE, sizeof(struct list_head),
 330                                 GFP_KERNEL);
 331        if (!_origins) {
 332                DMERR("unable to allocate memory for _origins");
 333                return -ENOMEM;
 334        }
 335        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
 336                INIT_LIST_HEAD(_origins + i);
 337
 338        _dm_origins = kmalloc_array(ORIGIN_HASH_SIZE,
 339                                    sizeof(struct list_head),
 340                                    GFP_KERNEL);
 341        if (!_dm_origins) {
 342                DMERR("unable to allocate memory for _dm_origins");
 343                kfree(_origins);
 344                return -ENOMEM;
 345        }
 346        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
 347                INIT_LIST_HEAD(_dm_origins + i);
 348
 349        init_rwsem(&_origins_lock);
 350
 351        return 0;
 352}
 353
 354static void exit_origin_hash(void)
 355{
 356        kfree(_origins);
 357        kfree(_dm_origins);
 358}
 359
 360static unsigned origin_hash(struct block_device *bdev)
 361{
 362        return bdev->bd_dev & ORIGIN_MASK;
 363}
 364
 365static struct origin *__lookup_origin(struct block_device *origin)
 366{
 367        struct list_head *ol;
 368        struct origin *o;
 369
 370        ol = &_origins[origin_hash(origin)];
 371        list_for_each_entry (o, ol, hash_list)
 372                if (bdev_equal(o->bdev, origin))
 373                        return o;
 374
 375        return NULL;
 376}
 377
 378static void __insert_origin(struct origin *o)
 379{
 380        struct list_head *sl = &_origins[origin_hash(o->bdev)];
 381        list_add_tail(&o->hash_list, sl);
 382}
 383
 384static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
 385{
 386        struct list_head *ol;
 387        struct dm_origin *o;
 388
 389        ol = &_dm_origins[origin_hash(origin)];
 390        list_for_each_entry (o, ol, hash_list)
 391                if (bdev_equal(o->dev->bdev, origin))
 392                        return o;
 393
 394        return NULL;
 395}
 396
 397static void __insert_dm_origin(struct dm_origin *o)
 398{
 399        struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
 400        list_add_tail(&o->hash_list, sl);
 401}
 402
 403static void __remove_dm_origin(struct dm_origin *o)
 404{
 405        list_del(&o->hash_list);
 406}
 407
 408/*
 409 * _origins_lock must be held when calling this function.
 410 * Returns number of snapshots registered using the supplied cow device, plus:
 411 * snap_src - a snapshot suitable for use as a source of exception handover
 412 * snap_dest - a snapshot capable of receiving exception handover.
 413 * snap_merge - an existing snapshot-merge target linked to the same origin.
 414 *   There can be at most one snapshot-merge target. The parameter is optional.
 415 *
 416 * Possible return values and states of snap_src and snap_dest.
 417 *   0: NULL, NULL  - first new snapshot
 418 *   1: snap_src, NULL - normal snapshot
 419 *   2: snap_src, snap_dest  - waiting for handover
 420 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 421 *   1: NULL, snap_dest - source got destroyed without handover
 422 */
 423static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
 424                                        struct dm_snapshot **snap_src,
 425                                        struct dm_snapshot **snap_dest,
 426                                        struct dm_snapshot **snap_merge)
 427{
 428        struct dm_snapshot *s;
 429        struct origin *o;
 430        int count = 0;
 431        int active;
 432
 433        o = __lookup_origin(snap->origin->bdev);
 434        if (!o)
 435                goto out;
 436
 437        list_for_each_entry(s, &o->snapshots, list) {
 438                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
 439                        *snap_merge = s;
 440                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
 441                        continue;
 442
 443                mutex_lock(&s->lock);
 444                active = s->active;
 445                mutex_unlock(&s->lock);
 446
 447                if (active) {
 448                        if (snap_src)
 449                                *snap_src = s;
 450                } else if (snap_dest)
 451                        *snap_dest = s;
 452
 453                count++;
 454        }
 455
 456out:
 457        return count;
 458}
 459
 460/*
 461 * On success, returns 1 if this snapshot is a handover destination,
 462 * otherwise returns 0.
 463 */
 464static int __validate_exception_handover(struct dm_snapshot *snap)
 465{
 466        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
 467        struct dm_snapshot *snap_merge = NULL;
 468
 469        /* Does snapshot need exceptions handed over to it? */
 470        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
 471                                          &snap_merge) == 2) ||
 472            snap_dest) {
 473                snap->ti->error = "Snapshot cow pairing for exception "
 474                                  "table handover failed";
 475                return -EINVAL;
 476        }
 477
 478        /*
 479         * If no snap_src was found, snap cannot become a handover
 480         * destination.
 481         */
 482        if (!snap_src)
 483                return 0;
 484
 485        /*
 486         * Non-snapshot-merge handover?
 487         */
 488        if (!dm_target_is_snapshot_merge(snap->ti))
 489                return 1;
 490
 491        /*
 492         * Do not allow more than one merging snapshot.
 493         */
 494        if (snap_merge) {
 495                snap->ti->error = "A snapshot is already merging.";
 496                return -EINVAL;
 497        }
 498
 499        if (!snap_src->store->type->prepare_merge ||
 500            !snap_src->store->type->commit_merge) {
 501                snap->ti->error = "Snapshot exception store does not "
 502                                  "support snapshot-merge.";
 503                return -EINVAL;
 504        }
 505
 506        return 1;
 507}
 508
 509static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
 510{
 511        struct dm_snapshot *l;
 512
 513        /* Sort the list according to chunk size, largest-first smallest-last */
 514        list_for_each_entry(l, &o->snapshots, list)
 515                if (l->store->chunk_size < s->store->chunk_size)
 516                        break;
 517        list_add_tail(&s->list, &l->list);
 518}
 519
 520/*
 521 * Make a note of the snapshot and its origin so we can look it
 522 * up when the origin has a write on it.
 523 *
 524 * Also validate snapshot exception store handovers.
 525 * On success, returns 1 if this registration is a handover destination,
 526 * otherwise returns 0.
 527 */
 528static int register_snapshot(struct dm_snapshot *snap)
 529{
 530        struct origin *o, *new_o = NULL;
 531        struct block_device *bdev = snap->origin->bdev;
 532        int r = 0;
 533
 534        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
 535        if (!new_o)
 536                return -ENOMEM;
 537
 538        down_write(&_origins_lock);
 539
 540        r = __validate_exception_handover(snap);
 541        if (r < 0) {
 542                kfree(new_o);
 543                goto out;
 544        }
 545
 546        o = __lookup_origin(bdev);
 547        if (o)
 548                kfree(new_o);
 549        else {
 550                /* New origin */
 551                o = new_o;
 552
 553                /* Initialise the struct */
 554                INIT_LIST_HEAD(&o->snapshots);
 555                o->bdev = bdev;
 556
 557                __insert_origin(o);
 558        }
 559
 560        __insert_snapshot(o, snap);
 561
 562out:
 563        up_write(&_origins_lock);
 564
 565        return r;
 566}
 567
 568/*
 569 * Move snapshot to correct place in list according to chunk size.
 570 */
 571static void reregister_snapshot(struct dm_snapshot *s)
 572{
 573        struct block_device *bdev = s->origin->bdev;
 574
 575        down_write(&_origins_lock);
 576
 577        list_del(&s->list);
 578        __insert_snapshot(__lookup_origin(bdev), s);
 579
 580        up_write(&_origins_lock);
 581}
 582
 583static void unregister_snapshot(struct dm_snapshot *s)
 584{
 585        struct origin *o;
 586
 587        down_write(&_origins_lock);
 588        o = __lookup_origin(s->origin->bdev);
 589
 590        list_del(&s->list);
 591        if (o && list_empty(&o->snapshots)) {
 592                list_del(&o->hash_list);
 593                kfree(o);
 594        }
 595
 596        up_write(&_origins_lock);
 597}
 598
 599/*
 600 * Implementation of the exception hash tables.
 601 * The lowest hash_shift bits of the chunk number are ignored, allowing
 602 * some consecutive chunks to be grouped together.
 603 */
 604static int dm_exception_table_init(struct dm_exception_table *et,
 605                                   uint32_t size, unsigned hash_shift)
 606{
 607        unsigned int i;
 608
 609        et->hash_shift = hash_shift;
 610        et->hash_mask = size - 1;
 611        et->table = dm_vcalloc(size, sizeof(struct list_head));
 612        if (!et->table)
 613                return -ENOMEM;
 614
 615        for (i = 0; i < size; i++)
 616                INIT_LIST_HEAD(et->table + i);
 617
 618        return 0;
 619}
 620
 621static void dm_exception_table_exit(struct dm_exception_table *et,
 622                                    struct kmem_cache *mem)
 623{
 624        struct list_head *slot;
 625        struct dm_exception *ex, *next;
 626        int i, size;
 627
 628        size = et->hash_mask + 1;
 629        for (i = 0; i < size; i++) {
 630                slot = et->table + i;
 631
 632                list_for_each_entry_safe (ex, next, slot, hash_list)
 633                        kmem_cache_free(mem, ex);
 634        }
 635
 636        vfree(et->table);
 637}
 638
 639static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
 640{
 641        return (chunk >> et->hash_shift) & et->hash_mask;
 642}
 643
 644static void dm_remove_exception(struct dm_exception *e)
 645{
 646        list_del(&e->hash_list);
 647}
 648
 649/*
 650 * Return the exception data for a sector, or NULL if not
 651 * remapped.
 652 */
 653static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
 654                                                chunk_t chunk)
 655{
 656        struct list_head *slot;
 657        struct dm_exception *e;
 658
 659        slot = &et->table[exception_hash(et, chunk)];
 660        list_for_each_entry (e, slot, hash_list)
 661                if (chunk >= e->old_chunk &&
 662                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
 663                        return e;
 664
 665        return NULL;
 666}
 667
 668static struct dm_exception *alloc_completed_exception(gfp_t gfp)
 669{
 670        struct dm_exception *e;
 671
 672        e = kmem_cache_alloc(exception_cache, gfp);
 673        if (!e && gfp == GFP_NOIO)
 674                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
 675
 676        return e;
 677}
 678
 679static void free_completed_exception(struct dm_exception *e)
 680{
 681        kmem_cache_free(exception_cache, e);
 682}
 683
 684static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
 685{
 686        struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
 687                                                             GFP_NOIO);
 688
 689        atomic_inc(&s->pending_exceptions_count);
 690        pe->snap = s;
 691
 692        return pe;
 693}
 694
 695static void free_pending_exception(struct dm_snap_pending_exception *pe)
 696{
 697        struct dm_snapshot *s = pe->snap;
 698
 699        mempool_free(pe, &s->pending_pool);
 700        smp_mb__before_atomic();
 701        atomic_dec(&s->pending_exceptions_count);
 702}
 703
 704static void dm_insert_exception(struct dm_exception_table *eh,
 705                                struct dm_exception *new_e)
 706{
 707        struct list_head *l;
 708        struct dm_exception *e = NULL;
 709
 710        l = &eh->table[exception_hash(eh, new_e->old_chunk)];
 711
 712        /* Add immediately if this table doesn't support consecutive chunks */
 713        if (!eh->hash_shift)
 714                goto out;
 715
 716        /* List is ordered by old_chunk */
 717        list_for_each_entry_reverse(e, l, hash_list) {
 718                /* Insert after an existing chunk? */
 719                if (new_e->old_chunk == (e->old_chunk +
 720                                         dm_consecutive_chunk_count(e) + 1) &&
 721                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
 722                                         dm_consecutive_chunk_count(e) + 1)) {
 723                        dm_consecutive_chunk_count_inc(e);
 724                        free_completed_exception(new_e);
 725                        return;
 726                }
 727
 728                /* Insert before an existing chunk? */
 729                if (new_e->old_chunk == (e->old_chunk - 1) &&
 730                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
 731                        dm_consecutive_chunk_count_inc(e);
 732                        e->old_chunk--;
 733                        e->new_chunk--;
 734                        free_completed_exception(new_e);
 735                        return;
 736                }
 737
 738                if (new_e->old_chunk > e->old_chunk)
 739                        break;
 740        }
 741
 742out:
 743        list_add(&new_e->hash_list, e ? &e->hash_list : l);
 744}
 745
 746/*
 747 * Callback used by the exception stores to load exceptions when
 748 * initialising.
 749 */
 750static int dm_add_exception(void *context, chunk_t old, chunk_t new)
 751{
 752        struct dm_snapshot *s = context;
 753        struct dm_exception *e;
 754
 755        e = alloc_completed_exception(GFP_KERNEL);
 756        if (!e)
 757                return -ENOMEM;
 758
 759        e->old_chunk = old;
 760
 761        /* Consecutive_count is implicitly initialised to zero */
 762        e->new_chunk = new;
 763
 764        dm_insert_exception(&s->complete, e);
 765
 766        return 0;
 767}
 768
 769/*
 770 * Return a minimum chunk size of all snapshots that have the specified origin.
 771 * Return zero if the origin has no snapshots.
 772 */
 773static uint32_t __minimum_chunk_size(struct origin *o)
 774{
 775        struct dm_snapshot *snap;
 776        unsigned chunk_size = 0;
 777
 778        if (o)
 779                list_for_each_entry(snap, &o->snapshots, list)
 780                        chunk_size = min_not_zero(chunk_size,
 781                                                  snap->store->chunk_size);
 782
 783        return (uint32_t) chunk_size;
 784}
 785
 786/*
 787 * Hard coded magic.
 788 */
 789static int calc_max_buckets(void)
 790{
 791        /* use a fixed size of 2MB */
 792        unsigned long mem = 2 * 1024 * 1024;
 793        mem /= sizeof(struct list_head);
 794
 795        return mem;
 796}
 797
 798/*
 799 * Allocate room for a suitable hash table.
 800 */
 801static int init_hash_tables(struct dm_snapshot *s)
 802{
 803        sector_t hash_size, cow_dev_size, max_buckets;
 804
 805        /*
 806         * Calculate based on the size of the original volume or
 807         * the COW volume...
 808         */
 809        cow_dev_size = get_dev_size(s->cow->bdev);
 810        max_buckets = calc_max_buckets();
 811
 812        hash_size = cow_dev_size >> s->store->chunk_shift;
 813        hash_size = min(hash_size, max_buckets);
 814
 815        if (hash_size < 64)
 816                hash_size = 64;
 817        hash_size = rounddown_pow_of_two(hash_size);
 818        if (dm_exception_table_init(&s->complete, hash_size,
 819                                    DM_CHUNK_CONSECUTIVE_BITS))
 820                return -ENOMEM;
 821
 822        /*
 823         * Allocate hash table for in-flight exceptions
 824         * Make this smaller than the real hash table
 825         */
 826        hash_size >>= 3;
 827        if (hash_size < 64)
 828                hash_size = 64;
 829
 830        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
 831                dm_exception_table_exit(&s->complete, exception_cache);
 832                return -ENOMEM;
 833        }
 834
 835        return 0;
 836}
 837
 838static void merge_shutdown(struct dm_snapshot *s)
 839{
 840        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
 841        smp_mb__after_atomic();
 842        wake_up_bit(&s->state_bits, RUNNING_MERGE);
 843}
 844
 845static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
 846{
 847        s->first_merging_chunk = 0;
 848        s->num_merging_chunks = 0;
 849
 850        return bio_list_get(&s->bios_queued_during_merge);
 851}
 852
 853/*
 854 * Remove one chunk from the index of completed exceptions.
 855 */
 856static int __remove_single_exception_chunk(struct dm_snapshot *s,
 857                                           chunk_t old_chunk)
 858{
 859        struct dm_exception *e;
 860
 861        e = dm_lookup_exception(&s->complete, old_chunk);
 862        if (!e) {
 863                DMERR("Corruption detected: exception for block %llu is "
 864                      "on disk but not in memory",
 865                      (unsigned long long)old_chunk);
 866                return -EINVAL;
 867        }
 868
 869        /*
 870         * If this is the only chunk using this exception, remove exception.
 871         */
 872        if (!dm_consecutive_chunk_count(e)) {
 873                dm_remove_exception(e);
 874                free_completed_exception(e);
 875                return 0;
 876        }
 877
 878        /*
 879         * The chunk may be either at the beginning or the end of a
 880         * group of consecutive chunks - never in the middle.  We are
 881         * removing chunks in the opposite order to that in which they
 882         * were added, so this should always be true.
 883         * Decrement the consecutive chunk counter and adjust the
 884         * starting point if necessary.
 885         */
 886        if (old_chunk == e->old_chunk) {
 887                e->old_chunk++;
 888                e->new_chunk++;
 889        } else if (old_chunk != e->old_chunk +
 890                   dm_consecutive_chunk_count(e)) {
 891                DMERR("Attempt to merge block %llu from the "
 892                      "middle of a chunk range [%llu - %llu]",
 893                      (unsigned long long)old_chunk,
 894                      (unsigned long long)e->old_chunk,
 895                      (unsigned long long)
 896                      e->old_chunk + dm_consecutive_chunk_count(e));
 897                return -EINVAL;
 898        }
 899
 900        dm_consecutive_chunk_count_dec(e);
 901
 902        return 0;
 903}
 904
 905static void flush_bios(struct bio *bio);
 906
 907static int remove_single_exception_chunk(struct dm_snapshot *s)
 908{
 909        struct bio *b = NULL;
 910        int r;
 911        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
 912
 913        mutex_lock(&s->lock);
 914
 915        /*
 916         * Process chunks (and associated exceptions) in reverse order
 917         * so that dm_consecutive_chunk_count_dec() accounting works.
 918         */
 919        do {
 920                r = __remove_single_exception_chunk(s, old_chunk);
 921                if (r)
 922                        goto out;
 923        } while (old_chunk-- > s->first_merging_chunk);
 924
 925        b = __release_queued_bios_after_merge(s);
 926
 927out:
 928        mutex_unlock(&s->lock);
 929        if (b)
 930                flush_bios(b);
 931
 932        return r;
 933}
 934
 935static int origin_write_extent(struct dm_snapshot *merging_snap,
 936                               sector_t sector, unsigned chunk_size);
 937
 938static void merge_callback(int read_err, unsigned long write_err,
 939                           void *context);
 940
 941static uint64_t read_pending_exceptions_done_count(void)
 942{
 943        uint64_t pending_exceptions_done;
 944
 945        spin_lock(&_pending_exceptions_done_spinlock);
 946        pending_exceptions_done = _pending_exceptions_done_count;
 947        spin_unlock(&_pending_exceptions_done_spinlock);
 948
 949        return pending_exceptions_done;
 950}
 951
 952static void increment_pending_exceptions_done_count(void)
 953{
 954        spin_lock(&_pending_exceptions_done_spinlock);
 955        _pending_exceptions_done_count++;
 956        spin_unlock(&_pending_exceptions_done_spinlock);
 957
 958        wake_up_all(&_pending_exceptions_done);
 959}
 960
 961static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 962{
 963        int i, linear_chunks;
 964        chunk_t old_chunk, new_chunk;
 965        struct dm_io_region src, dest;
 966        sector_t io_size;
 967        uint64_t previous_count;
 968
 969        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
 970        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
 971                goto shut;
 972
 973        /*
 974         * valid flag never changes during merge, so no lock required.
 975         */
 976        if (!s->valid) {
 977                DMERR("Snapshot is invalid: can't merge");
 978                goto shut;
 979        }
 980
 981        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
 982                                                      &new_chunk);
 983        if (linear_chunks <= 0) {
 984                if (linear_chunks < 0) {
 985                        DMERR("Read error in exception store: "
 986                              "shutting down merge");
 987                        mutex_lock(&s->lock);
 988                        s->merge_failed = 1;
 989                        mutex_unlock(&s->lock);
 990                }
 991                goto shut;
 992        }
 993
 994        /* Adjust old_chunk and new_chunk to reflect start of linear region */
 995        old_chunk = old_chunk + 1 - linear_chunks;
 996        new_chunk = new_chunk + 1 - linear_chunks;
 997
 998        /*
 999         * Use one (potentially large) I/O to copy all 'linear_chunks'
1000         * from the exception store to the origin
1001         */
1002        io_size = linear_chunks * s->store->chunk_size;
1003
1004        dest.bdev = s->origin->bdev;
1005        dest.sector = chunk_to_sector(s->store, old_chunk);
1006        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
1007
1008        src.bdev = s->cow->bdev;
1009        src.sector = chunk_to_sector(s->store, new_chunk);
1010        src.count = dest.count;
1011
1012        /*
1013         * Reallocate any exceptions needed in other snapshots then
1014         * wait for the pending exceptions to complete.
1015         * Each time any pending exception (globally on the system)
1016         * completes we are woken and repeat the process to find out
1017         * if we can proceed.  While this may not seem a particularly
1018         * efficient algorithm, it is not expected to have any
1019         * significant impact on performance.
1020         */
1021        previous_count = read_pending_exceptions_done_count();
1022        while (origin_write_extent(s, dest.sector, io_size)) {
1023                wait_event(_pending_exceptions_done,
1024                           (read_pending_exceptions_done_count() !=
1025                            previous_count));
1026                /* Retry after the wait, until all exceptions are done. */
1027                previous_count = read_pending_exceptions_done_count();
1028        }
1029
1030        mutex_lock(&s->lock);
1031        s->first_merging_chunk = old_chunk;
1032        s->num_merging_chunks = linear_chunks;
1033        mutex_unlock(&s->lock);
1034
1035        /* Wait until writes to all 'linear_chunks' drain */
1036        for (i = 0; i < linear_chunks; i++)
1037                __check_for_conflicting_io(s, old_chunk + i);
1038
1039        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
1040        return;
1041
1042shut:
1043        merge_shutdown(s);
1044}
1045
1046static void error_bios(struct bio *bio);
1047
1048static void merge_callback(int read_err, unsigned long write_err, void *context)
1049{
1050        struct dm_snapshot *s = context;
1051        struct bio *b = NULL;
1052
1053        if (read_err || write_err) {
1054                if (read_err)
1055                        DMERR("Read error: shutting down merge.");
1056                else
1057                        DMERR("Write error: shutting down merge.");
1058                goto shut;
1059        }
1060
1061        if (s->store->type->commit_merge(s->store,
1062                                         s->num_merging_chunks) < 0) {
1063                DMERR("Write error in exception store: shutting down merge");
1064                goto shut;
1065        }
1066
1067        if (remove_single_exception_chunk(s) < 0)
1068                goto shut;
1069
1070        snapshot_merge_next_chunks(s);
1071
1072        return;
1073
1074shut:
1075        mutex_lock(&s->lock);
1076        s->merge_failed = 1;
1077        b = __release_queued_bios_after_merge(s);
1078        mutex_unlock(&s->lock);
1079        error_bios(b);
1080
1081        merge_shutdown(s);
1082}
1083
1084static void start_merge(struct dm_snapshot *s)
1085{
1086        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
1087                snapshot_merge_next_chunks(s);
1088}
1089
1090/*
1091 * Stop the merging process and wait until it finishes.
1092 */
1093static void stop_merge(struct dm_snapshot *s)
1094{
1095        set_bit(SHUTDOWN_MERGE, &s->state_bits);
1096        wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
1097        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
1098}
1099
1100/*
1101 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
1102 */
1103static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1104{
1105        struct dm_snapshot *s;
1106        int i;
1107        int r = -EINVAL;
1108        char *origin_path, *cow_path;
1109        dev_t origin_dev, cow_dev;
1110        unsigned args_used, num_flush_bios = 1;
1111        fmode_t origin_mode = FMODE_READ;
1112
1113        if (argc != 4) {
1114                ti->error = "requires exactly 4 arguments";
1115                r = -EINVAL;
1116                goto bad;
1117        }
1118
1119        if (dm_target_is_snapshot_merge(ti)) {
1120                num_flush_bios = 2;
1121                origin_mode = FMODE_WRITE;
1122        }
1123
1124        s = kzalloc(sizeof(*s), GFP_KERNEL);
1125        if (!s) {
1126                ti->error = "Cannot allocate private snapshot structure";
1127                r = -ENOMEM;
1128                goto bad;
1129        }
1130
1131        origin_path = argv[0];
1132        argv++;
1133        argc--;
1134
1135        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
1136        if (r) {
1137                ti->error = "Cannot get origin device";
1138                goto bad_origin;
1139        }
1140        origin_dev = s->origin->bdev->bd_dev;
1141
1142        cow_path = argv[0];
1143        argv++;
1144        argc--;
1145
1146        cow_dev = dm_get_dev_t(cow_path);
1147        if (cow_dev && cow_dev == origin_dev) {
1148                ti->error = "COW device cannot be the same as origin device";
1149                r = -EINVAL;
1150                goto bad_cow;
1151        }
1152
1153        r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
1154        if (r) {
1155                ti->error = "Cannot get COW device";
1156                goto bad_cow;
1157        }
1158
1159        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
1160        if (r) {
1161                ti->error = "Couldn't create exception store";
1162                r = -EINVAL;
1163                goto bad_store;
1164        }
1165
1166        argv += args_used;
1167        argc -= args_used;
1168
1169        s->ti = ti;
1170        s->valid = 1;
1171        s->snapshot_overflowed = 0;
1172        s->active = 0;
1173        atomic_set(&s->pending_exceptions_count, 0);
1174        s->exception_start_sequence = 0;
1175        s->exception_complete_sequence = 0;
1176        s->out_of_order_tree = RB_ROOT;
1177        mutex_init(&s->lock);
1178        INIT_LIST_HEAD(&s->list);
1179        spin_lock_init(&s->pe_lock);
1180        s->state_bits = 0;
1181        s->merge_failed = 0;
1182        s->first_merging_chunk = 0;
1183        s->num_merging_chunks = 0;
1184        bio_list_init(&s->bios_queued_during_merge);
1185
1186        /* Allocate hash table for COW data */
1187        if (init_hash_tables(s)) {
1188                ti->error = "Unable to allocate hash table space";
1189                r = -ENOMEM;
1190                goto bad_hash_tables;
1191        }
1192
1193        s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1194        if (IS_ERR(s->kcopyd_client)) {
1195                r = PTR_ERR(s->kcopyd_client);
1196                ti->error = "Could not create kcopyd client";
1197                goto bad_kcopyd;
1198        }
1199
1200        r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache);
1201        if (r) {
1202                ti->error = "Could not allocate mempool for pending exceptions";
1203                goto bad_pending_pool;
1204        }
1205
1206        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1207                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
1208
1209        spin_lock_init(&s->tracked_chunk_lock);
1210
1211        ti->private = s;
1212        ti->num_flush_bios = num_flush_bios;
1213        ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
1214
1215        /* Add snapshot to the list of snapshots for this origin */
1216        /* Exceptions aren't triggered till snapshot_resume() is called */
1217        r = register_snapshot(s);
1218        if (r == -ENOMEM) {
1219                ti->error = "Snapshot origin struct allocation failed";
1220                goto bad_load_and_register;
1221        } else if (r < 0) {
1222                /* invalid handover, register_snapshot has set ti->error */
1223                goto bad_load_and_register;
1224        }
1225
1226        /*
1227         * Metadata must only be loaded into one table at once, so skip this
1228         * if metadata will be handed over during resume.
1229         * Chunk size will be set during the handover - set it to zero to
1230         * ensure it's ignored.
1231         */
1232        if (r > 0) {
1233                s->store->chunk_size = 0;
1234                return 0;
1235        }
1236
1237        r = s->store->type->read_metadata(s->store, dm_add_exception,
1238                                          (void *)s);
1239        if (r < 0) {
1240                ti->error = "Failed to read snapshot metadata";
1241                goto bad_read_metadata;
1242        } else if (r > 0) {
1243                s->valid = 0;
1244                DMWARN("Snapshot is marked invalid.");
1245        }
1246
1247        if (!s->store->chunk_size) {
1248                ti->error = "Chunk size not set";
1249                goto bad_read_metadata;
1250        }
1251
1252        r = dm_set_target_max_io_len(ti, s->store->chunk_size);
1253        if (r)
1254                goto bad_read_metadata;
1255
1256        return 0;
1257
1258bad_read_metadata:
1259        unregister_snapshot(s);
1260
1261bad_load_and_register:
1262        mempool_exit(&s->pending_pool);
1263
1264bad_pending_pool:
1265        dm_kcopyd_client_destroy(s->kcopyd_client);
1266
1267bad_kcopyd:
1268        dm_exception_table_exit(&s->pending, pending_cache);
1269        dm_exception_table_exit(&s->complete, exception_cache);
1270
1271bad_hash_tables:
1272        dm_exception_store_destroy(s->store);
1273
1274bad_store:
1275        dm_put_device(ti, s->cow);
1276
1277bad_cow:
1278        dm_put_device(ti, s->origin);
1279
1280bad_origin:
1281        kfree(s);
1282
1283bad:
1284        return r;
1285}
1286
1287static void __free_exceptions(struct dm_snapshot *s)
1288{
1289        dm_kcopyd_client_destroy(s->kcopyd_client);
1290        s->kcopyd_client = NULL;
1291
1292        dm_exception_table_exit(&s->pending, pending_cache);
1293        dm_exception_table_exit(&s->complete, exception_cache);
1294}
1295
1296static void __handover_exceptions(struct dm_snapshot *snap_src,
1297                                  struct dm_snapshot *snap_dest)
1298{
1299        union {
1300                struct dm_exception_table table_swap;
1301                struct dm_exception_store *store_swap;
1302        } u;
1303
1304        /*
1305         * Swap all snapshot context information between the two instances.
1306         */
1307        u.table_swap = snap_dest->complete;
1308        snap_dest->complete = snap_src->complete;
1309        snap_src->complete = u.table_swap;
1310
1311        u.store_swap = snap_dest->store;
1312        snap_dest->store = snap_src->store;
1313        snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
1314        snap_src->store = u.store_swap;
1315
1316        snap_dest->store->snap = snap_dest;
1317        snap_src->store->snap = snap_src;
1318
1319        snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
1320        snap_dest->valid = snap_src->valid;
1321        snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
1322
1323        /*
1324         * Set source invalid to ensure it receives no further I/O.
1325         */
1326        snap_src->valid = 0;
1327}
1328
1329static void snapshot_dtr(struct dm_target *ti)
1330{
1331#ifdef CONFIG_DM_DEBUG
1332        int i;
1333#endif
1334        struct dm_snapshot *s = ti->private;
1335        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1336
1337        down_read(&_origins_lock);
1338        /* Check whether exception handover must be cancelled */
1339        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1340        if (snap_src && snap_dest && (s == snap_src)) {
1341                mutex_lock(&snap_dest->lock);
1342                snap_dest->valid = 0;
1343                mutex_unlock(&snap_dest->lock);
1344                DMERR("Cancelling snapshot handover.");
1345        }
1346        up_read(&_origins_lock);
1347
1348        if (dm_target_is_snapshot_merge(ti))
1349                stop_merge(s);
1350
1351        /* Prevent further origin writes from using this snapshot. */
1352        /* After this returns there can be no new kcopyd jobs. */
1353        unregister_snapshot(s);
1354
1355        while (atomic_read(&s->pending_exceptions_count))
1356                msleep(1);
1357        /*
1358         * Ensure instructions in mempool_exit aren't reordered
1359         * before atomic_read.
1360         */
1361        smp_mb();
1362
1363#ifdef CONFIG_DM_DEBUG
1364        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1365                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
1366#endif
1367
1368        __free_exceptions(s);
1369
1370        mempool_exit(&s->pending_pool);
1371
1372        dm_exception_store_destroy(s->store);
1373
1374        mutex_destroy(&s->lock);
1375
1376        dm_put_device(ti, s->cow);
1377
1378        dm_put_device(ti, s->origin);
1379
1380        kfree(s);
1381}
1382
1383/*
1384 * Flush a list of buffers.
1385 */
1386static void flush_bios(struct bio *bio)
1387{
1388        struct bio *n;
1389
1390        while (bio) {
1391                n = bio->bi_next;
1392                bio->bi_next = NULL;
1393                generic_make_request(bio);
1394                bio = n;
1395        }
1396}
1397
1398static int do_origin(struct dm_dev *origin, struct bio *bio);
1399
1400/*
1401 * Flush a list of buffers.
1402 */
1403static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1404{
1405        struct bio *n;
1406        int r;
1407
1408        while (bio) {
1409                n = bio->bi_next;
1410                bio->bi_next = NULL;
1411                r = do_origin(s->origin, bio);
1412                if (r == DM_MAPIO_REMAPPED)
1413                        generic_make_request(bio);
1414                bio = n;
1415        }
1416}
1417
1418/*
1419 * Error a list of buffers.
1420 */
1421static void error_bios(struct bio *bio)
1422{
1423        struct bio *n;
1424
1425        while (bio) {
1426                n = bio->bi_next;
1427                bio->bi_next = NULL;
1428                bio_io_error(bio);
1429                bio = n;
1430        }
1431}
1432
1433static void __invalidate_snapshot(struct dm_snapshot *s, int err)
1434{
1435        if (!s->valid)
1436                return;
1437
1438        if (err == -EIO)
1439                DMERR("Invalidating snapshot: Error reading/writing.");
1440        else if (err == -ENOMEM)
1441                DMERR("Invalidating snapshot: Unable to allocate exception.");
1442
1443        if (s->store->type->drop_snapshot)
1444                s->store->type->drop_snapshot(s->store);
1445
1446        s->valid = 0;
1447
1448        dm_table_event(s->ti->table);
1449}
1450
1451static void pending_complete(void *context, int success)
1452{
1453        struct dm_snap_pending_exception *pe = context;
1454        struct dm_exception *e;
1455        struct dm_snapshot *s = pe->snap;
1456        struct bio *origin_bios = NULL;
1457        struct bio *snapshot_bios = NULL;
1458        struct bio *full_bio = NULL;
1459        int error = 0;
1460
1461        if (!success) {
1462                /* Read/write error - snapshot is unusable */
1463                mutex_lock(&s->lock);
1464                __invalidate_snapshot(s, -EIO);
1465                error = 1;
1466                goto out;
1467        }
1468
1469        e = alloc_completed_exception(GFP_NOIO);
1470        if (!e) {
1471                mutex_lock(&s->lock);
1472                __invalidate_snapshot(s, -ENOMEM);
1473                error = 1;
1474                goto out;
1475        }
1476        *e = pe->e;
1477
1478        mutex_lock(&s->lock);
1479        if (!s->valid) {
1480                free_completed_exception(e);
1481                error = 1;
1482                goto out;
1483        }
1484
1485        /* Check for conflicting reads */
1486        __check_for_conflicting_io(s, pe->e.old_chunk);
1487
1488        /*
1489         * Add a proper exception, and remove the
1490         * in-flight exception from the list.
1491         */
1492        dm_insert_exception(&s->complete, e);
1493
1494out:
1495        dm_remove_exception(&pe->e);
1496        snapshot_bios = bio_list_get(&pe->snapshot_bios);
1497        origin_bios = bio_list_get(&pe->origin_bios);
1498        full_bio = pe->full_bio;
1499        if (full_bio)
1500                full_bio->bi_end_io = pe->full_bio_end_io;
1501        increment_pending_exceptions_done_count();
1502
1503        mutex_unlock(&s->lock);
1504
1505        /* Submit any pending write bios */
1506        if (error) {
1507                if (full_bio)
1508                        bio_io_error(full_bio);
1509                error_bios(snapshot_bios);
1510        } else {
1511                if (full_bio)
1512                        bio_endio(full_bio);
1513                flush_bios(snapshot_bios);
1514        }
1515
1516        retry_origin_bios(s, origin_bios);
1517
1518        free_pending_exception(pe);
1519}
1520
1521static void complete_exception(struct dm_snap_pending_exception *pe)
1522{
1523        struct dm_snapshot *s = pe->snap;
1524
1525        /* Update the metadata if we are persistent */
1526        s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
1527                                         pending_complete, pe);
1528}
1529
1530/*
1531 * Called when the copy I/O has finished.  kcopyd actually runs
1532 * this code so don't block.
1533 */
1534static void copy_callback(int read_err, unsigned long write_err, void *context)
1535{
1536        struct dm_snap_pending_exception *pe = context;
1537        struct dm_snapshot *s = pe->snap;
1538
1539        pe->copy_error = read_err || write_err;
1540
1541        if (pe->exception_sequence == s->exception_complete_sequence) {
1542                struct rb_node *next;
1543
1544                s->exception_complete_sequence++;
1545                complete_exception(pe);
1546
1547                next = rb_first(&s->out_of_order_tree);
1548                while (next) {
1549                        pe = rb_entry(next, struct dm_snap_pending_exception,
1550                                        out_of_order_node);
1551                        if (pe->exception_sequence != s->exception_complete_sequence)
1552                                break;
1553                        next = rb_next(next);
1554                        s->exception_complete_sequence++;
1555                        rb_erase(&pe->out_of_order_node, &s->out_of_order_tree);
1556                        complete_exception(pe);
1557                        cond_resched();
1558                }
1559        } else {
1560                struct rb_node *parent = NULL;
1561                struct rb_node **p = &s->out_of_order_tree.rb_node;
1562                struct dm_snap_pending_exception *pe2;
1563
1564                while (*p) {
1565                        pe2 = rb_entry(*p, struct dm_snap_pending_exception, out_of_order_node);
1566                        parent = *p;
1567
1568                        BUG_ON(pe->exception_sequence == pe2->exception_sequence);
1569                        if (pe->exception_sequence < pe2->exception_sequence)
1570                                p = &((*p)->rb_left);
1571                        else
1572                                p = &((*p)->rb_right);
1573                }
1574
1575                rb_link_node(&pe->out_of_order_node, parent, p);
1576                rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
1577        }
1578}
1579
1580/*
1581 * Dispatches the copy operation to kcopyd.
1582 */
1583static void start_copy(struct dm_snap_pending_exception *pe)
1584{
1585        struct dm_snapshot *s = pe->snap;
1586        struct dm_io_region src, dest;
1587        struct block_device *bdev = s->origin->bdev;
1588        sector_t dev_size;
1589
1590        dev_size = get_dev_size(bdev);
1591
1592        src.bdev = bdev;
1593        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
1594        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
1595
1596        dest.bdev = s->cow->bdev;
1597        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
1598        dest.count = src.count;
1599
1600        /* Hand over to kcopyd */
1601        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
1602}
1603
1604static void full_bio_end_io(struct bio *bio)
1605{
1606        void *callback_data = bio->bi_private;
1607
1608        dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
1609}
1610
1611static void start_full_bio(struct dm_snap_pending_exception *pe,
1612                           struct bio *bio)
1613{
1614        struct dm_snapshot *s = pe->snap;
1615        void *callback_data;
1616
1617        pe->full_bio = bio;
1618        pe->full_bio_end_io = bio->bi_end_io;
1619
1620        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
1621                                                   copy_callback, pe);
1622
1623        bio->bi_end_io = full_bio_end_io;
1624        bio->bi_private = callback_data;
1625
1626        generic_make_request(bio);
1627}
1628
1629static struct dm_snap_pending_exception *
1630__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1631{
1632        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
1633
1634        if (!e)
1635                return NULL;
1636
1637        return container_of(e, struct dm_snap_pending_exception, e);
1638}
1639
1640/*
1641 * Looks to see if this snapshot already has a pending exception
1642 * for this chunk, otherwise it allocates a new one and inserts
1643 * it into the pending table.
1644 *
1645 * NOTE: a write lock must be held on snap->lock before calling
1646 * this.
1647 */
1648static struct dm_snap_pending_exception *
1649__find_pending_exception(struct dm_snapshot *s,
1650                         struct dm_snap_pending_exception *pe, chunk_t chunk)
1651{
1652        struct dm_snap_pending_exception *pe2;
1653
1654        pe2 = __lookup_pending_exception(s, chunk);
1655        if (pe2) {
1656                free_pending_exception(pe);
1657                return pe2;
1658        }
1659
1660        pe->e.old_chunk = chunk;
1661        bio_list_init(&pe->origin_bios);
1662        bio_list_init(&pe->snapshot_bios);
1663        pe->started = 0;
1664        pe->full_bio = NULL;
1665
1666        if (s->store->type->prepare_exception(s->store, &pe->e)) {
1667                free_pending_exception(pe);
1668                return NULL;
1669        }
1670
1671        pe->exception_sequence = s->exception_start_sequence++;
1672
1673        dm_insert_exception(&s->pending, &pe->e);
1674
1675        return pe;
1676}
1677
1678static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
1679                            struct bio *bio, chunk_t chunk)
1680{
1681        bio_set_dev(bio, s->cow->bdev);
1682        bio->bi_iter.bi_sector =
1683                chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
1684                                (chunk - e->old_chunk)) +
1685                (bio->bi_iter.bi_sector & s->store->chunk_mask);
1686}
1687
1688static int snapshot_map(struct dm_target *ti, struct bio *bio)
1689{
1690        struct dm_exception *e;
1691        struct dm_snapshot *s = ti->private;
1692        int r = DM_MAPIO_REMAPPED;
1693        chunk_t chunk;
1694        struct dm_snap_pending_exception *pe = NULL;
1695
1696        init_tracked_chunk(bio);
1697
1698        if (bio->bi_opf & REQ_PREFLUSH) {
1699                bio_set_dev(bio, s->cow->bdev);
1700                return DM_MAPIO_REMAPPED;
1701        }
1702
1703        chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1704
1705        /* Full snapshots are not usable */
1706        /* To get here the table must be live so s->active is always set. */
1707        if (!s->valid)
1708                return DM_MAPIO_KILL;
1709
1710        mutex_lock(&s->lock);
1711
1712        if (!s->valid || (unlikely(s->snapshot_overflowed) &&
1713            bio_data_dir(bio) == WRITE)) {
1714                r = DM_MAPIO_KILL;
1715                goto out_unlock;
1716        }
1717
1718        /* If the block is already remapped - use that, else remap it */
1719        e = dm_lookup_exception(&s->complete, chunk);
1720        if (e) {
1721                remap_exception(s, e, bio, chunk);
1722                goto out_unlock;
1723        }
1724
1725        /*
1726         * Write to snapshot - higher level takes care of RW/RO
1727         * flags so we should only get this if we are
1728         * writeable.
1729         */
1730        if (bio_data_dir(bio) == WRITE) {
1731                pe = __lookup_pending_exception(s, chunk);
1732                if (!pe) {
1733                        mutex_unlock(&s->lock);
1734                        pe = alloc_pending_exception(s);
1735                        mutex_lock(&s->lock);
1736
1737                        if (!s->valid || s->snapshot_overflowed) {
1738                                free_pending_exception(pe);
1739                                r = DM_MAPIO_KILL;
1740                                goto out_unlock;
1741                        }
1742
1743                        e = dm_lookup_exception(&s->complete, chunk);
1744                        if (e) {
1745                                free_pending_exception(pe);
1746                                remap_exception(s, e, bio, chunk);
1747                                goto out_unlock;
1748                        }
1749
1750                        pe = __find_pending_exception(s, pe, chunk);
1751                        if (!pe) {
1752                                if (s->store->userspace_supports_overflow) {
1753                                        s->snapshot_overflowed = 1;
1754                                        DMERR("Snapshot overflowed: Unable to allocate exception.");
1755                                } else
1756                                        __invalidate_snapshot(s, -ENOMEM);
1757                                r = DM_MAPIO_KILL;
1758                                goto out_unlock;
1759                        }
1760                }
1761
1762                remap_exception(s, &pe->e, bio, chunk);
1763
1764                r = DM_MAPIO_SUBMITTED;
1765
1766                if (!pe->started &&
1767                    bio->bi_iter.bi_size ==
1768                    (s->store->chunk_size << SECTOR_SHIFT)) {
1769                        pe->started = 1;
1770                        mutex_unlock(&s->lock);
1771                        start_full_bio(pe, bio);
1772                        goto out;
1773                }
1774
1775                bio_list_add(&pe->snapshot_bios, bio);
1776
1777                if (!pe->started) {
1778                        /* this is protected by snap->lock */
1779                        pe->started = 1;
1780                        mutex_unlock(&s->lock);
1781                        start_copy(pe);
1782                        goto out;
1783                }
1784        } else {
1785                bio_set_dev(bio, s->origin->bdev);
1786                track_chunk(s, bio, chunk);
1787        }
1788
1789out_unlock:
1790        mutex_unlock(&s->lock);
1791out:
1792        return r;
1793}
1794
1795/*
1796 * A snapshot-merge target behaves like a combination of a snapshot
1797 * target and a snapshot-origin target.  It only generates new
1798 * exceptions in other snapshots and not in the one that is being
1799 * merged.
1800 *
1801 * For each chunk, if there is an existing exception, it is used to
1802 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1803 * which in turn might generate exceptions in other snapshots.
1804 * If merging is currently taking place on the chunk in question, the
1805 * I/O is deferred by adding it to s->bios_queued_during_merge.
1806 */
1807static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
1808{
1809        struct dm_exception *e;
1810        struct dm_snapshot *s = ti->private;
1811        int r = DM_MAPIO_REMAPPED;
1812        chunk_t chunk;
1813
1814        init_tracked_chunk(bio);
1815
1816        if (bio->bi_opf & REQ_PREFLUSH) {
1817                if (!dm_bio_get_target_bio_nr(bio))
1818                        bio_set_dev(bio, s->origin->bdev);
1819                else
1820                        bio_set_dev(bio, s->cow->bdev);
1821                return DM_MAPIO_REMAPPED;
1822        }
1823
1824        chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1825
1826        mutex_lock(&s->lock);
1827
1828        /* Full merging snapshots are redirected to the origin */
1829        if (!s->valid)
1830                goto redirect_to_origin;
1831
1832        /* If the block is already remapped - use that */
1833        e = dm_lookup_exception(&s->complete, chunk);
1834        if (e) {
1835                /* Queue writes overlapping with chunks being merged */
1836                if (bio_data_dir(bio) == WRITE &&
1837                    chunk >= s->first_merging_chunk &&
1838                    chunk < (s->first_merging_chunk +
1839                             s->num_merging_chunks)) {
1840                        bio_set_dev(bio, s->origin->bdev);
1841                        bio_list_add(&s->bios_queued_during_merge, bio);
1842                        r = DM_MAPIO_SUBMITTED;
1843                        goto out_unlock;
1844                }
1845
1846                remap_exception(s, e, bio, chunk);
1847
1848                if (bio_data_dir(bio) == WRITE)
1849                        track_chunk(s, bio, chunk);
1850                goto out_unlock;
1851        }
1852
1853redirect_to_origin:
1854        bio_set_dev(bio, s->origin->bdev);
1855
1856        if (bio_data_dir(bio) == WRITE) {
1857                mutex_unlock(&s->lock);
1858                return do_origin(s->origin, bio);
1859        }
1860
1861out_unlock:
1862        mutex_unlock(&s->lock);
1863
1864        return r;
1865}
1866
1867static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1868                blk_status_t *error)
1869{
1870        struct dm_snapshot *s = ti->private;
1871
1872        if (is_bio_tracked(bio))
1873                stop_tracking_chunk(s, bio);
1874
1875        return DM_ENDIO_DONE;
1876}
1877
1878static void snapshot_merge_presuspend(struct dm_target *ti)
1879{
1880        struct dm_snapshot *s = ti->private;
1881
1882        stop_merge(s);
1883}
1884
1885static int snapshot_preresume(struct dm_target *ti)
1886{
1887        int r = 0;
1888        struct dm_snapshot *s = ti->private;
1889        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1890
1891        down_read(&_origins_lock);
1892        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1893        if (snap_src && snap_dest) {
1894                mutex_lock(&snap_src->lock);
1895                if (s == snap_src) {
1896                        DMERR("Unable to resume snapshot source until "
1897                              "handover completes.");
1898                        r = -EINVAL;
1899                } else if (!dm_suspended(snap_src->ti)) {
1900                        DMERR("Unable to perform snapshot handover until "
1901                              "source is suspended.");
1902                        r = -EINVAL;
1903                }
1904                mutex_unlock(&snap_src->lock);
1905        }
1906        up_read(&_origins_lock);
1907
1908        return r;
1909}
1910
1911static void snapshot_resume(struct dm_target *ti)
1912{
1913        struct dm_snapshot *s = ti->private;
1914        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
1915        struct dm_origin *o;
1916        struct mapped_device *origin_md = NULL;
1917        bool must_restart_merging = false;
1918
1919        down_read(&_origins_lock);
1920
1921        o = __lookup_dm_origin(s->origin->bdev);
1922        if (o)
1923                origin_md = dm_table_get_md(o->ti->table);
1924        if (!origin_md) {
1925                (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
1926                if (snap_merging)
1927                        origin_md = dm_table_get_md(snap_merging->ti->table);
1928        }
1929        if (origin_md == dm_table_get_md(ti->table))
1930                origin_md = NULL;
1931        if (origin_md) {
1932                if (dm_hold(origin_md))
1933                        origin_md = NULL;
1934        }
1935
1936        up_read(&_origins_lock);
1937
1938        if (origin_md) {
1939                dm_internal_suspend_fast(origin_md);
1940                if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
1941                        must_restart_merging = true;
1942                        stop_merge(snap_merging);
1943                }
1944        }
1945
1946        down_read(&_origins_lock);
1947
1948        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1949        if (snap_src && snap_dest) {
1950                mutex_lock(&snap_src->lock);
1951                mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1952                __handover_exceptions(snap_src, snap_dest);
1953                mutex_unlock(&snap_dest->lock);
1954                mutex_unlock(&snap_src->lock);
1955        }
1956
1957        up_read(&_origins_lock);
1958
1959        if (origin_md) {
1960                if (must_restart_merging)
1961                        start_merge(snap_merging);
1962                dm_internal_resume_fast(origin_md);
1963                dm_put(origin_md);
1964        }
1965
1966        /* Now we have correct chunk size, reregister */
1967        reregister_snapshot(s);
1968
1969        mutex_lock(&s->lock);
1970        s->active = 1;
1971        mutex_unlock(&s->lock);
1972}
1973
1974static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
1975{
1976        uint32_t min_chunksize;
1977
1978        down_read(&_origins_lock);
1979        min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1980        up_read(&_origins_lock);
1981
1982        return min_chunksize;
1983}
1984
1985static void snapshot_merge_resume(struct dm_target *ti)
1986{
1987        struct dm_snapshot *s = ti->private;
1988
1989        /*
1990         * Handover exceptions from existing snapshot.
1991         */
1992        snapshot_resume(ti);
1993
1994        /*
1995         * snapshot-merge acts as an origin, so set ti->max_io_len
1996         */
1997        ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
1998
1999        start_merge(s);
2000}
2001
2002static void snapshot_status(struct dm_target *ti, status_type_t type,
2003                            unsigned status_flags, char *result, unsigned maxlen)
2004{
2005        unsigned sz = 0;
2006        struct dm_snapshot *snap = ti->private;
2007
2008        switch (type) {
2009        case STATUSTYPE_INFO:
2010
2011                mutex_lock(&snap->lock);
2012
2013                if (!snap->valid)
2014                        DMEMIT("Invalid");
2015                else if (snap->merge_failed)
2016                        DMEMIT("Merge failed");
2017                else if (snap->snapshot_overflowed)
2018                        DMEMIT("Overflow");
2019                else {
2020                        if (snap->store->type->usage) {
2021                                sector_t total_sectors, sectors_allocated,
2022                                         metadata_sectors;
2023                                snap->store->type->usage(snap->store,
2024                                                         &total_sectors,
2025                                                         &sectors_allocated,
2026                                                         &metadata_sectors);
2027                                DMEMIT("%llu/%llu %llu",
2028                                       (unsigned long long)sectors_allocated,
2029                                       (unsigned long long)total_sectors,
2030                                       (unsigned long long)metadata_sectors);
2031                        }
2032                        else
2033                                DMEMIT("Unknown");
2034                }
2035
2036                mutex_unlock(&snap->lock);
2037
2038                break;
2039
2040        case STATUSTYPE_TABLE:
2041                /*
2042                 * kdevname returns a static pointer so we need
2043                 * to make private copies if the output is to
2044                 * make sense.
2045                 */
2046                DMEMIT("%s %s", snap->origin->name, snap->cow->name);
2047                snap->store->type->status(snap->store, type, result + sz,
2048                                          maxlen - sz);
2049                break;
2050        }
2051}
2052
2053static int snapshot_iterate_devices(struct dm_target *ti,
2054                                    iterate_devices_callout_fn fn, void *data)
2055{
2056        struct dm_snapshot *snap = ti->private;
2057        int r;
2058
2059        r = fn(ti, snap->origin, 0, ti->len, data);
2060
2061        if (!r)
2062                r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
2063
2064        return r;
2065}
2066
2067
2068/*-----------------------------------------------------------------
2069 * Origin methods
2070 *---------------------------------------------------------------*/
2071
2072/*
2073 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
2074 * supplied bio was ignored.  The caller may submit it immediately.
2075 * (No remapping actually occurs as the origin is always a direct linear
2076 * map.)
2077 *
2078 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
2079 * and any supplied bio is added to a list to be submitted once all
2080 * the necessary exceptions exist.
2081 */
2082static int __origin_write(struct list_head *snapshots, sector_t sector,
2083                          struct bio *bio)
2084{
2085        int r = DM_MAPIO_REMAPPED;
2086        struct dm_snapshot *snap;
2087        struct dm_exception *e;
2088        struct dm_snap_pending_exception *pe;
2089        struct dm_snap_pending_exception *pe_to_start_now = NULL;
2090        struct dm_snap_pending_exception *pe_to_start_last = NULL;
2091        chunk_t chunk;
2092
2093        /* Do all the snapshots on this origin */
2094        list_for_each_entry (snap, snapshots, list) {
2095                /*
2096                 * Don't make new exceptions in a merging snapshot
2097                 * because it has effectively been deleted
2098                 */
2099                if (dm_target_is_snapshot_merge(snap->ti))
2100                        continue;
2101
2102                mutex_lock(&snap->lock);
2103
2104                /* Only deal with valid and active snapshots */
2105                if (!snap->valid || !snap->active)
2106                        goto next_snapshot;
2107
2108                /* Nothing to do if writing beyond end of snapshot */
2109                if (sector >= dm_table_get_size(snap->ti->table))
2110                        goto next_snapshot;
2111
2112                /*
2113                 * Remember, different snapshots can have
2114                 * different chunk sizes.
2115                 */
2116                chunk = sector_to_chunk(snap->store, sector);
2117
2118                /*
2119                 * Check exception table to see if block
2120                 * is already remapped in this snapshot
2121                 * and trigger an exception if not.
2122                 */
2123                e = dm_lookup_exception(&snap->complete, chunk);
2124                if (e)
2125                        goto next_snapshot;
2126
2127                pe = __lookup_pending_exception(snap, chunk);
2128                if (!pe) {
2129                        mutex_unlock(&snap->lock);
2130                        pe = alloc_pending_exception(snap);
2131                        mutex_lock(&snap->lock);
2132
2133                        if (!snap->valid) {
2134                                free_pending_exception(pe);
2135                                goto next_snapshot;
2136                        }
2137
2138                        e = dm_lookup_exception(&snap->complete, chunk);
2139                        if (e) {
2140                                free_pending_exception(pe);
2141                                goto next_snapshot;
2142                        }
2143
2144                        pe = __find_pending_exception(snap, pe, chunk);
2145                        if (!pe) {
2146                                __invalidate_snapshot(snap, -ENOMEM);
2147                                goto next_snapshot;
2148                        }
2149                }
2150
2151                r = DM_MAPIO_SUBMITTED;
2152
2153                /*
2154                 * If an origin bio was supplied, queue it to wait for the
2155                 * completion of this exception, and start this one last,
2156                 * at the end of the function.
2157                 */
2158                if (bio) {
2159                        bio_list_add(&pe->origin_bios, bio);
2160                        bio = NULL;
2161
2162                        if (!pe->started) {
2163                                pe->started = 1;
2164                                pe_to_start_last = pe;
2165                        }
2166                }
2167
2168                if (!pe->started) {
2169                        pe->started = 1;
2170                        pe_to_start_now = pe;
2171                }
2172
2173next_snapshot:
2174                mutex_unlock(&snap->lock);
2175
2176                if (pe_to_start_now) {
2177                        start_copy(pe_to_start_now);
2178                        pe_to_start_now = NULL;
2179                }
2180        }
2181
2182        /*
2183         * Submit the exception against which the bio is queued last,
2184         * to give the other exceptions a head start.
2185         */
2186        if (pe_to_start_last)
2187                start_copy(pe_to_start_last);
2188
2189        return r;
2190}
2191
2192/*
2193 * Called on a write from the origin driver.
2194 */
2195static int do_origin(struct dm_dev *origin, struct bio *bio)
2196{
2197        struct origin *o;
2198        int r = DM_MAPIO_REMAPPED;
2199
2200        down_read(&_origins_lock);
2201        o = __lookup_origin(origin->bdev);
2202        if (o)
2203                r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
2204        up_read(&_origins_lock);
2205
2206        return r;
2207}
2208
2209/*
2210 * Trigger exceptions in all non-merging snapshots.
2211 *
2212 * The chunk size of the merging snapshot may be larger than the chunk
2213 * size of some other snapshot so we may need to reallocate multiple
2214 * chunks in other snapshots.
2215 *
2216 * We scan all the overlapping exceptions in the other snapshots.
2217 * Returns 1 if anything was reallocated and must be waited for,
2218 * otherwise returns 0.
2219 *
2220 * size must be a multiple of merging_snap's chunk_size.
2221 */
2222static int origin_write_extent(struct dm_snapshot *merging_snap,
2223                               sector_t sector, unsigned size)
2224{
2225        int must_wait = 0;
2226        sector_t n;
2227        struct origin *o;
2228
2229        /*
2230         * The origin's __minimum_chunk_size() got stored in max_io_len
2231         * by snapshot_merge_resume().
2232         */
2233        down_read(&_origins_lock);
2234        o = __lookup_origin(merging_snap->origin->bdev);
2235        for (n = 0; n < size; n += merging_snap->ti->max_io_len)
2236                if (__origin_write(&o->snapshots, sector + n, NULL) ==
2237                    DM_MAPIO_SUBMITTED)
2238                        must_wait = 1;
2239        up_read(&_origins_lock);
2240
2241        return must_wait;
2242}
2243
2244/*
2245 * Origin: maps a linear range of a device, with hooks for snapshotting.
2246 */
2247
2248/*
2249 * Construct an origin mapping: <dev_path>
2250 * The context for an origin is merely a 'struct dm_dev *'
2251 * pointing to the real device.
2252 */
2253static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2254{
2255        int r;
2256        struct dm_origin *o;
2257
2258        if (argc != 1) {
2259                ti->error = "origin: incorrect number of arguments";
2260                return -EINVAL;
2261        }
2262
2263        o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
2264        if (!o) {
2265                ti->error = "Cannot allocate private origin structure";
2266                r = -ENOMEM;
2267                goto bad_alloc;
2268        }
2269
2270        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
2271        if (r) {
2272                ti->error = "Cannot get target device";
2273                goto bad_open;
2274        }
2275
2276        o->ti = ti;
2277        ti->private = o;
2278        ti->num_flush_bios = 1;
2279
2280        return 0;
2281
2282bad_open:
2283        kfree(o);
2284bad_alloc:
2285        return r;
2286}
2287
2288static void origin_dtr(struct dm_target *ti)
2289{
2290        struct dm_origin *o = ti->private;
2291
2292        dm_put_device(ti, o->dev);
2293        kfree(o);
2294}
2295
2296static int origin_map(struct dm_target *ti, struct bio *bio)
2297{
2298        struct dm_origin *o = ti->private;
2299        unsigned available_sectors;
2300
2301        bio_set_dev(bio, o->dev->bdev);
2302
2303        if (unlikely(bio->bi_opf & REQ_PREFLUSH))
2304                return DM_MAPIO_REMAPPED;
2305
2306        if (bio_data_dir(bio) != WRITE)
2307                return DM_MAPIO_REMAPPED;
2308
2309        available_sectors = o->split_boundary -
2310                ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
2311
2312        if (bio_sectors(bio) > available_sectors)
2313                dm_accept_partial_bio(bio, available_sectors);
2314
2315        /* Only tell snapshots if this is a write */
2316        return do_origin(o->dev, bio);
2317}
2318
2319static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
2320                long nr_pages, void **kaddr, pfn_t *pfn)
2321{
2322        DMWARN("device does not support dax.");
2323        return -EIO;
2324}
2325
2326/*
2327 * Set the target "max_io_len" field to the minimum of all the snapshots'
2328 * chunk sizes.
2329 */
2330static void origin_resume(struct dm_target *ti)
2331{
2332        struct dm_origin *o = ti->private;
2333
2334        o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
2335
2336        down_write(&_origins_lock);
2337        __insert_dm_origin(o);
2338        up_write(&_origins_lock);
2339}
2340
2341static void origin_postsuspend(struct dm_target *ti)
2342{
2343        struct dm_origin *o = ti->private;
2344
2345        down_write(&_origins_lock);
2346        __remove_dm_origin(o);
2347        up_write(&_origins_lock);
2348}
2349
2350static void origin_status(struct dm_target *ti, status_type_t type,
2351                          unsigned status_flags, char *result, unsigned maxlen)
2352{
2353        struct dm_origin *o = ti->private;
2354
2355        switch (type) {
2356        case STATUSTYPE_INFO:
2357                result[0] = '\0';
2358                break;
2359
2360        case STATUSTYPE_TABLE:
2361                snprintf(result, maxlen, "%s", o->dev->name);
2362                break;
2363        }
2364}
2365
2366static int origin_iterate_devices(struct dm_target *ti,
2367                                  iterate_devices_callout_fn fn, void *data)
2368{
2369        struct dm_origin *o = ti->private;
2370
2371        return fn(ti, o->dev, 0, ti->len, data);
2372}
2373
2374static struct target_type origin_target = {
2375        .name    = "snapshot-origin",
2376        .version = {1, 9, 0},
2377        .module  = THIS_MODULE,
2378        .ctr     = origin_ctr,
2379        .dtr     = origin_dtr,
2380        .map     = origin_map,
2381        .resume  = origin_resume,
2382        .postsuspend = origin_postsuspend,
2383        .status  = origin_status,
2384        .iterate_devices = origin_iterate_devices,
2385        .direct_access = origin_dax_direct_access,
2386};
2387
2388static struct target_type snapshot_target = {
2389        .name    = "snapshot",
2390        .version = {1, 15, 0},
2391        .module  = THIS_MODULE,
2392        .ctr     = snapshot_ctr,
2393        .dtr     = snapshot_dtr,
2394        .map     = snapshot_map,
2395        .end_io  = snapshot_end_io,
2396        .preresume  = snapshot_preresume,
2397        .resume  = snapshot_resume,
2398        .status  = snapshot_status,
2399        .iterate_devices = snapshot_iterate_devices,
2400};
2401
2402static struct target_type merge_target = {
2403        .name    = dm_snapshot_merge_target_name,
2404        .version = {1, 4, 0},
2405        .module  = THIS_MODULE,
2406        .ctr     = snapshot_ctr,
2407        .dtr     = snapshot_dtr,
2408        .map     = snapshot_merge_map,
2409        .end_io  = snapshot_end_io,
2410        .presuspend = snapshot_merge_presuspend,
2411        .preresume  = snapshot_preresume,
2412        .resume  = snapshot_merge_resume,
2413        .status  = snapshot_status,
2414        .iterate_devices = snapshot_iterate_devices,
2415};
2416
2417static int __init dm_snapshot_init(void)
2418{
2419        int r;
2420
2421        r = dm_exception_store_init();
2422        if (r) {
2423                DMERR("Failed to initialize exception stores");
2424                return r;
2425        }
2426
2427        r = init_origin_hash();
2428        if (r) {
2429                DMERR("init_origin_hash failed.");
2430                goto bad_origin_hash;
2431        }
2432
2433        exception_cache = KMEM_CACHE(dm_exception, 0);
2434        if (!exception_cache) {
2435                DMERR("Couldn't create exception cache.");
2436                r = -ENOMEM;
2437                goto bad_exception_cache;
2438        }
2439
2440        pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
2441        if (!pending_cache) {
2442                DMERR("Couldn't create pending cache.");
2443                r = -ENOMEM;
2444                goto bad_pending_cache;
2445        }
2446
2447        r = dm_register_target(&snapshot_target);
2448        if (r < 0) {
2449                DMERR("snapshot target register failed %d", r);
2450                goto bad_register_snapshot_target;
2451        }
2452
2453        r = dm_register_target(&origin_target);
2454        if (r < 0) {
2455                DMERR("Origin target register failed %d", r);
2456                goto bad_register_origin_target;
2457        }
2458
2459        r = dm_register_target(&merge_target);
2460        if (r < 0) {
2461                DMERR("Merge target register failed %d", r);
2462                goto bad_register_merge_target;
2463        }
2464
2465        return 0;
2466
2467bad_register_merge_target:
2468        dm_unregister_target(&origin_target);
2469bad_register_origin_target:
2470        dm_unregister_target(&snapshot_target);
2471bad_register_snapshot_target:
2472        kmem_cache_destroy(pending_cache);
2473bad_pending_cache:
2474        kmem_cache_destroy(exception_cache);
2475bad_exception_cache:
2476        exit_origin_hash();
2477bad_origin_hash:
2478        dm_exception_store_exit();
2479
2480        return r;
2481}
2482
2483static void __exit dm_snapshot_exit(void)
2484{
2485        dm_unregister_target(&snapshot_target);
2486        dm_unregister_target(&origin_target);
2487        dm_unregister_target(&merge_target);
2488
2489        exit_origin_hash();
2490        kmem_cache_destroy(pending_cache);
2491        kmem_cache_destroy(exception_cache);
2492
2493        dm_exception_store_exit();
2494}
2495
2496/* Module hooks */
2497module_init(dm_snapshot_init);
2498module_exit(dm_snapshot_exit);
2499
2500MODULE_DESCRIPTION(DM_NAME " snapshot target");
2501MODULE_AUTHOR("Joe Thornber");
2502MODULE_LICENSE("GPL");
2503MODULE_ALIAS("dm-snapshot-origin");
2504MODULE_ALIAS("dm-snapshot-merge");
2505