linux/drivers/md/dm-snap-persistent.c
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32        /* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots: by "persistent" we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented;
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
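
/*
 * For illustration only (not part of the original documentation): with a
 * hypothetical exceptions_per_area of 2, the chunk layout of the COW
 * device would be:
 *
 *      chunk 0: header
 *      chunk 1: metadata area 0
 *      chunks 2-3: exception data chunks
 *      chunk 4: metadata area 1
 *      chunks 5-6: exception data chunks
 *      ...
 *
 * area_location() below gives the exact metadata chunk positions; in
 * practice exceptions_per_area is the chunk size in bytes divided by
 * sizeof(struct disk_exception).
 */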

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
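 * (0x70416e53 stored as little-endian bytes on disk reads 'S' 'n' 'A' 'p'.)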
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
        __le32 magic;

        /*
         * Is this snapshot valid?  There is no way of recovering
         * an invalid snapshot.
         */
        __le32 valid;

        /*
         * Simple, incrementing version; no backward
         * compatibility.
         */
        __le32 version;

        /* In sectors */
        __le32 chunk_size;
} __packed;

struct disk_exception {
        __le64 old_chunk;
        __le64 new_chunk;
} __packed;

struct core_exception {
        uint64_t old_chunk;
        uint64_t new_chunk;
};

struct commit_callback {
        void (*callback)(void *, int success);
        void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
        struct dm_exception_store *store;
        int version;
        int valid;
        uint32_t exceptions_per_area;

        /*
         * Now that we have an asynchronous kcopyd there is no
         * need for large chunk sizes, so it won't hurt to have a
         * whole chunk's worth of metadata in memory at once.
         */
        void *area;

        /*
         * An area of zeros used to clear the next area.
         */
        void *zero_area;

        /*
         * An area used for header. The header can be written
         * concurrently with metadata (when invalidating the snapshot),
         * so it needs a separate buffer.
         */
        void *header_area;

        /*
         * Used to keep track of which metadata area the data in
         * 'chunk' refers to.
         */
        chunk_t current_area;

        /*
         * The next free chunk for an exception.
         *
         * When creating exceptions, all the chunks here and above are
         * free.  It holds the next chunk to be allocated.  On rare
         * occasions (e.g. after a system crash) holes can be left in
         * the exception store because chunks can be committed out of
         * order.
         *
         * When merging exceptions, it does not necessarily mean all the
         * chunks here and above are free.  It holds the value it would
         * have held if all chunks had been committed in order of
         * allocation.  Consequently the value may occasionally be
         * slightly too low, but since it's only used for 'status' and
         * it can never reach its minimum value too early, this doesn't
         * matter.
         */

        chunk_t next_free;

        /*
         * The index of next free exception in the current
         * metadata area.
         */
        uint32_t current_committed;

        atomic_t pending_count;
        uint32_t callback_count;
        struct commit_callback *callbacks;
        struct dm_io_client *io_client;

        struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
        int r = -ENOMEM;
        size_t len;

        len = ps->store->chunk_size << SECTOR_SHIFT;

        /*
         * Allocate the chunk_size block of memory that will hold
         * a single metadata area.
         */
        ps->area = vmalloc(len);
        if (!ps->area)
                goto err_area;

        ps->zero_area = vzalloc(len);
        if (!ps->zero_area)
                goto err_zero_area;

        ps->header_area = vmalloc(len);
        if (!ps->header_area)
                goto err_header_area;

        return 0;

err_header_area:
        vfree(ps->zero_area);

err_zero_area:
        vfree(ps->area);

err_area:
        return r;
}

static void free_area(struct pstore *ps)
{
        if (ps->area)
                vfree(ps->area);
        ps->area = NULL;

        if (ps->zero_area)
                vfree(ps->zero_area);
        ps->zero_area = NULL;

        if (ps->header_area)
                vfree(ps->header_area);
        ps->header_area = NULL;
}

struct mdata_req {
        struct dm_io_region *where;
        struct dm_io_request *io_req;
        struct work_struct work;
        int result;
};

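/*
 * Work function run on ps->metadata_wq: issue the synchronous dm_io
 * request that was set up by chunk_io() below.
 */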
static void do_metadata(struct work_struct *work)
{
        struct mdata_req *req = container_of(work, struct mdata_req, work);

        req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
                    int metadata)
{
        struct dm_io_region where = {
                .bdev = dm_snap_cow(ps->store->snap)->bdev,
                .sector = ps->store->chunk_size * chunk,
                .count = ps->store->chunk_size,
        };
        struct dm_io_request io_req = {
                .bi_rw = rw,
                .mem.type = DM_IO_VMA,
                .mem.ptr.vma = area,
                .client = ps->io_client,
                .notify.fn = NULL,
        };
        struct mdata_req req;

        if (!metadata)
                return dm_io(&io_req, 1, &where, NULL);

        req.where = &where;
        req.io_req = &io_req;

        /*
         * Issue the synchronous I/O from a different thread
         * to avoid generic_make_request recursion.
         */
        INIT_WORK_ONSTACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
        flush_work(&req.work);

        return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
        return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
        int r;
        chunk_t chunk;

        chunk = area_location(ps, ps->current_area);

        r = chunk_io(ps, ps->area, chunk, rw, 0);
        if (r)
                return r;

        return 0;
}

static void zero_memory_area(struct pstore *ps)
{
        memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
        return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

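/*
 * Read the on-disk header.  *new_snapshot is set if the COW device has
 * not been initialised yet (the magic is still zero).  As a side effect
 * this also creates the dm-io client and allocates the metadata area
 * buffers.
 */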
static int read_header(struct pstore *ps, int *new_snapshot)
{
        int r;
        struct disk_header *dh;
        unsigned chunk_size;
        int chunk_size_supplied = 1;
        char *chunk_err;

        /*
         * Use default chunk size (or logical_block_size, if larger)
         * if none supplied
         */
        if (!ps->store->chunk_size) {
                ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
                    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
                                            bdev) >> 9);
                ps->store->chunk_mask = ps->store->chunk_size - 1;
                ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
                chunk_size_supplied = 0;
        }

        ps->io_client = dm_io_client_create();
        if (IS_ERR(ps->io_client))
                return PTR_ERR(ps->io_client);

        r = alloc_area(ps);
        if (r)
                return r;

        r = chunk_io(ps, ps->header_area, 0, READ, 1);
        if (r)
                goto bad;

        dh = ps->header_area;

        if (le32_to_cpu(dh->magic) == 0) {
                *new_snapshot = 1;
                return 0;
        }

        if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
                DMWARN("Invalid or corrupt snapshot");
                r = -ENXIO;
                goto bad;
        }

        *new_snapshot = 0;
        ps->valid = le32_to_cpu(dh->valid);
        ps->version = le32_to_cpu(dh->version);
        chunk_size = le32_to_cpu(dh->chunk_size);

        if (ps->store->chunk_size == chunk_size)
                return 0;

        if (chunk_size_supplied)
                DMWARN("chunk size %u in device metadata overrides "
                       "table chunk size of %u.",
                       chunk_size, ps->store->chunk_size);

        /* We had a bogus chunk_size. Fix stuff up. */
        free_area(ps);

        r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
                                              &chunk_err);
        if (r) {
                DMERR("invalid on-disk chunk size %u: %s.",
                      chunk_size, chunk_err);
                return r;
        }

        r = alloc_area(ps);
        return r;

bad:
        free_area(ps);
        return r;
}

static int write_header(struct pstore *ps)
{
        struct disk_header *dh;

        memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

        dh = ps->header_area;
        dh->magic = cpu_to_le32(SNAP_MAGIC);
        dh->valid = cpu_to_le32(ps->valid);
        dh->version = cpu_to_le32(ps->version);
        dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

        return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
        BUG_ON(index >= ps->exceptions_per_area);

        return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
                           uint32_t index, struct core_exception *result)
{
        struct disk_exception *de = get_exception(ps, index);

        /* copy it */
        result->old_chunk = le64_to_cpu(de->old_chunk);
        result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
                            uint32_t index, struct core_exception *e)
{
        struct disk_exception *de = get_exception(ps, index);

        /* copy it */
        de->old_chunk = cpu_to_le64(e->old_chunk);
        de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
        struct disk_exception *de = get_exception(ps, index);

        /* clear it */
        de->old_chunk = 0;
        de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
                             int (*callback)(void *callback_context,
                                             chunk_t old, chunk_t new),
                             void *callback_context,
                             int *full)
{
        int r;
        unsigned int i;
        struct core_exception e;

        /* presume the area is full */
        *full = 1;

        for (i = 0; i < ps->exceptions_per_area; i++) {
                read_exception(ps, i, &e);

                /*
                 * If the new_chunk is pointing at the start of
                 * the COW device, where the first metadata area
                 * is, we know that we've hit the end of the
                 * exceptions.  Therefore the area is not full.
                 */
                if (e.new_chunk == 0LL) {
                        ps->current_committed = i;
                        *full = 0;
                        break;
                }

                /*
                 * Keep track of the start of the free chunks.
                 */
                if (ps->next_free <= e.new_chunk)
                        ps->next_free = e.new_chunk + 1;

                /*
                 * Otherwise we add the exception to the snapshot.
                 */
                r = callback(callback_context, e.old_chunk, e.new_chunk);
                if (r)
                        return r;
        }

        return 0;
}

static int read_exceptions(struct pstore *ps,
                           int (*callback)(void *callback_context, chunk_t old,
                                           chunk_t new),
                           void *callback_context)
{
        int r, full = 1;

        /*
         * Keep reading chunks and inserting exceptions until
         * we find a partially full area.
         */
        for (ps->current_area = 0; full; ps->current_area++) {
                r = area_io(ps, READ);
                if (r)
                        return r;

                r = insert_exceptions(ps, callback, callback_context, &full);
                if (r)
                        return r;
        }

        ps->current_area--;

        return 0;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
        return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
                             sector_t *total_sectors,
                             sector_t *sectors_allocated,
                             sector_t *metadata_sectors)
{
        struct pstore *ps = get_info(store);

        *sectors_allocated = ps->next_free * store->chunk_size;
        *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

        /*
         * First chunk is the fixed header.
         * Then there are (ps->current_area + 1) metadata chunks, each one
         * separated from the next by ps->exceptions_per_area data chunks.
         */
        *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
                            store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
        struct pstore *ps = get_info(store);

        destroy_workqueue(ps->metadata_wq);

        /* Created in read_header */
        if (ps->io_client)
                dm_io_client_destroy(ps->io_client);
        free_area(ps);

        /* Allocated in persistent_read_metadata */
        if (ps->callbacks)
                vfree(ps->callbacks);

        kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
                                    int (*callback)(void *callback_context,
                                                    chunk_t old, chunk_t new),
                                    void *callback_context)
{
        int r, uninitialized_var(new_snapshot);
        struct pstore *ps = get_info(store);

        /*
         * Read the snapshot header.
         */
        r = read_header(ps, &new_snapshot);
        if (r)
                return r;

        /*
         * Now that we know the correct chunk_size, complete the initialisation.
         */
        ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
                                  sizeof(struct disk_exception);
        ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
                                   sizeof(*ps->callbacks));
        if (!ps->callbacks)
                return -ENOMEM;

        /*
         * Do we need to set up a new snapshot?
         */
        if (new_snapshot) {
                r = write_header(ps);
                if (r) {
                        DMWARN("write_header failed");
                        return r;
                }

                ps->current_area = 0;
                zero_memory_area(ps);
                r = zero_disk_area(ps, 0);
                if (r)
                        DMWARN("zero_disk_area(0) failed");
                return r;
        }
        /*
         * Sanity checks.
         */
        if (ps->version != SNAPSHOT_DISK_VERSION) {
                DMWARN("unable to handle snapshot disk version %d",
                       ps->version);
                return -EINVAL;
        }

        /*
         * Metadata are valid, but snapshot is invalidated
         */
        if (!ps->valid)
                return 1;

        /*
         * Read the metadata.
         */
        r = read_exceptions(ps, callback, callback_context);

        return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
                                        struct dm_exception *e)
{
        struct pstore *ps = get_info(store);
        uint32_t stride;
        chunk_t next_free;
        sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

        /* Is there enough room? */
        if (size < ((ps->next_free + 1) * store->chunk_size))
                return -ENOSPC;

        e->new_chunk = ps->next_free;

        /*
         * Move on to the next free chunk, making sure to take
         * into account the location of the metadata chunks.
         */
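        /*
         * Chunks whose index is congruent to 1 modulo (exceptions_per_area + 1)
         * hold metadata areas (see area_location()), so skip those.
         */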
        stride = (ps->exceptions_per_area + 1);
        next_free = ++ps->next_free;
        if (sector_div(next_free, stride) == 1)
                ps->next_free++;

        atomic_inc(&ps->pending_count);
        return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
                                        struct dm_exception *e,
                                        void (*callback) (void *, int success),
                                        void *callback_context)
{
        unsigned int i;
        struct pstore *ps = get_info(store);
        struct core_exception ce;
        struct commit_callback *cb;

        ce.old_chunk = e->old_chunk;
        ce.new_chunk = e->new_chunk;
        write_exception(ps, ps->current_committed++, &ce);

        /*
         * Add the callback to the back of the array.  This code
         * is the only place where the callback array is
         * manipulated, and we know that it will never be called
         * multiple times concurrently.
         */
        cb = ps->callbacks + ps->callback_count++;
        cb->callback = callback;
        cb->context = callback_context;

        /*
         * If there are exceptions in flight and we have not yet
         * filled this metadata area there's nothing more to do.
         */
        if (!atomic_dec_and_test(&ps->pending_count) &&
            (ps->current_committed != ps->exceptions_per_area))
                return;

        /*
         * If we completely filled the current area, then wipe the next one.
         */
        if ((ps->current_committed == ps->exceptions_per_area) &&
            zero_disk_area(ps, ps->current_area + 1))
                ps->valid = 0;

        /*
         * Commit exceptions to disk.
         */
        if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
                ps->valid = 0;

        /*
         * Advance to the next area if this one is full.
         */
        if (ps->current_committed == ps->exceptions_per_area) {
                ps->current_committed = 0;
                ps->current_area++;
                zero_memory_area(ps);
        }

        for (i = 0; i < ps->callback_count; i++) {
                cb = ps->callbacks + i;
                cb->callback(cb->context, ps->valid);
        }

        ps->callback_count = 0;
}

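/*
 * Returns the number of consecutive exceptions (counting backwards from
 * the most recently committed one) whose old and new chunks are both
 * contiguous, 0 when there is nothing left to merge, or a negative
 * error code.
 */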
static int persistent_prepare_merge(struct dm_exception_store *store,
                                    chunk_t *last_old_chunk,
                                    chunk_t *last_new_chunk)
{
        struct pstore *ps = get_info(store);
        struct core_exception ce;
        int nr_consecutive;
        int r;

        /*
         * When current area is empty, move back to preceding area.
         */
        if (!ps->current_committed) {
                /*
                 * Have we finished?
                 */
                if (!ps->current_area)
                        return 0;

                ps->current_area--;
                r = area_io(ps, READ);
                if (r < 0)
                        return r;
                ps->current_committed = ps->exceptions_per_area;
        }

        read_exception(ps, ps->current_committed - 1, &ce);
        *last_old_chunk = ce.old_chunk;
        *last_new_chunk = ce.new_chunk;

        /*
         * Find number of consecutive chunks within the current area,
         * working backwards.
         */
        for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
             nr_consecutive++) {
                read_exception(ps, ps->current_committed - 1 - nr_consecutive,
                               &ce);
                if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
                    ce.new_chunk != *last_new_chunk - nr_consecutive)
                        break;
        }

        return nr_consecutive;
}

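/*
 * Remove the last nr_merged exceptions from the on-disk metadata area
 * once they have been merged back to the origin.
 */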
static int persistent_commit_merge(struct dm_exception_store *store,
                                   int nr_merged)
{
        int r, i;
        struct pstore *ps = get_info(store);

        BUG_ON(nr_merged > ps->current_committed);

        for (i = 0; i < nr_merged; i++)
                clear_exception(ps, ps->current_committed - 1 - i);

        r = area_io(ps, WRITE_FLUSH_FUA);
        if (r < 0)
                return r;

        ps->current_committed -= nr_merged;

        /*
         * At this stage, only persistent_usage() uses ps->next_free, so
         * we make no attempt to keep ps->next_free strictly accurate
         * as exceptions may have been committed out-of-order originally.
         * Once a snapshot has become merging, we set it to the value it
         * would have held had all the exceptions been committed in order.
         *
         * ps->current_area does not get reduced by prepare_merge() until
         * after commit_merge() has removed the nr_merged previous exceptions.
         */
        ps->next_free = area_location(ps, ps->current_area) +
                        ps->current_committed + 1;

        return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
        struct pstore *ps = get_info(store);

        ps->valid = 0;
        if (write_header(ps))
                DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
                          unsigned argc, char **argv)
{
        struct pstore *ps;

        /* allocate the pstore */
        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
                return -ENOMEM;

        ps->store = store;
        ps->valid = 1;
        ps->version = SNAPSHOT_DISK_VERSION;
        ps->area = NULL;
        ps->zero_area = NULL;
        ps->header_area = NULL;
        ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
        ps->current_committed = 0;

        ps->callback_count = 0;
        atomic_set(&ps->pending_count, 0);
        ps->callbacks = NULL;

        ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
        if (!ps->metadata_wq) {
                kfree(ps);
                DMERR("couldn't start header metadata update thread");
                return -ENOMEM;
        }

        store->context = ps;

        return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
                                  status_type_t status, char *result,
                                  unsigned maxlen)
{
        unsigned sz = 0;

        switch (status) {
        case STATUSTYPE_INFO:
                break;
        case STATUSTYPE_TABLE:
                DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
        }

        return sz;
}

static struct dm_exception_store_type _persistent_type = {
        .name = "persistent",
        .module = THIS_MODULE,
        .ctr = persistent_ctr,
        .dtr = persistent_dtr,
        .read_metadata = persistent_read_metadata,
        .prepare_exception = persistent_prepare_exception,
        .commit_exception = persistent_commit_exception,
        .prepare_merge = persistent_prepare_merge,
        .commit_merge = persistent_commit_merge,
        .drop_snapshot = persistent_drop_snapshot,
        .usage = persistent_usage,
        .status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
        .name = "P",
        .module = THIS_MODULE,
        .ctr = persistent_ctr,
        .dtr = persistent_dtr,
        .read_metadata = persistent_read_metadata,
        .prepare_exception = persistent_prepare_exception,
        .commit_exception = persistent_commit_exception,
        .prepare_merge = persistent_prepare_merge,
        .commit_merge = persistent_commit_merge,
        .drop_snapshot = persistent_drop_snapshot,
        .usage = persistent_usage,
        .status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
        int r;

        r = dm_exception_store_type_register(&_persistent_type);
        if (r) {
                DMERR("Unable to register persistent exception store type");
                return r;
        }

        r = dm_exception_store_type_register(&_persistent_compat_type);
        if (r) {
                DMERR("Unable to register old-style persistent exception "
                      "store type");
                dm_exception_store_type_unregister(&_persistent_type);
                return r;
        }

        return r;
}

void dm_persistent_snapshot_exit(void)
{
        dm_exception_store_type_unregister(&_persistent_type);
        dm_exception_store_type_unregister(&_persistent_compat_type);
}