/* linux/drivers/md/dm-era-target.c */
#include "dm.h"
#include "persistent-data/dm-transaction-manager.h"
#include "persistent-data/dm-bitset.h"
#include "persistent-data/dm-space-map.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "era"

#define SUPERBLOCK_LOCATION 0
#define SUPERBLOCK_MAGIC 2126579579
#define SUPERBLOCK_CSUM_XOR 146538381
#define MIN_ERA_VERSION 1
#define MAX_ERA_VERSION 1
#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION
#define MIN_BLOCK_SIZE 8

/*----------------------------------------------------------------
 * Writeset
 *--------------------------------------------------------------*/
struct writeset_metadata {
        uint32_t nr_bits;
        dm_block_t root;
};

struct writeset {
        struct writeset_metadata md;

        /*
         * An in-core copy of the bits, so we don't have to constantly
         * look them up on disk.
         */
        unsigned long *bits;
};

/*
 * This does not free the on-disk bitset; that is normally done after the
 * writeset has been digested into the era array.
 */
static void writeset_free(struct writeset *ws)
{
        vfree(ws->bits);
}

static int setup_on_disk_bitset(struct dm_disk_bitset *info,
                                unsigned nr_bits, dm_block_t *root)
{
        int r;

        r = dm_bitset_empty(info, root);
        if (r)
                return r;

        return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
}

static size_t bitset_size(unsigned nr_bits)
{
        return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
}
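
/*
 * Worked example (illustrative only): on a 64-bit host, nr_bits = 1000
 * gives dm_div_up(1000, 64) = 16 longs, i.e. a 128 byte allocation.  The
 * in-core bitset is therefore small even for large origin devices.
 */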

/*
 * Allocates memory for the in core bitset.
 */
static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
{
        ws->md.nr_bits = nr_blocks;
        ws->md.root = INVALID_WRITESET_ROOT;
        ws->bits = vzalloc(bitset_size(nr_blocks));
        if (!ws->bits) {
                DMERR("%s: couldn't allocate in memory bitset", __func__);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Wipes the in-core bitset, and creates a new on disk bitset.
 */
static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws)
{
        int r;

        memset(ws->bits, 0, bitset_size(ws->md.nr_bits));

        r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
        if (r) {
                DMERR("%s: setup_on_disk_bitset failed", __func__);
                return r;
        }

        return 0;
}

static bool writeset_marked(struct writeset *ws, dm_block_t block)
{
        return test_bit(block, ws->bits);
}

static int writeset_marked_on_disk(struct dm_disk_bitset *info,
                                   struct writeset_metadata *m, dm_block_t block,
                                   bool *result)
{
        dm_block_t old = m->root;

        /*
         * The bitset was flushed when it was archived, so we know there'll
         * be no change to the root.
         */
        int r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
        if (r) {
                DMERR("%s: dm_bitset_test_bit failed", __func__);
                return r;
        }

        BUG_ON(m->root != old);

        return r;
}

/*
 * Returns < 0 on error, 0 if the bit wasn't previously set, 1 if it was.
 */
static int writeset_test_and_set(struct dm_disk_bitset *info,
                                 struct writeset *ws, uint32_t block)
{
        int r;

        if (!test_and_set_bit(block, ws->bits)) {
                r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
                if (r) {
                        /* FIXME: fail mode */
                        return r;
                }

                return 0;
        }

        return 1;
}
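
/*
 * A minimal sketch of how a caller uses the tri-state return above
 * (process_deferred_bios() below does exactly this): 0 means the bit was
 * newly set, so the metadata must be committed before the write is let
 * through; 1 means the block was already marked and no commit is needed.
 *
 *      r = writeset_test_and_set(info, ws, block);
 *      if (r < 0)
 *              fail_io();              // hypothetical error path
 *      else if (r == 0)
 *              commit_needed = true;   // first write in this era
 */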

/*----------------------------------------------------------------
 * On disk metadata layout
 *--------------------------------------------------------------*/
#define SPACE_MAP_ROOT_SIZE 128
#define UUID_LEN 16

struct writeset_disk {
        __le32 nr_bits;
        __le64 root;
} __packed;

struct superblock_disk {
        __le32 csum;
        __le32 flags;
        __le64 blocknr;

        __u8 uuid[UUID_LEN];
        __le64 magic;
        __le32 version;

        __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

        __le32 data_block_size;
        __le32 metadata_block_size;
        __le32 nr_blocks;

        __le32 current_era;
        struct writeset_disk current_writeset;

        /*
         * Only these two fields are valid within the metadata snapshot.
         */
        __le64 writeset_tree_root;
        __le64 era_array_root;

        __le64 metadata_snap;
} __packed;

/*----------------------------------------------------------------
 * Superblock validation
 *--------------------------------------------------------------*/
static void sb_prepare_for_write(struct dm_block_validator *v,
                                 struct dm_block *b,
                                 size_t sb_block_size)
{
        struct superblock_disk *disk = dm_block_data(b);

        disk->blocknr = cpu_to_le64(dm_block_location(b));
        disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags,
                                                sb_block_size - sizeof(__le32),
                                                SUPERBLOCK_CSUM_XOR));
}

static int check_metadata_version(struct superblock_disk *disk)
{
        uint32_t metadata_version = le32_to_cpu(disk->version);
        if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) {
                DMERR("Era metadata version %u found, but only versions between %u and %u supported.",
                      metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION);
                return -EINVAL;
        }

        return 0;
}

static int sb_check(struct dm_block_validator *v,
                    struct dm_block *b,
                    size_t sb_block_size)
{
        struct superblock_disk *disk = dm_block_data(b);
        __le32 csum_le;

        if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) {
                DMERR("sb_check failed: blocknr %llu: wanted %llu",
                      le64_to_cpu(disk->blocknr),
                      (unsigned long long)dm_block_location(b));
                return -ENOTBLK;
        }

        if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) {
                DMERR("sb_check failed: magic %llu: wanted %llu",
                      le64_to_cpu(disk->magic),
                      (unsigned long long) SUPERBLOCK_MAGIC);
                return -EILSEQ;
        }

        csum_le = cpu_to_le32(dm_bm_checksum(&disk->flags,
                                             sb_block_size - sizeof(__le32),
                                             SUPERBLOCK_CSUM_XOR));
        if (csum_le != disk->csum) {
                DMERR("sb_check failed: csum %u: wanted %u",
                      le32_to_cpu(csum_le), le32_to_cpu(disk->csum));
                return -EILSEQ;
        }

        return check_metadata_version(disk);
}

static struct dm_block_validator sb_validator = {
        .name = "superblock",
        .prepare_for_write = sb_prepare_for_write,
        .check = sb_check
};
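
/*
 * Orientation note (describes dm-block-manager behaviour, not code in this
 * file): the block manager calls .check when a block is read into its
 * cache and .prepare_for_write just before a dirty block is written back,
 * so the location and checksum are verified on every load and refreshed on
 * every store.  The checksum deliberately starts at &disk->flags, skipping
 * the csum field itself.
 */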

/*----------------------------------------------------------------
 * Low level metadata handling
 *--------------------------------------------------------------*/
#define DM_ERA_METADATA_BLOCK_SIZE 4096
#define DM_ERA_METADATA_CACHE_SIZE 64
#define ERA_MAX_CONCURRENT_LOCKS 5

struct era_metadata {
        struct block_device *bdev;
        struct dm_block_manager *bm;
        struct dm_space_map *sm;
        struct dm_transaction_manager *tm;

        dm_block_t block_size;
        uint32_t nr_blocks;

        uint32_t current_era;

        /*
         * We preallocate 2 writesets.  When an era rolls over we
         * switch between them. This means the allocation is done at
         * preresume time, rather than on the io path.
         */
        struct writeset writesets[2];
        struct writeset *current_writeset;

        dm_block_t writeset_tree_root;
        dm_block_t era_array_root;

        struct dm_disk_bitset bitset_info;
        struct dm_btree_info writeset_tree_info;
        struct dm_array_info era_array_info;

        dm_block_t metadata_snap;

        /*
         * A flag that is set whenever a writeset has been archived.
         */
        bool archived_writesets;

        /*
         * Reading the space map root can fail, so we read it into this
         * buffer before the superblock is locked and updated.
         */
        __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};

static int superblock_read_lock(struct era_metadata *md,
                                struct dm_block **sblock)
{
        return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION,
                               &sb_validator, sblock);
}

static int superblock_lock_zero(struct era_metadata *md,
                                struct dm_block **sblock)
{
        return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION,
                                     &sb_validator, sblock);
}

static int superblock_lock(struct era_metadata *md,
                           struct dm_block **sblock)
{
        return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION,
                                &sb_validator, sblock);
}

/* FIXME: duplication with cache and thin */
static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
        int r;
        unsigned i;
        struct dm_block *b;
        __le64 *data_le, zero = cpu_to_le64(0);
        unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);

        /*
         * We can't use a validator here - it may be all zeroes.
         */
        r = dm_bm_read_lock(bm, SUPERBLOCK_LOCATION, NULL, &b);
        if (r)
                return r;

        data_le = dm_block_data(b);
        *result = true;
        for (i = 0; i < sb_block_size; i++) {
                if (data_le[i] != zero) {
                        *result = false;
                        break;
                }
        }

        dm_bm_unlock(b);

        return 0;
}

/*----------------------------------------------------------------*/

static void ws_pack(const struct writeset_metadata *core, struct writeset_disk *disk)
{
        disk->nr_bits = cpu_to_le32(core->nr_bits);
        disk->root = cpu_to_le64(core->root);
}

static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata *core)
{
        core->nr_bits = le32_to_cpu(disk->nr_bits);
        core->root = le64_to_cpu(disk->root);
}

static void ws_inc(void *context, const void *value)
{
        struct era_metadata *md = context;
        struct writeset_disk ws_d;
        dm_block_t b;

        memcpy(&ws_d, value, sizeof(ws_d));
        b = le64_to_cpu(ws_d.root);

        dm_tm_inc(md->tm, b);
}

static void ws_dec(void *context, const void *value)
{
        struct era_metadata *md = context;
        struct writeset_disk ws_d;
        dm_block_t b;

        memcpy(&ws_d, value, sizeof(ws_d));
        b = le64_to_cpu(ws_d.root);

        dm_bitset_del(&md->bitset_info, b);
}
static int ws_eq(void *context, const void *value1, const void *value2)
{
        /*
         * Compare the packed on-disk form; writeset_metadata is the padded
         * in-core struct, and sizeof it would over-read the 12 byte value.
         */
        return !memcmp(value1, value2, sizeof(struct writeset_disk));
}

/*----------------------------------------------------------------*/

static void setup_writeset_tree_info(struct era_metadata *md)
{
        struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;
        md->writeset_tree_info.tm = md->tm;
        md->writeset_tree_info.levels = 1;
        vt->context = md;
        vt->size = sizeof(struct writeset_disk);
        vt->inc = ws_inc;
        vt->dec = ws_dec;
        vt->equal = ws_eq;
}

static void setup_era_array_info(struct era_metadata *md)
{
        struct dm_btree_value_type vt;
        vt.context = NULL;
        vt.size = sizeof(__le32);
        vt.inc = NULL;
        vt.dec = NULL;
        vt.equal = NULL;

        dm_array_info_init(&md->era_array_info, md->tm, &vt);
}
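
/*
 * The era array is a flat dm-array holding one little-endian 32 bit era
 * per origin block.  The values reference no other metadata blocks, which
 * is why inc/dec/equal above are left NULL.
 */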

static void setup_infos(struct era_metadata *md)
{
        dm_disk_bitset_init(md->tm, &md->bitset_info);
        setup_writeset_tree_info(md);
        setup_era_array_info(md);
}

/*----------------------------------------------------------------*/

static int create_fresh_metadata(struct era_metadata *md)
{
        int r;

        r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION,
                                 &md->tm, &md->sm);
        if (r < 0) {
                DMERR("dm_tm_create_with_sm failed");
                return r;
        }

        setup_infos(md);

        r = dm_btree_empty(&md->writeset_tree_info, &md->writeset_tree_root);
        if (r) {
                DMERR("couldn't create new writeset tree");
                goto bad;
        }

        r = dm_array_empty(&md->era_array_info, &md->era_array_root);
        if (r) {
                DMERR("couldn't create era array");
                goto bad;
        }

        return 0;

bad:
        dm_sm_destroy(md->sm);
        dm_tm_destroy(md->tm);

        return r;
}

static int save_sm_root(struct era_metadata *md)
{
        int r;
        size_t metadata_len;

        r = dm_sm_root_size(md->sm, &metadata_len);
        if (r < 0)
                return r;

        return dm_sm_copy_root(md->sm, &md->metadata_space_map_root,
                               metadata_len);
}

static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk)
{
        memcpy(&disk->metadata_space_map_root,
               &md->metadata_space_map_root,
               sizeof(md->metadata_space_map_root));
}

/*
 * Writes a superblock, including the static fields that don't get updated
 * with every commit (possible optimisation here).  'md' should be fully
 * constructed when this is called.
 */
static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk)
{
        disk->magic = cpu_to_le64(SUPERBLOCK_MAGIC);
        disk->flags = cpu_to_le32(0ul);

        /* FIXME: can't keep blanking the uuid (uuid is currently unused though) */
        memset(disk->uuid, 0, sizeof(disk->uuid));
        disk->version = cpu_to_le32(MAX_ERA_VERSION);

        copy_sm_root(md, disk);

        disk->data_block_size = cpu_to_le32(md->block_size);
        disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
        disk->nr_blocks = cpu_to_le32(md->nr_blocks);
        disk->current_era = cpu_to_le32(md->current_era);

        ws_pack(&md->current_writeset->md, &disk->current_writeset);
        disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root);
        disk->era_array_root = cpu_to_le64(md->era_array_root);
        disk->metadata_snap = cpu_to_le64(md->metadata_snap);
}

static int write_superblock(struct era_metadata *md)
{
        int r;
        struct dm_block *sblock;
        struct superblock_disk *disk;

        r = save_sm_root(md);
        if (r) {
                DMERR("%s: save_sm_root failed", __func__);
                return r;
        }

        r = superblock_lock_zero(md, &sblock);
        if (r)
                return r;

        disk = dm_block_data(sblock);
        prepare_superblock(md, disk);

        return dm_tm_commit(md->tm, sblock);
}

/*
 * Assumes block_size and the infos are set.
 */
static int format_metadata(struct era_metadata *md)
{
        int r;

        r = create_fresh_metadata(md);
        if (r)
                return r;

        r = write_superblock(md);
        if (r) {
                dm_sm_destroy(md->sm);
                dm_tm_destroy(md->tm);
                return r;
        }

        return 0;
}

static int open_metadata(struct era_metadata *md)
{
        int r;
        struct dm_block *sblock;
        struct superblock_disk *disk;

        r = superblock_read_lock(md, &sblock);
        if (r) {
                DMERR("couldn't read_lock superblock");
                return r;
        }

        disk = dm_block_data(sblock);
        r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
                               disk->metadata_space_map_root,
                               sizeof(disk->metadata_space_map_root),
                               &md->tm, &md->sm);
        if (r) {
                DMERR("dm_tm_open_with_sm failed");
                goto bad;
        }

        setup_infos(md);

        md->block_size = le32_to_cpu(disk->data_block_size);
        md->nr_blocks = le32_to_cpu(disk->nr_blocks);
        md->current_era = le32_to_cpu(disk->current_era);

        md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
        md->era_array_root = le64_to_cpu(disk->era_array_root);
        md->metadata_snap = le64_to_cpu(disk->metadata_snap);
        md->archived_writesets = true;

        dm_bm_unlock(sblock);

        return 0;

bad:
        dm_bm_unlock(sblock);
        return r;
}

static int open_or_format_metadata(struct era_metadata *md,
                                   bool may_format)
{
        int r;
        bool unformatted = false;

        r = superblock_all_zeroes(md->bm, &unformatted);
        if (r)
                return r;

        if (unformatted)
                return may_format ? format_metadata(md) : -EPERM;

        return open_metadata(md);
}

static int create_persistent_data_objects(struct era_metadata *md,
                                          bool may_format)
{
        int r;

        md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE,
                                         DM_ERA_METADATA_CACHE_SIZE,
                                         ERA_MAX_CONCURRENT_LOCKS);
        if (IS_ERR(md->bm)) {
                DMERR("could not create block manager");
                return PTR_ERR(md->bm);
        }

        r = open_or_format_metadata(md, may_format);
        if (r)
                dm_block_manager_destroy(md->bm);

        return r;
}

static void destroy_persistent_data_objects(struct era_metadata *md)
{
        dm_sm_destroy(md->sm);
        dm_tm_destroy(md->tm);
        dm_block_manager_destroy(md->bm);
}

/*
 * This waits until all era_map threads have picked up the new filter.
 */
static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset)
{
        rcu_assign_pointer(md->current_writeset, new_writeset);
        synchronize_rcu();
}
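
/*
 * Publish/retire pattern, for orientation: rcu_assign_pointer() makes the
 * new writeset visible to readers, and synchronize_rcu() then waits until
 * every reader that might still hold the old pointer (see
 * metadata_current_marked() below, which dereferences current_writeset
 * under rcu_read_lock()) has finished.  After this returns it is safe to
 * archive or reinitialise the old writeset.
 */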

/*----------------------------------------------------------------
 * Writesets get 'digested' into the main era array.
 *
 * We're using a coroutine here so the worker thread can do the digestion,
 * thus avoiding synchronisation of the metadata.  Digesting a whole
 * writeset in one go would cause too much latency.
 *--------------------------------------------------------------*/
struct digest {
        uint32_t era;
        unsigned nr_bits, current_bit;
        struct writeset_metadata writeset;
        __le32 value;
        struct dm_disk_bitset info;

        int (*step)(struct era_metadata *, struct digest *);
};
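
/*
 * The 'coroutine' is a simple state machine driven by the worker; each
 * call to ->step does a bounded amount of work and then points ->step at
 * the next state:
 *
 *   lookup_writeset -> transcribe_writeset (INSERTS_PER_STEP bits at a
 *   time) -> remove_writeset -> lookup_writeset ... until the writeset
 *   tree is empty, at which point ->step is set to NULL.
 */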

static int metadata_digest_lookup_writeset(struct era_metadata *md,
                                           struct digest *d);

static int metadata_digest_remove_writeset(struct era_metadata *md,
                                           struct digest *d)
{
        int r;
        uint64_t key = d->era;

        r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root,
                            &key, &md->writeset_tree_root);
        if (r) {
                DMERR("%s: dm_btree_remove failed", __func__);
                return r;
        }

        d->step = metadata_digest_lookup_writeset;
        return 0;
}

#define INSERTS_PER_STEP 100

static int metadata_digest_transcribe_writeset(struct era_metadata *md,
                                               struct digest *d)
{
        int r;
        bool marked;
        unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);

        for (b = d->current_bit; b < e; b++) {
                r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
                if (r) {
                        DMERR("%s: writeset_marked_on_disk failed", __func__);
                        return r;
                }

                if (!marked)
                        continue;

                __dm_bless_for_disk(&d->value);
                r = dm_array_set_value(&md->era_array_info, md->era_array_root,
                                       b, &d->value, &md->era_array_root);
                if (r) {
                        DMERR("%s: dm_array_set_value failed", __func__);
                        return r;
                }
        }

        if (b == d->nr_bits)
                d->step = metadata_digest_remove_writeset;
        else
                d->current_bit = b;

        return 0;
}

static int metadata_digest_lookup_writeset(struct era_metadata *md,
                                           struct digest *d)
{
        int r;
        uint64_t key;
        struct writeset_disk disk;

        r = dm_btree_find_lowest_key(&md->writeset_tree_info,
                                     md->writeset_tree_root, &key);
        if (r < 0)
                return r;

        d->era = key;

        r = dm_btree_lookup(&md->writeset_tree_info,
                            md->writeset_tree_root, &key, &disk);
        if (r) {
                if (r == -ENODATA) {
                        d->step = NULL;
                        return 0;
                }

                DMERR("%s: dm_btree_lookup failed", __func__);
                return r;
        }

        ws_unpack(&disk, &d->writeset);
        d->value = cpu_to_le32(key);

        d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
        d->current_bit = 0;
        d->step = metadata_digest_transcribe_writeset;

        return 0;
}

static int metadata_digest_start(struct era_metadata *md, struct digest *d)
{
        if (d->step)
                return 0;

        memset(d, 0, sizeof(*d));

        /*
         * We initialise another bitset info to avoid any caching side
         * effects with the previous one.
         */
        dm_disk_bitset_init(md->tm, &d->info);
        d->step = metadata_digest_lookup_writeset;

        return 0;
}

/*----------------------------------------------------------------
 * High level metadata interface.  Target methods should use these, and not
 * the lower level ones.
 *--------------------------------------------------------------*/
static struct era_metadata *metadata_open(struct block_device *bdev,
                                          sector_t block_size,
                                          bool may_format)
{
        int r;
        struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL);

        if (!md)
                return NULL;

        md->bdev = bdev;
        md->block_size = block_size;

        md->writesets[0].md.root = INVALID_WRITESET_ROOT;
        md->writesets[1].md.root = INVALID_WRITESET_ROOT;
        md->current_writeset = &md->writesets[0];

        r = create_persistent_data_objects(md, may_format);
        if (r) {
                kfree(md);
                return ERR_PTR(r);
        }

        return md;
}

static void metadata_close(struct era_metadata *md)
{
        destroy_persistent_data_objects(md);
        kfree(md);
}

static bool valid_nr_blocks(dm_block_t n)
{
        /*
         * dm_bitset restricts us to 2^32.  test_bit & co. restrict us
         * further to 2^31 - 1
         */
        return n < (1ull << 31);
}

static int metadata_resize(struct era_metadata *md, void *arg)
{
        int r;
        dm_block_t *new_size = arg;
        __le32 value;

        if (!valid_nr_blocks(*new_size)) {
                DMERR("Invalid number of origin blocks %llu",
                      (unsigned long long) *new_size);
                return -EINVAL;
        }

        writeset_free(&md->writesets[0]);
        writeset_free(&md->writesets[1]);

        r = writeset_alloc(&md->writesets[0], *new_size);
        if (r) {
                DMERR("%s: writeset_alloc failed for writeset 0", __func__);
                return r;
        }

        r = writeset_alloc(&md->writesets[1], *new_size);
        if (r) {
                DMERR("%s: writeset_alloc failed for writeset 1", __func__);
                return r;
        }

        value = cpu_to_le32(0u);
        __dm_bless_for_disk(&value);
        r = dm_array_resize(&md->era_array_info, md->era_array_root,
                            md->nr_blocks, *new_size,
                            &value, &md->era_array_root);
        if (r) {
                DMERR("%s: dm_array_resize failed", __func__);
                return r;
        }

        md->nr_blocks = *new_size;
        return 0;
}

static int metadata_era_archive(struct era_metadata *md)
{
        int r;
        uint64_t keys[1];
        struct writeset_disk value;

        r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
                            &md->current_writeset->md.root);
        if (r) {
                DMERR("%s: dm_bitset_flush failed", __func__);
                return r;
        }

        ws_pack(&md->current_writeset->md, &value);
        md->current_writeset->md.root = INVALID_WRITESET_ROOT;

        keys[0] = md->current_era;
        __dm_bless_for_disk(&value);
        r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root,
                            keys, &value, &md->writeset_tree_root);
        if (r) {
                DMERR("%s: couldn't insert writeset into btree", __func__);
                /* FIXME: fail mode */
                return r;
        }

        md->archived_writesets = true;

        return 0;
}

static struct writeset *next_writeset(struct era_metadata *md)
{
        return (md->current_writeset == &md->writesets[0]) ?
                &md->writesets[1] : &md->writesets[0];
}

static int metadata_new_era(struct era_metadata *md)
{
        int r;
        struct writeset *new_writeset = next_writeset(md);

        r = writeset_init(&md->bitset_info, new_writeset);
        if (r) {
                DMERR("%s: writeset_init failed", __func__);
                return r;
        }

        swap_writeset(md, new_writeset);
        md->current_era++;

        return 0;
}

static int metadata_era_rollover(struct era_metadata *md)
{
        int r;

        if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
                r = metadata_era_archive(md);
                if (r) {
                        DMERR("%s: metadata_era_archive failed", __func__);
                        /* FIXME: fail mode? */
                        return r;
                }
        }

        r = metadata_new_era(md);
        if (r) {
                DMERR("%s: new era failed", __func__);
                /* FIXME: fail mode */
                return r;
        }

        return 0;
}
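
/*
 * Era lifecycle, for orientation: a rollover archives the current writeset
 * into the writeset tree (where the worker will digest it into the era
 * array) and then starts a fresh era with an empty writeset.  It is driven
 * from the 'checkpoint' message and from taking a metadata snapshot;
 * suspend/resume performs the two halves (archive, then new era)
 * separately.
 */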

static bool metadata_current_marked(struct era_metadata *md, dm_block_t block)
{
        bool r;
        struct writeset *ws;

        rcu_read_lock();
        ws = rcu_dereference(md->current_writeset);
        r = writeset_marked(ws, block);
        rcu_read_unlock();

        return r;
}

static int metadata_commit(struct era_metadata *md)
{
        int r;
        struct dm_block *sblock;

        if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) {
                r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
                                    &md->current_writeset->md.root);
                if (r) {
                        DMERR("%s: bitset flush failed", __func__);
                        return r;
                }
        }

        r = save_sm_root(md);
        if (r) {
                DMERR("%s: save_sm_root failed", __func__);
                return r;
        }

        r = dm_tm_pre_commit(md->tm);
        if (r) {
                DMERR("%s: pre commit failed", __func__);
                return r;
        }

        r = superblock_lock(md, &sblock);
        if (r) {
                DMERR("%s: superblock lock failed", __func__);
                return r;
        }

        prepare_superblock(md, dm_block_data(sblock));

        return dm_tm_commit(md->tm, sblock);
}

static int metadata_checkpoint(struct era_metadata *md)
{
        /*
         * For now we just rollover, but later I want to put a check in to
         * avoid this if the filter is still pretty fresh.
         */
        return metadata_era_rollover(md);
}

/*
 * Metadata snapshots allow userland to access era data.
 */
static int metadata_take_snap(struct era_metadata *md)
{
        int r, inc;
        struct dm_block *clone;

        if (md->metadata_snap != SUPERBLOCK_LOCATION) {
                DMERR("%s: metadata snapshot already exists", __func__);
                return -EINVAL;
        }

        r = metadata_era_rollover(md);
        if (r) {
                DMERR("%s: era rollover failed", __func__);
                return r;
        }

        r = metadata_commit(md);
        if (r) {
                DMERR("%s: commit failed", __func__);
                return r;
        }

        r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION);
        if (r) {
                DMERR("%s: couldn't increment superblock", __func__);
                return r;
        }

        r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION,
                               &sb_validator, &clone, &inc);
        if (r) {
                DMERR("%s: couldn't shadow superblock", __func__);
                dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION);
                return r;
        }
        BUG_ON(!inc);

        r = dm_sm_inc_block(md->sm, md->writeset_tree_root);
        if (r) {
                DMERR("%s: couldn't inc writeset tree root", __func__);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        r = dm_sm_inc_block(md->sm, md->era_array_root);
        if (r) {
                DMERR("%s: couldn't inc era tree root", __func__);
                dm_sm_dec_block(md->sm, md->writeset_tree_root);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        md->metadata_snap = dm_block_location(clone);

        dm_tm_unlock(md->tm, clone);

        return 0;
}

static int metadata_drop_snap(struct era_metadata *md)
{
        int r;
        dm_block_t location;
        struct dm_block *clone;
        struct superblock_disk *disk;

        if (md->metadata_snap == SUPERBLOCK_LOCATION) {
                DMERR("%s: no snap to drop", __func__);
                return -EINVAL;
        }

        r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone);
        if (r) {
                DMERR("%s: couldn't read lock superblock clone", __func__);
                return r;
        }

        /*
         * Whatever happens now we'll commit with no record of the metadata
         * snap.
         */
        md->metadata_snap = SUPERBLOCK_LOCATION;

        disk = dm_block_data(clone);
        r = dm_btree_del(&md->writeset_tree_info,
                         le64_to_cpu(disk->writeset_tree_root));
        if (r) {
                DMERR("%s: error deleting writeset tree clone", __func__);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root));
        if (r) {
                DMERR("%s: error deleting era array clone", __func__);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        location = dm_block_location(clone);
        dm_tm_unlock(md->tm, clone);

        return dm_sm_dec_block(md->sm, location);
}

struct metadata_stats {
        dm_block_t used;
        dm_block_t total;
        dm_block_t snap;
        uint32_t era;
};

static int metadata_get_stats(struct era_metadata *md, void *ptr)
{
        int r;
        struct metadata_stats *s = ptr;
        dm_block_t nr_free, nr_total;

        r = dm_sm_get_nr_free(md->sm, &nr_free);
        if (r) {
                DMERR("dm_sm_get_nr_free returned %d", r);
                return r;
        }

        r = dm_sm_get_nr_blocks(md->sm, &nr_total);
        if (r) {
                DMERR("dm_sm_get_nr_blocks returned %d", r);
                return r;
        }

        s->used = nr_total - nr_free;
        s->total = nr_total;
        s->snap = md->metadata_snap;
        s->era = md->current_era;

        return 0;
}

/*----------------------------------------------------------------*/

struct era {
        struct dm_target *ti;
        struct dm_target_callbacks callbacks;

        struct dm_dev *metadata_dev;
        struct dm_dev *origin_dev;

        dm_block_t nr_blocks;
        uint32_t sectors_per_block;
        int sectors_per_block_shift;
        struct era_metadata *md;

        struct workqueue_struct *wq;
        struct work_struct worker;

        spinlock_t deferred_lock;
        struct bio_list deferred_bios;

        spinlock_t rpc_lock;
        struct list_head rpc_calls;

        struct digest digest;
        atomic_t suspended;
};

struct rpc {
        struct list_head list;

        int (*fn0)(struct era_metadata *);
        int (*fn1)(struct era_metadata *, void *);
        void *arg;
        int result;

        struct completion complete;
};
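
/*
 * How the rpc mechanism hangs together, for orientation: perform_rpc()
 * (below) queues an rpc on era->rpc_calls, wakes the worker and sleeps on
 * the completion; the worker runs the fn0/fn1 callback against the
 * metadata in process_rpc_calls(), commits, stores the result and
 * completes.  This serialises all metadata mutation on the single worker
 * thread, so the metadata functions need no extra locking.
 */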

/*----------------------------------------------------------------
 * Remapping.
 *---------------------------------------------------------------*/
static bool block_size_is_power_of_two(struct era *era)
{
        return era->sectors_per_block_shift >= 0;
}

static dm_block_t get_block(struct era *era, struct bio *bio)
{
        sector_t block_nr = bio->bi_iter.bi_sector;

        if (!block_size_is_power_of_two(era))
                (void) sector_div(block_nr, era->sectors_per_block);
        else
                block_nr >>= era->sectors_per_block_shift;

        return block_nr;
}
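
/*
 * Worked example (illustrative only): with sectors_per_block = 8 the
 * shift is 3, so a bio at sector 1024 lands in block 1024 >> 3 = 128.  A
 * non-power-of-two size such as 12 takes the sector_div() path instead:
 * sector 1024 -> block 85.
 */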

static void remap_to_origin(struct era *era, struct bio *bio)
{
        bio->bi_bdev = era->origin_dev->bdev;
}

/*----------------------------------------------------------------
 * Worker thread
 *--------------------------------------------------------------*/
static void wake_worker(struct era *era)
{
        if (!atomic_read(&era->suspended))
                queue_work(era->wq, &era->worker);
}

static void process_old_eras(struct era *era)
{
        int r;

        if (!era->digest.step)
                return;

        r = era->digest.step(era->md, &era->digest);
        if (r < 0) {
                DMERR("%s: digest step failed, stopping digestion", __func__);
                era->digest.step = NULL;

        } else if (era->digest.step)
                wake_worker(era);
}

static void process_deferred_bios(struct era *era)
{
        int r;
        struct bio_list deferred_bios, marked_bios;
        struct bio *bio;
        bool commit_needed = false;
        bool failed = false;

        bio_list_init(&deferred_bios);
        bio_list_init(&marked_bios);

        spin_lock(&era->deferred_lock);
        bio_list_merge(&deferred_bios, &era->deferred_bios);
        bio_list_init(&era->deferred_bios);
        spin_unlock(&era->deferred_lock);

        while ((bio = bio_list_pop(&deferred_bios))) {
                r = writeset_test_and_set(&era->md->bitset_info,
                                          era->md->current_writeset,
                                          get_block(era, bio));
                if (r < 0) {
                        /*
                         * This is bad news, we need to rollback.
                         * FIXME: finish.
                         */
                        failed = true;

                } else if (r == 0)
                        commit_needed = true;

                bio_list_add(&marked_bios, bio);
        }

        if (commit_needed) {
                r = metadata_commit(era->md);
                if (r)
                        failed = true;
        }

        if (failed)
                while ((bio = bio_list_pop(&marked_bios)))
                        bio_io_error(bio);
        else
                while ((bio = bio_list_pop(&marked_bios)))
                        generic_make_request(bio);
}

static void process_rpc_calls(struct era *era)
{
        int r;
        bool need_commit = false;
        struct list_head calls;
        struct rpc *rpc, *tmp;

        INIT_LIST_HEAD(&calls);
        spin_lock(&era->rpc_lock);
        list_splice_init(&era->rpc_calls, &calls);
        spin_unlock(&era->rpc_lock);

        list_for_each_entry_safe(rpc, tmp, &calls, list) {
                rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
                need_commit = true;
        }

        if (need_commit) {
                r = metadata_commit(era->md);
                if (r)
                        list_for_each_entry_safe(rpc, tmp, &calls, list)
                                rpc->result = r;
        }

        list_for_each_entry_safe(rpc, tmp, &calls, list)
                complete(&rpc->complete);
}

static void kick_off_digest(struct era *era)
{
        if (era->md->archived_writesets) {
                era->md->archived_writesets = false;
                metadata_digest_start(era->md, &era->digest);
        }
}

static void do_work(struct work_struct *ws)
{
        struct era *era = container_of(ws, struct era, worker);

        kick_off_digest(era);
        process_old_eras(era);
        process_deferred_bios(era);
        process_rpc_calls(era);
}

static void defer_bio(struct era *era, struct bio *bio)
{
        spin_lock(&era->deferred_lock);
        bio_list_add(&era->deferred_bios, bio);
        spin_unlock(&era->deferred_lock);

        wake_worker(era);
}

/*
 * Make an rpc call to the worker to change the metadata.
 */
static int perform_rpc(struct era *era, struct rpc *rpc)
{
        rpc->result = 0;
        init_completion(&rpc->complete);

        spin_lock(&era->rpc_lock);
        list_add(&rpc->list, &era->rpc_calls);
        spin_unlock(&era->rpc_lock);

        wake_worker(era);
        wait_for_completion(&rpc->complete);

        return rpc->result;
}

static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
{
        struct rpc rpc;
        rpc.fn0 = fn;
        rpc.fn1 = NULL;

        return perform_rpc(era, &rpc);
}

static int in_worker1(struct era *era,
                      int (*fn)(struct era_metadata *, void *), void *arg)
{
        struct rpc rpc;
        rpc.fn0 = NULL;
        rpc.fn1 = fn;
        rpc.arg = arg;

        return perform_rpc(era, &rpc);
}

static void start_worker(struct era *era)
{
        atomic_set(&era->suspended, 0);
}

static void stop_worker(struct era *era)
{
        atomic_set(&era->suspended, 1);
        flush_workqueue(era->wq);
}

/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/
static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);
        return bdi_congested(&q->backing_dev_info, bdi_bits);
}

static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
        struct era *era = container_of(cb, struct era, callbacks);
        return dev_is_congested(era->origin_dev, bdi_bits);
}

static void era_destroy(struct era *era)
{
        if (era->md)
                metadata_close(era->md);

        if (era->wq)
                destroy_workqueue(era->wq);

        if (era->origin_dev)
                dm_put_device(era->ti, era->origin_dev);

        if (era->metadata_dev)
                dm_put_device(era->ti, era->metadata_dev);

        kfree(era);
}

static dm_block_t calc_nr_blocks(struct era *era)
{
        return dm_sector_div_up(era->ti->len, era->sectors_per_block);
}

static bool valid_block_size(dm_block_t block_size)
{
        bool greater_than_zero = block_size > 0;
        bool multiple_of_min_block_size = (block_size & (MIN_BLOCK_SIZE - 1)) == 0;

        return greater_than_zero && multiple_of_min_block_size;
}
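
/*
 * Since MIN_BLOCK_SIZE is 8 (a power of two), the mask test above is a
 * cheap way of saying "block_size is a multiple of 8 sectors", i.e. the
 * data block size must be a multiple of 4KB.
 */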

/*
 * <metadata dev> <data dev> <data block size (sectors)>
 */
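/*
 * Illustrative usage from userland (device names and sizes are made up):
 *
 *   dmsetup create my_era \
 *     --table "0 409600 era /dev/vg/era_meta /dev/vg/origin 64"
 *
 * creates a 200MB era device over /dev/vg/origin with 32KB (64 sector)
 * blocks, tracking writes in /dev/vg/era_meta.
 */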
static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        int r;
        char dummy;
        struct era *era;
        struct era_metadata *md;

        if (argc != 3) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        era = kzalloc(sizeof(*era), GFP_KERNEL);
        if (!era) {
                ti->error = "Error allocating era structure";
                return -ENOMEM;
        }

        era->ti = ti;

        r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev);
        if (r) {
                ti->error = "Error opening metadata device";
                era_destroy(era);
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev);
        if (r) {
                ti->error = "Error opening data device";
                era_destroy(era);
                return -EINVAL;
        }

        r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
        if (r != 1) {
                ti->error = "Error parsing block size";
                era_destroy(era);
                return -EINVAL;
        }

        r = dm_set_target_max_io_len(ti, era->sectors_per_block);
        if (r) {
                ti->error = "could not set max io len";
                era_destroy(era);
                return -EINVAL;
        }

        if (!valid_block_size(era->sectors_per_block)) {
                ti->error = "Invalid block size";
                era_destroy(era);
                return -EINVAL;
        }
        if (era->sectors_per_block & (era->sectors_per_block - 1))
                era->sectors_per_block_shift = -1;
        else
                era->sectors_per_block_shift = __ffs(era->sectors_per_block);

        md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
        if (IS_ERR(md)) {
                ti->error = "Error reading metadata";
                era_destroy(era);
                return PTR_ERR(md);
        }
        era->md = md;

        era->nr_blocks = calc_nr_blocks(era);

        r = metadata_resize(era->md, &era->nr_blocks);
        if (r) {
                ti->error = "couldn't resize metadata";
                era_destroy(era);
                return -ENOMEM;
        }

        era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
        if (!era->wq) {
                ti->error = "could not create workqueue for metadata object";
                era_destroy(era);
                return -ENOMEM;
        }
        INIT_WORK(&era->worker, do_work);

        spin_lock_init(&era->deferred_lock);
        bio_list_init(&era->deferred_bios);

        spin_lock_init(&era->rpc_lock);
        INIT_LIST_HEAD(&era->rpc_calls);

        ti->private = era;
        ti->num_flush_bios = 1;
        ti->flush_supported = true;

        ti->num_discard_bios = 1;
        ti->discards_supported = true;
        era->callbacks.congested_fn = era_is_congested;
        dm_table_add_target_callbacks(ti->table, &era->callbacks);

        return 0;
}

static void era_dtr(struct dm_target *ti)
{
        era_destroy(ti->private);
}

static int era_map(struct dm_target *ti, struct bio *bio)
{
        struct era *era = ti->private;
        dm_block_t block = get_block(era, bio);

        /*
         * All bios get remapped to the origin device.  We do this now, but
         * it may not get issued until later, depending on whether the
         * block is already marked in this era.
         */
        remap_to_origin(era, bio);

        /*
         * REQ_FLUSH bios carry no data, so we're not interested in them.
         */
        if (!(bio->bi_rw & REQ_FLUSH) &&
            (bio_data_dir(bio) == WRITE) &&
            !metadata_current_marked(era->md, block)) {
                defer_bio(era, bio);
                return DM_MAPIO_SUBMITTED;
        }

        return DM_MAPIO_REMAPPED;
}

static void era_postsuspend(struct dm_target *ti)
{
        int r;
        struct era *era = ti->private;

        r = in_worker0(era, metadata_era_archive);
        if (r) {
                DMERR("%s: couldn't archive current era", __func__);
                /* FIXME: fail mode */
        }

        stop_worker(era);
}

static int era_preresume(struct dm_target *ti)
{
        int r;
        struct era *era = ti->private;
        dm_block_t new_size = calc_nr_blocks(era);

        if (era->nr_blocks != new_size) {
                r = in_worker1(era, metadata_resize, &new_size);
                if (r)
                        return r;

                era->nr_blocks = new_size;
        }

        start_worker(era);

        r = in_worker0(era, metadata_new_era);
        if (r) {
                DMERR("%s: metadata_new_era failed", __func__);
                return r;
        }

        return 0;
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <current era> <held metadata root | '-'>
 */
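/*
 * e.g. (made-up numbers) "8 72/4096 3 -" means 8 sector metadata blocks,
 * 72 of 4096 in use, currently in era 3, with no metadata snapshot held.
 */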
static void era_status(struct dm_target *ti, status_type_t type,
                       unsigned status_flags, char *result, unsigned maxlen)
{
        int r;
        struct era *era = ti->private;
        ssize_t sz = 0;
        struct metadata_stats stats;
        char buf[BDEVNAME_SIZE];

        switch (type) {
        case STATUSTYPE_INFO:
                r = in_worker1(era, metadata_get_stats, &stats);
                if (r)
                        goto err;

                DMEMIT("%u %llu/%llu %u",
                       (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
                       (unsigned long long) stats.used,
                       (unsigned long long) stats.total,
                       (unsigned) stats.era);

                if (stats.snap != SUPERBLOCK_LOCATION)
                        DMEMIT(" %llu", stats.snap);
                else
                        DMEMIT(" -");
                break;

        case STATUSTYPE_TABLE:
                format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
                DMEMIT("%s ", buf);
                format_dev_t(buf, era->origin_dev->bdev->bd_dev);
                DMEMIT("%s %u", buf, era->sectors_per_block);
                break;
        }

        return;

err:
        DMEMIT("Error");
}

static int era_message(struct dm_target *ti, unsigned argc, char **argv)
{
        struct era *era = ti->private;

        if (argc != 1) {
                DMERR("incorrect number of message arguments");
                return -EINVAL;
        }

        if (!strcasecmp(argv[0], "checkpoint"))
                return in_worker0(era, metadata_checkpoint);

        if (!strcasecmp(argv[0], "take_metadata_snap"))
                return in_worker0(era, metadata_take_snap);

        if (!strcasecmp(argv[0], "drop_metadata_snap"))
                return in_worker0(era, metadata_drop_snap);

        DMERR("unsupported message '%s'", argv[0]);
        return -EINVAL;
}

static sector_t get_dev_size(struct dm_dev *dev)
{
        return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

static int era_iterate_devices(struct dm_target *ti,
                               iterate_devices_callout_fn fn, void *data)
{
        struct era *era = ti->private;
        return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
}

static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct era *era = ti->private;
        uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

        /*
         * If the system-determined stacked limits are compatible with the
         * era device's blocksize (io_opt is a factor) do not override them.
         */
        if (io_opt_sectors < era->sectors_per_block ||
            do_div(io_opt_sectors, era->sectors_per_block)) {
                blk_limits_io_min(limits, 0);
                blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
        }
}

/*----------------------------------------------------------------*/

static struct target_type era_target = {
        .name = "era",
        .version = {1, 0, 0},
        .module = THIS_MODULE,
        .ctr = era_ctr,
        .dtr = era_dtr,
        .map = era_map,
        .postsuspend = era_postsuspend,
        .preresume = era_preresume,
        .status = era_status,
        .message = era_message,
        .iterate_devices = era_iterate_devices,
        .io_hints = era_io_hints
};

static int __init dm_era_init(void)
{
        int r;

        r = dm_register_target(&era_target);
        if (r) {
                DMERR("era target registration failed: %d", r);
                return r;
        }

        return 0;
}

static void __exit dm_era_exit(void)
{
        dm_unregister_target(&era_target);
}

module_init(dm_era_init);
module_exit(dm_era_exit);

MODULE_DESCRIPTION(DM_NAME " era target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");