linux/drivers/md/dm-cache-metadata.c
   1/*
   2 * Copyright (C) 2012 Red Hat, Inc.
   3 *
   4 * This file is released under the GPL.
   5 */
   6
   7#include "dm-cache-metadata.h"
   8
   9#include "persistent-data/dm-array.h"
  10#include "persistent-data/dm-bitset.h"
  11#include "persistent-data/dm-space-map.h"
  12#include "persistent-data/dm-space-map-disk.h"
  13#include "persistent-data/dm-transaction-manager.h"
  14
  15#include <linux/device-mapper.h>
  16
  17/*----------------------------------------------------------------*/
  18
  19#define DM_MSG_PREFIX   "cache metadata"
  20
  21#define CACHE_SUPERBLOCK_MAGIC 06142003
  22#define CACHE_SUPERBLOCK_LOCATION 0
  23
  24/*
  25 * defines a range of metadata versions that this module can handle.
  26 */
  27#define MIN_CACHE_VERSION 1
  28#define MAX_CACHE_VERSION 2
  29
  30#define CACHE_METADATA_CACHE_SIZE 64
  31
  32/*
  33 *  3 for btree insert +
  34 *  2 for btree lookup used within space map
  35 */
  36#define CACHE_MAX_CONCURRENT_LOCKS 5
  37#define SPACE_MAP_ROOT_SIZE 128
  38
  39enum superblock_flag_bits {
  40        /* for spotting crashes that would invalidate the dirty bitset */
  41        CLEAN_SHUTDOWN,
  42        /* metadata must be checked using the tools */
  43        NEEDS_CHECK,
  44};
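/*
 * CLEAN_SHUTDOWN is set in the on-disk flags by a clean commit (see
 * set_clean_shutdown()) and cleared again as soon as the metadata is
 * opened (clear_clean_shutdown() via __begin_transaction_flags()), so
 * after a crash it reads back clear and the dirty bits and hints are
 * not trusted.  NEEDS_CHECK is set by dm_cache_metadata_set_needs_check()
 * and merely reported back via dm_cache_metadata_needs_check().
 */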
  45
  46/*
  47 * Each mapping from cache block -> origin block carries a set of flags.
  48 */
  49enum mapping_bits {
  50        /*
   51         * A valid mapping.  Because we're using an array, we clear this
   52         * flag for a non-existent mapping.
  53         */
  54        M_VALID = 1,
  55
  56        /*
  57         * The data on the cache is different from that on the origin.
  58         * This flag is only used by metadata format 1.
  59         */
  60        M_DIRTY = 2
  61};
  62
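/*
 * The on-disk superblock.  It is __packed and must fit in a single
 * 512-byte sector (enforced by the BUILD_BUG_ON() in
 * __commit_transaction()).  Metadata format 2 only appends fields at the
 * end (currently just dirty_root), leaving the format 1 layout intact.
 */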
  63struct cache_disk_superblock {
  64        __le32 csum;
  65        __le32 flags;
  66        __le64 blocknr;
  67
  68        __u8 uuid[16];
  69        __le64 magic;
  70        __le32 version;
  71
  72        __u8 policy_name[CACHE_POLICY_NAME_SIZE];
  73        __le32 policy_hint_size;
  74
  75        __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
  76        __le64 mapping_root;
  77        __le64 hint_root;
  78
  79        __le64 discard_root;
  80        __le64 discard_block_size;
  81        __le64 discard_nr_blocks;
  82
  83        __le32 data_block_size;
  84        __le32 metadata_block_size;
  85        __le32 cache_blocks;
  86
  87        __le32 compat_flags;
  88        __le32 compat_ro_flags;
  89        __le32 incompat_flags;
  90
  91        __le32 read_hits;
  92        __le32 read_misses;
  93        __le32 write_hits;
  94        __le32 write_misses;
  95
  96        __le32 policy_version[CACHE_POLICY_VERSION_SIZE];
  97
  98        /*
  99         * Metadata format 2 fields.
 100         */
 101        __le64 dirty_root;
 102} __packed;
 103
 104struct dm_cache_metadata {
 105        atomic_t ref_count;
 106        struct list_head list;
 107
 108        unsigned version;
 109        struct block_device *bdev;
 110        struct dm_block_manager *bm;
 111        struct dm_space_map *metadata_sm;
 112        struct dm_transaction_manager *tm;
 113
 114        struct dm_array_info info;
 115        struct dm_array_info hint_info;
 116        struct dm_disk_bitset discard_info;
 117
 118        struct rw_semaphore root_lock;
 119        unsigned long flags;
 120        dm_block_t root;
 121        dm_block_t hint_root;
 122        dm_block_t discard_root;
 123
 124        sector_t discard_block_size;
 125        dm_dblock_t discard_nr_blocks;
 126
 127        sector_t data_block_size;
 128        dm_cblock_t cache_blocks;
 129        bool changed:1;
 130        bool clean_when_opened:1;
 131
 132        char policy_name[CACHE_POLICY_NAME_SIZE];
 133        unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
 134        size_t policy_hint_size;
 135        struct dm_cache_statistics stats;
 136
 137        /*
 138         * Reading the space map root can fail, so we read it into this
 139         * buffer before the superblock is locked and updated.
 140         */
 141        __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
 142
 143        /*
 144         * Set if a transaction has to be aborted but the attempt to roll
 145         * back to the previous (good) transaction failed.  The only
 146         * metadata operation permissible in this state is the closing of
 147         * the device.
 148         */
 149        bool fail_io:1;
 150
 151        /*
 152         * Metadata format 2 fields.
 153         */
 154        dm_block_t dirty_root;
 155        struct dm_disk_bitset dirty_info;
 156
 157        /*
 158         * These structures are used when loading metadata.  They're too
 159         * big to put on the stack.
 160         */
 161        struct dm_array_cursor mapping_cursor;
 162        struct dm_array_cursor hint_cursor;
 163        struct dm_bitset_cursor dirty_cursor;
 164};
 165
 166/*-------------------------------------------------------------------
 167 * superblock validator
 168 *-----------------------------------------------------------------*/
 169
 170#define SUPERBLOCK_CSUM_XOR 9031977
 171
 172static void sb_prepare_for_write(struct dm_block_validator *v,
 173                                 struct dm_block *b,
 174                                 size_t sb_block_size)
 175{
 176        struct cache_disk_superblock *disk_super = dm_block_data(b);
 177
 178        disk_super->blocknr = cpu_to_le64(dm_block_location(b));
 179        disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
 180                                                      sb_block_size - sizeof(__le32),
 181                                                      SUPERBLOCK_CSUM_XOR));
 182}
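/*
 * The checksum deliberately covers everything after the csum field
 * itself, which is why it starts at &disk_super->flags and knocks
 * sizeof(__le32) off the block size.
 */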
 183
 184static int check_metadata_version(struct cache_disk_superblock *disk_super)
 185{
 186        uint32_t metadata_version = le32_to_cpu(disk_super->version);
 187
 188        if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
 189                DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
 190                      metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
 191                return -EINVAL;
 192        }
 193
 194        return 0;
 195}
 196
 197static int sb_check(struct dm_block_validator *v,
 198                    struct dm_block *b,
 199                    size_t sb_block_size)
 200{
 201        struct cache_disk_superblock *disk_super = dm_block_data(b);
 202        __le32 csum_le;
 203
 204        if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
 205                DMERR("sb_check failed: blocknr %llu: wanted %llu",
 206                      le64_to_cpu(disk_super->blocknr),
 207                      (unsigned long long)dm_block_location(b));
 208                return -ENOTBLK;
 209        }
 210
 211        if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
 212                DMERR("sb_check failed: magic %llu: wanted %llu",
 213                      le64_to_cpu(disk_super->magic),
 214                      (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
 215                return -EILSEQ;
 216        }
 217
 218        csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
 219                                             sb_block_size - sizeof(__le32),
 220                                             SUPERBLOCK_CSUM_XOR));
 221        if (csum_le != disk_super->csum) {
 222                DMERR("sb_check failed: csum %u: wanted %u",
 223                      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
 224                return -EILSEQ;
 225        }
 226
 227        return check_metadata_version(disk_super);
 228}
 229
 230static struct dm_block_validator sb_validator = {
 231        .name = "superblock",
 232        .prepare_for_write = sb_prepare_for_write,
 233        .check = sb_check
 234};
 235
 236/*----------------------------------------------------------------*/
 237
 238static int superblock_read_lock(struct dm_cache_metadata *cmd,
 239                                struct dm_block **sblock)
 240{
 241        return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
 242                               &sb_validator, sblock);
 243}
 244
 245static int superblock_lock_zero(struct dm_cache_metadata *cmd,
 246                                struct dm_block **sblock)
 247{
 248        return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
 249                                     &sb_validator, sblock);
 250}
 251
 252static int superblock_lock(struct dm_cache_metadata *cmd,
 253                           struct dm_block **sblock)
 254{
 255        return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
 256                                &sb_validator, sblock);
 257}
 258
 259/*----------------------------------------------------------------*/
 260
 261static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
 262{
 263        int r;
 264        unsigned i;
 265        struct dm_block *b;
 266        __le64 *data_le, zero = cpu_to_le64(0);
 267        unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
 268
 269        /*
 270         * We can't use a validator here - it may be all zeroes.
 271         */
 272        r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
 273        if (r)
 274                return r;
 275
 276        data_le = dm_block_data(b);
 277        *result = true;
 278        for (i = 0; i < sb_block_size; i++) {
 279                if (data_le[i] != zero) {
 280                        *result = false;
 281                        break;
 282                }
 283        }
 284
 285        dm_bm_unlock(b);
 286
 287        return 0;
 288}
 289
 290static void __setup_mapping_info(struct dm_cache_metadata *cmd)
 291{
 292        struct dm_btree_value_type vt;
 293
 294        vt.context = NULL;
 295        vt.size = sizeof(__le64);
 296        vt.inc = NULL;
 297        vt.dec = NULL;
 298        vt.equal = NULL;
 299        dm_array_info_init(&cmd->info, cmd->tm, &vt);
 300
 301        if (cmd->policy_hint_size) {
 302                vt.size = sizeof(__le32);
 303                dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
 304        }
 305}
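/*
 * Both arrays store plain little-endian words (packed mappings and
 * __le32 hints), so the value type needs no inc/dec/equal callbacks;
 * entries are simply copied as fixed-size values.
 */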
 306
 307static int __save_sm_root(struct dm_cache_metadata *cmd)
 308{
 309        int r;
 310        size_t metadata_len;
 311
 312        r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
 313        if (r < 0)
 314                return r;
 315
 316        return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
 317                               metadata_len);
 318}
 319
 320static void __copy_sm_root(struct dm_cache_metadata *cmd,
 321                           struct cache_disk_superblock *disk_super)
 322{
 323        memcpy(&disk_super->metadata_space_map_root,
 324               &cmd->metadata_space_map_root,
 325               sizeof(cmd->metadata_space_map_root));
 326}
 327
 328static bool separate_dirty_bits(struct dm_cache_metadata *cmd)
 329{
 330        return cmd->version >= 2;
 331}
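/*
 * Metadata format 1 records dirty state in the M_DIRTY flag of each
 * mapping.  Format 2 moves it into a separate on-disk bitset rooted at
 * dirty_root; see __set_dirty_bits_v1() and __set_dirty_bits_v2() for
 * the difference this makes when writing the bits back.
 */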
 332
 333static int __write_initial_superblock(struct dm_cache_metadata *cmd)
 334{
 335        int r;
 336        struct dm_block *sblock;
 337        struct cache_disk_superblock *disk_super;
 338        sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
 339
 340        /* FIXME: see if we can lose the max sectors limit */
 341        if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
 342                bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
 343
 344        r = dm_tm_pre_commit(cmd->tm);
 345        if (r < 0)
 346                return r;
 347
 348        /*
 349         * dm_sm_copy_root() can fail.  So we need to do it before we start
 350         * updating the superblock.
 351         */
 352        r = __save_sm_root(cmd);
 353        if (r)
 354                return r;
 355
 356        r = superblock_lock_zero(cmd, &sblock);
 357        if (r)
 358                return r;
 359
 360        disk_super = dm_block_data(sblock);
 361        disk_super->flags = 0;
 362        memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
 363        disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
 364        disk_super->version = cpu_to_le32(cmd->version);
 365        memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
 366        memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
 367        disk_super->policy_hint_size = 0;
 368
 369        __copy_sm_root(cmd, disk_super);
 370
 371        disk_super->mapping_root = cpu_to_le64(cmd->root);
 372        disk_super->hint_root = cpu_to_le64(cmd->hint_root);
 373        disk_super->discard_root = cpu_to_le64(cmd->discard_root);
 374        disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
 375        disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
 376        disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE);
 377        disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
 378        disk_super->cache_blocks = cpu_to_le32(0);
 379
 380        disk_super->read_hits = cpu_to_le32(0);
 381        disk_super->read_misses = cpu_to_le32(0);
 382        disk_super->write_hits = cpu_to_le32(0);
 383        disk_super->write_misses = cpu_to_le32(0);
 384
 385        if (separate_dirty_bits(cmd))
 386                disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
 387
 388        return dm_tm_commit(cmd->tm, sblock);
 389}
 390
 391static int __format_metadata(struct dm_cache_metadata *cmd)
 392{
 393        int r;
 394
 395        r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
 396                                 &cmd->tm, &cmd->metadata_sm);
 397        if (r < 0) {
 398                DMERR("tm_create_with_sm failed");
 399                return r;
 400        }
 401
 402        __setup_mapping_info(cmd);
 403
 404        r = dm_array_empty(&cmd->info, &cmd->root);
 405        if (r < 0)
 406                goto bad;
 407
 408        if (separate_dirty_bits(cmd)) {
 409                dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
 410                r = dm_bitset_empty(&cmd->dirty_info, &cmd->dirty_root);
 411                if (r < 0)
 412                        goto bad;
 413        }
 414
 415        dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
 416        r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
 417        if (r < 0)
 418                goto bad;
 419
 420        cmd->discard_block_size = 0;
 421        cmd->discard_nr_blocks = 0;
 422
 423        r = __write_initial_superblock(cmd);
 424        if (r)
 425                goto bad;
 426
 427        cmd->clean_when_opened = true;
 428        return 0;
 429
 430bad:
 431        dm_tm_destroy(cmd->tm);
 432        dm_sm_destroy(cmd->metadata_sm);
 433
 434        return r;
 435}
 436
 437static int __check_incompat_features(struct cache_disk_superblock *disk_super,
 438                                     struct dm_cache_metadata *cmd)
 439{
 440        uint32_t incompat_flags, features;
 441
 442        incompat_flags = le32_to_cpu(disk_super->incompat_flags);
 443        features = incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
 444        if (features) {
 445                DMERR("could not access metadata due to unsupported optional features (%lx).",
 446                      (unsigned long)features);
 447                return -EINVAL;
 448        }
 449
 450        /*
 451         * Check for read-only metadata to skip the following RDWR checks.
 452         */
 453        if (get_disk_ro(cmd->bdev->bd_disk))
 454                return 0;
 455
 456        features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
 457        if (features) {
 458                DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
 459                      (unsigned long)features);
 460                return -EINVAL;
 461        }
 462
 463        return 0;
 464}
 465
 466static int __open_metadata(struct dm_cache_metadata *cmd)
 467{
 468        int r;
 469        struct dm_block *sblock;
 470        struct cache_disk_superblock *disk_super;
 471        unsigned long sb_flags;
 472
 473        r = superblock_read_lock(cmd, &sblock);
 474        if (r < 0) {
 475                DMERR("couldn't read lock superblock");
 476                return r;
 477        }
 478
 479        disk_super = dm_block_data(sblock);
 480
 481        /* Verify the data block size hasn't changed */
 482        if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
 483                DMERR("changing the data block size (from %u to %llu) is not supported",
 484                      le32_to_cpu(disk_super->data_block_size),
 485                      (unsigned long long)cmd->data_block_size);
 486                r = -EINVAL;
 487                goto bad;
 488        }
 489
 490        r = __check_incompat_features(disk_super, cmd);
 491        if (r < 0)
 492                goto bad;
 493
 494        r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
 495                               disk_super->metadata_space_map_root,
 496                               sizeof(disk_super->metadata_space_map_root),
 497                               &cmd->tm, &cmd->metadata_sm);
 498        if (r < 0) {
 499                DMERR("tm_open_with_sm failed");
 500                goto bad;
 501        }
 502
 503        __setup_mapping_info(cmd);
 504        dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
 505        dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
 506        sb_flags = le32_to_cpu(disk_super->flags);
 507        cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
 508        dm_bm_unlock(sblock);
 509
 510        return 0;
 511
 512bad:
 513        dm_bm_unlock(sblock);
 514        return r;
 515}
 516
 517static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
 518                                     bool format_device)
 519{
 520        int r;
 521        bool unformatted = false;
 522
 523        r = __superblock_all_zeroes(cmd->bm, &unformatted);
 524        if (r)
 525                return r;
 526
 527        if (unformatted)
 528                return format_device ? __format_metadata(cmd) : -EPERM;
 529
 530        return __open_metadata(cmd);
 531}
 532
 533static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
 534                                            bool may_format_device)
 535{
 536        int r;
 537        cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
 538                                          CACHE_METADATA_CACHE_SIZE,
 539                                          CACHE_MAX_CONCURRENT_LOCKS);
 540        if (IS_ERR(cmd->bm)) {
 541                DMERR("could not create block manager");
 542                return PTR_ERR(cmd->bm);
 543        }
 544
 545        r = __open_or_format_metadata(cmd, may_format_device);
 546        if (r)
 547                dm_block_manager_destroy(cmd->bm);
 548
 549        return r;
 550}
 551
 552static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
 553{
 554        dm_sm_destroy(cmd->metadata_sm);
 555        dm_tm_destroy(cmd->tm);
 556        dm_block_manager_destroy(cmd->bm);
 557}
 558
 559typedef unsigned long (*flags_mutator)(unsigned long);
 560
 561static void update_flags(struct cache_disk_superblock *disk_super,
 562                         flags_mutator mutator)
 563{
 564        uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
 565        disk_super->flags = cpu_to_le32(sb_flags);
 566}
 567
 568static unsigned long set_clean_shutdown(unsigned long flags)
 569{
 570        set_bit(CLEAN_SHUTDOWN, &flags);
 571        return flags;
 572}
 573
 574static unsigned long clear_clean_shutdown(unsigned long flags)
 575{
 576        clear_bit(CLEAN_SHUTDOWN, &flags);
 577        return flags;
 578}
 579
 580static void read_superblock_fields(struct dm_cache_metadata *cmd,
 581                                   struct cache_disk_superblock *disk_super)
 582{
 583        cmd->version = le32_to_cpu(disk_super->version);
 584        cmd->flags = le32_to_cpu(disk_super->flags);
 585        cmd->root = le64_to_cpu(disk_super->mapping_root);
 586        cmd->hint_root = le64_to_cpu(disk_super->hint_root);
 587        cmd->discard_root = le64_to_cpu(disk_super->discard_root);
 588        cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
 589        cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
 590        cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
 591        cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
 592        strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
 593        cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
 594        cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
 595        cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
 596        cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
 597
 598        cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
 599        cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
 600        cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
 601        cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
 602
 603        if (separate_dirty_bits(cmd))
 604                cmd->dirty_root = le64_to_cpu(disk_super->dirty_root);
 605
 606        cmd->changed = false;
 607}
 608
 609/*
 610 * The mutator updates the superblock flags.
 611 */
 612static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
 613                                     flags_mutator mutator)
 614{
 615        int r;
 616        struct cache_disk_superblock *disk_super;
 617        struct dm_block *sblock;
 618
 619        r = superblock_lock(cmd, &sblock);
 620        if (r)
 621                return r;
 622
 623        disk_super = dm_block_data(sblock);
 624        update_flags(disk_super, mutator);
 625        read_superblock_fields(cmd, disk_super);
 626        dm_bm_unlock(sblock);
 627
 628        return dm_bm_flush(cmd->bm);
 629}
 630
 631static int __begin_transaction(struct dm_cache_metadata *cmd)
 632{
 633        int r;
 634        struct cache_disk_superblock *disk_super;
 635        struct dm_block *sblock;
 636
 637        /*
  638         * We re-read the superblock every time, although we shouldn't
  639         * really need to.
 640         */
 641        r = superblock_read_lock(cmd, &sblock);
 642        if (r)
 643                return r;
 644
 645        disk_super = dm_block_data(sblock);
 646        read_superblock_fields(cmd, disk_super);
 647        dm_bm_unlock(sblock);
 648
 649        return 0;
 650}
 651
 652static int __commit_transaction(struct dm_cache_metadata *cmd,
 653                                flags_mutator mutator)
 654{
 655        int r;
 656        struct cache_disk_superblock *disk_super;
 657        struct dm_block *sblock;
 658
 659        /*
  660         * The cache_disk_superblock must not exceed a 512-byte sector.
 661         */
 662        BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);
 663
 664        if (separate_dirty_bits(cmd)) {
 665                r = dm_bitset_flush(&cmd->dirty_info, cmd->dirty_root,
 666                                    &cmd->dirty_root);
 667                if (r)
 668                        return r;
 669        }
 670
 671        r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
 672                            &cmd->discard_root);
 673        if (r)
 674                return r;
 675
 676        r = dm_tm_pre_commit(cmd->tm);
 677        if (r < 0)
 678                return r;
 679
 680        r = __save_sm_root(cmd);
 681        if (r)
 682                return r;
 683
 684        r = superblock_lock(cmd, &sblock);
 685        if (r)
 686                return r;
 687
 688        disk_super = dm_block_data(sblock);
 689
 690        disk_super->flags = cpu_to_le32(cmd->flags);
 691        if (mutator)
 692                update_flags(disk_super, mutator);
 693
 694        disk_super->mapping_root = cpu_to_le64(cmd->root);
 695        if (separate_dirty_bits(cmd))
 696                disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
 697        disk_super->hint_root = cpu_to_le64(cmd->hint_root);
 698        disk_super->discard_root = cpu_to_le64(cmd->discard_root);
 699        disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
 700        disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
 701        disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
 702        strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
 703        disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
 704        disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
 705        disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
 706
 707        disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
 708        disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
 709        disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
 710        disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
 711        __copy_sm_root(cmd, disk_super);
 712
 713        return dm_tm_commit(cmd->tm, sblock);
 714}
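/*
 * Commit ordering above: flush the dirty and discard bitsets to obtain
 * stable roots, run the transaction manager pre-commit, snapshot the
 * space map root into cmd->metadata_space_map_root (this step can fail,
 * so it happens before the superblock is locked), then fill in the
 * superblock and commit it.
 */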
 715
 716/*----------------------------------------------------------------*/
 717
 718/*
 719 * The mappings are held in a dm-array that has 64-bit values stored in
  720 * little-endian format.  The index is the cblock, the high 48 bits of the
  721 * value are the oblock and the low 16 bits are the flags.
 722 */
 723#define FLAGS_MASK ((1 << 16) - 1)
 724
 725static __le64 pack_value(dm_oblock_t block, unsigned flags)
 726{
 727        uint64_t value = from_oblock(block);
 728        value <<= 16;
 729        value = value | (flags & FLAGS_MASK);
 730        return cpu_to_le64(value);
 731}
 732
 733static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
 734{
 735        uint64_t value = le64_to_cpu(value_le);
 736        uint64_t b = value >> 16;
 737        *block = to_oblock(b);
 738        *flags = value & FLAGS_MASK;
 739}
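/*
 * Illustrative example of the packing (values made up): oblock 0x1234
 * with flags M_VALID | M_DIRTY packs to 0x12340003, and an unused entry
 * packs to 0 because M_VALID is clear (see the null_mapping used when
 * resizing).
 */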
 740
 741/*----------------------------------------------------------------*/
 742
 743static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
 744                                               sector_t data_block_size,
 745                                               bool may_format_device,
 746                                               size_t policy_hint_size,
 747                                               unsigned metadata_version)
 748{
 749        int r;
 750        struct dm_cache_metadata *cmd;
 751
 752        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 753        if (!cmd) {
 754                DMERR("could not allocate metadata struct");
 755                return ERR_PTR(-ENOMEM);
 756        }
 757
 758        cmd->version = metadata_version;
 759        atomic_set(&cmd->ref_count, 1);
 760        init_rwsem(&cmd->root_lock);
 761        cmd->bdev = bdev;
 762        cmd->data_block_size = data_block_size;
 763        cmd->cache_blocks = 0;
 764        cmd->policy_hint_size = policy_hint_size;
 765        cmd->changed = true;
 766        cmd->fail_io = false;
 767
 768        r = __create_persistent_data_objects(cmd, may_format_device);
 769        if (r) {
 770                kfree(cmd);
 771                return ERR_PTR(r);
 772        }
 773
 774        r = __begin_transaction_flags(cmd, clear_clean_shutdown);
 775        if (r < 0) {
 776                dm_cache_metadata_close(cmd);
 777                return ERR_PTR(r);
 778        }
 779
 780        return cmd;
 781}
 782
 783/*
  784 * We keep a small list of ref-counted metadata objects to prevent two
  785 * different target instances from creating separate bufio instances.  This
  786 * would be a problem if a table is reloaded before the old table is suspended.
 787 */
 788static DEFINE_MUTEX(table_lock);
 789static LIST_HEAD(table);
 790
 791static struct dm_cache_metadata *lookup(struct block_device *bdev)
 792{
 793        struct dm_cache_metadata *cmd;
 794
 795        list_for_each_entry(cmd, &table, list)
 796                if (cmd->bdev == bdev) {
 797                        atomic_inc(&cmd->ref_count);
 798                        return cmd;
 799                }
 800
 801        return NULL;
 802}
 803
 804static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
 805                                                sector_t data_block_size,
 806                                                bool may_format_device,
 807                                                size_t policy_hint_size,
 808                                                unsigned metadata_version)
 809{
 810        struct dm_cache_metadata *cmd, *cmd2;
 811
 812        mutex_lock(&table_lock);
 813        cmd = lookup(bdev);
 814        mutex_unlock(&table_lock);
 815
 816        if (cmd)
 817                return cmd;
 818
 819        cmd = metadata_open(bdev, data_block_size, may_format_device,
 820                            policy_hint_size, metadata_version);
 821        if (!IS_ERR(cmd)) {
 822                mutex_lock(&table_lock);
 823                cmd2 = lookup(bdev);
 824                if (cmd2) {
 825                        mutex_unlock(&table_lock);
 826                        __destroy_persistent_data_objects(cmd);
 827                        kfree(cmd);
 828                        return cmd2;
 829                }
 830                list_add(&cmd->list, &table);
 831                mutex_unlock(&table_lock);
 832        }
 833
 834        return cmd;
 835}
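/*
 * Note the second lookup above: table_lock is dropped while
 * metadata_open() does its I/O, so another thread may have opened the
 * same bdev in the meantime.  If so, the freshly built cmd is torn down
 * and the existing ref-counted instance is returned instead.
 */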
 836
 837static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
 838{
 839        if (cmd->data_block_size != data_block_size) {
 840                DMERR("data_block_size (%llu) different from that in metadata (%llu)",
 841                      (unsigned long long) data_block_size,
 842                      (unsigned long long) cmd->data_block_size);
 843                return false;
 844        }
 845
 846        return true;
 847}
 848
 849struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
 850                                                 sector_t data_block_size,
 851                                                 bool may_format_device,
 852                                                 size_t policy_hint_size,
 853                                                 unsigned metadata_version)
 854{
 855        struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
 856                                                       policy_hint_size, metadata_version);
 857
 858        if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
 859                dm_cache_metadata_close(cmd);
 860                return ERR_PTR(-EINVAL);
 861        }
 862
 863        return cmd;
 864}
 865
 866void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
 867{
 868        if (atomic_dec_and_test(&cmd->ref_count)) {
 869                mutex_lock(&table_lock);
 870                list_del(&cmd->list);
 871                mutex_unlock(&table_lock);
 872
 873                if (!cmd->fail_io)
 874                        __destroy_persistent_data_objects(cmd);
 875                kfree(cmd);
 876        }
 877}
 878
 879/*
  880 * Checks whether the given cache block is either unmapped or clean.
 881 */
 882static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t b,
 883                                      bool *result)
 884{
 885        int r;
 886        __le64 value;
 887        dm_oblock_t ob;
 888        unsigned flags;
 889
 890        r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
 891        if (r)
 892                return r;
 893
 894        unpack_value(value, &ob, &flags);
 895        *result = !((flags & M_VALID) && (flags & M_DIRTY));
 896
 897        return 0;
 898}
 899
 900static int blocks_are_clean_combined_dirty(struct dm_cache_metadata *cmd,
 901                                           dm_cblock_t begin, dm_cblock_t end,
 902                                           bool *result)
 903{
 904        int r;
 905        *result = true;
 906
 907        while (begin != end) {
 908                r = block_clean_combined_dirty(cmd, begin, result);
 909                if (r) {
 910                        DMERR("block_clean_combined_dirty failed");
 911                        return r;
 912                }
 913
 914                if (!*result) {
 915                        DMERR("cache block %llu is dirty",
 916                              (unsigned long long) from_cblock(begin));
 917                        return 0;
 918                }
 919
 920                begin = to_cblock(from_cblock(begin) + 1);
 921        }
 922
 923        return 0;
 924}
 925
 926static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
 927                                           dm_cblock_t begin, dm_cblock_t end,
 928                                           bool *result)
 929{
 930        int r;
 931        bool dirty_flag;
 932        *result = true;
 933
 934        r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
 935                                   from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
 936        if (r) {
 937                DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
 938                return r;
 939        }
 940
 941        r = dm_bitset_cursor_skip(&cmd->dirty_cursor, from_cblock(begin));
 942        if (r) {
 943                DMERR("%s: dm_bitset_cursor_skip for dirty failed", __func__);
 944                dm_bitset_cursor_end(&cmd->dirty_cursor);
 945                return r;
 946        }
 947
 948        while (begin != end) {
 949                /*
 950                 * We assume that unmapped blocks have their dirty bit
 951                 * cleared.
 952                 */
 953                dirty_flag = dm_bitset_cursor_get_value(&cmd->dirty_cursor);
 954                if (dirty_flag) {
 955                        DMERR("%s: cache block %llu is dirty", __func__,
 956                              (unsigned long long) from_cblock(begin));
 957                        dm_bitset_cursor_end(&cmd->dirty_cursor);
 958                        *result = false;
 959                        return 0;
 960                }
 961
 962                begin = to_cblock(from_cblock(begin) + 1);
 963                if (begin == end)
 964                        break;
 965
 966                r = dm_bitset_cursor_next(&cmd->dirty_cursor);
 967                if (r) {
 968                        DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
 969                        dm_bitset_cursor_end(&cmd->dirty_cursor);
 970                        return r;
 971                }
 972        }
 973
 974        dm_bitset_cursor_end(&cmd->dirty_cursor);
 975
 976        return 0;
 977}
 978
 979static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
 980                                        dm_cblock_t begin, dm_cblock_t end,
 981                                        bool *result)
 982{
 983        if (separate_dirty_bits(cmd))
 984                return blocks_are_clean_separate_dirty(cmd, begin, end, result);
 985        else
 986                return blocks_are_clean_combined_dirty(cmd, begin, end, result);
 987}
 988
 989static bool cmd_write_lock(struct dm_cache_metadata *cmd)
 990{
 991        down_write(&cmd->root_lock);
 992        if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
 993                up_write(&cmd->root_lock);
 994                return false;
 995        }
 996        return true;
 997}
 998
 999#define WRITE_LOCK(cmd)                         \
1000        do {                                    \
1001                if (!cmd_write_lock((cmd)))     \
1002                        return -EINVAL;         \
1003        } while(0)
1004
1005#define WRITE_LOCK_VOID(cmd)                    \
1006        do {                                    \
1007                if (!cmd_write_lock((cmd)))     \
1008                        return;                 \
1009        } while(0)
1010
1011#define WRITE_UNLOCK(cmd) \
1012        up_write(&(cmd)->root_lock)
1013
1014static bool cmd_read_lock(struct dm_cache_metadata *cmd)
1015{
1016        down_read(&cmd->root_lock);
1017        if (cmd->fail_io) {
1018                up_read(&cmd->root_lock);
1019                return false;
1020        }
1021        return true;
1022}
1023
1024#define READ_LOCK(cmd)                          \
1025        do {                                    \
1026                if (!cmd_read_lock((cmd)))      \
1027                        return -EINVAL;         \
1028        } while(0)
1029
1030#define READ_LOCK_VOID(cmd)                     \
1031        do {                                    \
1032                if (!cmd_read_lock((cmd)))      \
1033                        return;                 \
1034        } while(0)
1035
1036#define READ_UNLOCK(cmd) \
1037        up_read(&(cmd)->root_lock)
1038
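/*
 * Typical shape of the public entry points below (an illustrative
 * sketch; dm_cache_frob()/__frob() are made-up names).  WRITE_LOCK()
 * bails out with -EINVAL if fail_io is set or the block manager is
 * read-only:
 *
 *      int dm_cache_frob(struct dm_cache_metadata *cmd)
 *      {
 *              int r;
 *
 *              WRITE_LOCK(cmd);
 *              r = __frob(cmd);
 *              WRITE_UNLOCK(cmd);
 *
 *              return r;
 *      }
 */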
1039int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
1040{
1041        int r;
1042        bool clean;
1043        __le64 null_mapping = pack_value(0, 0);
1044
1045        WRITE_LOCK(cmd);
1046        __dm_bless_for_disk(&null_mapping);
1047
1048        if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
1049                r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
1050                if (r) {
1051                        __dm_unbless_for_disk(&null_mapping);
1052                        goto out;
1053                }
1054
1055                if (!clean) {
1056                        DMERR("unable to shrink cache due to dirty blocks");
1057                        r = -EINVAL;
1058                        __dm_unbless_for_disk(&null_mapping);
1059                        goto out;
1060                }
1061        }
1062
1063        r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
1064                            from_cblock(new_cache_size),
1065                            &null_mapping, &cmd->root);
1066        if (r)
1067                goto out;
1068
1069        if (separate_dirty_bits(cmd)) {
1070                r = dm_bitset_resize(&cmd->dirty_info, cmd->dirty_root,
1071                                     from_cblock(cmd->cache_blocks), from_cblock(new_cache_size),
1072                                     false, &cmd->dirty_root);
1073                if (r)
1074                        goto out;
1075        }
1076
1077        cmd->cache_blocks = new_cache_size;
1078        cmd->changed = true;
1079
1080out:
1081        WRITE_UNLOCK(cmd);
1082
1083        return r;
1084}
1085
1086int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
1087                                   sector_t discard_block_size,
1088                                   dm_dblock_t new_nr_entries)
1089{
1090        int r;
1091
1092        WRITE_LOCK(cmd);
1093        r = dm_bitset_resize(&cmd->discard_info,
1094                             cmd->discard_root,
1095                             from_dblock(cmd->discard_nr_blocks),
1096                             from_dblock(new_nr_entries),
1097                             false, &cmd->discard_root);
1098        if (!r) {
1099                cmd->discard_block_size = discard_block_size;
1100                cmd->discard_nr_blocks = new_nr_entries;
1101        }
1102
1103        cmd->changed = true;
1104        WRITE_UNLOCK(cmd);
1105
1106        return r;
1107}
1108
1109static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
1110{
1111        return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
1112                                 from_dblock(b), &cmd->discard_root);
1113}
1114
1115static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
1116{
1117        return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
1118                                   from_dblock(b), &cmd->discard_root);
1119}
1120
1121static int __discard(struct dm_cache_metadata *cmd,
1122                     dm_dblock_t dblock, bool discard)
1123{
1124        int r;
1125
1126        r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
1127        if (r)
1128                return r;
1129
1130        cmd->changed = true;
1131        return 0;
1132}
1133
1134int dm_cache_set_discard(struct dm_cache_metadata *cmd,
1135                         dm_dblock_t dblock, bool discard)
1136{
1137        int r;
1138
1139        WRITE_LOCK(cmd);
1140        r = __discard(cmd, dblock, discard);
1141        WRITE_UNLOCK(cmd);
1142
1143        return r;
1144}
1145
1146static int __load_discards(struct dm_cache_metadata *cmd,
1147                           load_discard_fn fn, void *context)
1148{
1149        int r = 0;
1150        uint32_t b;
1151        struct dm_bitset_cursor c;
1152
1153        if (from_dblock(cmd->discard_nr_blocks) == 0)
1154                /* nothing to do */
1155                return 0;
1156
1157        if (cmd->clean_when_opened) {
1158                r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root, &cmd->discard_root);
1159                if (r)
1160                        return r;
1161
1162                r = dm_bitset_cursor_begin(&cmd->discard_info, cmd->discard_root,
1163                                           from_dblock(cmd->discard_nr_blocks), &c);
1164                if (r)
1165                        return r;
1166
1167                for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
1168                        r = fn(context, cmd->discard_block_size, to_dblock(b),
1169                               dm_bitset_cursor_get_value(&c));
1170                        if (r)
1171                                break;
1172                }
1173
1174                dm_bitset_cursor_end(&c);
1175
1176        } else {
1177                for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
1178                        r = fn(context, cmd->discard_block_size, to_dblock(b), false);
1179                        if (r)
1180                                return r;
1181                }
1182        }
1183
1184        return r;
1185}
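/*
 * If the previous shutdown wasn't clean the on-disk discard bitset is
 * not trusted, and every block is reported as not discarded instead.
 */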
1186
1187int dm_cache_load_discards(struct dm_cache_metadata *cmd,
1188                           load_discard_fn fn, void *context)
1189{
1190        int r;
1191
1192        READ_LOCK(cmd);
1193        r = __load_discards(cmd, fn, context);
1194        READ_UNLOCK(cmd);
1195
1196        return r;
1197}
1198
1199int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
1200{
1201        READ_LOCK(cmd);
1202        *result = cmd->cache_blocks;
1203        READ_UNLOCK(cmd);
1204
1205        return 0;
1206}
1207
1208static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1209{
1210        int r;
1211        __le64 value = pack_value(0, 0);
1212
1213        __dm_bless_for_disk(&value);
1214        r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1215                               &value, &cmd->root);
1216        if (r)
1217                return r;
1218
1219        cmd->changed = true;
1220        return 0;
1221}
1222
1223int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1224{
1225        int r;
1226
1227        WRITE_LOCK(cmd);
1228        r = __remove(cmd, cblock);
1229        WRITE_UNLOCK(cmd);
1230
1231        return r;
1232}
1233
1234static int __insert(struct dm_cache_metadata *cmd,
1235                    dm_cblock_t cblock, dm_oblock_t oblock)
1236{
1237        int r;
1238        __le64 value = pack_value(oblock, M_VALID);
1239        __dm_bless_for_disk(&value);
1240
1241        r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1242                               &value, &cmd->root);
1243        if (r)
1244                return r;
1245
1246        cmd->changed = true;
1247        return 0;
1248}
1249
1250int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
1251                            dm_cblock_t cblock, dm_oblock_t oblock)
1252{
1253        int r;
1254
1255        WRITE_LOCK(cmd);
1256        r = __insert(cmd, cblock, oblock);
1257        WRITE_UNLOCK(cmd);
1258
1259        return r;
1260}
1261
1262struct thunk {
1263        load_mapping_fn fn;
1264        void *context;
1265
1266        struct dm_cache_metadata *cmd;
1267        bool respect_dirty_flags;
1268        bool hints_valid;
1269};
1270
1271static bool policy_unchanged(struct dm_cache_metadata *cmd,
1272                             struct dm_cache_policy *policy)
1273{
1274        const char *policy_name = dm_cache_policy_get_name(policy);
1275        const unsigned *policy_version = dm_cache_policy_get_version(policy);
1276        size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
1277
1278        /*
1279         * Ensure policy names match.
1280         */
1281        if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
1282                return false;
1283
1284        /*
1285         * Ensure policy major versions match.
1286         */
1287        if (cmd->policy_version[0] != policy_version[0])
1288                return false;
1289
1290        /*
1291         * Ensure policy hint sizes match.
1292         */
1293        if (cmd->policy_hint_size != policy_hint_size)
1294                return false;
1295
1296        return true;
1297}
1298
1299static bool hints_array_initialized(struct dm_cache_metadata *cmd)
1300{
1301        return cmd->hint_root && cmd->policy_hint_size;
1302}
1303
1304static bool hints_array_available(struct dm_cache_metadata *cmd,
1305                                  struct dm_cache_policy *policy)
1306{
1307        return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
1308                hints_array_initialized(cmd);
1309}
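/*
 * Hints are only replayed if the cache shut down cleanly, the policy
 * (name, major version and hint size) is unchanged and a hint array was
 * actually written; otherwise the mappings are loaded with hints_valid
 * set to false.
 */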
1310
1311static int __load_mapping_v1(struct dm_cache_metadata *cmd,
1312                             uint64_t cb, bool hints_valid,
1313                             struct dm_array_cursor *mapping_cursor,
1314                             struct dm_array_cursor *hint_cursor,
1315                             load_mapping_fn fn, void *context)
1316{
1317        int r = 0;
1318
1319        __le64 mapping;
1320        __le32 hint = 0;
1321
1322        __le64 *mapping_value_le;
1323        __le32 *hint_value_le;
1324
1325        dm_oblock_t oblock;
1326        unsigned flags;
1327
1328        dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1329        memcpy(&mapping, mapping_value_le, sizeof(mapping));
1330        unpack_value(mapping, &oblock, &flags);
1331
1332        if (flags & M_VALID) {
1333                if (hints_valid) {
1334                        dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1335                        memcpy(&hint, hint_value_le, sizeof(hint));
1336                }
1337
1338                r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
1339                       le32_to_cpu(hint), hints_valid);
1340                if (r) {
1341                        DMERR("policy couldn't load cache block %llu",
1342                              (unsigned long long) from_cblock(to_cblock(cb)));
1343                }
1344        }
1345
1346        return r;
1347}
1348
1349static int __load_mapping_v2(struct dm_cache_metadata *cmd,
1350                             uint64_t cb, bool hints_valid,
1351                             struct dm_array_cursor *mapping_cursor,
1352                             struct dm_array_cursor *hint_cursor,
1353                             struct dm_bitset_cursor *dirty_cursor,
1354                             load_mapping_fn fn, void *context)
1355{
1356        int r = 0;
1357
1358        __le64 mapping;
1359        __le32 hint = 0;
1360
1361        __le64 *mapping_value_le;
1362        __le32 *hint_value_le;
1363
1364        dm_oblock_t oblock;
1365        unsigned flags;
1366        bool dirty;
1367
1368        dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1369        memcpy(&mapping, mapping_value_le, sizeof(mapping));
1370        unpack_value(mapping, &oblock, &flags);
1371
1372        if (flags & M_VALID) {
1373                if (hints_valid) {
1374                        dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1375                        memcpy(&hint, hint_value_le, sizeof(hint));
1376                }
1377
1378                dirty = dm_bitset_cursor_get_value(dirty_cursor);
1379                r = fn(context, oblock, to_cblock(cb), dirty,
1380                       le32_to_cpu(hint), hints_valid);
1381                if (r) {
1382                        DMERR("policy couldn't load cache block %llu",
1383                              (unsigned long long) from_cblock(to_cblock(cb)));
1384                }
1385        }
1386
1387        return r;
1388}
1389
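/*
 * Walks up to three on-disk structures in lockstep: the mapping array,
 * optionally the hint array, and (format 2 only) the dirty bitset.  The
 * cursors must not be advanced past the final cache block, hence the
 * explicit break before they are moved.
 */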
1390static int __load_mappings(struct dm_cache_metadata *cmd,
1391                           struct dm_cache_policy *policy,
1392                           load_mapping_fn fn, void *context)
1393{
1394        int r;
1395        uint64_t cb;
1396
1397        bool hints_valid = hints_array_available(cmd, policy);
1398
1399        if (from_cblock(cmd->cache_blocks) == 0)
1400                /* Nothing to do */
1401                return 0;
1402
1403        r = dm_array_cursor_begin(&cmd->info, cmd->root, &cmd->mapping_cursor);
1404        if (r)
1405                return r;
1406
1407        if (hints_valid) {
1408                r = dm_array_cursor_begin(&cmd->hint_info, cmd->hint_root, &cmd->hint_cursor);
1409                if (r) {
1410                        dm_array_cursor_end(&cmd->mapping_cursor);
1411                        return r;
1412                }
1413        }
1414
1415        if (separate_dirty_bits(cmd)) {
1416                r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
1417                                           from_cblock(cmd->cache_blocks),
1418                                           &cmd->dirty_cursor);
1419                if (r) {
1420                        dm_array_cursor_end(&cmd->hint_cursor);
1421                        dm_array_cursor_end(&cmd->mapping_cursor);
1422                        return r;
1423                }
1424        }
1425
1426        for (cb = 0; ; cb++) {
1427                if (separate_dirty_bits(cmd))
1428                        r = __load_mapping_v2(cmd, cb, hints_valid,
1429                                              &cmd->mapping_cursor,
1430                                              &cmd->hint_cursor,
1431                                              &cmd->dirty_cursor,
1432                                              fn, context);
1433                else
1434                        r = __load_mapping_v1(cmd, cb, hints_valid,
1435                                              &cmd->mapping_cursor, &cmd->hint_cursor,
1436                                              fn, context);
1437                if (r)
1438                        goto out;
1439
1440                /*
1441                 * We need to break out before we move the cursors.
1442                 */
1443                if (cb >= (from_cblock(cmd->cache_blocks) - 1))
1444                        break;
1445
1446                r = dm_array_cursor_next(&cmd->mapping_cursor);
1447                if (r) {
1448                        DMERR("dm_array_cursor_next for mapping failed");
1449                        goto out;
1450                }
1451
1452                if (hints_valid) {
1453                        r = dm_array_cursor_next(&cmd->hint_cursor);
1454                        if (r) {
1455                                DMERR("dm_array_cursor_next for hint failed");
1456                                goto out;
1457                        }
1458                }
1459
1460                if (separate_dirty_bits(cmd)) {
1461                        r = dm_bitset_cursor_next(&cmd->dirty_cursor);
1462                        if (r) {
1463                                DMERR("dm_bitset_cursor_next for dirty failed");
1464                                goto out;
1465                        }
1466                }
1467        }
1468out:
1469        dm_array_cursor_end(&cmd->mapping_cursor);
1470        if (hints_valid)
1471                dm_array_cursor_end(&cmd->hint_cursor);
1472
1473        if (separate_dirty_bits(cmd))
1474                dm_bitset_cursor_end(&cmd->dirty_cursor);
1475
1476        return r;
1477}
1478
1479int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
1480                           struct dm_cache_policy *policy,
1481                           load_mapping_fn fn, void *context)
1482{
1483        int r;
1484
1485        READ_LOCK(cmd);
1486        r = __load_mappings(cmd, policy, fn, context);
1487        READ_UNLOCK(cmd);
1488
1489        return r;
1490}
1491
1492static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
1493{
1494        int r = 0;
1495        __le64 value;
1496        dm_oblock_t oblock;
1497        unsigned flags;
1498
1499        memcpy(&value, leaf, sizeof(value));
1500        unpack_value(value, &oblock, &flags);
1501
1502        return r;
1503}
1504
1505static int __dump_mappings(struct dm_cache_metadata *cmd)
1506{
1507        return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL);
1508}
1509
1510void dm_cache_dump(struct dm_cache_metadata *cmd)
1511{
1512        READ_LOCK_VOID(cmd);
1513        __dump_mappings(cmd);
1514        READ_UNLOCK(cmd);
1515}
1516
1517int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
1518{
1519        int r;
1520
1521        READ_LOCK(cmd);
1522        r = cmd->changed;
1523        READ_UNLOCK(cmd);
1524
1525        return r;
1526}
1527
1528static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
1529{
1530        int r;
1531        unsigned flags;
1532        dm_oblock_t oblock;
1533        __le64 value;
1534
1535        r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
1536        if (r)
1537                return r;
1538
1539        unpack_value(value, &oblock, &flags);
1540
1541        if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
1542                /* nothing to be done */
1543                return 0;
1544
1545        value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
1546        __dm_bless_for_disk(&value);
1547
1548        r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1549                               &value, &cmd->root);
1550        if (r)
1551                return r;
1552
1553        cmd->changed = true;
1554        return 0;
1555
1556}
1557
1558static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
1559{
1560        int r;
1561        unsigned i;
1562        for (i = 0; i < nr_bits; i++) {
1563                r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
1564                if (r)
1565                        return r;
1566        }
1567
1568        return 0;
1569}
1570
1571static int is_dirty_callback(uint32_t index, bool *value, void *context)
1572{
1573        unsigned long *bits = context;
1574        *value = test_bit(index, bits);
1575        return 0;
1576}
1577
1578static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
1579{
1580        int r = 0;
1581
1582        /* nr_bits is really just a sanity check */
1583        if (nr_bits != from_cblock(cmd->cache_blocks)) {
1584                DMERR("dirty bitset is wrong size");
1585                return -EINVAL;
1586        }
1587
1588        r = dm_bitset_del(&cmd->dirty_info, cmd->dirty_root);
1589        if (r)
1590                return r;
1591
1592        cmd->changed = true;
1593        return dm_bitset_new(&cmd->dirty_info, &cmd->dirty_root, nr_bits, is_dirty_callback, bits);
1594}
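/*
 * Format 1 has to rewrite every mapping whose M_DIRTY flag changes,
 * whereas format 2 simply discards the old bitset and rebuilds it from
 * the caller's bits in a single pass via is_dirty_callback().
 */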
1595
1596int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
1597                            unsigned nr_bits,
1598                            unsigned long *bits)
1599{
1600        int r;
1601
1602        WRITE_LOCK(cmd);
1603        if (separate_dirty_bits(cmd))
1604                r = __set_dirty_bits_v2(cmd, nr_bits, bits);
1605        else
1606                r = __set_dirty_bits_v1(cmd, nr_bits, bits);
1607        WRITE_UNLOCK(cmd);
1608
1609        return r;
1610}
1611
1612void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
1613                                 struct dm_cache_statistics *stats)
1614{
1615        READ_LOCK_VOID(cmd);
1616        *stats = cmd->stats;
1617        READ_UNLOCK(cmd);
1618}
1619
1620void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
1621                                 struct dm_cache_statistics *stats)
1622{
1623        WRITE_LOCK_VOID(cmd);
1624        cmd->stats = *stats;
1625        WRITE_UNLOCK(cmd);
1626}
1627
1628int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
1629{
1630        int r = -EINVAL;
1631        flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
1632                                 clear_clean_shutdown);
1633
1634        WRITE_LOCK(cmd);
1635        if (cmd->fail_io)
1636                goto out;
1637
1638        r = __commit_transaction(cmd, mutator);
1639        if (r)
1640                goto out;
1641
1642        r = __begin_transaction(cmd);
1643out:
1644        WRITE_UNLOCK(cmd);
1645        return r;
1646}
1647
1648int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
1649                                           dm_block_t *result)
1650{
1651        int r = -EINVAL;
1652
1653        READ_LOCK(cmd);
1654        if (!cmd->fail_io)
1655                r = dm_sm_get_nr_free(cmd->metadata_sm, result);
1656        READ_UNLOCK(cmd);
1657
1658        return r;
1659}
1660
1661int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
1662                                   dm_block_t *result)
1663{
1664        int r = -EINVAL;
1665
1666        READ_LOCK(cmd);
1667        if (!cmd->fail_io)
1668                r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
1669        READ_UNLOCK(cmd);
1670
1671        return r;
1672}
1673
1674/*----------------------------------------------------------------*/
1675
1676static int get_hint(uint32_t index, void *value_le, void *context)
1677{
1678        uint32_t value;
1679        struct dm_cache_policy *policy = context;
1680
1681        value = policy_get_hint(policy, to_cblock(index));
1682        *((__le32 *) value_le) = cpu_to_le32(value);
1683
1684        return 0;
1685}
1686
1687/*
 1688 * It's quicker to always delete the hint array and recreate it with
 1689 * dm_array_new().
1690 */
1691static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
1692{
1693        int r;
1694        size_t hint_size;
1695        const char *policy_name = dm_cache_policy_get_name(policy);
1696        const unsigned *policy_version = dm_cache_policy_get_version(policy);
1697
1698        if (!policy_name[0] ||
1699            (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
1700                return -EINVAL;
1701
1702        strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
1703        memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
1704
1705        hint_size = dm_cache_policy_get_hint_size(policy);
1706        if (!hint_size)
1707                return 0; /* short-circuit hints initialization */
1708        cmd->policy_hint_size = hint_size;
1709
1710        if (cmd->hint_root) {
1711                r = dm_array_del(&cmd->hint_info, cmd->hint_root);
1712                if (r)
1713                        return r;
1714        }
1715
1716        return dm_array_new(&cmd->hint_info, &cmd->hint_root,
1717                            from_cblock(cmd->cache_blocks),
1718                            get_hint, policy);
1719}
1720
1721int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
1722{
1723        int r;
1724
1725        WRITE_LOCK(cmd);
1726        r = write_hints(cmd, policy);
1727        WRITE_UNLOCK(cmd);
1728
1729        return r;
1730}
1731
1732int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
1733{
1734        int r;
1735
1736        READ_LOCK(cmd);
1737        r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
1738        READ_UNLOCK(cmd);
1739
1740        return r;
1741}
1742
1743void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
1744{
1745        WRITE_LOCK_VOID(cmd);
1746        dm_bm_set_read_only(cmd->bm);
1747        WRITE_UNLOCK(cmd);
1748}
1749
1750void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd)
1751{
1752        WRITE_LOCK_VOID(cmd);
1753        dm_bm_set_read_write(cmd->bm);
1754        WRITE_UNLOCK(cmd);
1755}
1756
1757int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
1758{
1759        int r;
1760        struct dm_block *sblock;
1761        struct cache_disk_superblock *disk_super;
1762
1763        WRITE_LOCK(cmd);
1764        set_bit(NEEDS_CHECK, &cmd->flags);
1765
1766        r = superblock_lock(cmd, &sblock);
1767        if (r) {
1768                DMERR("couldn't read superblock");
1769                goto out;
1770        }
1771
1772        disk_super = dm_block_data(sblock);
1773        disk_super->flags = cpu_to_le32(cmd->flags);
1774
1775        dm_bm_unlock(sblock);
1776
1777out:
1778        WRITE_UNLOCK(cmd);
1779        return r;
1780}
1781
1782int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
1783{
1784        READ_LOCK(cmd);
1785        *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
1786        READ_UNLOCK(cmd);
1787
1788        return 0;
1789}
1790
1791int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
1792{
1793        int r;
1794
1795        WRITE_LOCK(cmd);
1796        __destroy_persistent_data_objects(cmd);
1797        r = __create_persistent_data_objects(cmd, false);
1798        if (r)
1799                cmd->fail_io = true;
1800        WRITE_UNLOCK(cmd);
1801
1802        return r;
1803}
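/*
 * Aborting tears down and recreates the persistent-data objects, which
 * drops any uncommitted changes and reopens the last committed
 * transaction.  If the reopen fails there is nothing left to roll back
 * to, so fail_io is set and only dm_cache_metadata_close() remains
 * usable (see the comment on the fail_io field).
 */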
1804