linux/drivers/md/dm-log.c
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "dirty region log"

static LIST_HEAD(_log_types);
static DEFINE_SPINLOCK(_lock);

static struct dm_dirty_log_type *__find_dirty_log_type(const char *name)
{
        struct dm_dirty_log_type *log_type;

        list_for_each_entry(log_type, &_log_types, list)
                if (!strcmp(name, log_type->name))
                        return log_type;

        return NULL;
}

static struct dm_dirty_log_type *_get_dirty_log_type(const char *name)
{
        struct dm_dirty_log_type *log_type;

        spin_lock(&_lock);

        log_type = __find_dirty_log_type(name);
        if (log_type && !try_module_get(log_type->module))
                log_type = NULL;

        spin_unlock(&_lock);

        return log_type;
}

/*
 * get_type
 * @type_name
 *
 * Attempt to retrieve the dm_dirty_log_type by name.  If not already
 * available, attempt to load the appropriate module.
 *
 * Log modules are named "dm-log-" followed by the 'type_name'.
 * Modules may contain multiple types.
 * This function will first try the module "dm-log-<type_name>",
 * then truncate 'type_name' on the last '-' and try again.
 *
 * For example, if type_name was "clustered-disk", it would search
 * 'dm-log-clustered-disk' then 'dm-log-clustered'.
 *
 * Returns: dirty_log_type* on success, NULL on failure
 */
static struct dm_dirty_log_type *get_type(const char *type_name)
{
        char *p, *type_name_dup;
        struct dm_dirty_log_type *log_type;

        if (!type_name)
                return NULL;

        log_type = _get_dirty_log_type(type_name);
        if (log_type)
                return log_type;

        type_name_dup = kstrdup(type_name, GFP_KERNEL);
        if (!type_name_dup) {
                DMWARN("No memory left to attempt log module load for \"%s\"",
                       type_name);
                return NULL;
        }

        while (request_module("dm-log-%s", type_name_dup) ||
               !(log_type = _get_dirty_log_type(type_name))) {
                p = strrchr(type_name_dup, '-');
                if (!p)
                        break;
                p[0] = '\0';
        }

        if (!log_type)
                DMWARN("Module for logging type \"%s\" not found.", type_name);

        kfree(type_name_dup);

        return log_type;
}

static void put_type(struct dm_dirty_log_type *type)
{
        if (!type)
                return;

        spin_lock(&_lock);
        if (!__find_dirty_log_type(type->name))
                goto out;

        module_put(type->module);

out:
        spin_unlock(&_lock);
}

int dm_dirty_log_type_register(struct dm_dirty_log_type *type)
{
        int r = 0;

        spin_lock(&_lock);
        if (!__find_dirty_log_type(type->name))
                list_add(&type->list, &_log_types);
        else
                r = -EEXIST;
        spin_unlock(&_lock);

        return r;
}
EXPORT_SYMBOL(dm_dirty_log_type_register);

int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
{
        spin_lock(&_lock);

        if (!__find_dirty_log_type(type->name)) {
                spin_unlock(&_lock);
                return -EINVAL;
        }

        list_del(&type->list);

        spin_unlock(&_lock);

        return 0;
}
EXPORT_SYMBOL(dm_dirty_log_type_unregister);

struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
                        struct dm_target *ti,
                        int (*flush_callback_fn)(struct dm_target *ti),
                        unsigned int argc, char **argv)
{
        struct dm_dirty_log_type *type;
        struct dm_dirty_log *log;

        log = kmalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                return NULL;

        type = get_type(type_name);
        if (!type) {
                kfree(log);
                return NULL;
        }

        log->flush_callback_fn = flush_callback_fn;
        log->type = type;
        if (type->ctr(log, ti, argc, argv)) {
                kfree(log);
                put_type(type);
                return NULL;
        }

        return log;
}
EXPORT_SYMBOL(dm_dirty_log_create);

void dm_dirty_log_destroy(struct dm_dirty_log *log)
{
        log->type->dtr(log);
        put_type(log->type);
        kfree(log);
}
EXPORT_SYMBOL(dm_dirty_log_destroy);
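
/*
 * Typical use by a mirror-style target constructor (illustrative sketch
 * only; the callback name and argument layout below are hypothetical):
 *
 *      struct dm_dirty_log *log;
 *
 *      log = dm_dirty_log_create(log_type_name, ti, my_flush_callback,
 *                                log_argc, log_argv);
 *      if (!log)
 *              return -EINVAL;
 *      ...
 *      log->type->mark_region(log, region);
 *      ...
 *      dm_dirty_log_destroy(log);
 */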

/*-----------------------------------------------------------------
 * Persistent and core logs share a lot of their implementation.
 * FIXME: need a reload method to be called from a resume
 *---------------------------------------------------------------*/
/*
 * Magic for persistent mirrors: "MiRr"
 */
#define MIRROR_MAGIC 0x4D695272

/*
 * The on-disk version of the metadata.
 */
#define MIRROR_DISK_VERSION 2
#define LOG_OFFSET 2
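
/*
 * On-disk layout of the log: the little-endian header (log_header_disk)
 * sits at the start of the device, and the clean-region bitset is stored
 * LOG_OFFSET sectors (1KiB) further in.  create_log_context() mirrors this
 * layout in memory by pointing clean_bits at
 * disk_header + (LOG_OFFSET << SECTOR_SHIFT).
 */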

struct log_header_disk {
        __le32 magic;

        /*
         * Simple, incrementing version.  No backward
         * compatibility.
         */
        __le32 version;
        __le64 nr_regions;
} __packed;

struct log_header_core {
        uint32_t magic;
        uint32_t version;
        uint64_t nr_regions;
};

struct log_c {
        struct dm_target *ti;
        int touched_dirtied;
        int touched_cleaned;
        int flush_failed;
        uint32_t region_size;
        unsigned int region_count;
        region_t sync_count;

        unsigned bitset_uint32_count;
        uint32_t *clean_bits;
        uint32_t *sync_bits;
        uint32_t *recovering_bits;      /* FIXME: this seems excessive */

        int sync_search;

        /* Resync flag */
        enum sync {
                DEFAULTSYNC,    /* Synchronize if necessary */
                NOSYNC,         /* Devices known to be already in sync */
                FORCESYNC,      /* Force a sync to happen */
        } sync;

        struct dm_io_request io_req;

        /*
         * Disk log fields
         */
        int log_dev_failed;
        int log_dev_flush_failed;
        struct dm_dev *log_dev;
        struct log_header_core header;

        struct dm_io_region header_location;
        struct log_header_disk *disk_header;
};

/*
 * The touched member needs to be updated every time we access
 * one of the bitsets.
 */
static inline int log_test_bit(uint32_t *bs, unsigned bit)
{
        return test_bit_le(bit, bs) ? 1 : 0;
}

static inline void log_set_bit(struct log_c *l,
                               uint32_t *bs, unsigned bit)
{
        __set_bit_le(bit, bs);
        l->touched_cleaned = 1;
}

static inline void log_clear_bit(struct log_c *l,
                                 uint32_t *bs, unsigned bit)
{
        __clear_bit_le(bit, bs);
        l->touched_dirtied = 1;
}
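
/*
 * Note on the flag names: these helpers are used on all three bitsets, but
 * the touched_* flags follow the clean_bits semantics - setting a bit marks
 * a region clean, clearing one marks it dirty.  disk_flush() below only
 * issues a device cache flush when regions have been dirtied
 * (touched_dirtied); writes of newly cleaned bits are not explicitly flushed.
 */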

/*----------------------------------------------------------------
 * Header IO
 *--------------------------------------------------------------*/
static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk)
{
        disk->magic = cpu_to_le32(core->magic);
        disk->version = cpu_to_le32(core->version);
        disk->nr_regions = cpu_to_le64(core->nr_regions);
}

static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk)
{
        core->magic = le32_to_cpu(disk->magic);
        core->version = le32_to_cpu(disk->version);
        core->nr_regions = le64_to_cpu(disk->nr_regions);
}

static int rw_header(struct log_c *lc, int rw)
{
        lc->io_req.bi_rw = rw;

        return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}

static int flush_header(struct log_c *lc)
{
        struct dm_io_region null_location = {
                .bdev = lc->header_location.bdev,
                .sector = 0,
                .count = 0,
        };

        lc->io_req.bi_rw = WRITE_FLUSH;

        return dm_io(&lc->io_req, 1, &null_location, NULL);
}
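
/*
 * header_location covers the header plus bitset, so rw_header() reads or
 * writes the whole log buffer in one dm_io call.  flush_header() instead
 * submits a zero-length region with WRITE_FLUSH, i.e. an empty flush
 * request that only forces the log device's write cache to be emptied.
 */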

static int read_header(struct log_c *log)
{
        int r;

        r = rw_header(log, READ);
        if (r)
                return r;

        header_from_disk(&log->header, log->disk_header);

        /* New log required? */
        if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
                log->header.magic = MIRROR_MAGIC;
                log->header.version = MIRROR_DISK_VERSION;
                log->header.nr_regions = 0;
        }

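        /*
         * Version 2 of the format made the header explicitly little
         * endian.  On a little-endian host the old version 1 layout is
         * bit-for-bit identical, so it is silently upgraded here; on
         * big-endian hosts a version 1 log is rejected below as
         * incompatible.
         */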
#ifdef __LITTLE_ENDIAN
        if (log->header.version == 1)
                log->header.version = 2;
#endif

        if (log->header.version != MIRROR_DISK_VERSION) {
                DMWARN("incompatible disk log version");
                return -EINVAL;
        }

        return 0;
}

static int _check_region_size(struct dm_target *ti, uint32_t region_size)
{
        if (region_size < 2 || region_size > ti->len)
                return 0;

        if (!is_power_of_2(region_size))
                return 0;

        return 1;
}

/*----------------------------------------------------------------
 * core log constructor/destructor
 *
 * argv contains region_size followed optionally by [no]sync
 *--------------------------------------------------------------*/
#define BYTE_SHIFT 3
static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
                              unsigned int argc, char **argv,
                              struct dm_dev *dev)
{
        enum sync sync = DEFAULTSYNC;

        struct log_c *lc;
        uint32_t region_size;
        unsigned int region_count;
        size_t bitset_size, buf_size;
        int r;
        char dummy;

        if (argc < 1 || argc > 2) {
                DMWARN("wrong number of arguments to dirty region log");
                return -EINVAL;
        }

        if (argc > 1) {
                if (!strcmp(argv[1], "sync"))
                        sync = FORCESYNC;
                else if (!strcmp(argv[1], "nosync"))
                        sync = NOSYNC;
                else {
                        DMWARN("unrecognised sync argument to "
                               "dirty region log: %s", argv[1]);
                        return -EINVAL;
                }
        }

        if (sscanf(argv[0], "%u%c", &region_size, &dummy) != 1 ||
            !_check_region_size(ti, region_size)) {
                DMWARN("invalid region size %s", argv[0]);
                return -EINVAL;
        }

        region_count = dm_sector_div_up(ti->len, region_size);

        lc = kmalloc(sizeof(*lc), GFP_KERNEL);
        if (!lc) {
                DMWARN("couldn't allocate core log");
                return -ENOMEM;
        }

        lc->ti = ti;
        lc->touched_dirtied = 0;
        lc->touched_cleaned = 0;
        lc->flush_failed = 0;
        lc->region_size = region_size;
        lc->region_count = region_count;
        lc->sync = sync;

        /*
         * Work out how many uint32_t words (and bytes) we need to hold
         * the bitset.
         */
        bitset_size = dm_round_up(region_count,
                                  sizeof(*lc->clean_bits) << BYTE_SHIFT);
        bitset_size >>= BYTE_SHIFT;

        lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
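
        /*
         * For example (illustrative numbers only): a 1000-region log is
         * rounded up to 1024 bits, i.e. bitset_size = 128 bytes and
         * bitset_uint32_count = 32.
         */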

        /*
         * Disk log?
         */
        if (!dev) {
                lc->clean_bits = vmalloc(bitset_size);
                if (!lc->clean_bits) {
                        DMWARN("couldn't allocate clean bitset");
                        kfree(lc);
                        return -ENOMEM;
                }
                lc->disk_header = NULL;
        } else {
                lc->log_dev = dev;
                lc->log_dev_failed = 0;
                lc->log_dev_flush_failed = 0;
                lc->header_location.bdev = lc->log_dev->bdev;
                lc->header_location.sector = 0;

                /*
                 * Buffer holds both header and bitset.
                 */
                buf_size =
                    dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
                                bdev_logical_block_size(lc->header_location.
                                                            bdev));

                if (buf_size > i_size_read(dev->bdev->bd_inode)) {
                        DMWARN("log device %s too small: need %llu bytes",
                                dev->name, (unsigned long long)buf_size);
                        kfree(lc);
                        return -EINVAL;
                }

                lc->header_location.count = buf_size >> SECTOR_SHIFT;

                lc->io_req.mem.type = DM_IO_VMA;
                lc->io_req.notify.fn = NULL;
                lc->io_req.client = dm_io_client_create();
                if (IS_ERR(lc->io_req.client)) {
                        r = PTR_ERR(lc->io_req.client);
                        DMWARN("couldn't allocate disk io client");
                        kfree(lc);
                        return r;
                }

                lc->disk_header = vmalloc(buf_size);
                if (!lc->disk_header) {
                        DMWARN("couldn't allocate disk log buffer");
                        dm_io_client_destroy(lc->io_req.client);
                        kfree(lc);
                        return -ENOMEM;
                }

                lc->io_req.mem.ptr.vma = lc->disk_header;
                lc->clean_bits = (void *)lc->disk_header +
                                 (LOG_OFFSET << SECTOR_SHIFT);
        }

        memset(lc->clean_bits, -1, bitset_size);

        lc->sync_bits = vmalloc(bitset_size);
        if (!lc->sync_bits) {
                DMWARN("couldn't allocate sync bitset");
                if (!dev)
                        vfree(lc->clean_bits);
                else
                        dm_io_client_destroy(lc->io_req.client);
                vfree(lc->disk_header);
                kfree(lc);
                return -ENOMEM;
        }
        memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
        lc->sync_count = (sync == NOSYNC) ? region_count : 0;

        lc->recovering_bits = vzalloc(bitset_size);
        if (!lc->recovering_bits) {
                DMWARN("couldn't allocate recovering bitset");
                vfree(lc->sync_bits);
                if (!dev)
                        vfree(lc->clean_bits);
                else
                        dm_io_client_destroy(lc->io_req.client);
                vfree(lc->disk_header);
                kfree(lc);
                return -ENOMEM;
        }
        lc->sync_search = 0;
        log->context = lc;

        return 0;
}

static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti,
                    unsigned int argc, char **argv)
{
        return create_log_context(log, ti, argc, argv, NULL);
}

static void destroy_log_context(struct log_c *lc)
{
        vfree(lc->sync_bits);
        vfree(lc->recovering_bits);
        kfree(lc);
}

static void core_dtr(struct dm_dirty_log *log)
{
        struct log_c *lc = (struct log_c *) log->context;

        vfree(lc->clean_bits);
        destroy_log_context(lc);
}

/*----------------------------------------------------------------
 * disk log constructor/destructor
 *
 * argv contains log_device region_size followed optionally by [no]sync
 *--------------------------------------------------------------*/
static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
                    unsigned int argc, char **argv)
{
        int r;
        struct dm_dev *dev;

        if (argc < 2 || argc > 3) {
                DMWARN("wrong number of arguments to disk dirty region log");
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
        if (r)
                return r;

        r = create_log_context(log, ti, argc - 1, argv + 1, dev);
        if (r) {
                dm_put_device(ti, dev);
                return r;
        }

        return 0;
}

static void disk_dtr(struct dm_dirty_log *log)
{
        struct log_c *lc = (struct log_c *) log->context;

        dm_put_device(lc->ti, lc->log_dev);
        vfree(lc->disk_header);
        dm_io_client_destroy(lc->io_req.client);
        destroy_log_context(lc);
}

static void fail_log_device(struct log_c *lc)
{
        if (lc->log_dev_failed)
                return;

        lc->log_dev_failed = 1;
        dm_table_event(lc->ti->table);
}

static int disk_resume(struct dm_dirty_log *log)
{
        int r;
        unsigned i;
        struct log_c *lc = (struct log_c *) log->context;
        size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

        /* read the disk header */
        r = read_header(lc);
        if (r) {
                DMWARN("%s: Failed to read header on dirty region log device",
                       lc->log_dev->name);
                fail_log_device(lc);
                /*
                 * If the log device cannot be read, we must assume
                 * all regions are out-of-sync.  If we simply return
                 * here, the state will be uninitialized and could
                 * lead us to return 'in-sync' status for regions
                 * that are actually 'out-of-sync'.
                 */
                lc->header.nr_regions = 0;
        }

        /* set or clear any new bits -- device has grown */
        if (lc->sync == NOSYNC)
                for (i = lc->header.nr_regions; i < lc->region_count; i++)
                        /* FIXME: amazingly inefficient */
                        log_set_bit(lc, lc->clean_bits, i);
        else
                for (i = lc->header.nr_regions; i < lc->region_count; i++)
                        /* FIXME: amazingly inefficient */
                        log_clear_bit(lc, lc->clean_bits, i);

        /* clear any old bits -- device has shrunk */
        for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
                log_clear_bit(lc, lc->clean_bits, i);

        /* copy clean across to sync */
        memcpy(lc->sync_bits, lc->clean_bits, size);
        lc->sync_count = memweight(lc->clean_bits,
                                lc->bitset_uint32_count * sizeof(uint32_t));
        lc->sync_search = 0;

        /* set the correct number of regions in the header */
        lc->header.nr_regions = lc->region_count;

        header_to_disk(&lc->header, lc->disk_header);

        /* write the new header */
        r = rw_header(lc, WRITE);
        if (!r) {
                r = flush_header(lc);
                if (r)
                        lc->log_dev_flush_failed = 1;
        }
        if (r) {
                DMWARN("%s: Failed to write header on dirty region log device",
                       lc->log_dev->name);
                fail_log_device(lc);
        }

        return r;
}

static uint32_t core_get_region_size(struct dm_dirty_log *log)
{
        struct log_c *lc = (struct log_c *) log->context;
        return lc->region_size;
}

static int core_resume(struct dm_dirty_log *log)
{
        struct log_c *lc = (struct log_c *) log->context;
        lc->sync_search = 0;
        return 0;
}

static int core_is_clean(struct dm_dirty_log *log, region_t region)
{
        struct log_c *lc = (struct log_c *) log->context;
        return log_test_bit(lc->clean_bits, region);
}

static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
{
        struct log_c *lc = (struct log_c *) log->context;
        return log_test_bit(lc->sync_bits, region);
}

static int core_flush(struct dm_dirty_log *log)
{
        /* no op */
        return 0;
}

static int disk_flush(struct dm_dirty_log *log)
{
        int r, i;
        struct log_c *lc = log->context;

        /* only write if the log has changed */
        if (!lc->touched_cleaned && !lc->touched_dirtied)
                return 0;

        if (lc->touched_cleaned && log->flush_callback_fn &&
            log->flush_callback_fn(lc->ti)) {
                /*
                 * At this point it is impossible to determine which
                 * regions are clean and which are dirty (without
                 * re-reading the log off disk). So mark all of them
                 * dirty.
                 */
                lc->flush_failed = 1;
                for (i = 0; i < lc->region_count; i++)
                        log_clear_bit(lc, lc->clean_bits, i);
        }

        r = rw_header(lc, WRITE);
        if (r)
                fail_log_device(lc);
        else {
                if (lc->touched_dirtied) {
                        r = flush_header(lc);
                        if (r) {
                                lc->log_dev_flush_failed = 1;
                                fail_log_device(lc);
                        } else
                                lc->touched_dirtied = 0;
                }
                lc->touched_cleaned = 0;
        }

        return r;
}

static void core_mark_region(struct dm_dirty_log *log, region_t region)
{
        struct log_c *lc = (struct log_c *) log->context;
        log_clear_bit(lc, lc->clean_bits, region);
}

static void core_clear_region(struct dm_dirty_log *log, region_t region)
{
        struct log_c *lc = (struct log_c *) log->context;
        if (likely(!lc->flush_failed))
                log_set_bit(lc, lc->clean_bits, region);
}

static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
        struct log_c *lc = (struct log_c *) log->context;

        if (lc->sync_search >= lc->region_count)
                return 0;

        do {
                *region = find_next_zero_bit_le(lc->sync_bits,
                                             lc->region_count,
                                             lc->sync_search);
                lc->sync_search = *region + 1;

                if (*region >= lc->region_count)
                        return 0;

        } while (log_test_bit(lc->recovering_bits, *region));

        log_set_bit(lc, lc->recovering_bits, *region);
        return 1;
}

static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
                                 int in_sync)
{
        struct log_c *lc = (struct log_c *) log->context;

        log_clear_bit(lc, lc->recovering_bits, region);
        if (in_sync) {
                log_set_bit(lc, lc->sync_bits, region);
                lc->sync_count++;
        } else if (log_test_bit(lc->sync_bits, region)) {
                lc->sync_count--;
                log_clear_bit(lc, lc->sync_bits, region);
        }
}

static region_t core_get_sync_count(struct dm_dirty_log *log)
{
        struct log_c *lc = (struct log_c *) log->context;

        return lc->sync_count;
}

#define DMEMIT_SYNC \
        if (lc->sync != DEFAULTSYNC) \
                DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")

static int core_status(struct dm_dirty_log *log, status_type_t status,
                       char *result, unsigned int maxlen)
{
        int sz = 0;
        struct log_c *lc = log->context;

        switch(status) {
        case STATUSTYPE_INFO:
                DMEMIT("1 %s", log->type->name);
                break;

        case STATUSTYPE_TABLE:
                DMEMIT("%s %u %u ", log->type->name,
                       lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
                DMEMIT_SYNC;
        }

        return sz;
}

static int disk_status(struct dm_dirty_log *log, status_type_t status,
                       char *result, unsigned int maxlen)
{
        int sz = 0;
        struct log_c *lc = log->context;

        switch(status) {
        case STATUSTYPE_INFO:
                DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
                       lc->log_dev_flush_failed ? 'F' :
                       lc->log_dev_failed ? 'D' :
                       'A');
                break;

        case STATUSTYPE_TABLE:
                DMEMIT("%s %u %s %u ", log->type->name,
                       lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
                       lc->region_size);
                DMEMIT_SYNC;
        }

        return sz;
}
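
/*
 * Example status output (illustrative device name and values):
 *
 *      core, STATUSTYPE_TABLE: "core 2 1024 nosync "
 *      core, STATUSTYPE_INFO:  "1 core"
 *      disk, STATUSTYPE_TABLE: "disk 3 253:4 1024 sync "
 *      disk, STATUSTYPE_INFO:  "3 disk 253:4 A"
 */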

static struct dm_dirty_log_type _core_type = {
        .name = "core",
        .module = THIS_MODULE,
        .ctr = core_ctr,
        .dtr = core_dtr,
        .resume = core_resume,
        .get_region_size = core_get_region_size,
        .is_clean = core_is_clean,
        .in_sync = core_in_sync,
        .flush = core_flush,
        .mark_region = core_mark_region,
        .clear_region = core_clear_region,
        .get_resync_work = core_get_resync_work,
        .set_region_sync = core_set_region_sync,
        .get_sync_count = core_get_sync_count,
        .status = core_status,
};

static struct dm_dirty_log_type _disk_type = {
        .name = "disk",
        .module = THIS_MODULE,
        .ctr = disk_ctr,
        .dtr = disk_dtr,
        .postsuspend = disk_flush,
        .resume = disk_resume,
        .get_region_size = core_get_region_size,
        .is_clean = core_is_clean,
        .in_sync = core_in_sync,
        .flush = disk_flush,
        .mark_region = core_mark_region,
        .clear_region = core_clear_region,
        .get_resync_work = core_get_resync_work,
        .set_region_sync = core_set_region_sync,
        .get_sync_count = core_get_sync_count,
        .status = disk_status,
};

static int __init dm_dirty_log_init(void)
{
        int r;

        r = dm_dirty_log_type_register(&_core_type);
        if (r)
                DMWARN("couldn't register core log");

        r = dm_dirty_log_type_register(&_disk_type);
        if (r) {
                DMWARN("couldn't register disk type");
                dm_dirty_log_type_unregister(&_core_type);
        }

        return r;
}

static void __exit dm_dirty_log_exit(void)
{
        dm_dirty_log_type_unregister(&_disk_type);
        dm_dirty_log_type_unregister(&_core_type);
}

module_init(dm_dirty_log_init);
module_exit(dm_dirty_log_exit);

MODULE_DESCRIPTION(DM_NAME " dirty region log");
MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");