linux/drivers/md/dm-log.c
   1/*
   2 * Copyright (C) 2003 Sistina Software
   3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the LGPL.
   6 */
   7
   8#include <linux/init.h>
   9#include <linux/slab.h>
  10#include <linux/module.h>
  11#include <linux/vmalloc.h>
  12#include <linux/dm-io.h>
  13#include <linux/dm-dirty-log.h>
  14
  15#include <linux/device-mapper.h>
  16
  17#define DM_MSG_PREFIX "dirty region log"
  18
  19static LIST_HEAD(_log_types);
  20static DEFINE_SPINLOCK(_lock);
  21
  22static struct dm_dirty_log_type *__find_dirty_log_type(const char *name)
  23{
  24        struct dm_dirty_log_type *log_type;
  25
  26        list_for_each_entry(log_type, &_log_types, list)
  27                if (!strcmp(name, log_type->name))
  28                        return log_type;
  29
  30        return NULL;
  31}
  32
  33static struct dm_dirty_log_type *_get_dirty_log_type(const char *name)
  34{
  35        struct dm_dirty_log_type *log_type;
  36
  37        spin_lock(&_lock);
  38
  39        log_type = __find_dirty_log_type(name);
  40        if (log_type && !try_module_get(log_type->module))
  41                log_type = NULL;
  42
  43        spin_unlock(&_lock);
  44
  45        return log_type;
  46}
  47
  48/*
  49 * get_type
  50 * @type_name
  51 *
  52 * Attempt to retrieve the dm_dirty_log_type by name.  If not already
  53 * available, attempt to load the appropriate module.
  54 *
  55 * Log modules are named "dm-log-" followed by the 'type_name'.
  56 * Modules may contain multiple types.
  57 * This function will first try the module "dm-log-<type_name>",
  58 * then truncate 'type_name' on the last '-' and try again.
  59 *
  60 * For example, if type_name was "clustered-disk", it would search
  61 * 'dm-log-clustered-disk' then 'dm-log-clustered'.
  62 *
   63 * Returns: dm_dirty_log_type* on success, NULL on failure
  64 */
  65static struct dm_dirty_log_type *get_type(const char *type_name)
  66{
  67        char *p, *type_name_dup;
  68        struct dm_dirty_log_type *log_type;
  69
  70        if (!type_name)
  71                return NULL;
  72
  73        log_type = _get_dirty_log_type(type_name);
  74        if (log_type)
  75                return log_type;
  76
  77        type_name_dup = kstrdup(type_name, GFP_KERNEL);
  78        if (!type_name_dup) {
  79                DMWARN("No memory left to attempt log module load for \"%s\"",
  80                       type_name);
  81                return NULL;
  82        }
  83
  84        while (request_module("dm-log-%s", type_name_dup) ||
  85               !(log_type = _get_dirty_log_type(type_name))) {
  86                p = strrchr(type_name_dup, '-');
  87                if (!p)
  88                        break;
  89                p[0] = '\0';
  90        }
  91
  92        if (!log_type)
  93                DMWARN("Module for logging type \"%s\" not found.", type_name);
  94
  95        kfree(type_name_dup);
  96
  97        return log_type;
  98}
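/*
 * Illustrative trace (sketch, not authoritative): for
 * get_type("clustered-disk") the loop above effectively does
 *
 *	request_module("dm-log-clustered-disk");   then a registry lookup,
 *	request_module("dm-log-clustered");        after truncating at the last '-',
 *
 * and gives up once no '-' remains.  Note that the registry lookup via
 * _get_dirty_log_type() always uses the full, untruncated type_name.
 */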
  99
 100static void put_type(struct dm_dirty_log_type *type)
 101{
 102        if (!type)
 103                return;
 104
 105        spin_lock(&_lock);
 106        if (!__find_dirty_log_type(type->name))
 107                goto out;
 108
 109        module_put(type->module);
 110
 111out:
 112        spin_unlock(&_lock);
 113}
 114
 115int dm_dirty_log_type_register(struct dm_dirty_log_type *type)
 116{
 117        int r = 0;
 118
 119        spin_lock(&_lock);
 120        if (!__find_dirty_log_type(type->name))
 121                list_add(&type->list, &_log_types);
 122        else
 123                r = -EEXIST;
 124        spin_unlock(&_lock);
 125
 126        return r;
 127}
 128EXPORT_SYMBOL(dm_dirty_log_type_register);
 129
 130int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
 131{
 132        spin_lock(&_lock);
 133
 134        if (!__find_dirty_log_type(type->name)) {
 135                spin_unlock(&_lock);
 136                return -EINVAL;
 137        }
 138
 139        list_del(&type->list);
 140
 141        spin_unlock(&_lock);
 142
 143        return 0;
 144}
 145EXPORT_SYMBOL(dm_dirty_log_type_unregister);
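/*
 * Usage sketch (illustrative only): a separate log module, which the naming
 * convention in get_type() would expect to be called "dm-log-example", could
 * register its type from its init path roughly as below.  The "example" name
 * and the example_* callbacks are hypothetical.
 *
 *	static struct dm_dirty_log_type _example_type = {
 *		.name   = "example",
 *		.module = THIS_MODULE,
 *		.ctr    = example_ctr,
 *		.dtr    = example_dtr,
 *		...                      (remaining callbacks, cf. _core_type below)
 *	};
 *
 *	static int __init dm_log_example_init(void)
 *	{
 *		return dm_dirty_log_type_register(&_example_type);
 *	}
 *
 *	static void __exit dm_log_example_exit(void)
 *	{
 *		dm_dirty_log_type_unregister(&_example_type);
 *	}
 */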
 146
 147struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
 148                        struct dm_target *ti,
 149                        int (*flush_callback_fn)(struct dm_target *ti),
 150                        unsigned int argc, char **argv)
 151{
 152        struct dm_dirty_log_type *type;
 153        struct dm_dirty_log *log;
 154
 155        log = kmalloc(sizeof(*log), GFP_KERNEL);
 156        if (!log)
 157                return NULL;
 158
 159        type = get_type(type_name);
 160        if (!type) {
 161                kfree(log);
 162                return NULL;
 163        }
 164
 165        log->flush_callback_fn = flush_callback_fn;
 166        log->type = type;
 167        if (type->ctr(log, ti, argc, argv)) {
 168                kfree(log);
 169                put_type(type);
 170                return NULL;
 171        }
 172
 173        return log;
 174}
 175EXPORT_SYMBOL(dm_dirty_log_create);
 176
 177void dm_dirty_log_destroy(struct dm_dirty_log *log)
 178{
 179        log->type->dtr(log);
 180        put_type(log->type);
 181        kfree(log);
 182}
 183EXPORT_SYMBOL(dm_dirty_log_destroy);
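/*
 * Usage sketch (illustrative, hypothetical argv values): a mirror-style
 * target constructor would typically create and tear down a log like this.
 * my_flush_callback is a hypothetical int (*)(struct dm_target *) and may
 * be NULL if no flush callback is needed.
 *
 *	static char *log_argv[] = { "1024" };	  (region_size in sectors)
 *	struct dm_dirty_log *log;
 *
 *	log = dm_dirty_log_create("core", ti, my_flush_callback, 1, log_argv);
 *	if (!log)
 *		return -EINVAL;
 *	...
 *	dm_dirty_log_destroy(log);
 */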
 184
 185/*-----------------------------------------------------------------
 186 * Persistent and core logs share a lot of their implementation.
 187 * FIXME: need a reload method to be called from a resume
 188 *---------------------------------------------------------------*/
 189/*
 190 * Magic for persistent mirrors: "MiRr"
 191 */
 192#define MIRROR_MAGIC 0x4D695272
 193
 194/*
 195 * The on-disk version of the metadata.
 196 */
 197#define MIRROR_DISK_VERSION 2
 198#define LOG_OFFSET 2
 199
 200struct log_header_disk {
 201        __le32 magic;
 202
 203        /*
  204         * Simple, incrementing version.  No backward
  205         * compatibility.
 206         */
 207        __le32 version;
 208        __le64 nr_regions;
 209} __packed;
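/*
 * Rough on-disk layout implied by LOG_OFFSET and create_log_context()
 * below (sketch; 1 sector = 512 bytes):
 *
 *	sector 0            : struct log_header_disk
 *	rest of 0, sector 1 : unused padding
 *	sector 2 onwards    : clean-region bitset (LOG_OFFSET = 2)
 *
 * The whole buffer is rounded up to the log device's logical block size.
 */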
 210
 211struct log_header_core {
 212        uint32_t magic;
 213        uint32_t version;
 214        uint64_t nr_regions;
 215};
 216
 217struct log_c {
 218        struct dm_target *ti;
 219        int touched_dirtied;
 220        int touched_cleaned;
 221        int flush_failed;
 222        uint32_t region_size;
 223        unsigned int region_count;
 224        region_t sync_count;
 225
 226        unsigned bitset_uint32_count;
 227        uint32_t *clean_bits;
 228        uint32_t *sync_bits;
 229        uint32_t *recovering_bits;      /* FIXME: this seems excessive */
 230
 231        int sync_search;
 232
 233        /* Resync flag */
 234        enum sync {
 235                DEFAULTSYNC,    /* Synchronize if necessary */
 236                NOSYNC,         /* Devices known to be already in sync */
 237                FORCESYNC,      /* Force a sync to happen */
 238        } sync;
 239
 240        struct dm_io_request io_req;
 241
 242        /*
 243         * Disk log fields
 244         */
 245        int log_dev_failed;
 246        int log_dev_flush_failed;
 247        struct dm_dev *log_dev;
 248        struct log_header_core header;
 249
 250        struct dm_io_region header_location;
 251        struct log_header_disk *disk_header;
 252};
 253
 254/*
  255 * The touched_cleaned/touched_dirtied members need to be updated
  256 * every time we modify one of the bitsets.
 257 */
 258static inline int log_test_bit(uint32_t *bs, unsigned bit)
 259{
 260        return test_bit_le(bit, bs) ? 1 : 0;
 261}
 262
 263static inline void log_set_bit(struct log_c *l,
 264                               uint32_t *bs, unsigned bit)
 265{
 266        __set_bit_le(bit, bs);
 267        l->touched_cleaned = 1;
 268}
 269
 270static inline void log_clear_bit(struct log_c *l,
 271                                 uint32_t *bs, unsigned bit)
 272{
 273        __clear_bit_le(bit, bs);
 274        l->touched_dirtied = 1;
 275}
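/*
 * For the clean_bits bitset a set bit means "region is clean", so
 * log_set_bit() records a cleaning change (touched_cleaned) and
 * log_clear_bit() records a dirtying change (touched_dirtied);
 * disk_flush() below uses these flags to decide whether the on-disk
 * log must be rewritten and flushed.
 */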
 276
 277/*----------------------------------------------------------------
 278 * Header IO
 279 *--------------------------------------------------------------*/
 280static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk)
 281{
 282        disk->magic = cpu_to_le32(core->magic);
 283        disk->version = cpu_to_le32(core->version);
 284        disk->nr_regions = cpu_to_le64(core->nr_regions);
 285}
 286
 287static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk)
 288{
 289        core->magic = le32_to_cpu(disk->magic);
 290        core->version = le32_to_cpu(disk->version);
 291        core->nr_regions = le64_to_cpu(disk->nr_regions);
 292}
 293
 294static int rw_header(struct log_c *lc, int rw)
 295{
 296        lc->io_req.bi_rw = rw;
 297
 298        return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
 299}
 300
 301static int flush_header(struct log_c *lc)
 302{
 303        struct dm_io_region null_location = {
 304                .bdev = lc->header_location.bdev,
 305                .sector = 0,
 306                .count = 0,
 307        };
 308
 309        lc->io_req.bi_rw = WRITE_FLUSH;
 310
 311        return dm_io(&lc->io_req, 1, &null_location, NULL);
 312}
 313
 314static int read_header(struct log_c *log)
 315{
 316        int r;
 317
 318        r = rw_header(log, READ);
 319        if (r)
 320                return r;
 321
 322        header_from_disk(&log->header, log->disk_header);
 323
 324        /* New log required? */
 325        if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
 326                log->header.magic = MIRROR_MAGIC;
 327                log->header.version = MIRROR_DISK_VERSION;
 328                log->header.nr_regions = 0;
 329        }
 330
 331#ifdef __LITTLE_ENDIAN
 332        if (log->header.version == 1)
 333                log->header.version = 2;
 334#endif
 335
 336        if (log->header.version != MIRROR_DISK_VERSION) {
 337                DMWARN("incompatible disk log version");
 338                return -EINVAL;
 339        }
 340
 341        return 0;
 342}
 343
 344static int _check_region_size(struct dm_target *ti, uint32_t region_size)
 345{
 346        if (region_size < 2 || region_size > ti->len)
 347                return 0;
 348
 349        if (!is_power_of_2(region_size))
 350                return 0;
 351
 352        return 1;
 353}
 354
 355/*----------------------------------------------------------------
 356 * core log constructor/destructor
 357 *
 358 * argv contains region_size followed optionally by [no]sync
 359 *--------------------------------------------------------------*/
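/*
 * Example argument vectors (values are illustrative):
 *
 *	core: { "1024" }                 or { "2048", "nosync" }
 *	disk: { "/dev/vg0/log", "1024" } or { "253:4", "1024", "sync" }
 *
 * region_size is in 512-byte sectors and must pass _check_region_size()
 * above, i.e. be a power of 2, at least 2 and no larger than ti->len.
 * disk_ctr() below strips the leading device argument before calling
 * create_log_context().
 */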
 360#define BYTE_SHIFT 3
 361static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
 362                              unsigned int argc, char **argv,
 363                              struct dm_dev *dev)
 364{
 365        enum sync sync = DEFAULTSYNC;
 366
 367        struct log_c *lc;
 368        uint32_t region_size;
 369        unsigned int region_count;
 370        size_t bitset_size, buf_size;
 371        int r;
 372        char dummy;
 373
 374        if (argc < 1 || argc > 2) {
 375                DMWARN("wrong number of arguments to dirty region log");
 376                return -EINVAL;
 377        }
 378
 379        if (argc > 1) {
 380                if (!strcmp(argv[1], "sync"))
 381                        sync = FORCESYNC;
 382                else if (!strcmp(argv[1], "nosync"))
 383                        sync = NOSYNC;
 384                else {
 385                        DMWARN("unrecognised sync argument to "
 386                               "dirty region log: %s", argv[1]);
 387                        return -EINVAL;
 388                }
 389        }
 390
 391        if (sscanf(argv[0], "%u%c", &region_size, &dummy) != 1 ||
 392            !_check_region_size(ti, region_size)) {
 393                DMWARN("invalid region size %s", argv[0]);
 394                return -EINVAL;
 395        }
 396
 397        region_count = dm_sector_div_up(ti->len, region_size);
 398
 399        lc = kmalloc(sizeof(*lc), GFP_KERNEL);
 400        if (!lc) {
 401                DMWARN("couldn't allocate core log");
 402                return -ENOMEM;
 403        }
 404
 405        lc->ti = ti;
 406        lc->touched_dirtied = 0;
 407        lc->touched_cleaned = 0;
 408        lc->flush_failed = 0;
 409        lc->region_size = region_size;
 410        lc->region_count = region_count;
 411        lc->sync = sync;
 412
 413        /*
  414         * Work out how many bytes (and uint32_t words) we need to hold the bitset.
 415         */
 416        bitset_size = dm_round_up(region_count,
 417                                  sizeof(*lc->clean_bits) << BYTE_SHIFT);
 418        bitset_size >>= BYTE_SHIFT;
 419
 420        lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
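        /*
         * Worked example (hypothetical numbers): with ti->len = 409600
         * sectors and region_size = 1024, region_count = 400, so
         * bitset_size = dm_round_up(400, 32) / 8 = 52 bytes and
         * bitset_uint32_count = 52 / 4 = 13.
         */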
 421
 422        /*
 423         * Disk log?
 424         */
 425        if (!dev) {
 426                lc->clean_bits = vmalloc(bitset_size);
 427                if (!lc->clean_bits) {
 428                        DMWARN("couldn't allocate clean bitset");
 429                        kfree(lc);
 430                        return -ENOMEM;
 431                }
 432                lc->disk_header = NULL;
 433        } else {
 434                lc->log_dev = dev;
 435                lc->log_dev_failed = 0;
 436                lc->log_dev_flush_failed = 0;
 437                lc->header_location.bdev = lc->log_dev->bdev;
 438                lc->header_location.sector = 0;
 439
 440                /*
 441                 * Buffer holds both header and bitset.
 442                 */
  443                buf_size =
  444                    dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
  445                                bdev_logical_block_size(
  446                                        lc->header_location.bdev));
 447
 448                if (buf_size > i_size_read(dev->bdev->bd_inode)) {
 449                        DMWARN("log device %s too small: need %llu bytes",
 450                                dev->name, (unsigned long long)buf_size);
 451                        kfree(lc);
 452                        return -EINVAL;
 453                }
 454
 455                lc->header_location.count = buf_size >> SECTOR_SHIFT;
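                /*
                 * Continuing the worked example above, with a 512-byte
                 * logical block size: buf_size = dm_round_up(2*512 + 52, 512)
                 * = 1536 bytes, so header_location.count = 3 sectors.
                 */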
 456
 457                lc->io_req.mem.type = DM_IO_VMA;
 458                lc->io_req.notify.fn = NULL;
 459                lc->io_req.client = dm_io_client_create();
 460                if (IS_ERR(lc->io_req.client)) {
 461                        r = PTR_ERR(lc->io_req.client);
 462                        DMWARN("couldn't allocate disk io client");
 463                        kfree(lc);
 464                        return r;
 465                }
 466
 467                lc->disk_header = vmalloc(buf_size);
 468                if (!lc->disk_header) {
 469                        DMWARN("couldn't allocate disk log buffer");
 470                        dm_io_client_destroy(lc->io_req.client);
 471                        kfree(lc);
 472                        return -ENOMEM;
 473                }
 474
 475                lc->io_req.mem.ptr.vma = lc->disk_header;
 476                lc->clean_bits = (void *)lc->disk_header +
 477                                 (LOG_OFFSET << SECTOR_SHIFT);
 478        }
 479
 480        memset(lc->clean_bits, -1, bitset_size);
 481
 482        lc->sync_bits = vmalloc(bitset_size);
 483        if (!lc->sync_bits) {
 484                DMWARN("couldn't allocate sync bitset");
 485                if (!dev)
 486                        vfree(lc->clean_bits);
 487                else
 488                        dm_io_client_destroy(lc->io_req.client);
 489                vfree(lc->disk_header);
 490                kfree(lc);
 491                return -ENOMEM;
 492        }
 493        memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
 494        lc->sync_count = (sync == NOSYNC) ? region_count : 0;
 495
 496        lc->recovering_bits = vzalloc(bitset_size);
 497        if (!lc->recovering_bits) {
  498                DMWARN("couldn't allocate recovering bitset");
 499                vfree(lc->sync_bits);
 500                if (!dev)
 501                        vfree(lc->clean_bits);
 502                else
 503                        dm_io_client_destroy(lc->io_req.client);
 504                vfree(lc->disk_header);
 505                kfree(lc);
 506                return -ENOMEM;
 507        }
 508        lc->sync_search = 0;
 509        log->context = lc;
 510
 511        return 0;
 512}
 513
 514static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 515                    unsigned int argc, char **argv)
 516{
 517        return create_log_context(log, ti, argc, argv, NULL);
 518}
 519
 520static void destroy_log_context(struct log_c *lc)
 521{
 522        vfree(lc->sync_bits);
 523        vfree(lc->recovering_bits);
 524        kfree(lc);
 525}
 526
 527static void core_dtr(struct dm_dirty_log *log)
 528{
 529        struct log_c *lc = (struct log_c *) log->context;
 530
 531        vfree(lc->clean_bits);
 532        destroy_log_context(lc);
 533}
 534
 535/*----------------------------------------------------------------
 536 * disk log constructor/destructor
 537 *
 538 * argv contains log_device region_size followed optionally by [no]sync
 539 *--------------------------------------------------------------*/
 540static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 541                    unsigned int argc, char **argv)
 542{
 543        int r;
 544        struct dm_dev *dev;
 545
 546        if (argc < 2 || argc > 3) {
 547                DMWARN("wrong number of arguments to disk dirty region log");
 548                return -EINVAL;
 549        }
 550
 551        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
 552        if (r)
 553                return r;
 554
 555        r = create_log_context(log, ti, argc - 1, argv + 1, dev);
 556        if (r) {
 557                dm_put_device(ti, dev);
 558                return r;
 559        }
 560
 561        return 0;
 562}
 563
 564static void disk_dtr(struct dm_dirty_log *log)
 565{
 566        struct log_c *lc = (struct log_c *) log->context;
 567
 568        dm_put_device(lc->ti, lc->log_dev);
 569        vfree(lc->disk_header);
 570        dm_io_client_destroy(lc->io_req.client);
 571        destroy_log_context(lc);
 572}
 573
 574static int count_bits32(uint32_t *addr, unsigned size)
 575{
 576        int count = 0, i;
 577
  578        for (i = 0; i < size; i++)
  579                count += hweight32(*(addr + i));
  580
  581        return count;
 582}
 583
 584static void fail_log_device(struct log_c *lc)
 585{
 586        if (lc->log_dev_failed)
 587                return;
 588
 589        lc->log_dev_failed = 1;
 590        dm_table_event(lc->ti->table);
 591}
 592
 593static int disk_resume(struct dm_dirty_log *log)
 594{
 595        int r;
 596        unsigned i;
 597        struct log_c *lc = (struct log_c *) log->context;
 598        size_t size = lc->bitset_uint32_count * sizeof(uint32_t);
 599
 600        /* read the disk header */
 601        r = read_header(lc);
 602        if (r) {
 603                DMWARN("%s: Failed to read header on dirty region log device",
 604                       lc->log_dev->name);
 605                fail_log_device(lc);
 606                /*
 607                 * If the log device cannot be read, we must assume
 608                 * all regions are out-of-sync.  If we simply return
 609                 * here, the state will be uninitialized and could
 610                 * lead us to return 'in-sync' status for regions
 611                 * that are actually 'out-of-sync'.
 612                 */
 613                lc->header.nr_regions = 0;
 614        }
 615
 616        /* set or clear any new bits -- device has grown */
 617        if (lc->sync == NOSYNC)
 618                for (i = lc->header.nr_regions; i < lc->region_count; i++)
 619                        /* FIXME: amazingly inefficient */
 620                        log_set_bit(lc, lc->clean_bits, i);
 621        else
 622                for (i = lc->header.nr_regions; i < lc->region_count; i++)
 623                        /* FIXME: amazingly inefficient */
 624                        log_clear_bit(lc, lc->clean_bits, i);
 625
 626        /* clear any old bits -- device has shrunk */
 627        for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
 628                log_clear_bit(lc, lc->clean_bits, i);
 629
 630        /* copy clean across to sync */
 631        memcpy(lc->sync_bits, lc->clean_bits, size);
 632        lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);
 633        lc->sync_search = 0;
 634
 635        /* set the correct number of regions in the header */
 636        lc->header.nr_regions = lc->region_count;
 637
 638        header_to_disk(&lc->header, lc->disk_header);
 639
 640        /* write the new header */
 641        r = rw_header(lc, WRITE);
 642        if (!r) {
 643                r = flush_header(lc);
 644                if (r)
 645                        lc->log_dev_flush_failed = 1;
 646        }
 647        if (r) {
 648                DMWARN("%s: Failed to write header on dirty region log device",
 649                       lc->log_dev->name);
 650                fail_log_device(lc);
 651        }
 652
 653        return r;
 654}
 655
 656static uint32_t core_get_region_size(struct dm_dirty_log *log)
 657{
 658        struct log_c *lc = (struct log_c *) log->context;
 659        return lc->region_size;
 660}
 661
 662static int core_resume(struct dm_dirty_log *log)
 663{
 664        struct log_c *lc = (struct log_c *) log->context;
 665        lc->sync_search = 0;
 666        return 0;
 667}
 668
 669static int core_is_clean(struct dm_dirty_log *log, region_t region)
 670{
 671        struct log_c *lc = (struct log_c *) log->context;
 672        return log_test_bit(lc->clean_bits, region);
 673}
 674
 675static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
 676{
 677        struct log_c *lc = (struct log_c *) log->context;
 678        return log_test_bit(lc->sync_bits, region);
 679}
 680
 681static int core_flush(struct dm_dirty_log *log)
 682{
 683        /* no op */
 684        return 0;
 685}
 686
 687static int disk_flush(struct dm_dirty_log *log)
 688{
 689        int r, i;
 690        struct log_c *lc = log->context;
 691
 692        /* only write if the log has changed */
 693        if (!lc->touched_cleaned && !lc->touched_dirtied)
 694                return 0;
 695
 696        if (lc->touched_cleaned && log->flush_callback_fn &&
 697            log->flush_callback_fn(lc->ti)) {
 698                /*
 699                 * At this point it is impossible to determine which
 700                 * regions are clean and which are dirty (without
 701                 * re-reading the log off disk). So mark all of them
 702                 * dirty.
 703                 */
 704                lc->flush_failed = 1;
 705                for (i = 0; i < lc->region_count; i++)
 706                        log_clear_bit(lc, lc->clean_bits, i);
 707        }
 708
 709        r = rw_header(lc, WRITE);
 710        if (r)
 711                fail_log_device(lc);
 712        else {
 713                if (lc->touched_dirtied) {
 714                        r = flush_header(lc);
 715                        if (r) {
 716                                lc->log_dev_flush_failed = 1;
 717                                fail_log_device(lc);
 718                        } else
 719                                lc->touched_dirtied = 0;
 720                }
 721                lc->touched_cleaned = 0;
 722        }
 723
 724        return r;
 725}
 726
 727static void core_mark_region(struct dm_dirty_log *log, region_t region)
 728{
 729        struct log_c *lc = (struct log_c *) log->context;
 730        log_clear_bit(lc, lc->clean_bits, region);
 731}
 732
 733static void core_clear_region(struct dm_dirty_log *log, region_t region)
 734{
 735        struct log_c *lc = (struct log_c *) log->context;
 736        if (likely(!lc->flush_failed))
 737                log_set_bit(lc, lc->clean_bits, region);
 738}
 739
 740static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
 741{
 742        struct log_c *lc = (struct log_c *) log->context;
 743
 744        if (lc->sync_search >= lc->region_count)
 745                return 0;
 746
 747        do {
 748                *region = find_next_zero_bit_le(lc->sync_bits,
 749                                             lc->region_count,
 750                                             lc->sync_search);
 751                lc->sync_search = *region + 1;
 752
 753                if (*region >= lc->region_count)
 754                        return 0;
 755
 756        } while (log_test_bit(lc->recovering_bits, *region));
 757
 758        log_set_bit(lc, lc->recovering_bits, *region);
 759        return 1;
 760}
 761
 762static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
 763                                 int in_sync)
 764{
 765        struct log_c *lc = (struct log_c *) log->context;
 766
 767        log_clear_bit(lc, lc->recovering_bits, region);
 768        if (in_sync) {
 769                log_set_bit(lc, lc->sync_bits, region);
 770                lc->sync_count++;
 771        } else if (log_test_bit(lc->sync_bits, region)) {
 772                lc->sync_count--;
 773                log_clear_bit(lc, lc->sync_bits, region);
 774        }
 775}
 776
 777static region_t core_get_sync_count(struct dm_dirty_log *log)
 778{
 779        struct log_c *lc = (struct log_c *) log->context;
 780
 781        return lc->sync_count;
 782}
 783
 784#define DMEMIT_SYNC \
 785        if (lc->sync != DEFAULTSYNC) \
 786                DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
 787
 788static int core_status(struct dm_dirty_log *log, status_type_t status,
 789                       char *result, unsigned int maxlen)
 790{
 791        int sz = 0;
 792        struct log_c *lc = log->context;
 793
 794        switch(status) {
 795        case STATUSTYPE_INFO:
 796                DMEMIT("1 %s", log->type->name);
 797                break;
 798
 799        case STATUSTYPE_TABLE:
 800                DMEMIT("%s %u %u ", log->type->name,
 801                       lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
 802                DMEMIT_SYNC;
 803        }
 804
 805        return sz;
 806}
 807
 808static int disk_status(struct dm_dirty_log *log, status_type_t status,
 809                       char *result, unsigned int maxlen)
 810{
 811        int sz = 0;
 812        struct log_c *lc = log->context;
 813
 814        switch(status) {
 815        case STATUSTYPE_INFO:
 816                DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
 817                       lc->log_dev_flush_failed ? 'F' :
 818                       lc->log_dev_failed ? 'D' :
 819                       'A');
 820                break;
 821
 822        case STATUSTYPE_TABLE:
 823                DMEMIT("%s %u %s %u ", log->type->name,
 824                       lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
 825                       lc->region_size);
 826                DMEMIT_SYNC;
 827        }
 828
 829        return sz;
 830}
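/*
 * Example status output (device name and sizes are illustrative):
 *
 *	core, STATUSTYPE_INFO : "1 core"
 *	core, STATUSTYPE_TABLE: "core 1 1024 "        or "core 2 1024 nosync "
 *	disk, STATUSTYPE_INFO : "3 disk 253:4 A"
 *	disk, STATUSTYPE_TABLE: "disk 2 253:4 1024 "  or "disk 3 253:4 1024 sync "
 */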
 831
 832static struct dm_dirty_log_type _core_type = {
 833        .name = "core",
 834        .module = THIS_MODULE,
 835        .ctr = core_ctr,
 836        .dtr = core_dtr,
 837        .resume = core_resume,
 838        .get_region_size = core_get_region_size,
 839        .is_clean = core_is_clean,
 840        .in_sync = core_in_sync,
 841        .flush = core_flush,
 842        .mark_region = core_mark_region,
 843        .clear_region = core_clear_region,
 844        .get_resync_work = core_get_resync_work,
 845        .set_region_sync = core_set_region_sync,
 846        .get_sync_count = core_get_sync_count,
 847        .status = core_status,
 848};
 849
 850static struct dm_dirty_log_type _disk_type = {
 851        .name = "disk",
 852        .module = THIS_MODULE,
 853        .ctr = disk_ctr,
 854        .dtr = disk_dtr,
 855        .postsuspend = disk_flush,
 856        .resume = disk_resume,
 857        .get_region_size = core_get_region_size,
 858        .is_clean = core_is_clean,
 859        .in_sync = core_in_sync,
 860        .flush = disk_flush,
 861        .mark_region = core_mark_region,
 862        .clear_region = core_clear_region,
 863        .get_resync_work = core_get_resync_work,
 864        .set_region_sync = core_set_region_sync,
 865        .get_sync_count = core_get_sync_count,
 866        .status = disk_status,
 867};
 868
 869static int __init dm_dirty_log_init(void)
 870{
 871        int r;
 872
 873        r = dm_dirty_log_type_register(&_core_type);
 874        if (r)
 875                DMWARN("couldn't register core log");
 876
 877        r = dm_dirty_log_type_register(&_disk_type);
 878        if (r) {
  879                DMWARN("couldn't register disk log");
 880                dm_dirty_log_type_unregister(&_core_type);
 881        }
 882
 883        return r;
 884}
 885
 886static void __exit dm_dirty_log_exit(void)
 887{
 888        dm_dirty_log_type_unregister(&_disk_type);
 889        dm_dirty_log_type_unregister(&_core_type);
 890}
 891
 892module_init(dm_dirty_log_init);
 893module_exit(dm_dirty_log_exit);
 894
 895MODULE_DESCRIPTION(DM_NAME " dirty region log");
 896MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
 897MODULE_LICENSE("GPL");
 898