linux/drivers/md/dm-log-userspace-base.c
/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/dm-dirty-log.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "dm-log-userspace-transfer.h"

#define DM_LOG_USERSPACE_VSN "1.3.0"

#define FLUSH_ENTRY_POOL_SIZE 16

struct dm_dirty_log_flush_entry {
        int type;
        region_t region;
        struct list_head list;
};

/*
 * This limit on the number of mark and clear requests is, to a degree,
 * arbitrary.  However, there is some basis for the choice in the limits
 * imposed on the size of data payload by dm-log-userspace-transfer.c:
 * dm_consult_userspace().
 */
#define MAX_FLUSH_GROUP_COUNT 32

struct log_c {
        struct dm_target *ti;
        struct dm_dev *log_dev;

        char *usr_argv_str;
        uint32_t usr_argc;

        uint32_t region_size;
        region_t region_count;
        uint64_t luid;
        char uuid[DM_UUID_LEN];

        /*
         * Mark and clear requests are held until a flush is issued
         * so that we can group, and thereby limit, the amount of
         * network traffic between kernel and userspace.  The 'flush_lock'
         * is used to protect these lists.
         */
        spinlock_t flush_lock;
        struct list_head mark_list;
        struct list_head clear_list;

        /*
         * in_sync_hint gets set when doing is_remote_recovering.  It
         * represents the first region that needs recovery.  IOW, the
         * first zero bit of sync_bits.  This can be useful to limit
         * traffic for calls like is_remote_recovering and get_resync_work,
         * but take care in its use for anything else.
         */
        uint64_t in_sync_hint;

        /*
         * Workqueue for flush of clear region requests.
         */
        struct workqueue_struct *dmlog_wq;
        struct delayed_work flush_log_work;
        atomic_t sched_flush;

        /*
         * Combine userspace flush and mark requests for efficiency.
         */
        uint32_t integrated_flush;

        mempool_t *flush_entry_pool;
};

static struct kmem_cache *_flush_entry_cache;

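/*
 * userspace_do_request
 *
 * Relay a request to the userspace log server.  If the server has
 * gone away (-ESRCH), keep retrying: re-send the constructor string
 * (DM_ULOG_CTR) until the server responds, resume the log
 * (DM_ULOG_RESUME) and then retry the original request.
 */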
static int userspace_do_request(struct log_c *lc, const char *uuid,
                                int request_type, char *data, size_t data_size,
                                char *rdata, size_t *rdata_size)
{
        int r;

        /*
         * If the server isn't there, -ESRCH is returned,
         * and we must keep trying until the server is
         * restored.
         */
retry:
        r = dm_consult_userspace(uuid, lc->luid, request_type, data,
                                 data_size, rdata, rdata_size);

        if (r != -ESRCH)
                return r;

        DMERR(" Userspace log server not found.");
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(2*HZ);
                DMWARN("Attempting to contact userspace log server...");
                r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
                                         lc->usr_argv_str,
                                         strlen(lc->usr_argv_str) + 1,
                                         NULL, NULL);
                if (!r)
                        break;
        }
        DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
        r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
                                 0, NULL, NULL);
        if (!r)
                goto retry;

        DMERR("Error trying to resume userspace log: %d", r);

        return -ESRCH;
}

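/*
 * build_constructor_string
 *
 * Build the single, space-delimited string handed to the userspace
 * ctr function: "<ti->len> <argv[0]> <argv[1]> ...".
 *
 * Returns: length of the string on success, -ENOMEM on failure
 */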
static int build_constructor_string(struct dm_target *ti,
                                    unsigned argc, char **argv,
                                    char **ctr_str)
{
        int i, str_size;
        char *str = NULL;

        *ctr_str = NULL;

        /*
         * Determine overall size of the string.
         */
        for (i = 0, str_size = 0; i < argc; i++)
                str_size += strlen(argv[i]) + 1; /* +1 for space between args */

        str_size += 20; /* Max number of chars in a printed u64 number */

        str = kzalloc(str_size, GFP_KERNEL);
        if (!str) {
                DMWARN("Unable to allocate memory for constructor string");
                return -ENOMEM;
        }

        str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
        for (i = 0; i < argc; i++)
                str_size += sprintf(str + str_size, " %s", argv[i]);

        *ctr_str = str;
        return str_size;
}

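/*
 * do_flush
 *
 * Delayed-work handler used with integrated flush: clear the
 * sched_flush flag, send a DM_ULOG_FLUSH request to userspace and
 * raise a table event if it fails.
 */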
static void do_flush(struct work_struct *work)
{
        int r;
        struct log_c *lc = container_of(work, struct log_c, flush_log_work.work);

        atomic_set(&lc->sched_flush, 0);

        r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL);

        if (r)
                dm_table_event(lc->ti->table);
}

/*
 * userspace_ctr
 *
 * argv contains:
 *      <UUID> [integrated_flush] <other args>
 * Where 'other args' are the userspace implementation-specific log
 * arguments.
 *
 * Example:
 *      <UUID> [integrated_flush] clustered-disk <arg count> <log dev>
 *      <region_size> [[no]sync]
 *
 * This module strips off the <UUID> and uses it for identification
 * purposes when communicating with userspace about a log.
 *
 * If integrated_flush is defined, the kernel combines flush
 * and mark requests.
 *
 * The rest of the line, beginning with 'clustered-disk', is passed
 * to the userspace ctr function.
 */
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
                         unsigned argc, char **argv)
{
        int r = 0;
        int str_size;
        char *ctr_str = NULL;
        struct log_c *lc = NULL;
        uint64_t rdata;
        size_t rdata_size = sizeof(rdata);
        char *devices_rdata = NULL;
        size_t devices_rdata_size = DM_NAME_LEN;

        if (argc < 3) {
                DMWARN("Too few arguments to userspace dirty log");
                return -EINVAL;
        }

        lc = kzalloc(sizeof(*lc), GFP_KERNEL);
        if (!lc) {
                DMWARN("Unable to allocate userspace log context.");
                return -ENOMEM;
        }

        /* The ptr value is sufficient for local unique id */
        lc->luid = (unsigned long)lc;

        lc->ti = ti;

        if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
                DMWARN("UUID argument too long.");
                kfree(lc);
                return -EINVAL;
        }

        lc->usr_argc = argc;

        strncpy(lc->uuid, argv[0], DM_UUID_LEN);
        argc--;
        argv++;
        spin_lock_init(&lc->flush_lock);
        INIT_LIST_HEAD(&lc->mark_list);
        INIT_LIST_HEAD(&lc->clear_list);

        if (!strcasecmp(argv[0], "integrated_flush")) {
                lc->integrated_flush = 1;
                argc--;
                argv++;
        }

        str_size = build_constructor_string(ti, argc, argv, &ctr_str);
        if (str_size < 0) {
                kfree(lc);
                return str_size;
        }

        devices_rdata = kzalloc(devices_rdata_size, GFP_KERNEL);
        if (!devices_rdata) {
                DMERR("Failed to allocate memory for device information");
                r = -ENOMEM;
                goto out;
        }

        lc->flush_entry_pool = mempool_create_slab_pool(FLUSH_ENTRY_POOL_SIZE,
                                                        _flush_entry_cache);
        if (!lc->flush_entry_pool) {
                DMERR("Failed to create flush_entry_pool");
                r = -ENOMEM;
                goto out;
        }

        /*
         * Send table string and get back any opened device.
         */
        r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
                                 ctr_str, str_size,
                                 devices_rdata, &devices_rdata_size);

        if (r < 0) {
                if (r == -ESRCH)
                        DMERR("Userspace log server not found");
                else
                        DMERR("Userspace log server failed to create log");
                goto out;
        }

        /* Since the region size does not change, get it now */
        rdata_size = sizeof(rdata);
        r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
                                 NULL, 0, (char *)&rdata, &rdata_size);

        if (r) {
                DMERR("Failed to get region size of dirty log");
                goto out;
        }

        lc->region_size = (uint32_t)rdata;
        lc->region_count = dm_sector_div_up(ti->len, lc->region_size);

        if (devices_rdata_size) {
                if (devices_rdata[devices_rdata_size - 1] != '\0') {
                        DMERR("DM_ULOG_CTR device return string not properly terminated");
                        r = -EINVAL;
                        goto out;
                }
                r = dm_get_device(ti, devices_rdata,
                                  dm_table_get_mode(ti->table), &lc->log_dev);
                if (r)
                        DMERR("Failed to register %s with device-mapper",
                              devices_rdata);
        }

        if (lc->integrated_flush) {
                lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0);
                if (!lc->dmlog_wq) {
                        DMERR("couldn't start dmlogd");
                        r = -ENOMEM;
                        goto out;
                }

                INIT_DELAYED_WORK(&lc->flush_log_work, do_flush);
                atomic_set(&lc->sched_flush, 0);
        }

out:
        kfree(devices_rdata);
        if (r) {
                mempool_destroy(lc->flush_entry_pool);
                kfree(lc);
                kfree(ctr_str);
        } else {
                lc->usr_argv_str = ctr_str;
                log->context = lc;
        }

        return r;
}

static void userspace_dtr(struct dm_dirty_log *log)
{
        struct log_c *lc = log->context;

        if (lc->integrated_flush) {
                /* flush workqueue */
                if (atomic_read(&lc->sched_flush))
                        flush_delayed_work(&lc->flush_log_work);

                destroy_workqueue(lc->dmlog_wq);
        }

        (void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
                                    NULL, 0, NULL, NULL);

        if (lc->log_dev)
                dm_put_device(lc->ti, lc->log_dev);

        mempool_destroy(lc->flush_entry_pool);

        kfree(lc->usr_argv_str);
        kfree(lc);

        return;
}

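/*
 * The suspend/resume hooks below forward the corresponding DM_ULOG_*
 * request to the userspace log server.  Postsuspend first completes
 * any scheduled integrated flush; resume resets in_sync_hint.
 */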
static int userspace_presuspend(struct dm_dirty_log *log)
{
        int r;
        struct log_c *lc = log->context;

        r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
                                 NULL, 0, NULL, NULL);

        return r;
}

static int userspace_postsuspend(struct dm_dirty_log *log)
{
        int r;
        struct log_c *lc = log->context;

        /*
         * Run any scheduled flush now instead of waiting for the
         * delay to expire.
         */
        if (lc->integrated_flush && atomic_read(&lc->sched_flush))
                flush_delayed_work(&lc->flush_log_work);

        r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
                                 NULL, 0, NULL, NULL);

        return r;
}

static int userspace_resume(struct dm_dirty_log *log)
{
        int r;
        struct log_c *lc = log->context;

        lc->in_sync_hint = 0;
        r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
                                 NULL, 0, NULL, NULL);

        return r;
}

static uint32_t userspace_get_region_size(struct dm_dirty_log *log)
{
        struct log_c *lc = log->context;

        return lc->region_size;
}

/*
 * userspace_is_clean
 *
 * Check whether a region is clean.  If there is any sort of
 * failure when consulting the server, we return not clean.
 *
 * Returns: 1 if clean, 0 otherwise
 */
static int userspace_is_clean(struct dm_dirty_log *log, region_t region)
{
        int r;
        uint64_t region64 = (uint64_t)region;
        int64_t is_clean;
        size_t rdata_size;
        struct log_c *lc = log->context;

        rdata_size = sizeof(is_clean);
        r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
                                 (char *)&region64, sizeof(region64),
                                 (char *)&is_clean, &rdata_size);

        return (r) ? 0 : (int)is_clean;
}

/*
 * userspace_in_sync
 *
 * Check if the region is in-sync.  If there is any sort
 * of failure when consulting the server, we assume that
 * the region is not in sync.
 *
 * If 'can_block' is not set, -EWOULDBLOCK is returned immediately.
 *
 * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK
 */
static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
                             int can_block)
{
        int r;
        uint64_t region64 = region;
        int64_t in_sync;
        size_t rdata_size;
        struct log_c *lc = log->context;

        /*
         * We can never respond directly - even if in_sync_hint is
         * set.  This is because another machine could see a device
         * failure and mark the region out-of-sync.  If we don't go
         * to userspace to ask, we might think the region is in-sync
         * and allow a read to pick up data that is stale.  (This is
         * very unlikely if a device actually fails; but it is very
         * likely if a connection to one device from one machine fails.)
         *
         * There still might be a problem if the mirror caches the region
         * state as in-sync... but then this call would not be made.  So,
         * that is a mirror problem.
         */
        if (!can_block)
                return -EWOULDBLOCK;

        rdata_size = sizeof(in_sync);
        r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
                                 (char *)&region64, sizeof(region64),
                                 (char *)&in_sync, &rdata_size);
        return (r) ? 0 : (int)in_sync;
}

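/*
 * flush_one_by_one
 *
 * Fallback used when a grouped send fails: issue each queued
 * mark/clear request to userspace individually, stopping on the
 * first error.
 */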
static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
{
        int r = 0;
        struct dm_dirty_log_flush_entry *fe;

        list_for_each_entry(fe, flush_list, list) {
                r = userspace_do_request(lc, lc->uuid, fe->type,
                                         (char *)&fe->region,
                                         sizeof(fe->region),
                                         NULL, NULL);
                if (r)
                        break;
        }

        return r;
}

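/*
 * flush_by_group
 *
 * Send the queued requests to userspace in batches of up to
 * MAX_FLUSH_GROUP_COUNT regions.  If 'flush_with_payload' is set, each
 * batch is sent as the payload of a DM_ULOG_FLUSH request (integrated
 * flush); otherwise it is sent as a grouped mark/clear request, and a
 * failed group send falls back to flush_one_by_one().
 */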
static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
                          int flush_with_payload)
{
        int r = 0;
        int count;
        uint32_t type = 0;
        struct dm_dirty_log_flush_entry *fe, *tmp_fe;
        LIST_HEAD(tmp_list);
        uint64_t group[MAX_FLUSH_GROUP_COUNT];

        /*
         * Group process the requests
         */
        while (!list_empty(flush_list)) {
                count = 0;

                list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
                        group[count] = fe->region;
                        count++;

                        list_move(&fe->list, &tmp_list);

                        type = fe->type;
                        if (count >= MAX_FLUSH_GROUP_COUNT)
                                break;
                }

                if (flush_with_payload) {
                        r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
                                                 (char *)(group),
                                                 count * sizeof(uint64_t),
                                                 NULL, NULL);
                        /*
                         * Integrated flush failed.
                         */
                        if (r)
                                break;
                } else {
                        r = userspace_do_request(lc, lc->uuid, type,
                                                 (char *)(group),
                                                 count * sizeof(uint64_t),
                                                 NULL, NULL);
                        if (r) {
                                /*
                                 * Group send failed.  Attempt one-by-one.
                                 */
                                list_splice_init(&tmp_list, flush_list);
                                r = flush_one_by_one(lc, flush_list);
                                break;
                        }
                }
        }

        /*
         * Must collect the flush entries that were successfully processed
         * as a group so that they can be freed by the caller.
         */
        list_splice_init(&tmp_list, flush_list);

        return r;
}

/*
 * userspace_flush
 *
 * This function is ok to block.
 * The flush happens in two stages.  First, it sends all
 * clear/mark requests that are on the list.  Then it
 * tells the server to commit them.  This gives the
 * server a chance to optimise the commit, instead of
 * doing it for every request.
 *
 * Additionally, we could implement another thread that
 * sends the requests up to the server - reducing the
 * load on flush.  Then the flush would have less in
 * the list and be responsible for the finishing commit.
 *
 * Returns: 0 on success, < 0 on failure
 */
static int userspace_flush(struct dm_dirty_log *log)
{
        int r = 0;
        unsigned long flags;
        struct log_c *lc = log->context;
        LIST_HEAD(mark_list);
        LIST_HEAD(clear_list);
        int mark_list_is_empty;
        int clear_list_is_empty;
        struct dm_dirty_log_flush_entry *fe, *tmp_fe;
        mempool_t *flush_entry_pool = lc->flush_entry_pool;

        spin_lock_irqsave(&lc->flush_lock, flags);
        list_splice_init(&lc->mark_list, &mark_list);
        list_splice_init(&lc->clear_list, &clear_list);
        spin_unlock_irqrestore(&lc->flush_lock, flags);

        mark_list_is_empty = list_empty(&mark_list);
        clear_list_is_empty = list_empty(&clear_list);

        if (mark_list_is_empty && clear_list_is_empty)
                return 0;

        r = flush_by_group(lc, &clear_list, 0);
        if (r)
                goto out;

        if (!lc->integrated_flush) {
                r = flush_by_group(lc, &mark_list, 0);
                if (r)
                        goto out;
                r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
                                         NULL, 0, NULL, NULL);
                goto out;
        }

        /*
         * Send integrated flush request with mark_list as payload.
         */
        r = flush_by_group(lc, &mark_list, 1);
        if (r)
                goto out;

        if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) {
                /*
                 * When there are only clear region requests,
                 * we schedule a flush in the future.
                 */
                queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ);
                atomic_set(&lc->sched_flush, 1);
        } else {
                /*
                 * Cancel pending flush because we
                 * have already flushed in mark_region.
                 */
                cancel_delayed_work(&lc->flush_log_work);
                atomic_set(&lc->sched_flush, 0);
        }

out:
        /*
         * We can safely remove these entries, even after failure.
         * Calling code will receive an error and will know that
         * the log facility has failed.
         */
        list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
                list_del(&fe->list);
                mempool_free(fe, flush_entry_pool);
        }
        list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
                list_del(&fe->list);
                mempool_free(fe, flush_entry_pool);
        }

        if (r)
                dm_table_event(lc->ti->table);

        return r;
}

/*
 * userspace_mark_region
 *
 * This function should avoid blocking unless absolutely required.
 * (Memory allocation is valid for blocking.)
 */
static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
{
        unsigned long flags;
        struct log_c *lc = log->context;
        struct dm_dirty_log_flush_entry *fe;

        /* Wait for an allocation, but _never_ fail */
        fe = mempool_alloc(lc->flush_entry_pool, GFP_NOIO);
        BUG_ON(!fe);

        spin_lock_irqsave(&lc->flush_lock, flags);
        fe->type = DM_ULOG_MARK_REGION;
        fe->region = region;
        list_add(&fe->list, &lc->mark_list);
        spin_unlock_irqrestore(&lc->flush_lock, flags);

        return;
}

/*
 * userspace_clear_region
 *
 * This function must not block.
 * So, the alloc can't block.  In the worst case, it is ok to
 * fail.  It would simply mean we can't clear the region.
 * That does nothing to the current sync context, but it does mean
 * the region will be resynced on a reload of the mirror
 * even though it is in-sync.
 */
static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
{
        unsigned long flags;
        struct log_c *lc = log->context;
        struct dm_dirty_log_flush_entry *fe;

        /*
         * If we fail to allocate, we skip the clearing of
         * the region.  This doesn't hurt us in any way, except
         * to cause the region to be resync'ed when the
         * device is activated next time.
         */
        fe = mempool_alloc(lc->flush_entry_pool, GFP_ATOMIC);
        if (!fe) {
                DMERR("Failed to allocate memory to clear region.");
                return;
        }

        spin_lock_irqsave(&lc->flush_lock, flags);
        fe->type = DM_ULOG_CLEAR_REGION;
        fe->region = region;
        list_add(&fe->list, &lc->clear_list);
        spin_unlock_irqrestore(&lc->flush_lock, flags);

        return;
}

/*
 * userspace_get_resync_work
 *
 * Get a region that needs recovery.  It is valid to return
 * an error for this function.
 *
 * Returns: 1 if region filled, 0 if no work, <0 on error
 */
static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
        int r;
        size_t rdata_size;
        struct log_c *lc = log->context;
        struct {
                int64_t i; /* 64-bit for mixed-arch compatibility */
                region_t r;
        } pkg;

        if (lc->in_sync_hint >= lc->region_count)
                return 0;

        rdata_size = sizeof(pkg);
        r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
                                 NULL, 0, (char *)&pkg, &rdata_size);

        *region = pkg.r;
        return (r) ? r : (int)pkg.i;
}

/*
 * userspace_set_region_sync
 *
 * Set the sync status of a given region.  This function
 * must not fail.
 */
static void userspace_set_region_sync(struct dm_dirty_log *log,
                                      region_t region, int in_sync)
{
        struct log_c *lc = log->context;
        struct {
                region_t r;
                int64_t i;
        } pkg;

        pkg.r = region;
        pkg.i = (int64_t)in_sync;

        (void) userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
                                    (char *)&pkg, sizeof(pkg), NULL, NULL);

        /*
         * It would be nice to be able to report failures.
         * However, it is easy enough to detect and resolve.
         */
        return;
}

/*
 * userspace_get_sync_count
 *
 * If there is any sort of failure when consulting the server,
 * we assume that the sync count is zero.
 *
 * Returns: sync count on success, 0 on failure
 */
static region_t userspace_get_sync_count(struct dm_dirty_log *log)
{
        int r;
        size_t rdata_size;
        uint64_t sync_count;
        struct log_c *lc = log->context;

        rdata_size = sizeof(sync_count);
        r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
                                 NULL, 0, (char *)&sync_count, &rdata_size);

        if (r)
                return 0;

        if (sync_count >= lc->region_count)
                lc->in_sync_hint = lc->region_count;

        return (region_t)sync_count;
}

/*
 * userspace_status
 *
 * Returns: amount of space consumed
 */
static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
                            char *result, unsigned maxlen)
{
        int r = 0;
        char *table_args;
        size_t sz = (size_t)maxlen;
        struct log_c *lc = log->context;

        switch (status_type) {
        case STATUSTYPE_INFO:
                r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
                                         NULL, 0, result, &sz);

                if (r) {
                        sz = 0;
                        DMEMIT("%s 1 COM_FAILURE", log->type->name);
                }
                break;
        case STATUSTYPE_TABLE:
                sz = 0;
                table_args = strchr(lc->usr_argv_str, ' ');
                BUG_ON(!table_args); /* There will always be a ' ' */
                table_args++;

                DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid);
                if (lc->integrated_flush)
                        DMEMIT("integrated_flush ");
                DMEMIT("%s ", table_args);
                break;
        }
        return (r) ? 0 : (int)sz;
}

/*
 * userspace_is_remote_recovering
 *
 * Returns: 1 if region recovering, 0 otherwise
 */
static int userspace_is_remote_recovering(struct dm_dirty_log *log,
                                          region_t region)
{
        int r;
        uint64_t region64 = region;
        struct log_c *lc = log->context;
        static unsigned long limit;
        struct {
                int64_t is_recovering;
                uint64_t in_sync_hint;
        } pkg;
        size_t rdata_size = sizeof(pkg);

        /*
         * Once the mirror has been reported to be in-sync,
         * it will never again ask for recovery work.  So,
         * we can safely say there is not a remote machine
         * recovering if the device is in-sync.  (in_sync_hint
         * must be reset at resume time.)
         */
        if (region < lc->in_sync_hint)
                return 0;
        else if (time_after(limit, jiffies))
                return 1;

        limit = jiffies + (HZ / 4);
        r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
                                 (char *)&region64, sizeof(region64),
                                 (char *)&pkg, &rdata_size);
        if (r)
                return 1;

        lc->in_sync_hint = pkg.in_sync_hint;

        return (int)pkg.is_recovering;
}

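/*
 * The dirty log operations registered with the dm-dirty-log core.
 * Most hooks relay directly to the userspace log server; mark_region
 * and clear_region only queue entries locally until the next flush.
 */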
static struct dm_dirty_log_type _userspace_type = {
        .name = "userspace",
        .module = THIS_MODULE,
        .ctr = userspace_ctr,
        .dtr = userspace_dtr,
        .presuspend = userspace_presuspend,
        .postsuspend = userspace_postsuspend,
        .resume = userspace_resume,
        .get_region_size = userspace_get_region_size,
        .is_clean = userspace_is_clean,
        .in_sync = userspace_in_sync,
        .flush = userspace_flush,
        .mark_region = userspace_mark_region,
        .clear_region = userspace_clear_region,
        .get_resync_work = userspace_get_resync_work,
        .set_region_sync = userspace_set_region_sync,
        .get_sync_count = userspace_get_sync_count,
        .status = userspace_status,
        .is_remote_recovering = userspace_is_remote_recovering,
};

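/*
 * Module init/exit: create the flush entry cache, initialize the
 * kernel/userspace transfer layer (dm-log-userspace-transfer) and
 * register the "userspace" dirty log type; tear down in reverse
 * order on exit.
 */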
static int __init userspace_dirty_log_init(void)
{
        int r = 0;

        _flush_entry_cache = KMEM_CACHE(dm_dirty_log_flush_entry, 0);
        if (!_flush_entry_cache) {
                DMWARN("Unable to create flush_entry_cache: No memory.");
                return -ENOMEM;
        }

        r = dm_ulog_tfr_init();
        if (r) {
                DMWARN("Unable to initialize userspace log communications");
                kmem_cache_destroy(_flush_entry_cache);
                return r;
        }

        r = dm_dirty_log_type_register(&_userspace_type);
        if (r) {
                DMWARN("Couldn't register userspace dirty log type");
                dm_ulog_tfr_exit();
                kmem_cache_destroy(_flush_entry_cache);
                return r;
        }

        DMINFO("version " DM_LOG_USERSPACE_VSN " loaded");
        return 0;
}

static void __exit userspace_dirty_log_exit(void)
{
        dm_dirty_log_type_unregister(&_userspace_type);
        dm_ulog_tfr_exit();
        kmem_cache_destroy(_flush_entry_cache);

        DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
        return;
}

module_init(userspace_dirty_log_init);
module_exit(userspace_dirty_log_exit);

MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");