qemu/block/gluster.c
   1/*
   2 * GlusterFS backend for QEMU
   3 *
   4 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
   5 *
   6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
   7 * See the COPYING file in the top-level directory.
   8 *
   9 */
  10#include "qemu/osdep.h"
  11#include <glusterfs/api/glfs.h>
  12#include "block/block_int.h"
  13#include "qapi/error.h"
  14#include "qapi/qmp/qerror.h"
  15#include "qemu/uri.h"
  16#include "qemu/error-report.h"
  17#include "qemu/cutils.h"
  18
  19#define GLUSTER_OPT_FILENAME        "filename"
  20#define GLUSTER_OPT_VOLUME          "volume"
  21#define GLUSTER_OPT_PATH            "path"
  22#define GLUSTER_OPT_TYPE            "type"
  23#define GLUSTER_OPT_SERVER_PATTERN  "server."
  24#define GLUSTER_OPT_HOST            "host"
  25#define GLUSTER_OPT_PORT            "port"
  26#define GLUSTER_OPT_TO              "to"
  27#define GLUSTER_OPT_IPV4            "ipv4"
  28#define GLUSTER_OPT_IPV6            "ipv6"
  29#define GLUSTER_OPT_SOCKET          "socket"
  30#define GLUSTER_OPT_DEBUG           "debug"
  31#define GLUSTER_DEFAULT_PORT        24007
  32#define GLUSTER_DEBUG_DEFAULT       4
  33#define GLUSTER_DEBUG_MAX           9
  34#define GLUSTER_OPT_LOGFILE         "logfile"
  35#define GLUSTER_LOGFILE_DEFAULT     "-" /* handled in libgfapi as /dev/stderr */
  36
  37#define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n"
  38
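     /*
      * Per-request state for asynchronous gluster I/O: the expected transfer
      * size, the final return value, and the request coroutine to re-enter
      * (in its AioContext) once the request completes.
      */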
  39typedef struct GlusterAIOCB {
  40    int64_t size;
  41    int ret;
  42    Coroutine *coroutine;
  43    AioContext *aio_context;
  44} GlusterAIOCB;
  45
  46typedef struct BDRVGlusterState {
  47    struct glfs *glfs;
  48    struct glfs_fd *fd;
  49    char *logfile;
  50    bool supports_seek_data;
  51    int debug;
  52} BDRVGlusterState;
  53
  54typedef struct BDRVGlusterReopenState {
  55    struct glfs *glfs;
  56    struct glfs_fd *fd;
  57} BDRVGlusterReopenState;
  58
  59
  60typedef struct GlfsPreopened {
  61    char *volume;
  62    glfs_t *fs;
  63    int ref;
  64} GlfsPreopened;
  65
  66typedef struct ListElement {
  67    QLIST_ENTRY(ListElement) list;
  68    GlfsPreopened saved;
  69} ListElement;
  70
  71static QLIST_HEAD(glfs_list, ListElement) glfs_list;
  72
  73static QemuOptsList qemu_gluster_create_opts = {
  74    .name = "qemu-gluster-create-opts",
  75    .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
  76    .desc = {
  77        {
  78            .name = BLOCK_OPT_SIZE,
  79            .type = QEMU_OPT_SIZE,
  80            .help = "Virtual disk size"
  81        },
  82        {
  83            .name = BLOCK_OPT_PREALLOC,
  84            .type = QEMU_OPT_STRING,
  85            .help = "Preallocation mode (allowed values: off, full)"
  86        },
  87        {
  88            .name = GLUSTER_OPT_DEBUG,
  89            .type = QEMU_OPT_NUMBER,
  90            .help = "Gluster log level, valid range is 0-9",
  91        },
  92        {
  93            .name = GLUSTER_OPT_LOGFILE,
  94            .type = QEMU_OPT_STRING,
  95            .help = "Logfile path of libgfapi",
  96        },
  97        { /* end of list */ }
  98    }
  99};
 100
 101static QemuOptsList runtime_opts = {
 102    .name = "gluster",
 103    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
 104    .desc = {
 105        {
 106            .name = GLUSTER_OPT_FILENAME,
 107            .type = QEMU_OPT_STRING,
 108            .help = "URL to the gluster image",
 109        },
 110        {
 111            .name = GLUSTER_OPT_DEBUG,
 112            .type = QEMU_OPT_NUMBER,
 113            .help = "Gluster log level, valid range is 0-9",
 114        },
 115        {
 116            .name = GLUSTER_OPT_LOGFILE,
 117            .type = QEMU_OPT_STRING,
 118            .help = "Logfile path of libgfapi",
 119        },
 120        { /* end of list */ }
 121    },
 122};
 123
 124static QemuOptsList runtime_json_opts = {
 125    .name = "gluster_json",
 126    .head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
 127    .desc = {
 128        {
 129            .name = GLUSTER_OPT_VOLUME,
 130            .type = QEMU_OPT_STRING,
 131            .help = "name of gluster volume where VM image resides",
 132        },
 133        {
 134            .name = GLUSTER_OPT_PATH,
 135            .type = QEMU_OPT_STRING,
 136            .help = "absolute path to image file in gluster volume",
 137        },
 138        {
 139            .name = GLUSTER_OPT_DEBUG,
 140            .type = QEMU_OPT_NUMBER,
 141            .help = "Gluster log level, valid range is 0-9",
 142        },
 143        { /* end of list */ }
 144    },
 145};
 146
 147static QemuOptsList runtime_type_opts = {
 148    .name = "gluster_type",
 149    .head = QTAILQ_HEAD_INITIALIZER(runtime_type_opts.head),
 150    .desc = {
 151        {
 152            .name = GLUSTER_OPT_TYPE,
 153            .type = QEMU_OPT_STRING,
 154            .help = "tcp|unix",
 155        },
 156        { /* end of list */ }
 157    },
 158};
 159
 160static QemuOptsList runtime_unix_opts = {
 161    .name = "gluster_unix",
 162    .head = QTAILQ_HEAD_INITIALIZER(runtime_unix_opts.head),
 163    .desc = {
 164        {
 165            .name = GLUSTER_OPT_SOCKET,
 166            .type = QEMU_OPT_STRING,
  167            .help = "socket file path",
 168        },
 169        { /* end of list */ }
 170    },
 171};
 172
 173static QemuOptsList runtime_tcp_opts = {
 174    .name = "gluster_tcp",
 175    .head = QTAILQ_HEAD_INITIALIZER(runtime_tcp_opts.head),
 176    .desc = {
 177        {
 178            .name = GLUSTER_OPT_TYPE,
 179            .type = QEMU_OPT_STRING,
 180            .help = "tcp|unix",
 181        },
 182        {
 183            .name = GLUSTER_OPT_HOST,
 184            .type = QEMU_OPT_STRING,
 185            .help = "host address (hostname/ipv4/ipv6 addresses)",
 186        },
 187        {
 188            .name = GLUSTER_OPT_PORT,
 189            .type = QEMU_OPT_STRING,
 190            .help = "port number on which glusterd is listening (default 24007)",
 191        },
 192        {
 193            .name = "to",
 194            .type = QEMU_OPT_NUMBER,
 195            .help = "max port number, not supported by gluster",
 196        },
 197        {
 198            .name = "ipv4",
 199            .type = QEMU_OPT_BOOL,
 200            .help = "ipv4 bool value, not supported by gluster",
 201        },
 202        {
 203            .name = "ipv6",
 204            .type = QEMU_OPT_BOOL,
 205            .help = "ipv6 bool value, not supported by gluster",
 206        },
 207        { /* end of list */ }
 208    },
 209};
 210
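     /*
      * Cache of pre-opened glfs connections, keyed by volume name and
      * reference counted.  qemu_gluster_glfs_init() reuses an existing
      * connection to the same volume instead of creating a new one, and
      * glfs_clear_preopened() drops a reference, calling glfs_fini() only
      * when the last user goes away.
      */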
 211static void glfs_set_preopened(const char *volume, glfs_t *fs)
 212{
 213    ListElement *entry = NULL;
 214
 215    entry = g_new(ListElement, 1);
 216
 217    entry->saved.volume = g_strdup(volume);
 218
 219    entry->saved.fs = fs;
 220    entry->saved.ref = 1;
 221
 222    QLIST_INSERT_HEAD(&glfs_list, entry, list);
 223}
 224
 225static glfs_t *glfs_find_preopened(const char *volume)
 226{
 227    ListElement *entry = NULL;
 228
  229    QLIST_FOREACH(entry, &glfs_list, list) {
 230        if (strcmp(entry->saved.volume, volume) == 0) {
 231            entry->saved.ref++;
 232            return entry->saved.fs;
 233        }
  234    }
 235
 236    return NULL;
 237}
 238
 239static void glfs_clear_preopened(glfs_t *fs)
 240{
 241    ListElement *entry = NULL;
 242    ListElement *next;
 243
 244    if (fs == NULL) {
 245        return;
 246    }
 247
 248    QLIST_FOREACH_SAFE(entry, &glfs_list, list, next) {
 249        if (entry->saved.fs == fs) {
 250            if (--entry->saved.ref) {
 251                return;
 252            }
 253
 254            QLIST_REMOVE(entry, list);
 255
 256            glfs_fini(entry->saved.fs);
 257            g_free(entry->saved.volume);
 258            g_free(entry);
 259        }
 260    }
 261}
 262
 263static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 264{
 265    char *p, *q;
 266
 267    if (!path) {
 268        return -EINVAL;
 269    }
 270
 271    /* volume */
 272    p = q = path + strspn(path, "/");
 273    p += strcspn(p, "/");
 274    if (*p == '\0') {
 275        return -EINVAL;
 276    }
 277    gconf->volume = g_strndup(q, p - q);
 278
 279    /* path */
 280    p += strspn(p, "/");
 281    if (*p == '\0') {
 282        return -EINVAL;
 283    }
 284    gconf->path = g_strdup(p);
 285    return 0;
 286}
 287
 288/*
 289 * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
 290 *
 291 * 'gluster' is the protocol.
 292 *
 293 * 'transport' specifies the transport type used to connect to gluster
 294 * management daemon (glusterd). Valid transport types are
 295 * tcp or unix. If a transport type isn't specified, then tcp type is assumed.
 296 *
 297 * 'host' specifies the host where the volume file specification for
  298 * the given volume resides. This can be either a hostname or an ipv4 address.
  299 * If the transport type is 'unix', then the 'host' field should not be
  300 * specified. The 'socket' field needs to be populated with the path to the
  301 * unix domain socket.
 302 *
 303 * 'port' is the port number on which glusterd is listening. This is optional
  304 * and if not specified, QEMU will send 0, which will make gluster use the
 305 * default port. If the transport type is unix, then 'port' should not be
 306 * specified.
 307 *
 308 * 'volume' is the name of the gluster volume which contains the VM image.
 309 *
 310 * 'path' is the path to the actual VM image that resides on gluster volume.
 311 *
 312 * Examples:
 313 *
 314 * file=gluster://1.2.3.4/testvol/a.img
 315 * file=gluster+tcp://1.2.3.4/testvol/a.img
 316 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 317 * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
 318 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 319 */
 320static int qemu_gluster_parse_uri(BlockdevOptionsGluster *gconf,
 321                                  const char *filename)
 322{
 323    GlusterServer *gsconf;
 324    URI *uri;
 325    QueryParams *qp = NULL;
 326    bool is_unix = false;
 327    int ret = 0;
 328
 329    uri = uri_parse(filename);
 330    if (!uri) {
 331        return -EINVAL;
 332    }
 333
 334    gconf->server = g_new0(GlusterServerList, 1);
 335    gconf->server->value = gsconf = g_new0(GlusterServer, 1);
 336
 337    /* transport */
 338    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
 339        gsconf->type = GLUSTER_TRANSPORT_TCP;
 340    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
 341        gsconf->type = GLUSTER_TRANSPORT_TCP;
 342    } else if (!strcmp(uri->scheme, "gluster+unix")) {
 343        gsconf->type = GLUSTER_TRANSPORT_UNIX;
 344        is_unix = true;
 345    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
 346        gsconf->type = GLUSTER_TRANSPORT_TCP;
 347        error_report("Warning: rdma feature is not supported, falling "
 348                     "back to tcp");
 349    } else {
 350        ret = -EINVAL;
 351        goto out;
 352    }
 353
 354    ret = parse_volume_options(gconf, uri->path);
 355    if (ret < 0) {
 356        goto out;
 357    }
 358
 359    qp = query_params_parse(uri->query);
 360    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
 361        ret = -EINVAL;
 362        goto out;
 363    }
 364
 365    if (is_unix) {
 366        if (uri->server || uri->port) {
 367            ret = -EINVAL;
 368            goto out;
 369        }
 370        if (strcmp(qp->p[0].name, "socket")) {
 371            ret = -EINVAL;
 372            goto out;
 373        }
 374        gsconf->u.q_unix.path = g_strdup(qp->p[0].value);
 375    } else {
 376        gsconf->u.tcp.host = g_strdup(uri->server ? uri->server : "localhost");
 377        if (uri->port) {
 378            gsconf->u.tcp.port = g_strdup_printf("%d", uri->port);
 379        } else {
 380            gsconf->u.tcp.port = g_strdup_printf("%d", GLUSTER_DEFAULT_PORT);
 381        }
 382    }
 383
 384out:
 385    if (qp) {
 386        query_params_free(qp);
 387    }
 388    uri_free(uri);
 389    return ret;
 390}
 391
 392static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 393                                           Error **errp)
 394{
 395    struct glfs *glfs;
 396    int ret;
 397    int old_errno;
 398    GlusterServerList *server;
 399    unsigned long long port;
 400
 401    glfs = glfs_find_preopened(gconf->volume);
 402    if (glfs) {
 403        return glfs;
 404    }
 405
 406    glfs = glfs_new(gconf->volume);
 407    if (!glfs) {
 408        goto out;
 409    }
 410
 411    glfs_set_preopened(gconf->volume, glfs);
 412
 413    for (server = gconf->server; server; server = server->next) {
 414        if (server->value->type  == GLUSTER_TRANSPORT_UNIX) {
 415            ret = glfs_set_volfile_server(glfs,
 416                                   GlusterTransport_lookup[server->value->type],
 417                                   server->value->u.q_unix.path, 0);
 418        } else {
 419            if (parse_uint_full(server->value->u.tcp.port, &port, 10) < 0 ||
 420                port > 65535) {
 421                error_setg(errp, "'%s' is not a valid port number",
 422                           server->value->u.tcp.port);
 423                errno = EINVAL;
 424                goto out;
 425            }
 426            ret = glfs_set_volfile_server(glfs,
 427                                   GlusterTransport_lookup[server->value->type],
 428                                   server->value->u.tcp.host,
 429                                   (int)port);
 430        }
 431
 432        if (ret < 0) {
 433            goto out;
 434        }
 435    }
 436
 437    ret = glfs_set_logging(glfs, gconf->logfile, gconf->debug);
 438    if (ret < 0) {
 439        goto out;
 440    }
 441
 442    ret = glfs_init(glfs);
 443    if (ret) {
 444        error_setg(errp, "Gluster connection for volume %s, path %s failed"
 445                         " to connect", gconf->volume, gconf->path);
 446        for (server = gconf->server; server; server = server->next) {
 447            if (server->value->type  == GLUSTER_TRANSPORT_UNIX) {
 448                error_append_hint(errp, "hint: failed on socket %s ",
 449                                  server->value->u.q_unix.path);
 450            } else {
 451                error_append_hint(errp, "hint: failed on host %s and port %s ",
 452                                  server->value->u.tcp.host,
 453                                  server->value->u.tcp.port);
 454            }
 455        }
 456
 457        error_append_hint(errp, "Please refer to gluster logs for more info\n");
 458
 459        /* glfs_init sometimes doesn't set errno although docs suggest that */
 460        if (errno == 0) {
 461            errno = EINVAL;
 462        }
 463
 464        goto out;
 465    }
 466    return glfs;
 467
 468out:
 469    if (glfs) {
 470        old_errno = errno;
 471        glfs_clear_preopened(glfs);
 472        errno = old_errno;
 473    }
 474    return NULL;
 475}
 476
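     /*
      * Map a transport name ("tcp" or "unix") to a GlusterTransport value.
      * Returns GLUSTER_TRANSPORT__MAX when @opt is NULL or is not a known
      * transport name.
      */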
 477static int qapi_enum_parse(const char *opt)
 478{
 479    int i;
 480
 481    if (!opt) {
 482        return GLUSTER_TRANSPORT__MAX;
 483    }
 484
 485    for (i = 0; i < GLUSTER_TRANSPORT__MAX; i++) {
 486        if (!strcmp(opt, GlusterTransport_lookup[i])) {
 487            return i;
 488        }
 489    }
 490
 491    return i;
 492}
 493
 494/*
  495 * Convert the JSON-formatted command line into QAPI.
  496 */
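     /*
      * The options arrive as a flat QDict with keys such as "volume", "path"
      * and numbered "server.N.*" entries (see the usage hint printed by
      * qemu_gluster_init() for the corresponding -drive syntax).  Each
      * "server.N." sub-dict is extracted and converted into one GlusterServer
      * entry appended to gconf->server.
      */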
 497static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf,
 498                                  QDict *options, Error **errp)
 499{
 500    QemuOpts *opts;
 501    GlusterServer *gsconf;
 502    GlusterServerList *curr = NULL;
 503    QDict *backing_options = NULL;
 504    Error *local_err = NULL;
 505    char *str = NULL;
 506    const char *ptr;
 507    size_t num_servers;
 508    int i;
 509
 510    /* create opts info from runtime_json_opts list */
 511    opts = qemu_opts_create(&runtime_json_opts, NULL, 0, &error_abort);
 512    qemu_opts_absorb_qdict(opts, options, &local_err);
 513    if (local_err) {
 514        goto out;
 515    }
 516
 517    num_servers = qdict_array_entries(options, GLUSTER_OPT_SERVER_PATTERN);
 518    if (num_servers < 1) {
 519        error_setg(&local_err, QERR_MISSING_PARAMETER, "server");
 520        goto out;
 521    }
 522
 523    ptr = qemu_opt_get(opts, GLUSTER_OPT_VOLUME);
 524    if (!ptr) {
 525        error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_VOLUME);
 526        goto out;
 527    }
 528    gconf->volume = g_strdup(ptr);
 529
 530    ptr = qemu_opt_get(opts, GLUSTER_OPT_PATH);
 531    if (!ptr) {
 532        error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_PATH);
 533        goto out;
 534    }
 535    gconf->path = g_strdup(ptr);
 536    qemu_opts_del(opts);
 537
 538    for (i = 0; i < num_servers; i++) {
 539        str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i);
 540        qdict_extract_subqdict(options, &backing_options, str);
 541
 542        /* create opts info from runtime_type_opts list */
 543        opts = qemu_opts_create(&runtime_type_opts, NULL, 0, &error_abort);
 544        qemu_opts_absorb_qdict(opts, backing_options, &local_err);
 545        if (local_err) {
 546            goto out;
 547        }
 548
 549        ptr = qemu_opt_get(opts, GLUSTER_OPT_TYPE);
 550        gsconf = g_new0(GlusterServer, 1);
 551        gsconf->type = qapi_enum_parse(ptr);
 552        if (!ptr) {
 553            error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_TYPE);
 554            error_append_hint(&local_err, GERR_INDEX_HINT, i);
 555            goto out;
 556
 557        }
 558        if (gsconf->type == GLUSTER_TRANSPORT__MAX) {
 559            error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE,
 560                       GLUSTER_OPT_TYPE, "tcp or unix");
 561            error_append_hint(&local_err, GERR_INDEX_HINT, i);
 562            goto out;
 563        }
 564        qemu_opts_del(opts);
 565
 566        if (gsconf->type == GLUSTER_TRANSPORT_TCP) {
 567            /* create opts info from runtime_tcp_opts list */
 568            opts = qemu_opts_create(&runtime_tcp_opts, NULL, 0, &error_abort);
 569            qemu_opts_absorb_qdict(opts, backing_options, &local_err);
 570            if (local_err) {
 571                goto out;
 572            }
 573
 574            ptr = qemu_opt_get(opts, GLUSTER_OPT_HOST);
 575            if (!ptr) {
 576                error_setg(&local_err, QERR_MISSING_PARAMETER,
 577                           GLUSTER_OPT_HOST);
 578                error_append_hint(&local_err, GERR_INDEX_HINT, i);
 579                goto out;
 580            }
 581            gsconf->u.tcp.host = g_strdup(ptr);
 582            ptr = qemu_opt_get(opts, GLUSTER_OPT_PORT);
 583            if (!ptr) {
 584                error_setg(&local_err, QERR_MISSING_PARAMETER,
 585                           GLUSTER_OPT_PORT);
 586                error_append_hint(&local_err, GERR_INDEX_HINT, i);
 587                goto out;
 588            }
 589            gsconf->u.tcp.port = g_strdup(ptr);
 590
  591            /* guard against fields of InetSocketAddress that gluster
  592             * does not support, i.e. @ipv4, @ipv6 and @to
 593             */
 594            ptr = qemu_opt_get(opts, GLUSTER_OPT_TO);
 595            if (ptr) {
 596                gsconf->u.tcp.has_to = true;
 597            }
 598            ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV4);
 599            if (ptr) {
 600                gsconf->u.tcp.has_ipv4 = true;
 601            }
 602            ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV6);
 603            if (ptr) {
 604                gsconf->u.tcp.has_ipv6 = true;
 605            }
 606            if (gsconf->u.tcp.has_to) {
 607                error_setg(&local_err, "Parameter 'to' not supported");
 608                goto out;
 609            }
 610            if (gsconf->u.tcp.has_ipv4 || gsconf->u.tcp.has_ipv6) {
 611                error_setg(&local_err, "Parameters 'ipv4/ipv6' not supported");
 612                goto out;
 613            }
 614            qemu_opts_del(opts);
 615        } else {
 616            /* create opts info from runtime_unix_opts list */
 617            opts = qemu_opts_create(&runtime_unix_opts, NULL, 0, &error_abort);
 618            qemu_opts_absorb_qdict(opts, backing_options, &local_err);
 619            if (local_err) {
 620                goto out;
 621            }
 622
 623            ptr = qemu_opt_get(opts, GLUSTER_OPT_SOCKET);
 624            if (!ptr) {
 625                error_setg(&local_err, QERR_MISSING_PARAMETER,
 626                           GLUSTER_OPT_SOCKET);
 627                error_append_hint(&local_err, GERR_INDEX_HINT, i);
 628                goto out;
 629            }
 630            gsconf->u.q_unix.path = g_strdup(ptr);
 631            qemu_opts_del(opts);
 632        }
 633
 634        if (gconf->server == NULL) {
 635            gconf->server = g_new0(GlusterServerList, 1);
 636            gconf->server->value = gsconf;
 637            curr = gconf->server;
 638        } else {
 639            curr->next = g_new0(GlusterServerList, 1);
 640            curr->next->value = gsconf;
 641            curr = curr->next;
 642        }
 643
 644        qdict_del(backing_options, str);
 645        g_free(str);
 646        str = NULL;
 647    }
 648
 649    return 0;
 650
 651out:
 652    error_propagate(errp, local_err);
 653    qemu_opts_del(opts);
 654    if (str) {
 655        qdict_del(backing_options, str);
 656        g_free(str);
 657    }
 658    errno = EINVAL;
 659    return -errno;
 660}
 661
 662static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
 663                                      const char *filename,
 664                                      QDict *options, Error **errp)
 665{
 666    int ret;
 667    if (filename) {
 668        ret = qemu_gluster_parse_uri(gconf, filename);
 669        if (ret < 0) {
 670            error_setg(errp, "invalid URI");
 671            error_append_hint(errp, "Usage: file=gluster[+transport]://"
  672                                    "[host[:port]]/volume/path[?socket=...]"
 673                                    "[,file.debug=N]"
 674                                    "[,file.logfile=/path/filename.log]\n");
 675            errno = -ret;
 676            return NULL;
 677        }
 678    } else {
 679        ret = qemu_gluster_parse_json(gconf, options, errp);
 680        if (ret < 0) {
 681            error_append_hint(errp, "Usage: "
 682                             "-drive driver=qcow2,file.driver=gluster,"
 683                             "file.volume=testvol,file.path=/path/a.qcow2"
 684                             "[,file.debug=9]"
 685                             "[,file.logfile=/path/filename.log],"
 686                             "file.server.0.type=tcp,"
 687                             "file.server.0.host=1.2.3.4,"
 688                             "file.server.0.port=24007,"
 689                             "file.server.1.transport=unix,"
 690                             "file.server.1.socket=/var/run/glusterd.socket ..."
 691                             "\n");
 692            errno = -ret;
 693            return NULL;
 694        }
 695
 696    }
 697
 698    return qemu_gluster_glfs_init(gconf, errp);
 699}
 700
 701static void qemu_gluster_complete_aio(void *opaque)
 702{
 703    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
 704
 705    qemu_coroutine_enter(acb->coroutine);
 706}
 707
 708/*
 709 * AIO callback routine called from GlusterFS thread.
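      *
      * The callback runs outside the QEMU AioContext, so the request coroutine
      * is not entered directly; a one-shot bottom half is scheduled in the
      * BDS's AioContext (qemu_gluster_complete_aio) to re-enter it.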
 710 */
 711static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
 712{
 713    GlusterAIOCB *acb = (GlusterAIOCB *)arg;
 714
 715    if (!ret || ret == acb->size) {
 716        acb->ret = 0; /* Success */
 717    } else if (ret < 0) {
 718        acb->ret = -errno; /* Read/Write failed */
 719    } else {
 720        acb->ret = -EIO; /* Partial read/write - fail it */
 721    }
 722
 723    aio_bh_schedule_oneshot(acb->aio_context, qemu_gluster_complete_aio, acb);
 724}
 725
 726static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
 727{
 728    assert(open_flags != NULL);
 729
 730    *open_flags |= O_BINARY;
 731
 732    if (bdrv_flags & BDRV_O_RDWR) {
 733        *open_flags |= O_RDWR;
 734    } else {
 735        *open_flags |= O_RDONLY;
 736    }
 737
 738    if ((bdrv_flags & BDRV_O_NOCACHE)) {
 739        *open_flags |= O_DIRECT;
 740    }
 741}
 742
 743/*
 744 * Do SEEK_DATA/HOLE to detect if it is functional. Older broken versions of
 745 * gfapi incorrectly return the current offset when SEEK_DATA/HOLE is used.
 746 * - Corrected versions return -1 and set errno to EINVAL.
  747 * - Versions that support SEEK_DATA/HOLE correctly will return -1 and set
 748 *   errno to ENXIO when SEEK_DATA is called with a position of EOF.
 749 */
 750static bool qemu_gluster_test_seek(struct glfs_fd *fd)
 751{
 752    off_t ret = 0;
 753
 754#if defined SEEK_HOLE && defined SEEK_DATA
 755    off_t eof;
 756
 757    eof = glfs_lseek(fd, 0, SEEK_END);
 758    if (eof < 0) {
 759        /* this should never occur */
 760        return false;
 761    }
 762
 763    /* this should always fail with ENXIO if SEEK_DATA is supported */
 764    ret = glfs_lseek(fd, eof, SEEK_DATA);
 765#endif
 766
 767    return (ret < 0) && (errno == ENXIO);
 768}
 769
 770static int qemu_gluster_open(BlockDriverState *bs,  QDict *options,
 771                             int bdrv_flags, Error **errp)
 772{
 773    BDRVGlusterState *s = bs->opaque;
 774    int open_flags = 0;
 775    int ret = 0;
 776    BlockdevOptionsGluster *gconf = NULL;
 777    QemuOpts *opts;
 778    Error *local_err = NULL;
 779    const char *filename, *logfile;
 780
 781    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
 782    qemu_opts_absorb_qdict(opts, options, &local_err);
 783    if (local_err) {
 784        error_propagate(errp, local_err);
 785        ret = -EINVAL;
 786        goto out;
 787    }
 788
 789    filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME);
 790
 791    s->debug = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG,
 792                                   GLUSTER_DEBUG_DEFAULT);
 793    if (s->debug < 0) {
 794        s->debug = 0;
 795    } else if (s->debug > GLUSTER_DEBUG_MAX) {
 796        s->debug = GLUSTER_DEBUG_MAX;
 797    }
 798
 799    gconf = g_new0(BlockdevOptionsGluster, 1);
 800    gconf->debug = s->debug;
 801    gconf->has_debug = true;
 802
 803    logfile = qemu_opt_get(opts, GLUSTER_OPT_LOGFILE);
 804    s->logfile = g_strdup(logfile ? logfile : GLUSTER_LOGFILE_DEFAULT);
 805
 806    gconf->logfile = g_strdup(s->logfile);
 807    gconf->has_logfile = true;
 808
 809    s->glfs = qemu_gluster_init(gconf, filename, options, errp);
 810    if (!s->glfs) {
 811        ret = -errno;
 812        goto out;
 813    }
 814
 815#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
 816    /* Without this, if fsync fails for a recoverable reason (for instance,
 817     * ENOSPC), gluster will dump its cache, preventing retries.  This means
 818     * almost certain data loss.  Not all gluster versions support the
 819     * 'resync-failed-syncs-after-fsync' key value, but there is no way to
 820     * discover during runtime if it is supported (this api returns success for
 821     * unknown key/value pairs) */
 822    ret = glfs_set_xlator_option(s->glfs, "*-write-behind",
 823                                          "resync-failed-syncs-after-fsync",
 824                                          "on");
 825    if (ret < 0) {
 826        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
 827        ret = -errno;
 828        goto out;
 829    }
 830#endif
 831
 832    qemu_gluster_parse_flags(bdrv_flags, &open_flags);
 833
 834    s->fd = glfs_open(s->glfs, gconf->path, open_flags);
 835    if (!s->fd) {
  836        ret = -errno;
             goto out;
  837    }
 838
 839    s->supports_seek_data = qemu_gluster_test_seek(s->fd);
 840
 841out:
 842    qemu_opts_del(opts);
 843    qapi_free_BlockdevOptionsGluster(gconf);
 844    if (!ret) {
 845        return ret;
 846    }
 847    g_free(s->logfile);
 848    if (s->fd) {
 849        glfs_close(s->fd);
 850    }
 851
 852    glfs_clear_preopened(s->glfs);
 853
 854    return ret;
 855}
 856
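     /*
      * Reopen handling: _prepare opens a second glfs connection and fd with
      * the new flags, _commit closes the old pair and adopts the new one,
      * and _abort closes the newly opened pair, leaving the original in use.
      */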
 857static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
 858                                       BlockReopenQueue *queue, Error **errp)
 859{
 860    int ret = 0;
 861    BDRVGlusterState *s;
 862    BDRVGlusterReopenState *reop_s;
 863    BlockdevOptionsGluster *gconf;
 864    int open_flags = 0;
 865
 866    assert(state != NULL);
 867    assert(state->bs != NULL);
 868
 869    s = state->bs->opaque;
 870
 871    state->opaque = g_new0(BDRVGlusterReopenState, 1);
 872    reop_s = state->opaque;
 873
 874    qemu_gluster_parse_flags(state->flags, &open_flags);
 875
 876    gconf = g_new0(BlockdevOptionsGluster, 1);
 877    gconf->debug = s->debug;
 878    gconf->has_debug = true;
 879    gconf->logfile = g_strdup(s->logfile);
 880    gconf->has_logfile = true;
 881    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, NULL, errp);
 882    if (reop_s->glfs == NULL) {
 883        ret = -errno;
 884        goto exit;
 885    }
 886
 887#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
 888    ret = glfs_set_xlator_option(reop_s->glfs, "*-write-behind",
 889                                 "resync-failed-syncs-after-fsync", "on");
 890    if (ret < 0) {
 891        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
 892        ret = -errno;
 893        goto exit;
 894    }
 895#endif
 896
 897    reop_s->fd = glfs_open(reop_s->glfs, gconf->path, open_flags);
 898    if (reop_s->fd == NULL) {
  899        /* reop_s->glfs will be cleaned up in _abort */
 900        ret = -errno;
 901        goto exit;
 902    }
 903
 904exit:
 905    /* state->opaque will be freed in either the _abort or _commit */
 906    qapi_free_BlockdevOptionsGluster(gconf);
 907    return ret;
 908}
 909
 910static void qemu_gluster_reopen_commit(BDRVReopenState *state)
 911{
 912    BDRVGlusterReopenState *reop_s = state->opaque;
 913    BDRVGlusterState *s = state->bs->opaque;
 914
 915
 916    /* close the old */
 917    if (s->fd) {
 918        glfs_close(s->fd);
 919    }
 920
 921    glfs_clear_preopened(s->glfs);
 922
 923    /* use the newly opened image / connection */
 924    s->fd         = reop_s->fd;
 925    s->glfs       = reop_s->glfs;
 926
 927    g_free(state->opaque);
 928    state->opaque = NULL;
 929
 930    return;
 931}
 932
 933
 934static void qemu_gluster_reopen_abort(BDRVReopenState *state)
 935{
 936    BDRVGlusterReopenState *reop_s = state->opaque;
 937
 938    if (reop_s == NULL) {
 939        return;
 940    }
 941
 942    if (reop_s->fd) {
 943        glfs_close(reop_s->fd);
 944    }
 945
 946    glfs_clear_preopened(reop_s->glfs);
 947
 948    g_free(state->opaque);
 949    state->opaque = NULL;
 950
 951    return;
 952}
 953
 954#ifdef CONFIG_GLUSTERFS_ZEROFILL
 955static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
 956                                                      int64_t offset,
 957                                                      int size,
 958                                                      BdrvRequestFlags flags)
 959{
 960    int ret;
 961    GlusterAIOCB acb;
 962    BDRVGlusterState *s = bs->opaque;
 963
 964    acb.size = size;
 965    acb.ret = 0;
 966    acb.coroutine = qemu_coroutine_self();
 967    acb.aio_context = bdrv_get_aio_context(bs);
 968
 969    ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
 970    if (ret < 0) {
 971        return -errno;
 972    }
 973
 974    qemu_coroutine_yield();
 975    return acb.ret;
 976}
 977
 978static inline bool gluster_supports_zerofill(void)
 979{
 980    return 1;
 981}
 982
 983static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
 984                                        int64_t size)
 985{
 986    return glfs_zerofill(fd, offset, size);
 987}
 988
 989#else
 990static inline bool gluster_supports_zerofill(void)
 991{
 992    return 0;
 993}
 994
 995static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
 996                                        int64_t size)
 997{
 998    return 0;
 999}
1000#endif
1001
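     /*
      * Image creation: connect to the volume, glfs_creat() the path, truncate
      * it to the requested size and, for preallocation mode "full", zero-fill
      * the whole range when the zerofill API is available.
      */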
1002static int qemu_gluster_create(const char *filename,
1003                               QemuOpts *opts, Error **errp)
1004{
1005    BlockdevOptionsGluster *gconf;
1006    struct glfs *glfs;
1007    struct glfs_fd *fd;
1008    int ret = 0;
1009    int prealloc = 0;
1010    int64_t total_size = 0;
1011    char *tmp = NULL;
1012
1013    gconf = g_new0(BlockdevOptionsGluster, 1);
1014    gconf->debug = qemu_opt_get_number_del(opts, GLUSTER_OPT_DEBUG,
1015                                           GLUSTER_DEBUG_DEFAULT);
1016    if (gconf->debug < 0) {
1017        gconf->debug = 0;
1018    } else if (gconf->debug > GLUSTER_DEBUG_MAX) {
1019        gconf->debug = GLUSTER_DEBUG_MAX;
1020    }
1021    gconf->has_debug = true;
1022
1023    gconf->logfile = qemu_opt_get_del(opts, GLUSTER_OPT_LOGFILE);
1024    if (!gconf->logfile) {
1025        gconf->logfile = g_strdup(GLUSTER_LOGFILE_DEFAULT);
1026    }
1027    gconf->has_logfile = true;
1028
1029    glfs = qemu_gluster_init(gconf, filename, NULL, errp);
1030    if (!glfs) {
1031        ret = -errno;
1032        goto out;
1033    }
1034
1035    total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
1036                          BDRV_SECTOR_SIZE);
1037
1038    tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
1039    if (!tmp || !strcmp(tmp, "off")) {
1040        prealloc = 0;
1041    } else if (!strcmp(tmp, "full") && gluster_supports_zerofill()) {
1042        prealloc = 1;
1043    } else {
1044        error_setg(errp, "Invalid preallocation mode: '%s'"
1045                         " or GlusterFS doesn't support zerofill API", tmp);
1046        ret = -EINVAL;
1047        goto out;
1048    }
1049
1050    fd = glfs_creat(glfs, gconf->path,
1051                    O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
1052    if (!fd) {
1053        ret = -errno;
1054    } else {
1055        if (!glfs_ftruncate(fd, total_size)) {
1056            if (prealloc && qemu_gluster_zerofill(fd, 0, total_size)) {
1057                ret = -errno;
1058            }
1059        } else {
1060            ret = -errno;
1061        }
1062
1063        if (glfs_close(fd) != 0) {
1064            ret = -errno;
1065        }
1066    }
1067out:
1068    g_free(tmp);
1069    qapi_free_BlockdevOptionsGluster(gconf);
1070    glfs_clear_preopened(glfs);
1071    return ret;
1072}
1073
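     /*
      * Common read/write path: convert the sector-based request into a byte
      * offset and size, issue glfs_preadv_async()/glfs_pwritev_async() and
      * yield until gluster_finish_aiocb() re-enters the coroutine with the
      * result.
      */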
1074static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
1075                                           int64_t sector_num, int nb_sectors,
1076                                           QEMUIOVector *qiov, int write)
1077{
1078    int ret;
1079    GlusterAIOCB acb;
1080    BDRVGlusterState *s = bs->opaque;
1081    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
1082    off_t offset = sector_num * BDRV_SECTOR_SIZE;
1083
1084    acb.size = size;
1085    acb.ret = 0;
1086    acb.coroutine = qemu_coroutine_self();
1087    acb.aio_context = bdrv_get_aio_context(bs);
1088
1089    if (write) {
1090        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
1091                                 gluster_finish_aiocb, &acb);
1092    } else {
1093        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
1094                                gluster_finish_aiocb, &acb);
1095    }
1096
1097    if (ret < 0) {
1098        return -errno;
1099    }
1100
1101    qemu_coroutine_yield();
1102    return acb.ret;
1103}
1104
1105static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
1106{
1107    int ret;
1108    BDRVGlusterState *s = bs->opaque;
1109
1110    ret = glfs_ftruncate(s->fd, offset);
1111    if (ret < 0) {
1112        return -errno;
1113    }
1114
1115    return 0;
1116}
1117
1118static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
1119                                              int64_t sector_num,
1120                                              int nb_sectors,
1121                                              QEMUIOVector *qiov)
1122{
1123    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
1124}
1125
1126static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
1127                                               int64_t sector_num,
1128                                               int nb_sectors,
1129                                               QEMUIOVector *qiov)
1130{
1131    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
1132}
1133
1134static void qemu_gluster_close(BlockDriverState *bs)
1135{
1136    BDRVGlusterState *s = bs->opaque;
1137
1138    g_free(s->logfile);
1139    if (s->fd) {
1140        glfs_close(s->fd);
1141        s->fd = NULL;
1142    }
1143    glfs_clear_preopened(s->glfs);
1144}
1145
1146static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
1147{
1148    int ret;
1149    GlusterAIOCB acb;
1150    BDRVGlusterState *s = bs->opaque;
1151
1152    acb.size = 0;
1153    acb.ret = 0;
1154    acb.coroutine = qemu_coroutine_self();
1155    acb.aio_context = bdrv_get_aio_context(bs);
1156
1157    ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
1158    if (ret < 0) {
1159        ret = -errno;
1160        goto error;
1161    }
1162
1163    qemu_coroutine_yield();
1164    if (acb.ret < 0) {
1165        ret = acb.ret;
1166        goto error;
1167    }
1168
1169    return acb.ret;
1170
1171error:
 1172    /* Some versions of Gluster (3.5.6 -> 3.5.8?) will not retain their cache
 1173     * after an fsync failure, so we have no way of allowing the guest to safely
1174     * continue.  Gluster versions prior to 3.5.6 don't retain the cache
1175     * either, but will invalidate the fd on error, so this is again our only
1176     * option.
1177     *
1178     * The 'resync-failed-syncs-after-fsync' xlator option for the
 1179     * write-behind cache will cause later gluster versions to retain their
 1180     * cache after an error, so long as the fd remains open.  However, we
1181     * currently have no way of knowing if this option is supported.
1182     *
1183     * TODO: Once gluster provides a way for us to determine if the option
1184     * is supported, bypass the closure and setting drv to NULL.  */
1185    qemu_gluster_close(bs);
1186    bs->drv = NULL;
1187    return ret;
1188}
1189
1190#ifdef CONFIG_GLUSTERFS_DISCARD
1191static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
1192                                                 int64_t offset, int size)
1193{
1194    int ret;
1195    GlusterAIOCB acb;
1196    BDRVGlusterState *s = bs->opaque;
1197
1198    acb.size = 0;
1199    acb.ret = 0;
1200    acb.coroutine = qemu_coroutine_self();
1201    acb.aio_context = bdrv_get_aio_context(bs);
1202
1203    ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
1204    if (ret < 0) {
1205        return -errno;
1206    }
1207
1208    qemu_coroutine_yield();
1209    return acb.ret;
1210}
1211#endif
1212
1213static int64_t qemu_gluster_getlength(BlockDriverState *bs)
1214{
1215    BDRVGlusterState *s = bs->opaque;
1216    int64_t ret;
1217
1218    ret = glfs_lseek(s->fd, 0, SEEK_END);
1219    if (ret < 0) {
1220        return -errno;
1221    } else {
1222        return ret;
1223    }
1224}
1225
1226static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
1227{
1228    BDRVGlusterState *s = bs->opaque;
1229    struct stat st;
1230    int ret;
1231
1232    ret = glfs_fstat(s->fd, &st);
1233    if (ret < 0) {
1234        return -errno;
1235    } else {
1236        return st.st_blocks * 512;
1237    }
1238}
1239
1240static int qemu_gluster_has_zero_init(BlockDriverState *bs)
1241{
1242    /* GlusterFS volume could be backed by a block device */
1243    return 0;
1244}
1245
1246/*
1247 * Find allocation range in @bs around offset @start.
1248 * May change underlying file descriptor's file offset.
1249 * If @start is not in a hole, store @start in @data, and the
1250 * beginning of the next hole in @hole, and return 0.
1251 * If @start is in a non-trailing hole, store @start in @hole and the
1252 * beginning of the next non-hole in @data, and return 0.
1253 * If @start is in a trailing hole or beyond EOF, return -ENXIO.
1254 * If we can't find out, return a negative errno other than -ENXIO.
1255 *
 1256 * (Shamefully copied from raw-posix.c, only minuscule adaptations.)
1257 */
1258static int find_allocation(BlockDriverState *bs, off_t start,
1259                           off_t *data, off_t *hole)
1260{
1261    BDRVGlusterState *s = bs->opaque;
1262
1263    if (!s->supports_seek_data) {
1264        goto exit;
1265    }
1266
1267#if defined SEEK_HOLE && defined SEEK_DATA
1268    off_t offs;
1269
1270    /*
1271     * SEEK_DATA cases:
1272     * D1. offs == start: start is in data
1273     * D2. offs > start: start is in a hole, next data at offs
1274     * D3. offs < 0, errno = ENXIO: either start is in a trailing hole
1275     *                              or start is beyond EOF
1276     *     If the latter happens, the file has been truncated behind
1277     *     our back since we opened it.  All bets are off then.
1278     *     Treating like a trailing hole is simplest.
1279     * D4. offs < 0, errno != ENXIO: we learned nothing
1280     */
1281    offs = glfs_lseek(s->fd, start, SEEK_DATA);
1282    if (offs < 0) {
1283        return -errno;          /* D3 or D4 */
1284    }
1285    assert(offs >= start);
1286
1287    if (offs > start) {
1288        /* D2: in hole, next data at offs */
1289        *hole = start;
1290        *data = offs;
1291        return 0;
1292    }
1293
1294    /* D1: in data, end not yet known */
1295
1296    /*
1297     * SEEK_HOLE cases:
1298     * H1. offs == start: start is in a hole
1299     *     If this happens here, a hole has been dug behind our back
1300     *     since the previous lseek().
1301     * H2. offs > start: either start is in data, next hole at offs,
1302     *                   or start is in trailing hole, EOF at offs
1303     *     Linux treats trailing holes like any other hole: offs ==
1304     *     start.  Solaris seeks to EOF instead: offs > start (blech).
1305     *     If that happens here, a hole has been dug behind our back
1306     *     since the previous lseek().
1307     * H3. offs < 0, errno = ENXIO: start is beyond EOF
1308     *     If this happens, the file has been truncated behind our
1309     *     back since we opened it.  Treat it like a trailing hole.
1310     * H4. offs < 0, errno != ENXIO: we learned nothing
1311     *     Pretend we know nothing at all, i.e. "forget" about D1.
1312     */
1313    offs = glfs_lseek(s->fd, start, SEEK_HOLE);
1314    if (offs < 0) {
1315        return -errno;          /* D1 and (H3 or H4) */
1316    }
1317    assert(offs >= start);
1318
1319    if (offs > start) {
1320        /*
1321         * D1 and H2: either in data, next hole at offs, or it was in
1322         * data but is now in a trailing hole.  In the latter case,
 1323         * all bets are off.  Treating it as if there was data all
1324         * the way to EOF is safe, so simply do that.
1325         */
1326        *data = start;
1327        *hole = offs;
1328        return 0;
1329    }
1330
1331    /* D1 and H1 */
1332    return -EBUSY;
1333#endif
1334
1335exit:
1336    return -ENOTSUP;
1337}
1338
1339/*
1340 * Returns the allocation status of the specified sectors.
1341 *
 1342 * If 'sector_num' is beyond the end of the disk image, the return value is 0
1343 * and 'pnum' is set to 0.
1344 *
1345 * 'pnum' is set to the number of sectors (including and immediately following
1346 * the specified sector) that are known to be in the same
1347 * allocated/unallocated state.
1348 *
1349 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
1350 * beyond the end of the disk image it will be clamped.
1351 *
1352 * (Based on raw_co_get_block_status() from raw-posix.c.)
1353 */
1354static int64_t coroutine_fn qemu_gluster_co_get_block_status(
1355        BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
1356        BlockDriverState **file)
1357{
1358    BDRVGlusterState *s = bs->opaque;
1359    off_t start, data = 0, hole = 0;
1360    int64_t total_size;
1361    int ret = -EINVAL;
1362
1363    if (!s->fd) {
1364        return ret;
1365    }
1366
1367    start = sector_num * BDRV_SECTOR_SIZE;
1368    total_size = bdrv_getlength(bs);
1369    if (total_size < 0) {
1370        return total_size;
1371    } else if (start >= total_size) {
1372        *pnum = 0;
1373        return 0;
1374    } else if (start + nb_sectors * BDRV_SECTOR_SIZE > total_size) {
1375        nb_sectors = DIV_ROUND_UP(total_size - start, BDRV_SECTOR_SIZE);
1376    }
1377
1378    ret = find_allocation(bs, start, &data, &hole);
1379    if (ret == -ENXIO) {
1380        /* Trailing hole */
1381        *pnum = nb_sectors;
1382        ret = BDRV_BLOCK_ZERO;
1383    } else if (ret < 0) {
1384        /* No info available, so pretend there are no holes */
1385        *pnum = nb_sectors;
1386        ret = BDRV_BLOCK_DATA;
1387    } else if (data == start) {
1388        /* On a data extent, compute sectors to the end of the extent,
1389         * possibly including a partial sector at EOF. */
1390        *pnum = MIN(nb_sectors, DIV_ROUND_UP(hole - start, BDRV_SECTOR_SIZE));
1391        ret = BDRV_BLOCK_DATA;
1392    } else {
1393        /* On a hole, compute sectors to the beginning of the next extent.  */
1394        assert(hole == start);
1395        *pnum = MIN(nb_sectors, (data - start) / BDRV_SECTOR_SIZE);
1396        ret = BDRV_BLOCK_ZERO;
1397    }
1398
1399    *file = bs;
1400
1401    return ret | BDRV_BLOCK_OFFSET_VALID | start;
1402}
1403
1404
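     /*
      * Four BlockDriver instances are registered below, one per supported URI
      * scheme (gluster, gluster+tcp, gluster+unix and gluster+rdma).  They
      * share the same callbacks and differ only in .protocol_name and in
      * whether a filename is required.
      */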
1405static BlockDriver bdrv_gluster = {
1406    .format_name                  = "gluster",
1407    .protocol_name                = "gluster",
1408    .instance_size                = sizeof(BDRVGlusterState),
1409    .bdrv_needs_filename          = false,
1410    .bdrv_file_open               = qemu_gluster_open,
1411    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
1412    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
1413    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
1414    .bdrv_close                   = qemu_gluster_close,
1415    .bdrv_create                  = qemu_gluster_create,
1416    .bdrv_getlength               = qemu_gluster_getlength,
1417    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
1418    .bdrv_truncate                = qemu_gluster_truncate,
1419    .bdrv_co_readv                = qemu_gluster_co_readv,
1420    .bdrv_co_writev               = qemu_gluster_co_writev,
1421    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
1422    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
1423#ifdef CONFIG_GLUSTERFS_DISCARD
1424    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
1425#endif
1426#ifdef CONFIG_GLUSTERFS_ZEROFILL
1427    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
1428#endif
1429    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
1430    .create_opts                  = &qemu_gluster_create_opts,
1431};
1432
1433static BlockDriver bdrv_gluster_tcp = {
1434    .format_name                  = "gluster",
1435    .protocol_name                = "gluster+tcp",
1436    .instance_size                = sizeof(BDRVGlusterState),
1437    .bdrv_needs_filename          = false,
1438    .bdrv_file_open               = qemu_gluster_open,
1439    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
1440    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
1441    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
1442    .bdrv_close                   = qemu_gluster_close,
1443    .bdrv_create                  = qemu_gluster_create,
1444    .bdrv_getlength               = qemu_gluster_getlength,
1445    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
1446    .bdrv_truncate                = qemu_gluster_truncate,
1447    .bdrv_co_readv                = qemu_gluster_co_readv,
1448    .bdrv_co_writev               = qemu_gluster_co_writev,
1449    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
1450    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
1451#ifdef CONFIG_GLUSTERFS_DISCARD
1452    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
1453#endif
1454#ifdef CONFIG_GLUSTERFS_ZEROFILL
1455    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
1456#endif
1457    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
1458    .create_opts                  = &qemu_gluster_create_opts,
1459};
1460
1461static BlockDriver bdrv_gluster_unix = {
1462    .format_name                  = "gluster",
1463    .protocol_name                = "gluster+unix",
1464    .instance_size                = sizeof(BDRVGlusterState),
1465    .bdrv_needs_filename          = true,
1466    .bdrv_file_open               = qemu_gluster_open,
1467    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
1468    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
1469    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
1470    .bdrv_close                   = qemu_gluster_close,
1471    .bdrv_create                  = qemu_gluster_create,
1472    .bdrv_getlength               = qemu_gluster_getlength,
1473    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
1474    .bdrv_truncate                = qemu_gluster_truncate,
1475    .bdrv_co_readv                = qemu_gluster_co_readv,
1476    .bdrv_co_writev               = qemu_gluster_co_writev,
1477    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
1478    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
1479#ifdef CONFIG_GLUSTERFS_DISCARD
1480    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
1481#endif
1482#ifdef CONFIG_GLUSTERFS_ZEROFILL
1483    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
1484#endif
1485    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
1486    .create_opts                  = &qemu_gluster_create_opts,
1487};
1488
1489/* rdma is deprecated (actually never supported for volfile fetch).
 1490 * Let's maintain it for protocol compatibility, to make sure things
1491 * won't break immediately. For now, gluster+rdma will fall back to gluster+tcp
1492 * protocol with a warning.
1493 * TODO: remove gluster+rdma interface support
1494 */
1495static BlockDriver bdrv_gluster_rdma = {
1496    .format_name                  = "gluster",
1497    .protocol_name                = "gluster+rdma",
1498    .instance_size                = sizeof(BDRVGlusterState),
1499    .bdrv_needs_filename          = true,
1500    .bdrv_file_open               = qemu_gluster_open,
1501    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
1502    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
1503    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
1504    .bdrv_close                   = qemu_gluster_close,
1505    .bdrv_create                  = qemu_gluster_create,
1506    .bdrv_getlength               = qemu_gluster_getlength,
1507    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
1508    .bdrv_truncate                = qemu_gluster_truncate,
1509    .bdrv_co_readv                = qemu_gluster_co_readv,
1510    .bdrv_co_writev               = qemu_gluster_co_writev,
1511    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
1512    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
1513#ifdef CONFIG_GLUSTERFS_DISCARD
1514    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
1515#endif
1516#ifdef CONFIG_GLUSTERFS_ZEROFILL
1517    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
1518#endif
1519    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
1520    .create_opts                  = &qemu_gluster_create_opts,
1521};
1522
1523static void bdrv_gluster_init(void)
1524{
1525    bdrv_register(&bdrv_gluster_rdma);
1526    bdrv_register(&bdrv_gluster_unix);
1527    bdrv_register(&bdrv_gluster_tcp);
1528    bdrv_register(&bdrv_gluster);
1529}
1530
1531block_init(bdrv_gluster_init);
1532