qemu/hw/block/xen_disk.c
/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"

/* ------------------------------------------------------------- */

static int batch_maps   = 0;

/* ------------------------------------------------------------- */

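/*
 * The backend exposes a fixed 512-byte sector size to the frontend
 * ("sector-size" in xenstore); request offsets and segment lengths on
 * the ring are expressed in these units.
 */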
#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct PersistentRegion {
    void *addr;
    int num;
};

typedef struct PersistentRegion PersistentRegion;

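/*
 * Per-request state: the raw blkif request pulled off the ring, the
 * parsed start offset and I/O vector, the grant mappings (or copy
 * buffers) backing each segment, and the status of the asynchronous
 * block-layer I/O issued for it.
 */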
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

#define MAX_RING_PAGE_ORDER 4

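/*
 * With MAX_RING_PAGE_ORDER 4 the backend accepts multi-page shared
 * rings of up to 16 pages (advertised via "max-ring-page-order");
 * ring_ref[] below is sized for that worst case.
 */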
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    bool                directiosafe;
    const char          *fileproto;
    const char          *filename;
    unsigned int        ring_ref[1 << MAX_RING_PAGE_ORDER];
    unsigned int        nr_ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;
    unsigned int        max_requests;

    /* Persistent grants extension */
    gboolean            feature_discard;
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    GSList              *persistent_regions;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockBackend        *blk;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
        xen_pv_printf(&grant->blkdev->xendev, 0,
                      "xengnttab_unmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_pv_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

static void remove_persistent_region(gpointer data, gpointer dev)
{
    PersistentRegion *region = data;
    struct XenBlkDev *blkdev = dev;
    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
        xen_pv_printf(&blkdev->xendev, 0,
                      "xengnttab_unmap region %p failed: %s\n",
                      region->addr, strerror(errno));
    }
    xen_pv_printf(&blkdev->xendev, 3,
                  "unmapped grant region %p with %d pages\n",
                  region->addr, region->num);
    g_free(region);
}

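/*
 * ioreq life cycle: ioreq_start() takes a request off the freelist (or
 * allocates a new one up to max_requests) and puts it on the inflight
 * list, ioreq_finish() moves it to the finished list once all AIO has
 * completed, and ioreq_release() resets it and returns it to the
 * freelist after the response has been sent.
 */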
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= blkdev->max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_pv_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_pv_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_pv_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_pv_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_pv_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_pv_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void *)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static void ioreq_unmap(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_pv_printf(&ioreq->blkdev->xendev, 0,
                          "xengnttab_unmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
                xen_pv_printf(&ioreq->blkdev->xendev, 0,
                              "xengnttab_unmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

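/*
 * Map the foreign pages backing this request into our address space.
 * Grants already present in the persistent grant tree are reused; the
 * rest are mapped either in one batch (batch_maps) or one ref at a
 * time and, while there is room below max_grants, added to the
 * persistent tree so later requests can skip the map/unmap cycle.
 */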
static int ioreq_map(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    PersistentRegion *region;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                    GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_pv_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                    /* Add the grant to the list of grants that
                     * should be mapped
                     */
                    domids[new_maps] = ioreq->domids[i];
                    refs[new_maps] = ioreq->refs[i];
                    page[i] = NULL;
                    new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xengnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_pv_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xengnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_pv_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
        ioreq->blkdev->max_grants))) {
        /*
         * If we are using persistent grants and batch mappings only
         * add the new maps to the list of persistent grants if the whole
         * area can be persistently mapped.
         */
        if (batch_maps) {
            region = g_malloc0(sizeof(*region));
            region->addr = ioreq->pages;
            region->num = new_maps;
            ioreq->blkdev->persistent_regions = g_slist_append(
                                            ioreq->blkdev->persistent_regions,
                                            region);
        }
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_pv_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
        assert(!batch_maps || new_maps == 0);
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

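/*
 * Grant-copy support (xengnttab_grant_copy) is only available from
 * Xen 4.8 onwards; with older toolstacks the copy helpers below are
 * stubbed out with abort() and the mapping path above is used instead.
 */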
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800

static void ioreq_free_copy_buffers(struct ioreq *ioreq)
{
    int i;

    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->page[i] = NULL;
    }

    qemu_vfree(ioreq->pages);
}

static int ioreq_init_copy_buffers(struct ioreq *ioreq)
{
    int i;

    if (ioreq->v.niov == 0) {
        return 0;
    }

    ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);

    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
        ioreq->v.iov[i].iov_base = ioreq->page[i];
    }

    return 0;
}

static int ioreq_grant_copy(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count, rc;
    int64_t file_blk = ioreq->blkdev->file_blk;

    if (ioreq->v.niov == 0) {
        return 0;
    }

    count = ioreq->v.niov;

    for (i = 0; i < count; i++) {
        if (ioreq->req.operation == BLKIF_OP_READ) {
            segs[i].flags = GNTCOPY_dest_gref;
            segs[i].dest.foreign.ref = ioreq->refs[i];
            segs[i].dest.foreign.domid = ioreq->domids[i];
            segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].source.virt = ioreq->v.iov[i].iov_base;
        } else {
            segs[i].flags = GNTCOPY_source_gref;
            segs[i].source.foreign.ref = ioreq->refs[i];
            segs[i].source.foreign.domid = ioreq->domids[i];
            segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
        }
        segs[i].len = (ioreq->req.seg[i].last_sect
                       - ioreq->req.seg[i].first_sect + 1) * file_blk;
    }

    rc = xengnttab_grant_copy(gnt, count, segs);

    if (rc) {
        xen_pv_printf(&ioreq->blkdev->xendev, 0,
                      "failed to copy data %d\n", rc);
        ioreq->aio_errors++;
        return -1;
    }

    for (i = 0; i < count; i++) {
        if (segs[i].status != GNTST_okay) {
            xen_pv_printf(&ioreq->blkdev->xendev, 3,
                          "failed to copy data %d for gref %d, domid %d\n",
                          segs[i].status, ioreq->refs[i], ioreq->domids[i]);
            ioreq->aio_errors++;
            rc = -1;
        }
    }

    return rc;
}
#else
static void ioreq_free_copy_buffers(struct ioreq *ioreq)
{
    abort();
}

static int ioreq_init_copy_buffers(struct ioreq *ioreq)
{
    abort();
}

static int ioreq_grant_copy(struct ioreq *ioreq)
{
    abort();
}
#endif

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

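/*
 * Completion callback for all block-layer AIO issued for an ioreq.
 * It re-submits the request once a preceding flush (presync) has
 * finished, and only when the last outstanding AIO completes does it
 * copy read data back to the guest (grant-copy), release mappings or
 * copy buffers, set the response status, do the accounting and kick
 * the bottom half so the response gets sent.
 */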
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_pv_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }

    if (xen_feature_grant_copy) {
        switch (ioreq->req.operation) {
        case BLKIF_OP_READ:
            /* in case of failure ioreq->aio_errors is increased */
            if (ret == 0) {
                ioreq_grant_copy(ioreq);
            }
            ioreq_free_copy_buffers(ioreq);
            break;
        case BLKIF_OP_WRITE:
        case BLKIF_OP_FLUSH_DISKCACHE:
            if (!ioreq->req.nr_segments) {
                break;
            }
            ioreq_free_copy_buffers(ioreq);
            break;
        default:
            break;
        }
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    if (!xen_feature_grant_copy) {
        ioreq_unmap(ioreq);
    }
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
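        /* fall through */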
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}

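/*
 * A single frontend discard request may cover more than the block
 * layer accepts in one call, so split it into chunks of at most
 * BDRV_REQUEST_MAX_SECTORS, issuing one blk_aio_pdiscard() per chunk.
 * Returns false if the sector range wraps or exceeds the byte limit.
 */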
static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
                              uint64_t nr_sectors)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining, limit;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX >> BDRV_SECTOR_BITS) {
        return false;
    }

    limit = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
    byte_offset = sec_start << BDRV_SECTOR_BITS;
    byte_remaining = sec_count << BDRV_SECTOR_BITS;

    do {
        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
        ioreq->aio_inflight++;
        blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
                         qemu_aio_complete, ioreq);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}

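/*
 * Submit the parsed request to the block layer.  With grant-copy,
 * bounce buffers are set up and (for writes/flushes) filled from the
 * guest first; otherwise the guest pages are grant-mapped and used
 * directly as the I/O vector.  aio_inflight is held at one extra
 * reference while requests are submitted, dropped by the final
 * qemu_aio_complete(ioreq, 0) call below.
 */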
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (xen_feature_grant_copy) {
        ioreq_init_copy_buffers(ioreq);
        if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
            ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            ioreq_grant_copy(ioreq)) {
                ioreq_free_copy_buffers(ioreq);
                goto err;
        }
    } else {
        if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
            goto err;
        }
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&ioreq->req;
        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        if (!xen_feature_grant_copy) {
            ioreq_unmap(ioreq);
        }
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

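/*
 * Put a response for this ioreq on the shared ring, using the layout
 * of whichever ABI the frontend negotiated, and report whether the
 * frontend needs an event-channel notification; also flags more_work
 * if further requests are pending on the ring.
 */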
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  *resp;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.native,
                                 blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                 blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                 blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id        = ioreq->req.id;
    resp->operation = ioreq->req.operation;
    resp->status    = ioreq->status;

    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_pv_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}

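/*
 * Main request loop, run from the bottom half: send responses for any
 * finished requests, then consume new requests until the ring is empty
 * or we run out of ioreq slots, re-scheduling the bottom half if work
 * is left over.
 */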
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_pv_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
}

static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}

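/*
 * First stage of backend setup, before the frontend connects: read the
 * backend configuration from xenstore (params, mode, type, dev,
 * device-type, direct-io-safe) and advertise our features
 * (feature-flush-cache, feature-persistent, max-ring-page-order,
 * feature-discard, info).
 */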
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (!strcmp("vhd", blkdev->fileproto)) {
        blkdev->fileproto = "vpc";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info  |= VDISK_CDROM;
    }

    blkdev->file_blk  = BLOCK_SIZE;

    xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
                  xen_feature_grant_copy ? "enabled" : "disabled");

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent",
                          !xen_feature_grant_copy);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
                          MAX_RING_PAGE_ORDER);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

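/*
 * Second stage, once the frontend has published its side: open (or
 * look up) the backing image, write sector-size/sectors, map the
 * shared ring pages named by ring-ref/ring-page-order, bind the event
 * channel and set up persistent grants if the frontend negotiated
 * them.
 */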
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;
    bool writethrough = true;
    int order, ring_ref;
    unsigned int ring_size, max_grants;
    unsigned int i;
    uint32_t *domids;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = 0;
        writethrough = false;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put_str(options, "driver", blkdev->fileproto);
        }

        /* setup via xenbus -> create new block driver instance */
        xen_pv_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_pv_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
        blk_set_enable_write_cache(blkdev->blk, !writethrough);
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_pv_printf(&blkdev->xendev, 2,
                      "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_pv_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk is not created by us, we get a reference
         * so we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_legacy(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        BlockDriverState *bs = blk_bs(blkdev->blk);
        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
        xen_pv_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      drv_name ?: "-");
        blkdev->file_size = 0;
    }

    xen_pv_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-page-order",
                             &order) == -1) {
        blkdev->nr_ring_ref = 1;

        if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref",
                                 &ring_ref) == -1) {
            return -1;
        }
        blkdev->ring_ref[0] = ring_ref;

    } else if (order >= 0 && order <= MAX_RING_PAGE_ORDER) {
        blkdev->nr_ring_ref = 1 << order;

        for (i = 0; i < blkdev->nr_ring_ref; i++) {
            char *key;

            key = g_strdup_printf("ring-ref%u", i);
            if (!key) {
                return -1;
            }

            if (xenstore_read_fe_int(&blkdev->xendev, key,
                                     &ring_ref) == -1) {
                g_free(key);
                return -1;
            }
            blkdev->ring_ref[i] = ring_ref;

            g_free(key);
        }
    } else {
        xen_pv_printf(xendev, 0, "invalid ring-page-order: %d\n",
                      order);
        return -1;
    }

    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    if (!blkdev->xendev.protocol) {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_X86_32;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_X86_64;
    } else {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    }

    ring_size = XC_PAGE_SIZE * blkdev->nr_ring_ref;
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        return -1;
    }

    /* Calculate the maximum number of grants needed by ioreqs */
    max_grants = MAX_GRANTS(blkdev->max_requests,
                            BLKIF_MAX_SEGMENTS_PER_REQUEST);
    /* Add on the number needed for the ring pages */
    max_grants += blkdev->nr_ring_ref;

    blkdev->xendev.gnttabdev = xengnttab_open(NULL, 0);
    if (blkdev->xendev.gnttabdev == NULL) {
        xen_pv_printf(xendev, 0, "xengnttab_open failed: %s\n",
                      strerror(errno));
        return -1;
    }
    if (xengnttab_set_max_grants(blkdev->xendev.gnttabdev, max_grants)) {
        xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
                      strerror(errno));
        return -1;
    }

    domids = g_new0(uint32_t, blkdev->nr_ring_ref);
    for (i = 0; i < blkdev->nr_ring_ref; i++) {
        domids[i] = blkdev->xendev.dom;
    }

    blkdev->sring = xengnttab_map_grant_refs(blkdev->xendev.gnttabdev,
                                             blkdev->nr_ring_ref,
                                             domids,
                                             blkdev->ring_ref,
                                             PROT_READ | PROT_WRITE);

    g_free(domids);

    if (!blkdev->sring) {
        return -1;
    }

    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, ring_size);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = blkdev->max_requests *
            BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                             NULL, NULL,
                                             batch_maps ?
                                             (GDestroyNotify)g_free :
                                             (GDestroyNotify)destroy_grant);
        blkdev->persistent_regions = NULL;
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, nr-ring-ref %u, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->nr_ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

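/*
 * Tear down what blk_connect() set up: release the block backend,
 * unbind the event channel, unmap the shared ring and any persistent
 * grants, and close the gnttab handle.
 */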
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_pv_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
                        blkdev->nr_ring_ref);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }

    /*
     * Unmap persistent grants before switching to the closed state
     * so the frontend can free them.
     *
     * In the !batch_maps case g_tree_destroy will take care of unmapping
     * the grant, but in the batch_maps case we need to iterate over every
     * region in persistent_regions and unmap it.
     */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
        assert(batch_maps || blkdev->persistent_gnt_count == 0);
        if (batch_maps) {
            blkdev->persistent_gnt_count = 0;
            g_slist_foreach(blkdev->persistent_regions,
                            (GFunc)remove_persistent_region, blkdev);
            g_slist_free(blkdev->persistent_regions);
        }
        blkdev->feature_persistent = false;
    }

    if (blkdev->xendev.gnttabdev) {
        xengnttab_close(blkdev->xendev.gnttabdev);
        blkdev->xendev.gnttabdev = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    blk_disconnect(xendev);

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

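/*
 * Callbacks wired into the generic Xen backend core (xen_backend.c),
 * which invokes them as the device is created, initialised, connected
 * to the frontend, notified, disconnected and finally freed.
 */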
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise    = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};