qemu/hw/block/dataplane/xen-block.c
/*
 * Copyright (c) 2018  Citrix Systems Inc.
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "hw/xen/xen_common.h"
#include "hw/block/xen_blkif.h"
#include "sysemu/block-backend.h"
#include "sysemu/iothread.h"
#include "xen-block.h"

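/*
 * Per-request bookkeeping: a copy of the guest's blkif request, the
 * bounce buffer and iovec used for the actual I/O, and counters that
 * track outstanding AIO operations and any errors they reported.
 */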
typedef struct XenBlockRequest {
    blkif_request_t req;
    int16_t status;
    off_t start;
    QEMUIOVector v;
    void *buf;
    size_t size;
    int presync;
    int aio_inflight;
    int aio_errors;
    XenBlockDataPlane *dataplane;
    QLIST_ENTRY(XenBlockRequest) list;
    BlockAcctCookie acct;
} XenBlockRequest;

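/*
 * Per-device dataplane state: the mapped shared ring and the back ring
 * for the negotiated protocol, the event channel used to signal the
 * frontend, the lists of in-flight and recycled requests, and the
 * AioContext (optionally an IOThread's) in which all I/O is processed.
 */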
struct XenBlockDataPlane {
    XenDevice *xendev;
    XenEventChannel *event_channel;
    unsigned int *ring_ref;
    unsigned int nr_ring_ref;
    void *sring;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    QLIST_HEAD(inflight_head, XenBlockRequest) inflight;
    QLIST_HEAD(freelist_head, XenBlockRequest) freelist;
    int requests_total;
    int requests_inflight;
    unsigned int max_requests;
    BlockBackend *blk;
    unsigned int sector_size;
    QEMUBH *bh;
    IOThread *iothread;
    AioContext *ctx;
};

static int xen_block_send_response(XenBlockRequest *request);

static void reset_request(XenBlockRequest *request)
{
    memset(&request->req, 0, sizeof(request->req));
    request->status = 0;
    request->start = 0;
    request->size = 0;
    request->presync = 0;

    request->aio_inflight = 0;
    request->aio_errors = 0;

    request->dataplane = NULL;
    memset(&request->list, 0, sizeof(request->list));
    memset(&request->acct, 0, sizeof(request->acct));

    qemu_iovec_reset(&request->v);
}

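/*
 * Take a request from the freelist, or allocate a new one while we are
 * still below max_requests. The request is moved onto the inflight
 * list; returns NULL when a full ring's worth of requests is already
 * in use.
 */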
static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request = NULL;

    if (QLIST_EMPTY(&dataplane->freelist)) {
        if (dataplane->requests_total >= dataplane->max_requests) {
            goto out;
        }
        /* allocate new struct */
        request = g_malloc0(sizeof(*request));
        request->dataplane = dataplane;
        /*
         * We cannot need more pages per request than this, and since we
         * re-use requests, allocate the memory once here. It will be freed
         * in xen_block_dataplane_destroy() when the request list is freed.
         */
        request->buf = qemu_memalign(XC_PAGE_SIZE,
                                     BLKIF_MAX_SEGMENTS_PER_REQUEST *
                                     XC_PAGE_SIZE);
        dataplane->requests_total++;
        qemu_iovec_init(&request->v, 1);
    } else {
        /* get one from freelist */
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
    }
    QLIST_INSERT_HEAD(&dataplane->inflight, request, list);
    dataplane->requests_inflight++;

out:
    return request;
}

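/*
 * Send the response for a finished request, kick the event channel if
 * the frontend asked to be notified, and recycle the request onto the
 * freelist.
 */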
static void xen_block_complete_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (xen_block_send_response(request)) {
        Error *local_err = NULL;

        xen_device_notify_event_channel(dataplane->xendev,
                                        dataplane->event_channel,
                                        &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }

    QLIST_REMOVE(request, list);
    dataplane->requests_inflight--;
    reset_request(request);
    request->dataplane = dataplane;
    QLIST_INSERT_HEAD(&dataplane->freelist, request, list);
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int xen_block_parse_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    size_t len;
    int i;

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        request->presync = 1;
        if (!request->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        error_report("error: unknown operation (%d)", request->req.operation);
        goto err;
    }

    if (request->req.operation != BLKIF_OP_READ &&
        !blk_is_writable(dataplane->blk)) {
        error_report("error: write req for ro device");
        goto err;
    }

    request->start = request->req.sector_number * dataplane->sector_size;
    for (i = 0; i < request->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            error_report("error: nr_segments too big");
            goto err;
        }
        if (request->req.seg[i].first_sect > request->req.seg[i].last_sect) {
            error_report("error: first > last sector");
            goto err;
        }
        if (request->req.seg[i].last_sect * dataplane->sector_size >=
            XC_PAGE_SIZE) {
            error_report("error: page crossing");
            goto err;
        }

        len = (request->req.seg[i].last_sect -
               request->req.seg[i].first_sect + 1) * dataplane->sector_size;
        request->size += len;
    }
    if (request->start + request->size > blk_getlength(dataplane->blk)) {
        error_report("error: access beyond end of file");
        goto err;
    }
    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    return -1;
}

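/*
 * Copy data between the guest's granted pages and the request's bounce
 * buffer using grant copy. The direction depends on the operation:
 * reads copy to the domain, writes copy from it.
 */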
static int xen_block_copy_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    XenDevice *xendev = dataplane->xendev;
    XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count;
    bool to_domain = (request->req.operation == BLKIF_OP_READ);
    void *virt = request->buf;
    Error *local_err = NULL;

    if (request->req.nr_segments == 0) {
        return 0;
    }

    count = request->req.nr_segments;

    for (i = 0; i < count; i++) {
        if (to_domain) {
            segs[i].dest.foreign.ref = request->req.seg[i].gref;
            segs[i].dest.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].source.virt = virt;
        } else {
            segs[i].source.foreign.ref = request->req.seg[i].gref;
            segs[i].source.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].dest.virt = virt;
        }
        segs[i].len = (request->req.seg[i].last_sect -
                       request->req.seg[i].first_sect + 1) *
                      dataplane->sector_size;
        virt += segs[i].len;
    }

    xen_device_copy_grant_refs(xendev, to_domain, segs, count, &local_err);

    if (local_err) {
        error_reportf_err(local_err, "failed to copy data: ");

        request->aio_errors++;
        return -1;
    }

    return 0;
}

static int xen_block_do_aio(XenBlockRequest *request);

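/*
 * AIO completion callback. When a presync flush finishes, the request
 * is resubmitted; once the last outstanding AIO operation completes,
 * read data is copied back to the guest, the request is accounted and
 * its response is sent.
 */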
static void xen_block_complete_aio(void *opaque, int ret)
{
    XenBlockRequest *request = opaque;
    XenBlockDataPlane *dataplane = request->dataplane;

    aio_context_acquire(dataplane->ctx);

    if (ret != 0) {
        error_report("%s I/O error",
                     request->req.operation == BLKIF_OP_READ ?
                     "read" : "write");
        request->aio_errors++;
    }

    request->aio_inflight--;
    if (request->presync) {
        request->presync = 0;
        xen_block_do_aio(request);
        goto done;
    }
    if (request->aio_inflight > 0) {
        goto done;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        /* in case of failure request->aio_errors is increased */
        if (ret == 0) {
            xen_block_copy_request(request);
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
    default:
        break;
    }

    request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;

    switch (request->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (request->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(dataplane->blk), &request->acct);
        } else {
            block_acct_failed(blk_get_stats(dataplane->blk), &request->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }

    xen_block_complete_request(request);

    if (dataplane->more_work) {
        qemu_bh_schedule(dataplane->bh);
    }

done:
    aio_context_release(dataplane->ctx);
}

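/*
 * A discard may cover more than BDRV_REQUEST_MAX_BYTES, so issue it as
 * a series of chunked blk_aio_pdiscard() calls. Returns false if the
 * sector range wraps around or exceeds the byte-addressable limit.
 */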
static bool xen_block_split_discard(XenBlockRequest *request,
                                    blkif_sector_t sector_number,
                                    uint64_t nr_sectors)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX / dataplane->sector_size) {
        return false;
    }

    byte_offset = sec_start * dataplane->sector_size;
    byte_remaining = sec_count * dataplane->sector_size;

    do {
        byte_chunk = byte_remaining > BDRV_REQUEST_MAX_BYTES ?
            BDRV_REQUEST_MAX_BYTES : byte_remaining;
        request->aio_inflight++;
        blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk,
                         xen_block_complete_aio, request);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}

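/*
 * Submit a request to the BlockBackend. For writes and flushes with a
 * payload the data is first copied in from the guest; a presync flush
 * is issued before the data phase. aio_inflight starts at one so the
 * request cannot complete while submissions are still being made; the
 * final xen_block_complete_aio(request, 0) call drops that reference.
 */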
static int xen_block_do_aio(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (request->req.nr_segments &&
        (request->req.operation == BLKIF_OP_WRITE ||
         request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        xen_block_copy_request(request)) {
        goto err;
    }

    request->aio_inflight++;
    if (request->presync) {
        blk_aio_flush(request->dataplane->blk, xen_block_complete_aio,
                      request);
        return 0;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size, BLOCK_ACCT_READ);
        request->aio_inflight++;
        blk_aio_preadv(dataplane->blk, request->start, &request->v, 0,
                       xen_block_complete_aio, request);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size,
                         request->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        request->aio_inflight++;
        blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0,
                        xen_block_complete_aio, request);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&request->req;
        if (!xen_block_split_discard(request, req->sector_number,
                                     req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    xen_block_complete_aio(request, 0);

    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    xen_block_complete_request(request);
    return -1;
}

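/*
 * Put the response on the ring in the layout the frontend negotiated
 * and return whether the frontend needs to be notified. Also records,
 * via more_work, whether further requests are already waiting on the
 * ring.
 */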
static int xen_block_send_response(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.native,
            dataplane->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_32_part,
            dataplane->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_64_part,
            dataplane->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id = request->req.id;
    resp->operation = request->req.operation;
    resp->status = request->status;

    dataplane->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common,
                                         send_notify);
    if (dataplane->rings.common.rsp_prod_pvt ==
        dataplane->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common,
                                      have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        dataplane->more_work++;
    }
    return send_notify;
}

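/*
 * Copy the request at ring index 'rc' out of the shared ring,
 * converting from the 32-bit or 64-bit x86 ABI layout if necessary.
 */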
static int xen_block_get_request(XenBlockDataPlane *dataplane,
                                 XenBlockRequest *request, RING_IDX rc)
{
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE: {
        blkif_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.native, rc);

        memcpy(&request->req, req, sizeof(request->req));
        break;
    }
    case BLKIF_PROTOCOL_X86_32: {
        blkif_x86_32_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc);

        blkif_get_x86_32_req(&request->req, req);
        break;
    }
    case BLKIF_PROTOCOL_X86_64: {
        blkif_x86_64_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc);

        blkif_get_x86_64_req(&request->req, req);
        break;
    }
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}

/*
 * Threshold of in-flight requests above which we will start using
 * blk_io_plug()/blk_io_unplug() to batch requests.
 */
#define IO_PLUG_THRESHOLD 1

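/*
 * Drain the request ring: pull requests, validate them and submit the
 * resulting I/O, batching submissions with blk_io_plug() when enough
 * I/O is already in flight. Returns true if any request was consumed.
 */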
static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;
    int inflight_atstart = dataplane->requests_inflight;
    int batched = 0;
    bool done_something = false;

    dataplane->more_work = 0;

    rc = dataplane->rings.common.req_cons;
    rp = dataplane->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    /*
     * If there were more than IO_PLUG_THRESHOLD requests in flight
     * when we got here, that is an indication that the bottleneck
     * is below us, so it's worth beginning to batch up I/O requests
     * rather than submitting them immediately. The maximum number
     * of requests we're willing to batch is the number already in
     * flight, so it can grow up to max_requests when the bottleneck
     * is below us.
     */
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_plug(dataplane->blk);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break;
        }
        request = xen_block_start_request(dataplane);
        if (request == NULL) {
            dataplane->more_work++;
            break;
        }
        xen_block_get_request(dataplane, request, rc);
        dataplane->rings.common.req_cons = ++rc;
        done_something = true;

        /* parse them */
        if (xen_block_parse_request(request) != 0) {
            switch (request->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            xen_block_complete_request(request);
            continue;
        }

        if (inflight_atstart > IO_PLUG_THRESHOLD &&
            batched >= inflight_atstart) {
            blk_io_unplug(dataplane->blk);
        }
        xen_block_do_aio(request);
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            if (batched >= inflight_atstart) {
                blk_io_plug(dataplane->blk);
                batched = 0;
            } else {
                batched++;
            }
        }
    }
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_unplug(dataplane->blk);
    }

    return done_something;
}

static void xen_block_dataplane_bh(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    aio_context_acquire(dataplane->ctx);
    xen_block_handle_requests(dataplane);
    aio_context_release(dataplane->ctx);
}

static bool xen_block_dataplane_event(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    return xen_block_handle_requests(dataplane);
}

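/*
 * Allocate the dataplane, record the BlockBackend and sector size, and
 * set up the bottom half in the IOThread's AioContext if one was given,
 * otherwise in the main loop context.
 */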
XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                              BlockBackend *blk,
                                              unsigned int sector_size,
                                              IOThread *iothread)
{
    XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1);

    dataplane->xendev = xendev;
    dataplane->blk = blk;
    dataplane->sector_size = sector_size;

    QLIST_INIT(&dataplane->inflight);
    QLIST_INIT(&dataplane->freelist);

    if (iothread) {
        dataplane->iothread = iothread;
        object_ref(OBJECT(dataplane->iothread));
        dataplane->ctx = iothread_get_aio_context(dataplane->iothread);
    } else {
        dataplane->ctx = qemu_get_aio_context();
    }
    dataplane->bh = aio_bh_new(dataplane->ctx, xen_block_dataplane_bh,
                               dataplane);

    return dataplane;
}

void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request;

    if (!dataplane) {
        return;
    }

    while (!QLIST_EMPTY(&dataplane->freelist)) {
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
        qemu_iovec_destroy(&request->v);
        qemu_vfree(request->buf);
        g_free(request);
    }

    qemu_bh_delete(dataplane->bh);
    if (dataplane->iothread) {
        object_unref(OBJECT(dataplane->iothread));
    }

    g_free(dataplane);
}

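/*
 * Move processing back onto the main loop context, then unbind the
 * event channel and unmap the shared ring. This is also used as the
 * error path of xen_block_dataplane_start().
 */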
void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
{
    XenDevice *xendev;

    if (!dataplane) {
        return;
    }

    xendev = dataplane->xendev;

    aio_context_acquire(dataplane->ctx);
    if (dataplane->event_channel) {
        /* Only reason for failure is a NULL channel */
        xen_device_set_event_channel_context(xendev, dataplane->event_channel,
                                             qemu_get_aio_context(),
                                             &error_abort);
    }
    /* Xen doesn't have multiple users for nodes, so this can't fail */
    blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(dataplane->ctx);

    /*
     * Now that the context has been moved onto the main thread, cancel
     * further processing.
     */
    qemu_bh_cancel(dataplane->bh);

    if (dataplane->event_channel) {
        Error *local_err = NULL;

        xen_device_unbind_event_channel(xendev, dataplane->event_channel,
                                        &local_err);
        dataplane->event_channel = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    if (dataplane->sring) {
        Error *local_err = NULL;

        xen_device_unmap_grant_refs(xendev, dataplane->sring,
                                    dataplane->nr_ring_ref, &local_err);
        dataplane->sring = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    g_free(dataplane->ring_ref);
    dataplane->ring_ref = NULL;
}

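/*
 * Map the shared ring from the frontend's grant references, initialise
 * the back ring for the negotiated protocol, bind the event channel and
 * move both the BlockBackend and event channel handling onto the
 * dataplane's AioContext. On failure the partial setup is torn down via
 * xen_block_dataplane_stop().
 */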
void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
                               const unsigned int ring_ref[],
                               unsigned int nr_ring_ref,
                               unsigned int event_channel,
                               unsigned int protocol,
                               Error **errp)
{
    ERRP_GUARD();
    XenDevice *xendev = dataplane->xendev;
    AioContext *old_context;
    unsigned int ring_size;
    unsigned int i;

    dataplane->nr_ring_ref = nr_ring_ref;
    dataplane->ring_ref = g_new(unsigned int, nr_ring_ref);

    for (i = 0; i < nr_ring_ref; i++) {
        dataplane->ring_ref[i] = ring_ref[i];
    }

    dataplane->protocol = protocol;

    ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref;
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        error_setg(errp, "unknown protocol %u", dataplane->protocol);
        return;
    }

    xen_device_set_max_grant_refs(xendev, dataplane->nr_ring_ref,
                                  errp);
    if (*errp) {
        goto stop;
    }

    dataplane->sring = xen_device_map_grant_refs(xendev,
                                                 dataplane->ring_ref,
                                                 dataplane->nr_ring_ref,
                                                 PROT_READ | PROT_WRITE,
                                                 errp);
    if (*errp) {
        goto stop;
    }

    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_32_part, sring_x86_32,
                       ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_64_part, sring_x86_64,
                       ring_size);
        break;
    }
    }

    dataplane->event_channel =
        xen_device_bind_event_channel(xendev, event_channel,
                                      xen_block_dataplane_event, dataplane,
                                      errp);
    if (*errp) {
        goto stop;
    }

    old_context = blk_get_aio_context(dataplane->blk);
    aio_context_acquire(old_context);
    /* If other users keep the BlockBackend in the iothread, that's ok */
    blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);
    aio_context_release(old_context);

    /* Only reason for failure is a NULL channel */
    aio_context_acquire(dataplane->ctx);
    xen_device_set_event_channel_context(xendev, dataplane->event_channel,
                                         dataplane->ctx, &error_abort);
    aio_context_release(dataplane->ctx);

    return;

stop:
    xen_block_dataplane_stop(dataplane);
}