qemu/hw/block/dataplane/xen-block.c
/*
 * Copyright (c) 2018  Citrix Systems Inc.
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/hw.h"
#include "hw/xen/xen_common.h"
#include "hw/block/xen_blkif.h"
#include "sysemu/block-backend.h"
#include "sysemu/iothread.h"
#include "xen-block.h"

typedef struct XenBlockRequest {
    blkif_request_t req;
    int16_t status;
    off_t start;
    QEMUIOVector v;
    void *buf;
    size_t size;
    int presync;
    int aio_inflight;
    int aio_errors;
    XenBlockDataPlane *dataplane;
    QLIST_ENTRY(XenBlockRequest) list;
    BlockAcctCookie acct;
} XenBlockRequest;

struct XenBlockDataPlane {
    XenDevice *xendev;
    XenEventChannel *event_channel;
    unsigned int *ring_ref;
    unsigned int nr_ring_ref;
    void *sring;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    QLIST_HEAD(inflight_head, XenBlockRequest) inflight;
    QLIST_HEAD(freelist_head, XenBlockRequest) freelist;
    int requests_total;
    int requests_inflight;
    unsigned int max_requests;
    BlockBackend *blk;
    unsigned int sector_size;
    QEMUBH *bh;
    IOThread *iothread;
    AioContext *ctx;
};

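/* Reset a request so that it can be re-used from the freelist. */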
static void reset_request(XenBlockRequest *request)
{
    memset(&request->req, 0, sizeof(request->req));
    request->status = 0;
    request->start = 0;
    request->size = 0;
    request->presync = 0;

    request->aio_inflight = 0;
    request->aio_errors = 0;

    request->dataplane = NULL;
    memset(&request->list, 0, sizeof(request->list));
    memset(&request->acct, 0, sizeof(request->acct));

    qemu_iovec_reset(&request->v);
}

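/*
 * Get a request slot: re-use one from the freelist, or allocate a new one
 * while the total stays below max_requests. The request is placed on the
 * inflight list; NULL is returned if no slot is available.
 */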
static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request = NULL;

    if (QLIST_EMPTY(&dataplane->freelist)) {
        if (dataplane->requests_total >= dataplane->max_requests) {
            goto out;
        }
        /* allocate new struct */
        request = g_malloc0(sizeof(*request));
        request->dataplane = dataplane;
        /*
         * We cannot need more pages per request than this, and since we
         * re-use requests, allocate the memory once here. It will be freed
         * in xen_block_dataplane_destroy() when the request list is freed.
         */
        request->buf = qemu_memalign(XC_PAGE_SIZE,
                                     BLKIF_MAX_SEGMENTS_PER_REQUEST *
                                     XC_PAGE_SIZE);
        dataplane->requests_total++;
        qemu_iovec_init(&request->v, 1);
    } else {
        /* get one from freelist */
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
    }
    QLIST_INSERT_HEAD(&dataplane->inflight, request, list);
    dataplane->requests_inflight++;

out:
    return request;
}

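/* Take a completed request off the inflight list and drop the counter. */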
static void xen_block_finish_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    QLIST_REMOVE(request, list);
    dataplane->requests_inflight--;
}

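/*
 * Return a request to the freelist for re-use. The request must already
 * have been taken off the inflight list by xen_block_finish_request().
 */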
static void xen_block_release_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    reset_request(request);
    request->dataplane = dataplane;
    QLIST_INSERT_HEAD(&dataplane->freelist, request, list);
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int xen_block_parse_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    size_t len;
    int i;

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        request->presync = 1;
        if (!request->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        error_report("error: unknown operation (%d)", request->req.operation);
        goto err;
    };

    if (request->req.operation != BLKIF_OP_READ &&
        blk_is_read_only(dataplane->blk)) {
        error_report("error: write req for ro device");
        goto err;
    }

    request->start = request->req.sector_number * dataplane->sector_size;
    for (i = 0; i < request->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            error_report("error: nr_segments too big");
            goto err;
        }
        if (request->req.seg[i].first_sect > request->req.seg[i].last_sect) {
            error_report("error: first > last sector");
            goto err;
        }
        if (request->req.seg[i].last_sect * dataplane->sector_size >=
            XC_PAGE_SIZE) {
            error_report("error: page crossing");
            goto err;
        }

        len = (request->req.seg[i].last_sect -
               request->req.seg[i].first_sect + 1) * dataplane->sector_size;
        request->size += len;
    }
    if (request->start + request->size > blk_getlength(dataplane->blk)) {
        error_report("error: access beyond end of file");
        goto err;
    }
    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    return -1;
}

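/*
 * Copy the request data between the guest's grant pages and the local
 * bounce buffer: towards the domain for reads, from it for writes.
 */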
static int xen_block_copy_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    XenDevice *xendev = dataplane->xendev;
    XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count;
    bool to_domain = (request->req.operation == BLKIF_OP_READ);
    void *virt = request->buf;
    Error *local_err = NULL;

    if (request->req.nr_segments == 0) {
        return 0;
    }

    count = request->req.nr_segments;

    for (i = 0; i < count; i++) {
        if (to_domain) {
            segs[i].dest.foreign.ref = request->req.seg[i].gref;
            segs[i].dest.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].source.virt = virt;
        } else {
            segs[i].source.foreign.ref = request->req.seg[i].gref;
            segs[i].source.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].dest.virt = virt;
        }
        segs[i].len = (request->req.seg[i].last_sect -
                       request->req.seg[i].first_sect + 1) *
                      dataplane->sector_size;
        virt += segs[i].len;
    }

    xen_device_copy_grant_refs(xendev, to_domain, segs, count, &local_err);

    if (local_err) {
        error_reportf_err(local_err, "failed to copy data: ");

        request->aio_errors++;
        return -1;
    }

    return 0;
}

static int xen_block_do_aio(XenBlockRequest *request);
static int xen_block_send_response(XenBlockRequest *request);

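/*
 * AIO completion callback: accumulate errors, copy read data back to the
 * guest, update accounting, put the response on the ring and notify the
 * frontend, then recycle the request.
 */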
static void xen_block_complete_aio(void *opaque, int ret)
{
    XenBlockRequest *request = opaque;
    XenBlockDataPlane *dataplane = request->dataplane;

    aio_context_acquire(dataplane->ctx);

    if (ret != 0) {
        error_report("%s I/O error",
                     request->req.operation == BLKIF_OP_READ ?
                     "read" : "write");
        request->aio_errors++;
    }

    request->aio_inflight--;
    if (request->presync) {
        request->presync = 0;
        xen_block_do_aio(request);
        goto done;
    }
    if (request->aio_inflight > 0) {
        goto done;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        /* in case of failure request->aio_errors is increased */
        if (ret == 0) {
            xen_block_copy_request(request);
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
    default:
        break;
    }

    request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    xen_block_finish_request(request);

    switch (request->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (request->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(dataplane->blk), &request->acct);
        } else {
            block_acct_failed(blk_get_stats(dataplane->blk), &request->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    if (xen_block_send_response(request)) {
        Error *local_err = NULL;

        xen_device_notify_event_channel(dataplane->xendev,
                                        dataplane->event_channel,
                                        &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
    xen_block_release_request(request);

    if (dataplane->more_work) {
        qemu_bh_schedule(dataplane->bh);
    }

done:
    aio_context_release(dataplane->ctx);
}

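/*
 * Split a discard into chunks of no more than BDRV_REQUEST_MAX_BYTES and
 * submit them. Returns false if the sector range overflows.
 */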
static bool xen_block_split_discard(XenBlockRequest *request,
                                    blkif_sector_t sector_number,
                                    uint64_t nr_sectors)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX / dataplane->sector_size) {
        return false;
    }

    byte_offset = sec_start * dataplane->sector_size;
    byte_remaining = sec_count * dataplane->sector_size;

    do {
        byte_chunk = byte_remaining > BDRV_REQUEST_MAX_BYTES ?
            BDRV_REQUEST_MAX_BYTES : byte_remaining;
        request->aio_inflight++;
        blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk,
                         xen_block_complete_aio, request);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}

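/* Issue the (already parsed) request to the block layer. */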
static int xen_block_do_aio(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (request->req.nr_segments &&
        (request->req.operation == BLKIF_OP_WRITE ||
         request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        xen_block_copy_request(request)) {
        goto err;
    }

    request->aio_inflight++;
    if (request->presync) {
        blk_aio_flush(request->dataplane->blk, xen_block_complete_aio,
                      request);
        return 0;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size, BLOCK_ACCT_READ);
        request->aio_inflight++;
        blk_aio_preadv(dataplane->blk, request->start, &request->v, 0,
                       xen_block_complete_aio, request);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size,
                         request->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        request->aio_inflight++;
        blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0,
                        xen_block_complete_aio, request);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&request->req;
        if (!xen_block_split_discard(request, req->sector_number,
                                     req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    xen_block_complete_aio(request, 0);

    return 0;

err:
    xen_block_finish_request(request);
    request->status = BLKIF_RSP_ERROR;
    return -1;
}

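/*
 * Put a response on the ring for the given request. Returns whether the
 * frontend needs to be notified, and flags more_work if further requests
 * are pending on the ring.
 */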
static int xen_block_send_response(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.native,
            dataplane->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_32_part,
            dataplane->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_64_part,
            dataplane->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id = request->req.id;
    resp->operation = request->req.operation;
    resp->status = request->status;

    dataplane->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common,
                                         send_notify);
    if (dataplane->rings.common.rsp_prod_pvt ==
        dataplane->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common,
                                      have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        dataplane->more_work++;
    }
    return send_notify;
}

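/*
 * Copy the request at index 'rc' off the shared ring, converting from the
 * 32-bit or 64-bit layout if necessary.
 */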
static int xen_block_get_request(XenBlockDataPlane *dataplane,
                                 XenBlockRequest *request, RING_IDX rc)
{
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE: {
        blkif_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.native, rc);

        memcpy(&request->req, req, sizeof(request->req));
        break;
    }
    case BLKIF_PROTOCOL_X86_32: {
        blkif_x86_32_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc);

        blkif_get_x86_32_req(&request->req, req);
        break;
    }
    case BLKIF_PROTOCOL_X86_64: {
        blkif_x86_64_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc);

        blkif_get_x86_64_req(&request->req, req);
        break;
    }
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}

/*
 * Threshold of in-flight requests above which we will start using
 * blk_io_plug()/blk_io_unplug() to batch requests.
 */
#define IO_PLUG_THRESHOLD 1

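/*
 * Consume requests from the ring, parse and submit them, batching
 * submissions with blk_io_plug()/blk_io_unplug() when enough I/O is
 * already in flight. Returns whether any request was consumed.
 */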
static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;
    int inflight_atstart = dataplane->requests_inflight;
    int batched = 0;
    bool done_something = false;

    dataplane->more_work = 0;

    rc = dataplane->rings.common.req_cons;
    rp = dataplane->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    /*
     * If there were more than IO_PLUG_THRESHOLD requests in flight
     * when we got here, this is an indication that the bottleneck
     * is below us, so it's worth beginning to batch up I/O requests
     * rather than submitting them immediately. The maximum number
     * of requests we're willing to batch is the number already in
     * flight, so it can grow up to max_requests when the bottleneck
     * is below us.
     */
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_plug(dataplane->blk);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break;
        }
        request = xen_block_start_request(dataplane);
        if (request == NULL) {
            dataplane->more_work++;
            break;
        }
        xen_block_get_request(dataplane, request, rc);
        dataplane->rings.common.req_cons = ++rc;
        done_something = true;

        /* parse them */
        if (xen_block_parse_request(request) != 0) {
            switch (request->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_FLUSH);
            default:
                break;
            };

            xen_block_finish_request(request);
            if (xen_block_send_response(request)) {
                Error *local_err = NULL;

                xen_device_notify_event_channel(dataplane->xendev,
                                                dataplane->event_channel,
                                                &local_err);
                if (local_err) {
                    error_report_err(local_err);
                }
            }
            xen_block_release_request(request);
            continue;
        }

        if (inflight_atstart > IO_PLUG_THRESHOLD &&
            batched >= inflight_atstart) {
            blk_io_unplug(dataplane->blk);
        }
        xen_block_do_aio(request);
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            if (batched >= inflight_atstart) {
                blk_io_plug(dataplane->blk);
                batched = 0;
            } else {
                batched++;
            }
        }
    }
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_unplug(dataplane->blk);
    }

    return done_something;
}

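/* Bottom half used to resume ring processing in the dataplane context. */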
static void xen_block_dataplane_bh(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    aio_context_acquire(dataplane->ctx);
    xen_block_handle_requests(dataplane);
    aio_context_release(dataplane->ctx);
}

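/* Event channel handler: kick ring processing. */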
static bool xen_block_dataplane_event(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    return xen_block_handle_requests(dataplane);
}

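/*
 * Allocate the dataplane state and attach it to the IOThread's AioContext
 * (or the main loop context if no IOThread is given). The expected
 * lifecycle is create -> start -> stop -> destroy.
 */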
XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                              BlockBackend *blk,
                                              unsigned int sector_size,
                                              IOThread *iothread)
{
    XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1);

    dataplane->xendev = xendev;
    dataplane->blk = blk;
    dataplane->sector_size = sector_size;

    QLIST_INIT(&dataplane->inflight);
    QLIST_INIT(&dataplane->freelist);

    if (iothread) {
        dataplane->iothread = iothread;
        object_ref(OBJECT(dataplane->iothread));
        dataplane->ctx = iothread_get_aio_context(dataplane->iothread);
    } else {
        dataplane->ctx = qemu_get_aio_context();
    }
    dataplane->bh = aio_bh_new(dataplane->ctx, xen_block_dataplane_bh,
                               dataplane);

    return dataplane;
}

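/* Free the request pool, the bottom half and the dataplane state itself. */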
void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request;

    if (!dataplane) {
        return;
    }

    while (!QLIST_EMPTY(&dataplane->freelist)) {
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
        qemu_iovec_destroy(&request->v);
        qemu_vfree(request->buf);
        g_free(request);
    }

    qemu_bh_delete(dataplane->bh);
    if (dataplane->iothread) {
        object_unref(OBJECT(dataplane->iothread));
    }

    g_free(dataplane);
}

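/*
 * Quiesce the dataplane: move the BlockBackend back to the main loop
 * context, unbind the event channel and unmap the shared ring.
 */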
void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
{
    XenDevice *xendev;

    if (!dataplane) {
        return;
    }

    aio_context_acquire(dataplane->ctx);
    /* Xen doesn't have multiple users for nodes, so this can't fail */
    blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(dataplane->ctx);

    xendev = dataplane->xendev;

    if (dataplane->event_channel) {
        Error *local_err = NULL;

        xen_device_unbind_event_channel(xendev, dataplane->event_channel,
                                        &local_err);
        dataplane->event_channel = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    if (dataplane->sring) {
        Error *local_err = NULL;

        xen_device_unmap_grant_refs(xendev, dataplane->sring,
                                    dataplane->nr_ring_ref, &local_err);
        dataplane->sring = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    g_free(dataplane->ring_ref);
    dataplane->ring_ref = NULL;
}

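/*
 * Map the shared ring(s), bind the event channel in the dataplane's
 * AioContext and move the BlockBackend there. On failure the partially
 * initialised state is torn down again via xen_block_dataplane_stop().
 */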
void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
                               const unsigned int ring_ref[],
                               unsigned int nr_ring_ref,
                               unsigned int event_channel,
                               unsigned int protocol,
                               Error **errp)
{
    XenDevice *xendev = dataplane->xendev;
    Error *local_err = NULL;
    unsigned int ring_size;
    unsigned int i;

    dataplane->nr_ring_ref = nr_ring_ref;
    dataplane->ring_ref = g_new(unsigned int, nr_ring_ref);

    for (i = 0; i < nr_ring_ref; i++) {
        dataplane->ring_ref[i] = ring_ref[i];
    }

    dataplane->protocol = protocol;

    ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref;
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        error_setg(errp, "unknown protocol %u", dataplane->protocol);
        return;
    }

    xen_device_set_max_grant_refs(xendev, dataplane->nr_ring_ref,
                                  &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    dataplane->sring = xen_device_map_grant_refs(xendev,
                                              dataplane->ring_ref,
                                              dataplane->nr_ring_ref,
                                              PROT_READ | PROT_WRITE,
                                              &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_32_part, sring_x86_32,
                       ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_64_part, sring_x86_64,
                       ring_size);
        break;
    }
    }

    dataplane->event_channel =
        xen_device_bind_event_channel(xendev, dataplane->ctx, event_channel,
                                      xen_block_dataplane_event, dataplane,
                                      &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    aio_context_acquire(dataplane->ctx);
    /* If other users keep the BlockBackend in the iothread, that's ok */
    blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);
    aio_context_release(dataplane->ctx);
    return;

stop:
    xen_block_dataplane_stop(dataplane);
}