qemu/hw/9pfs/xen-9p-backend.c
/*
 * Xen 9p backend
 *
 * Copyright Aporeto 2017
 *
 * Authors:
 *  Stefano Stabellini <stefano@aporeto.com>
 *
 */

#include "qemu/osdep.h"

#include "hw/hw.h"
#include "hw/9pfs/9p.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/9pfs/xen-9pfs.h"
#include "qapi/error.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "fsdev/qemu-fsdev.h"

#define VERSIONS "1"
#define MAX_RINGS 8
#define MAX_RING_ORDER 8

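/*
 * Per-ring state. The "intf" page is shared with the frontend and holds
 * the producer/consumer indices plus the grant references of the data
 * pages; "data" maps those 2^ring_order pages contiguously, with the
 * first half used as the in (reply) buffer and the second half as the
 * out (request) buffer (see xen_9pfs_connect()).
 */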
typedef struct Xen9pfsRing {
    struct Xen9pfsDev *priv;

    int ref;
    xenevtchn_handle *evtchndev;
    int evtchn;
    int local_port;
    int ring_order;
    struct xen_9pfs_data_intf *intf;
    unsigned char *data;
    struct xen_9pfs_data ring;

    struct iovec *sg;
    QEMUBH *bh;

    /*
     * Local copies, so that we can read/write PDU data directly from
     * the ring.
     */
    RING_IDX out_cons, out_size, in_cons;
    bool inprogress;
} Xen9pfsRing;

typedef struct Xen9pfsDev {
    struct XenLegacyDevice xendev;  /* must be first */
    V9fsState state;
    char *path;
    char *security_model;
    char *tag;
    char *id;

    int num_rings;
    Xen9pfsRing *rings;
} Xen9pfsDev;

static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev);

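/*
 * Fill in_sg with iovecs describing the space currently free in the in
 * (reply) direction of the ring. Two iovecs are produced when the free
 * region wraps around the end of the ring buffer.
 */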
static void xen_9pfs_in_sg(Xen9pfsRing *ring,
                           struct iovec *in_sg,
                           int *num,
                           uint32_t idx,
                           uint32_t size)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->in_cons;
    prod = ring->intf->in_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_prod < masked_cons) {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = masked_cons - masked_prod;
        *num = 1;
    } else {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod;
        in_sg[1].iov_base = ring->ring.in;
        in_sg[1].iov_len = masked_cons;
        *num = 2;
    }
}

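/*
 * Fill out_sg with iovecs describing the ring->out_size bytes of the
 * pending request in the out direction of the ring, starting at the
 * consumer index. Two iovecs are produced when the request wraps around
 * the end of the ring buffer.
 */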
static void xen_9pfs_out_sg(Xen9pfsRing *ring,
                            struct iovec *out_sg,
                            int *num,
                            uint32_t idx)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_cons < masked_prod) {
        out_sg[0].iov_base = ring->ring.out + masked_cons;
        out_sg[0].iov_len = ring->out_size;
        *num = 1;
    } else {
        if (ring->out_size >
            (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) -
                                masked_cons;
            out_sg[1].iov_base = ring->ring.out;
            out_sg[1].iov_len = ring->out_size -
                                (XEN_FLEX_RING_SIZE(ring->ring_order) -
                                 masked_cons);
            *num = 2;
        } else {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = ring->out_size;
            *num = 1;
        }
    }
}

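/*
 * Transport hook: marshal part of a reply into the in direction of the
 * ring. An encoding failure is treated as fatal and tears down the
 * connection, since the ring contents would be left inconsistent.
 */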
static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
                                     size_t offset,
                                     const char *fmt,
                                     va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec in_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                   in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));

    ret = v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to encode VirtFS request type %d\n", pdu->id + 1);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}

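/*
 * Transport hook: unmarshal request data from the out direction of the
 * ring. As above, a decoding failure tears down the connection.
 */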
static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
                                       size_t offset,
                                       const char *fmt,
                                       va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec out_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                    out_sg, &num, pdu->idx);

    ret = v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to decode VirtFS request type %d\n", pdu->id);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}

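/*
 * Transport hook: build the scatter-gather list that the generic 9p code
 * uses to read the request payload directly out of the ring.
 */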
static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov,
                                           size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;

    g_free(ring->sg);

    ring->sg = g_new0(struct iovec, 2);
    xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx);
    *piov = ring->sg;
    *pniov = num;
}

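/*
 * Transport hook: build the scatter-gather list used to write the reply
 * directly into the ring. If less space is available than the reply
 * needs, the frontend is misbehaving and the connection is shut down.
 */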
static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                          struct iovec **piov,
                                          unsigned int *pniov,
                                          size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;
    size_t buf_size;

    g_free(ring->sg);

    ring->sg = g_new0(struct iovec, 2);
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);

    buf_size = iov_size(ring->sg, num);
    if (buf_size < size) {
        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d "
                      "needs %zu bytes, buffer has %zu\n", pdu->id, size,
                      buf_size);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }

    *piov = ring->sg;
    *pniov = num;
}

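/*
 * Transport hook: publish the updated ring indices to the frontend and
 * notify it through the event channel once a reply is complete.
 */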
static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
{
    RING_IDX prod;
    Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];

    g_free(ring->sg);
    ring->sg = NULL;

    ring->intf->out_cons = ring->out_cons;
    xen_wmb();

    prod = ring->intf->in_prod;
    xen_rmb();
    ring->intf->in_prod = prod + pdu->size;
    xen_wmb();

    ring->inprogress = false;
    xenevtchn_notify(ring->evtchndev, ring->local_port);

    /* Check for further requests that may already be queued on the ring. */
    qemu_bh_schedule(ring->bh);
}

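/* Wire the ring-based implementation above into the generic 9p core. */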
static const V9fsTransport xen_9p_transport = {
    .pdu_vmarshal = xen_9pfs_pdu_vmarshal,
    .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
    .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
    .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu,
    .push_and_notify = xen_9pfs_push_and_notify,
};

static int xen_9pfs_init(struct XenLegacyDevice *xendev)
{
    return 0;
}

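/*
 * Pull one request off the ring, if a complete one is available, and
 * submit it to the 9p core. Only one request per ring is in flight at a
 * time; inprogress is cleared by xen_9pfs_push_and_notify() once the
 * reply has been pushed.
 */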
static int xen_9pfs_receive(Xen9pfsRing *ring)
{
    P9MsgHeader h;
    RING_IDX cons, prod, masked_prod, masked_cons, queued;
    V9fsPDU *pdu;

    if (ring->inprogress) {
        return 0;
    }

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();

    queued = xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < sizeof(h)) {
        return 0;
    }
    ring->inprogress = true;

    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h),
                         masked_prod, &masked_cons,
                         XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < le32_to_cpu(h.size_le)) {
        /*
         * The request is not fully on the ring yet; clear inprogress so
         * that the retry triggered by the next notification is not
         * ignored by the check at the top of this function.
         */
        ring->inprogress = false;
        return 0;
    }

    /* cannot fail, because we only handle one request per ring at a time */
    pdu = pdu_alloc(&ring->priv->state);
    ring->out_size = le32_to_cpu(h.size_le);
    ring->out_cons = cons + le32_to_cpu(h.size_le);

    pdu_submit(pdu, &h);

    return 0;
}

static void xen_9pfs_bh(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    xen_9pfs_receive(ring);
}

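/*
 * Event channel handler: acknowledge the notification and defer ring
 * processing to the bottom half.
 */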
static void xen_9pfs_evtchn_event(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    evtchn_port_t port;

    port = xenevtchn_pending(ring->evtchndev);
    xenevtchn_unmask(ring->evtchndev, port);

    qemu_bh_schedule(ring->bh);
}

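/*
 * Unbind the event channels. Safe to call more than once: rings whose
 * event channel is already gone are skipped.
 */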
static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].evtchndev != NULL) {
            qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                                NULL, NULL, NULL);
            xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
                             xen_9pdev->rings[i].local_port);
            xen_9pdev->rings[i].evtchndev = NULL;
        }
    }
}

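/*
 * Disconnect if still connected, then release the grant mappings, bottom
 * halves, and the strings read from xenstore.
 */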
static int xen_9pfs_free(struct XenLegacyDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    if (xen_9pdev->rings[0].evtchndev != NULL) {
        xen_9pfs_disconnect(xendev);
    }

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].data != NULL) {
            xen_be_unmap_grant_refs(&xen_9pdev->xendev,
                                    xen_9pdev->rings[i].data,
                                    (1 << xen_9pdev->rings[i].ring_order));
        }
        if (xen_9pdev->rings[i].intf != NULL) {
            xen_be_unmap_grant_refs(&xen_9pdev->xendev,
                                    xen_9pdev->rings[i].intf,
                                    1);
        }
        if (xen_9pdev->rings[i].bh != NULL) {
            qemu_bh_delete(xen_9pdev->rings[i].bh);
        }
    }

    g_free(xen_9pdev->id);
    g_free(xen_9pdev->tag);
    g_free(xen_9pdev->path);
    g_free(xen_9pdev->security_model);
    g_free(xen_9pdev->rings);
    return 0;
}

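/*
 * The frontend is ready: read the ring configuration from xenstore, map
 * the grant pages, bind the event channels, and realize the 9p device on
 * top of an implicitly created "local" fsdev.
 */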
static int xen_9pfs_connect(struct XenLegacyDevice *xendev)
{
    Error *err = NULL;
    int i;
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    V9fsState *s = &xen_9pdev->state;
    QemuOpts *fsdev;

    if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
                             &xen_9pdev->num_rings) == -1 ||
        xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
        return -1;
    }

    xen_9pdev->rings = g_new0(Xen9pfsRing, xen_9pdev->num_rings);
    for (i = 0; i < xen_9pdev->num_rings; i++) {
        char *str;
        int ring_order;

        xen_9pdev->rings[i].priv = xen_9pdev;
        xen_9pdev->rings[i].evtchn = -1;
        xen_9pdev->rings[i].local_port = -1;

        str = g_strdup_printf("ring-ref%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].ref) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);
        str = g_strdup_printf("event-channel-%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].evtchn) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);

        xen_9pdev->rings[i].intf =
            xen_be_map_grant_ref(&xen_9pdev->xendev,
                                 xen_9pdev->rings[i].ref,
                                 PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].intf) {
            goto out;
        }
        ring_order = xen_9pdev->rings[i].intf->ring_order;
        if (ring_order > MAX_RING_ORDER) {
            goto out;
        }
        xen_9pdev->rings[i].ring_order = ring_order;
        xen_9pdev->rings[i].data =
            xen_be_map_grant_refs(&xen_9pdev->xendev,
                                  xen_9pdev->rings[i].intf->ref,
                                  (1 << ring_order),
                                  PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].data) {
            goto out;
        }
        xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
        xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data +
                                       XEN_FLEX_RING_SIZE(ring_order);

        xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
        xen_9pdev->rings[i].out_cons = 0;
        xen_9pdev->rings[i].out_size = 0;
        xen_9pdev->rings[i].inprogress = false;

        xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
        if (xen_9pdev->rings[i].evtchndev == NULL) {
            goto out;
        }
        qemu_set_cloexec(xenevtchn_fd(xen_9pdev->rings[i].evtchndev));
        xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
                                            (xen_9pdev->rings[i].evtchndev,
                                             xendev->dom,
                                             xen_9pdev->rings[i].evtchn);
        if (xen_9pdev->rings[i].local_port == -1) {
            xen_pv_printf(xendev, 0,
                          "xenevtchn_bind_interdomain failed port=%d\n",
                          xen_9pdev->rings[i].evtchn);
            goto out;
        }
        xen_pv_printf(xendev, 2, "bind evtchn port %d\n",
                      xen_9pdev->rings[i].local_port);
        qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                            xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
    }

    xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model");
    xen_9pdev->path = xenstore_read_be_str(xendev, "path");
    xen_9pdev->id = s->fsconf.fsdev_id =
        g_strdup_printf("xen9p%d", xendev->dev);
    xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
    fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
                             s->fsconf.tag,
                             1, NULL);
    qemu_opt_set(fsdev, "fsdriver", "local", NULL);
    qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL);
    qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL);
    qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
    qemu_fsdev_add(fsdev, &err);
    if (err) {
        error_report_err(err);
    }
    v9fs_device_realize_common(s, &xen_9p_transport, NULL);

    return 0;

out:
    xen_9pfs_free(xendev);
    return -1;
}

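/*
 * Advertise the supported protocol versions and ring limits to the
 * frontend via xenstore before it connects.
 */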
static void xen_9pfs_alloc(struct XenLegacyDevice *xendev)
{
    xenstore_write_be_str(xendev, "versions", VERSIONS);
    xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
    xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
}

struct XenDevOps xen_9pfs_ops = {
    .size       = sizeof(Xen9pfsDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = xen_9pfs_alloc,
    .init       = xen_9pfs_init,
    .initialise = xen_9pfs_connect,
    .disconnect = xen_9pfs_disconnect,
    .free       = xen_9pfs_free,
};