qemu/hw/9pfs/xen-9p-backend.c
/*
 * Xen 9p backend
 *
 * Copyright Aporeto 2017
 *
 * Authors:
 *  Stefano Stabellini <stefano@aporeto.com>
 *
 */

#include "qemu/osdep.h"

#include "hw/hw.h"
#include "hw/9pfs/9p.h"
#include "hw/xen/xen_backend.h"
#include "hw/9pfs/xen-9pfs.h"
#include "qemu/config-file.h"
#include "fsdev/qemu-fsdev.h"

#define VERSIONS "1"
#define MAX_RINGS 8
#define MAX_RING_ORDER 8
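
/*
 * Sizing sketch (illustrative, assuming the 4K XEN_PAGE_SHIFT of the Xen
 * public headers): XEN_FLEX_RING_SIZE(order) is 1UL << (order + 11), so a
 * ring of the maximum order 8 maps 1 << 8 = 256 grant pages, split into a
 * 512 KiB in ring and a 512 KiB out ring.
 */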

typedef struct Xen9pfsRing {
    struct Xen9pfsDev *priv;

    int ref;
    xenevtchn_handle   *evtchndev;
    int evtchn;
    int local_port;
    int ring_order;
    struct xen_9pfs_data_intf *intf;
    unsigned char *data;
    struct xen_9pfs_data ring;

    struct iovec *sg;
    QEMUBH *bh;

    /* local copies, so that we can read/write PDU data directly from
     * the ring */
    RING_IDX out_cons, out_size, in_cons;
    bool inprogress;
} Xen9pfsRing;

typedef struct Xen9pfsDev {
    struct XenDevice xendev;  /* must be first */
    V9fsState state;
    char *path;
    char *security_model;
    char *tag;
    char *id;

    int num_rings;
    Xen9pfsRing *rings;
} Xen9pfsDev;

static void xen_9pfs_disconnect(struct XenDevice *xendev);

static void xen_9pfs_in_sg(Xen9pfsRing *ring,
                           struct iovec *in_sg,
                           int *num,
                           uint32_t idx,
                           uint32_t size)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->in_cons;
    prod = ring->intf->in_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_prod < masked_cons) {
        /* The free space is contiguous between prod and cons. */
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = masked_cons - masked_prod;
        *num = 1;
    } else {
        /* The free space wraps: tail of the ring first, then the head. */
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod;
        in_sg[1].iov_base = ring->ring.in;
        in_sg[1].iov_len = masked_cons;
        *num = 2;
    }
}
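
/*
 * Worked example (illustrative numbers): on a 4096-byte in ring with
 * masked_prod == 3968 and masked_cons == 128, the free space wraps, so
 * the caller gets two iovecs: 128 bytes at offset 3968 and another 128
 * bytes at offset 0.
 */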

static void xen_9pfs_out_sg(Xen9pfsRing *ring,
                            struct iovec *out_sg,
                            int *num,
                            uint32_t idx)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_cons < masked_prod) {
        /* The request bytes are contiguous between cons and prod. */
        out_sg[0].iov_base = ring->ring.out + masked_cons;
        out_sg[0].iov_len = ring->out_size;
        *num = 1;
    } else {
        if (ring->out_size >
            (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) {
            /* The request wraps past the end of the ring. */
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) -
                                masked_cons;
            out_sg[1].iov_base = ring->ring.out;
            out_sg[1].iov_len = ring->out_size -
                                (XEN_FLEX_RING_SIZE(ring->ring_order) -
                                 masked_cons);
            *num = 2;
        } else {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = ring->out_size;
            *num = 1;
        }
    }
}
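
/*
 * Note that ring->out_size is not derived from the ring indexes here: it
 * was read out of the 9p request header by xen_9pfs_receive() below, so
 * this function only decides whether those bytes sit contiguously or wrap
 * around the end of the out ring.
 */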

static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
                                     size_t offset,
                                     const char *fmt,
                                     va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec in_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                   in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));

    ret = v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to encode VirtFS request type %d\n", pdu->id + 1);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}
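
/*
 * A note on the size hint above: ROUND_UP(offset + 128, 512) looks like a
 * conservative upper bound on the header fields a single vmarshal call
 * writes (e.g. at offset 7 it computes ROUND_UP(135, 512) = 512), but as
 * the code stands xen_9pfs_in_sg() ignores its size argument and always
 * describes all of the free space in the in ring.
 */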

static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
                                       size_t offset,
                                       const char *fmt,
                                       va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec out_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                    out_sg, &num, pdu->idx);

    ret = v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to decode VirtFS request type %d\n", pdu->id);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}

static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov,
                                           size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;

    /* Drop the scatter list left over from the previous request. */
    g_free(ring->sg);

    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
    xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx);
    *piov = ring->sg;
    *pniov = num;
}

static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                          struct iovec **piov,
                                          unsigned int *pniov,
                                          size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;
    size_t buf_size;

    g_free(ring->sg);

    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);

    buf_size = iov_size(ring->sg, num);
    if (buf_size < size) {
        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d "
                "needs %zu bytes, buffer has %zu\n", pdu->id, size,
                buf_size);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }

    *piov = ring->sg;
    *pniov = num;
}

static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
{
    RING_IDX prod;
    Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];

    g_free(ring->sg);
    ring->sg = NULL;

    /* Tell the frontend we have consumed the whole request. */
    ring->intf->out_cons = ring->out_cons;
    xen_wmb();

    /* Publish the reply by advancing the in producer past pdu->size bytes. */
    prod = ring->intf->in_prod;
    xen_rmb();
    ring->intf->in_prod = prod + pdu->size;
    xen_wmb();

    ring->inprogress = false;
    xenevtchn_notify(ring->evtchndev, ring->local_port);

    /* Check for another request already queued on this ring. */
    qemu_bh_schedule(ring->bh);
}
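
/*
 * Index bookkeeping sketch (illustrative, assuming the usual free-running
 * Xen RING_IDX convention): the indexes only ever grow and are masked on
 * access, so publishing a reply is a plain addition:
 *
 *   new_in_prod = old_in_prod + reply_size;
 *   offset      = xen_9pfs_mask(old_in_prod, XEN_FLEX_RING_SIZE(order));
 */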

static const struct V9fsTransport xen_9p_transport = {
    .pdu_vmarshal = xen_9pfs_pdu_vmarshal,
    .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
    .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
    .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu,
    .push_and_notify = xen_9pfs_push_and_notify,
};
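
/*
 * The generic 9p server never touches the ring directly: it marshals and
 * unmarshals PDUs through the callbacks above, which xen_9pfs_connect()
 * registers with v9fs_register_transport() below.
 */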

static int xen_9pfs_init(struct XenDevice *xendev)
{
    /* Nothing to do at init time; all setup happens in xen_9pfs_connect(). */
    return 0;
}

static int xen_9pfs_receive(Xen9pfsRing *ring)
{
    P9MsgHeader h;
    RING_IDX cons, prod, masked_prod, masked_cons, queued;
    V9fsPDU *pdu;

    if (ring->inprogress) {
        return 0;
    }

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();

    queued = xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < sizeof(h)) {
        return 0;
    }
    ring->inprogress = true;

    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    /* Peek at the 9p header to learn the full request size. */
    xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h),
                         masked_prod, &masked_cons,
                         XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < le32_to_cpu(h.size_le)) {
        /* The frontend has not written the whole request yet. */
        return 0;
    }

    /* cannot fail, because we only handle one request per ring at a time */
    pdu = pdu_alloc(&ring->priv->state);
    ring->out_size = le32_to_cpu(h.size_le);
    ring->out_cons = cons + le32_to_cpu(h.size_le);

    pdu_submit(pdu, &h);

    return 0;
}
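
/*
 * Worked example (illustrative numbers): with free-running indexes
 * cons == 4064 and prod == 4185 on a 4096-byte out ring, 121 bytes are
 * queued. The fixed-size 9p header (size, type, tag) is read across the
 * wrap point by xen_9pfs_read_packet(), and a PDU is only allocated and
 * submitted once all h.size_le bytes of the request are present.
 */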

static void xen_9pfs_bh(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    xen_9pfs_receive(ring);
}

static void xen_9pfs_evtchn_event(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    evtchn_port_t port;

    /* Ack the event channel and defer the real work to the bottom half. */
    port = xenevtchn_pending(ring->evtchndev);
    xenevtchn_unmask(ring->evtchndev, port);

    qemu_bh_schedule(ring->bh);
}

static void xen_9pfs_disconnect(struct XenDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].evtchndev != NULL) {
            qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                    NULL, NULL, NULL);
            xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
                             xen_9pdev->rings[i].local_port);
            xen_9pdev->rings[i].evtchndev = NULL;
        }
    }
}

static int xen_9pfs_free(struct XenDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    if (xen_9pdev->rings[0].evtchndev != NULL) {
        xen_9pfs_disconnect(xendev);
    }

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].data != NULL) {
            xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
                    xen_9pdev->rings[i].data,
                    (1 << xen_9pdev->rings[i].ring_order));
        }
        if (xen_9pdev->rings[i].intf != NULL) {
            xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
                    xen_9pdev->rings[i].intf,
                    1);
        }
        if (xen_9pdev->rings[i].bh != NULL) {
            qemu_bh_delete(xen_9pdev->rings[i].bh);
        }
    }

    g_free(xen_9pdev->id);
    g_free(xen_9pdev->tag);
    g_free(xen_9pdev->path);
    g_free(xen_9pdev->security_model);
    g_free(xen_9pdev->rings);
    return 0;
}

static int xen_9pfs_connect(struct XenDevice *xendev)
{
    int i;
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    V9fsState *s = &xen_9pdev->state;
    QemuOpts *fsdev;

    /* The frontend advertises how many rings it has set up; bound it. */
    if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
                             &xen_9pdev->num_rings) == -1 ||
        xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
        return -1;
    }

    xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(Xen9pfsRing));
    for (i = 0; i < xen_9pdev->num_rings; i++) {
        char *str;
        int ring_order;

        xen_9pdev->rings[i].priv = xen_9pdev;
        xen_9pdev->rings[i].evtchn = -1;
        xen_9pdev->rings[i].local_port = -1;

        str = g_strdup_printf("ring-ref%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].ref) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);
        str = g_strdup_printf("event-channel-%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].evtchn) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);

        /* Map the shared page holding the ring indexes and grant refs. */
        xen_9pdev->rings[i].intf = xengnttab_map_grant_ref(
                xen_9pdev->xendev.gnttabdev,
                xen_9pdev->xendev.dom,
                xen_9pdev->rings[i].ref,
                PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].intf) {
            goto out;
        }
        ring_order = xen_9pdev->rings[i].intf->ring_order;
        if (ring_order > MAX_RING_ORDER) {
            goto out;
        }
        xen_9pdev->rings[i].ring_order = ring_order;
        /* Map the 2^ring_order pages that make up the data area. */
        xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs(
                xen_9pdev->xendev.gnttabdev,
                (1 << ring_order),
                xen_9pdev->xendev.dom,
                xen_9pdev->rings[i].intf->ref,
                PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].data) {
            goto out;
        }
        xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
        xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data +
                                       XEN_FLEX_RING_SIZE(ring_order);

        xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
        xen_9pdev->rings[i].out_cons = 0;
        xen_9pdev->rings[i].out_size = 0;
        xen_9pdev->rings[i].inprogress = false;

        xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
        if (xen_9pdev->rings[i].evtchndev == NULL) {
            goto out;
        }
        qemu_set_cloexec(xenevtchn_fd(xen_9pdev->rings[i].evtchndev));
        xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
                                            (xen_9pdev->rings[i].evtchndev,
                                             xendev->dom,
                                             xen_9pdev->rings[i].evtchn);
        if (xen_9pdev->rings[i].local_port == -1) {
            xen_pv_printf(xendev, 0,
                          "xenevtchn_bind_interdomain failed port=%d\n",
                          xen_9pdev->rings[i].evtchn);
            goto out;
        }
        xen_pv_printf(xendev, 2, "bind evtchn port %d\n",
                      xen_9pdev->rings[i].local_port);
        qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
    }

    xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model");
    xen_9pdev->path = xenstore_read_be_str(xendev, "path");
    xen_9pdev->id = s->fsconf.fsdev_id =
        g_strdup_printf("xen9p%d", xendev->dev);
    xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
    v9fs_register_transport(s, &xen_9p_transport);
    /* Synthesize an -fsdev on the fly and realize the 9p device. */
    fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
            s->fsconf.tag,
            1, NULL);
    qemu_opt_set(fsdev, "fsdriver", "local", NULL);
    qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL);
    qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL);
    qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
    qemu_fsdev_add(fsdev);
    v9fs_device_realize_common(s, NULL);

    return 0;

out:
    xen_9pfs_free(xendev);
    return -1;
}

static void xen_9pfs_alloc(struct XenDevice *xendev)
{
    xenstore_write_be_str(xendev, "versions", VERSIONS);
    xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
    xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
}
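
/*
 * Illustrative resulting xenstore layout (the exact backend path prefix
 * depends on the toolstack; shown here only as an example):
 *
 *   backend/9pfs/<frontend-domid>/<dev>/versions            = "1"
 *   backend/9pfs/<frontend-domid>/<dev>/max-rings           = "8"
 *   backend/9pfs/<frontend-domid>/<dev>/max-ring-page-order = "8"
 */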

struct XenDevOps xen_9pfs_ops = {
    .size       = sizeof(Xen9pfsDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = xen_9pfs_alloc,
    .init       = xen_9pfs_init,
    .initialise = xen_9pfs_connect,
    .disconnect = xen_9pfs_disconnect,
    .free       = xen_9pfs_free,
};
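
/*
 * Registration sketch (this happens elsewhere in QEMU; the exact call site
 * varies by version, so treat this as an assumption):
 *
 *   xen_be_register("9pfs", &xen_9pfs_ops);
 *
 * From then on the generic Xen backend code watches xenstore and invokes
 * .alloc/.init/.initialise/.disconnect/.free as the frontend walks the
 * XenbusState machine.
 */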