qemu/hw/net/xen_nic.c
<<
>>
Prefs
   1/*
   2 *  xen paravirt network card backend
   3 *
   4 *  (c) Gerd Hoffmann <kraxel@redhat.com>
   5 *
   6 *  This program is free software; you can redistribute it and/or modify
   7 *  it under the terms of the GNU General Public License as published by
   8 *  the Free Software Foundation; under version 2 of the License.
   9 *
  10 *  This program is distributed in the hope that it will be useful,
  11 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 *  GNU General Public License for more details.
  14 *
  15 *  You should have received a copy of the GNU General Public License along
  16 *  with this program; if not, see <http://www.gnu.org/licenses/>.
  17 *
  18 *  Contributions after 2012-01-13 are licensed under the terms of the
  19 *  GNU GPL, version 2 or (at your option) any later version.
  20 */
  21
  22#include "qemu/osdep.h"
  23#include <sys/socket.h>
  24#include <sys/ioctl.h>
  25#include <sys/wait.h>
  26
  27#include "hw/hw.h"
  28#include "net/net.h"
  29#include "net/checksum.h"
  30#include "net/util.h"
  31#include "hw/xen/xen_backend.h"
  32
  33#include <xen/io/netif.h>
  34
  35/* ------------------------------------------------------------- */
  36
/* Per-instance state of one Xen paravirt network backend. */
struct XenNetDev {
    struct XenDevice      xendev;  /* must be first */
    char                  *mac;          /* MAC address string read from xenstore */
    int                   tx_work;       /* nonzero: more tx requests arrived while completing */
    int                   tx_ring_ref;   /* grant ref of the frontend's tx shared ring page */
    int                   rx_ring_ref;   /* grant ref of the frontend's rx shared ring page */
    struct netif_tx_sring *txs;          /* mapped tx shared ring */
    struct netif_rx_sring *rxs;          /* mapped rx shared ring */
    netif_tx_back_ring_t  tx_ring;       /* backend-side view of the tx ring */
    netif_rx_back_ring_t  rx_ring;       /* backend-side view of the rx ring */
    NICConf               conf;
    NICState              *nic;
};
  50
  51/* ------------------------------------------------------------- */
  52
/*
 * Push a completion with status 'st' for tx request 'txp' onto the tx
 * ring, notify the frontend if it asked for an event, and bump tx_work
 * when further requests arrived meanwhile so net_tx_packets() makes
 * another pass over the ring.
 */
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id     = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info) {
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
    }
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }

    /* All requests answered so far: do the final check for stragglers. */
    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do) {
            netdev->tx_work++;
        }
    }
}
  83
  84static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING_IDX end)
  85{
  86#if 0
  87    /*
  88     * Hmm, why netback fails everything in the ring?
  89     * Should we do that even when not supporting SG and TSO?
  90     */
  91    RING_IDX cons = netdev->tx_ring.req_cons;
  92
  93    do {
  94        make_tx_response(netif, txp, NETIF_RSP_ERROR);
  95        if (cons >= end) {
  96            break;
  97        }
  98        txp = RING_GET_REQUEST(&netdev->tx_ring, cons++);
  99    } while (1);
 100    netdev->tx_ring.req_cons = cons;
 101    netif_schedule_work(netif);
 102    netif_put(netif);
 103#else
 104    net_tx_response(netdev, txp, NETIF_RSP_ERROR);
 105#endif
 106}
 107
/*
 * Drain the tx ring: for each pending request, map the granted page
 * read-only, hand the frame to the qemu network layer and complete the
 * request.  Loops until no more work is flagged (net_tx_response() sets
 * tx_work when new requests raced in during completion).
 */
static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;  /* lazily allocated bounce buffer for checksum fixup */

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((rc != rp)) {
            /* Frontend produced more than a ring's worth: malformed, stop. */
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) {
                break;
            }
            /* Copy the request out of the shared page before validating it. */
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /* should not happen in theory, we don't announce the *
             * feature-{sg,gso,whatelse} flags in xenstore (yet?) */
            if (txreq.flags & NETTXF_extra_info) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif

            /* 14 bytes == ethernet header; anything shorter is garbage. */
            if (txreq.size < 14) {
                xen_pv_printf(&netdev->xendev, 0, "bad packet size: %d\n",
                              txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            /* Single-request packets must fit inside one granted page. */
            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
                xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_pv_printf(&netdev->xendev, 3,
                          "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xengnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                           netdev->xendev.dom,
                                           txreq.gref, PROT_READ);
            if (page == NULL) {
                xen_pv_printf(&netdev->xendev, 0,
                              "error: tx gref dereference failed (%d)\n",
                             txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf) {
                    tmpbuf = g_malloc(XC_PAGE_SIZE);
                }
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size);
                qemu_send_packet(qemu_get_queue(netdev->nic), tmpbuf,
                                 txreq.size);
            } else {
                qemu_send_packet(qemu_get_queue(netdev->nic),
                                 page + txreq.offset, txreq.size);
            }
            xengnttab_unmap(netdev->xendev.gnttabdev, page, 1);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
        /* Another pass only if completions flagged freshly queued requests. */
        if (!netdev->tx_work) {
            break;
        }
        netdev->tx_work = 0;
    }
    g_free(tmpbuf);
}
 196
 197/* ------------------------------------------------------------- */
 198
/*
 * Push an rx completion for request 'req': 'size' bytes were placed at
 * 'offset' into the granted page.  A negative 'st' overrides the size
 * with an error status.  Notifies the frontend when it requested an
 * event.
 */
static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset     = offset;
    resp->flags      = flags;
    resp->id         = req->id;
    resp->status     = (int16_t)size;
    if (st < 0) {
        resp->status = (int16_t)st;  /* error status wins over the byte count */
    }

    xen_pv_printf(&netdev->xendev, 3,
                  "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }
}
 227
 228#define NET_IP_ALIGN 2
 229
/*
 * qemu network layer receive callback: deliver one frame to the guest
 * by copying it into the page granted by the next pending rx request.
 *
 * Returns 'size' on success, 0 to make qemu queue the packet (no rx
 * request available yet; flushed again from net_event()), or -1 to
 * drop it.
 */
static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size)
{
    struct XenNetDev *netdev = qemu_get_nic_opaque(nc);
    netif_rx_request_t rxreq;
    RING_IDX rc, rp;
    void *page;

    if (netdev->xendev.be_state != XenbusStateConnected) {
        return -1;
    }

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    /* No posted rx buffer (or bogus ring state): let qemu queue the packet. */
    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        return 0;
    }
    /* Frame plus alignment padding must fit in a single granted page. */
    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
        xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
        return -1;
    }

    memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
    netdev->rx_ring.req_cons = ++rc;

    page = xengnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                   netdev->xendev.dom,
                                   rxreq.gref, PROT_WRITE);
    if (page == NULL) {
        xen_pv_printf(&netdev->xendev, 0,
                      "error: rx gref dereference failed (%d)\n",
                      rxreq.gref);
        net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
        return -1;
    }
    /* NET_IP_ALIGN offset keeps the IP header 4-byte aligned in the page. */
    memcpy(page + NET_IP_ALIGN, buf, size);
    xengnttab_unmap(netdev->xendev.gnttabdev, page, 1);
    net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);

    return size;
}
 273
 274/* ------------------------------------------------------------- */
 275
/* qemu network layer hooks for this backend (receive path only). */
static NetClientInfo net_xen_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = net_rx_packet,
};
 281
 282static int net_init(struct XenDevice *xendev)
 283{
 284    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
 285
 286    /* read xenstore entries */
 287    if (netdev->mac == NULL) {
 288        netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac");
 289    }
 290
 291    /* do we have all we need? */
 292    if (netdev->mac == NULL) {
 293        return -1;
 294    }
 295
 296    if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) {
 297        return -1;
 298    }
 299
 300    netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf,
 301                               "xen", NULL, netdev);
 302
 303    snprintf(qemu_get_queue(netdev->nic)->info_str,
 304             sizeof(qemu_get_queue(netdev->nic)->info_str),
 305             "nic: xenbus vif macaddr=%s", netdev->mac);
 306
 307    /* fill info */
 308    xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1);
 309    xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0);
 310
 311    return 0;
 312}
 313
 314static int net_connect(struct XenDevice *xendev)
 315{
 316    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
 317    int rx_copy;
 318
 319    if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref",
 320                             &netdev->tx_ring_ref) == -1) {
 321        return -1;
 322    }
 323    if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref",
 324                             &netdev->rx_ring_ref) == -1) {
 325        return 1;
 326    }
 327    if (xenstore_read_fe_int(&netdev->xendev, "event-channel",
 328                             &netdev->xendev.remote_port) == -1) {
 329        return -1;
 330    }
 331
 332    if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) {
 333        rx_copy = 0;
 334    }
 335    if (rx_copy == 0) {
 336        xen_pv_printf(&netdev->xendev, 0,
 337                      "frontend doesn't support rx-copy.\n");
 338        return -1;
 339    }
 340
 341    netdev->txs = xengnttab_map_grant_ref(netdev->xendev.gnttabdev,
 342                                          netdev->xendev.dom,
 343                                          netdev->tx_ring_ref,
 344                                          PROT_READ | PROT_WRITE);
 345    if (!netdev->txs) {
 346        return -1;
 347    }
 348    netdev->rxs = xengnttab_map_grant_ref(netdev->xendev.gnttabdev,
 349                                          netdev->xendev.dom,
 350                                          netdev->rx_ring_ref,
 351                                          PROT_READ | PROT_WRITE);
 352    if (!netdev->rxs) {
 353        xengnttab_unmap(netdev->xendev.gnttabdev, netdev->txs, 1);
 354        netdev->txs = NULL;
 355        return -1;
 356    }
 357    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
 358    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);
 359
 360    xen_be_bind_evtchn(&netdev->xendev);
 361
 362    xen_pv_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
 363                  "remote port %d, local port %d\n",
 364                  netdev->tx_ring_ref, netdev->rx_ring_ref,
 365                  netdev->xendev.remote_port, netdev->xendev.local_port);
 366
 367    net_tx_packets(netdev);
 368    return 0;
 369}
 370
 371static void net_disconnect(struct XenDevice *xendev)
 372{
 373    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
 374
 375    xen_pv_unbind_evtchn(&netdev->xendev);
 376
 377    if (netdev->txs) {
 378        xengnttab_unmap(netdev->xendev.gnttabdev, netdev->txs, 1);
 379        netdev->txs = NULL;
 380    }
 381    if (netdev->rxs) {
 382        xengnttab_unmap(netdev->xendev.gnttabdev, netdev->rxs, 1);
 383        netdev->rxs = NULL;
 384    }
 385}
 386
 387static void net_event(struct XenDevice *xendev)
 388{
 389    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
 390    net_tx_packets(netdev);
 391    qemu_flush_queued_packets(qemu_get_queue(netdev->nic));
 392}
 393
 394static int net_free(struct XenDevice *xendev)
 395{
 396    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
 397
 398    if (netdev->nic) {
 399        qemu_del_nic(netdev->nic);
 400        netdev->nic = NULL;
 401    }
 402    g_free(netdev->mac);
 403    netdev->mac = NULL;
 404    return 0;
 405}
 406
 407/* ------------------------------------------------------------- */
 408
/* Backend callbacks, invoked by the generic xen backend machinery. */
struct XenDevOps xen_netdev_ops = {
    .size       = sizeof(struct XenNetDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,  /* we need a grant-table handle */
    .init       = net_init,
    .initialise    = net_connect,
    .event      = net_event,
    .disconnect = net_disconnect,
    .free       = net_free,
};
 418