linux/drivers/vhost/net.c
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
                                       " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
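
/*
 * Note on the thresholds above: VHOST_NET_WEIGHT is 0x80000 bytes, i.e.
 * 512 KiB moved per handler invocation before the job is requeued.
 * VHOST_MAX_PEND bounds the number of zerocopy TX buffers in flight at
 * once, and handle_tx() below only attempts zerocopy for packets of at
 * least VHOST_GOODCOPY_LEN (256) bytes, presumably because copying short
 * packets is cheaper than pinning their pages for DMA.
 */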

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN    3
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN      2
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS   1
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN     0

#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
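
/*
 * Illustrative state machine for a zerocopy TX slot, pieced together from
 * handle_tx() and vhost_zerocopy_callback() below:
 *
 *   vq->heads[i].len = VHOST_DMA_IN_PROGRESS;   // submitted to lower device
 *   ...
 *   vq->heads[i].len = success ? VHOST_DMA_DONE_LEN
 *                              : VHOST_DMA_FAILED_LEN;  // completion callback
 *   ...
 *   VHOST_DMA_IS_DONE(vq->heads[i].len)         // true for DONE and FAILED
 *   vq->heads[i].len = VHOST_DMA_CLEAR_LEN;     // reclaimed, slot reusable
 */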

enum {
        VHOST_NET_FEATURES = VHOST_FEATURES |
                         (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
                         (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};

enum {
        VHOST_NET_VQ_RX = 0,
        VHOST_NET_VQ_TX = 1,
        VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
        /* refcount follows semantics similar to kref:
         *  0: object is released
         *  1: no outstanding ubufs
         * >1: outstanding ubufs
         */
        atomic_t refcount;
        wait_queue_head_t wait;
        struct vhost_virtqueue *vq;
};
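
/*
 * Sketch of the refcount lifecycle, as implemented by the helpers below:
 * vhost_net_ubuf_alloc() starts the count at 1; handle_tx() takes one
 * reference per zerocopy packet submitted; vhost_zerocopy_callback()
 * drops that reference on DMA completion; and
 * vhost_net_ubuf_put_and_wait() drops the initial reference and sleeps
 * until the count hits 0, i.e. until every outstanding ubuf has completed.
 */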

struct vhost_net_virtqueue {
        struct vhost_virtqueue vq;
        /* hdr is used to store the virtio header.
         * Since each iovec has >= 1 byte length, we never need more than
         * header length entries to store the header. */
        struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
        size_t vhost_hlen;
        size_t sock_hlen;
        /* vhost zerocopy support fields below: */
        /* last used idx for outstanding DMA zerocopy buffers */
        int upend_idx;
        /* first used idx for DMA done zerocopy buffers */
        int done_idx;
        /* an array of userspace buffers info */
        struct ubuf_info *ubuf_info;
        /* Reference counting for outstanding ubufs.
         * Protected by vq mutex. Writers must also take device mutex. */
        struct vhost_net_ubuf_ref *ubufs;
};

struct vhost_net {
        struct vhost_dev dev;
        struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
        struct vhost_poll poll[VHOST_NET_VQ_MAX];
        /* Number of TX packets recently submitted.
         * Protected by tx vq lock. */
        unsigned tx_packets;
        /* Number of times zerocopy TX recently failed.
         * Protected by tx vq lock. */
        unsigned tx_zcopy_err;
        /* Flush in progress. Protected by tx vq lock. */
        bool tx_flush;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void vhost_net_enable_zcopy(int vq)
{
        vhost_net_zcopy_mask |= 0x1 << vq;
}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
        struct vhost_net_ubuf_ref *ubufs;
        /* No zero copy backend? Nothing to count. */
        if (!zcopy)
                return NULL;
        ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
        if (!ubufs)
                return ERR_PTR(-ENOMEM);
        atomic_set(&ubufs->refcount, 1);
        init_waitqueue_head(&ubufs->wait);
        ubufs->vq = vq;
        return ubufs;
}

static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
        int r = atomic_sub_return(1, &ubufs->refcount);
        if (unlikely(!r))
                wake_up(&ubufs->wait);
        return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
        vhost_net_ubuf_put(ubufs);
        wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
        vhost_net_ubuf_put_and_wait(ubufs);
        kfree(ubufs);
}

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
        int i;

        for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                kfree(n->vqs[i].ubuf_info);
                n->vqs[i].ubuf_info = NULL;
        }
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
        bool zcopy;
        int i;

        for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                zcopy = vhost_net_zcopy_mask & (0x1 << i);
                if (!zcopy)
                        continue;
                n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
                                              UIO_MAXIOV, GFP_KERNEL);
                if (!n->vqs[i].ubuf_info)
                        goto err;
        }
        return 0;

err:
        vhost_net_clear_ubuf_info(n);
        return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
        int i;

        vhost_net_clear_ubuf_info(n);

        for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
                n->vqs[i].done_idx = 0;
                n->vqs[i].upend_idx = 0;
                n->vqs[i].ubufs = NULL;
                n->vqs[i].vhost_hlen = 0;
                n->vqs[i].sock_hlen = 0;
        }
}

static void vhost_net_tx_packet(struct vhost_net *net)
{
        ++net->tx_packets;
        if (net->tx_packets < 1024)
                return;
        net->tx_packets = 0;
        net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
        ++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
        /* TX flush waits for outstanding DMAs to be done.
         * Don't start new DMAs.
         */
        return !net->tx_flush &&
                net->tx_packets / 64 >= net->tx_zcopy_err;
}
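
/*
 * Worked example of the heuristic above: with tx_packets = 200 and
 * tx_zcopy_err = 2, 200 / 64 = 3 >= 2, so zerocopy stays enabled; with
 * tx_zcopy_err = 4 it would be disabled until the error rate drops.
 * vhost_net_tx_packet() resets both counters every 1024 packets, so the
 * decision reflects recent history rather than lifetime totals.
 */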

static bool vhost_sock_zcopy(struct socket *sock)
{
        return unlikely(experimental_zcopytx) &&
                sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
                          size_t len, int iov_count)
{
        int seg = 0;
        size_t size;

        while (len && seg < iov_count) {
                size = min(from->iov_len, len);
                to->iov_base = from->iov_base;
                to->iov_len = size;
                from->iov_len -= size;
                from->iov_base += size;
                len -= size;
                ++from;
                ++to;
                ++seg;
        }
        return seg;
}
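
/*
 * Example: popping a 12-byte header from from[] = {{p, 10}, {q, 100}}
 * consumes all of the first segment and 2 bytes of the second, leaving
 * from[] = {{p + 10, 0}, {q + 2, 98}}, filling to[] = {{p, 10}, {q, 2}},
 * and returning 2. copy_iovec_hdr() below does the same walk but leaves
 * from[] untouched.
 */
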
/* Copy iovec entries for len bytes from iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
                           size_t len, int iovcount)
{
        int seg = 0;
        size_t size;

        while (len && seg < iovcount) {
                size = min(from->iov_len, len);
                to->iov_base = from->iov_base;
                to->iov_len = size;
                len -= size;
                ++from;
                ++to;
                ++seg;
        }
}

/* The lower device may complete DMAs out of order. upend_idx tracks the
 * tail of the outstanding zerocopy buffers, done_idx tracks the head.
 * Once the lower device has completed a contiguous run of DMAs starting
 * at done_idx, we signal the used idx to the KVM guest.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
                                       struct vhost_virtqueue *vq)
{
        struct vhost_net_virtqueue *nvq =
                container_of(vq, struct vhost_net_virtqueue, vq);
        int i, add;
        int j = 0;

        for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
                if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
                        vhost_net_tx_err(net);
                if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
                        vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
                        ++j;
                } else
                        break;
        }
        while (j) {
                add = min(UIO_MAXIOV - nvq->done_idx, j);
                vhost_add_used_and_signal_n(vq->dev, vq,
                                            &vq->heads[nvq->done_idx], add);
                nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
                j -= add;
        }
}
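
/*
 * Wraparound example for the loop above: with UIO_MAXIOV = 1024,
 * done_idx = 1022, upend_idx = 2, and all four entries in between done,
 * j becomes 4 and the used entries are flushed in two batches:
 * add = min(1024 - 1022, 4) = 2 (slots 1022 and 1023), then done_idx
 * wraps to 0 and add = min(1024 - 0, 2) = 2 (slots 0 and 1).
 */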

static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
        struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
        struct vhost_virtqueue *vq = ubufs->vq;
        int cnt;

        rcu_read_lock_bh();

        /* Set len to mark this descriptor's buffers as DMA done. */
        vq->heads[ubuf->desc].len = success ?
                VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
        cnt = vhost_net_ubuf_put(ubufs);

        /*
         * Trigger polling thread if guest stopped submitting new buffers:
         * in this case, the refcount after decrement will eventually reach 1.
         * We also trigger polling periodically after each 16 packets
         * (the value 16 here is more or less arbitrary, it's tuned to trigger
         * less than 10% of times).
         */
        if (cnt <= 1 || !(cnt % 16))
                vhost_poll_queue(&vq->poll);

        rcu_read_unlock_bh();
}

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
        struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
        struct vhost_virtqueue *vq = &nvq->vq;
        unsigned out, in, s;
        int head;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL,
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };
        size_t len, total_len = 0;
        int err;
        size_t hdr_size;
        struct socket *sock;
        struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
        bool zcopy, zcopy_used;

        mutex_lock(&vq->mutex);
        sock = vq->private_data;
        if (!sock)
                goto out;

        vhost_disable_notify(&net->dev, vq);

        hdr_size = nvq->vhost_hlen;
        zcopy = nvq->ubufs;

        for (;;) {
                /* Release DMAs done buffers first */
                if (zcopy)
                        vhost_zerocopy_signal_used(net, vq);

                /* If more outstanding DMAs, queue the work.
                 * Handle upend_idx wrap around
                 */
                if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
                              % UIO_MAXIOV == nvq->done_idx))
                        break;

                head = vhost_get_vq_desc(vq, vq->iov,
                                         ARRAY_SIZE(vq->iov),
                                         &out, &in,
                                         NULL, NULL);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                vhost_disable_notify(&net->dev, vq);
                                continue;
                        }
                        break;
                }
                if (in) {
                        vq_err(vq, "Unexpected descriptor format for TX: "
                               "out %d, in %d\n", out, in);
                        break;
                }
                /* Skip header. TODO: support TSO. */
                s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
                msg.msg_iovlen = out;
                len = iov_length(vq->iov, out);
                /* Sanity check: zero len means the header consumed everything. */
                if (!len) {
                        vq_err(vq, "Unexpected header len for TX: "
                               "%zd expected %zd\n",
                               iov_length(nvq->hdr, s), hdr_size);
                        break;
                }

                zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
                                   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
                                      nvq->done_idx
                                   && vhost_net_tx_select_zcopy(net);

                /* use msg_control to pass vhost zerocopy ubuf info to skb */
                if (zcopy_used) {
                        struct ubuf_info *ubuf;
                        ubuf = nvq->ubuf_info + nvq->upend_idx;

                        vq->heads[nvq->upend_idx].id = head;
                        vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
                        ubuf->callback = vhost_zerocopy_callback;
                        ubuf->ctx = nvq->ubufs;
                        ubuf->desc = nvq->upend_idx;
                        msg.msg_control = ubuf;
                        msg.msg_controllen = sizeof(ubuf);
                        ubufs = nvq->ubufs;
                        atomic_inc(&ubufs->refcount);
                        nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
                } else {
                        msg.msg_control = NULL;
                        ubufs = NULL;
                }
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
                err = sock->ops->sendmsg(NULL, sock, &msg, len);
                if (unlikely(err < 0)) {
                        if (zcopy_used) {
                                vhost_net_ubuf_put(ubufs);
                                nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
                                        % UIO_MAXIOV;
                        }
                        vhost_discard_vq_desc(vq, 1);
                        break;
                }
                if (err != len)
                        pr_debug("Truncated TX packet: "
                                 " len %d != %zd\n", err, len);
                if (!zcopy_used)
                        vhost_add_used_and_signal(&net->dev, vq, head, 0);
                else
                        vhost_zerocopy_signal_used(net, vq);
                total_len += len;
                vhost_net_tx_packet(net);
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }
out:
        mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct sock *sk)
{
        struct sk_buff *head;
        int len = 0;
        unsigned long flags;

        spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
        head = skb_peek(&sk->sk_receive_queue);
        if (likely(head)) {
                len = head->len;
                if (vlan_tx_tag_present(head))
                        len += VLAN_HLEN;
        }

        spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
        return len;
}
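
/*
 * The VLAN_HLEN (4 byte) adjustment above accounts for an accelerated
 * VLAN tag that lives in skb metadata rather than in the packet data:
 * e.g. a 1500-byte frame with such a tag reports len = 1504, so
 * handle_rx() reserves enough guest buffer space for the tag to be
 * reinserted when the packet is read from the socket.
 */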

/* This is a multi-buffer version of vhost_get_vq_desc, that works if
 *      vq has read descriptors only.
 * @vq          - the relevant virtqueue
 * @datalen     - data length we'll be reading
 * @iovcount    - returned count of io vectors we fill
 * @log         - vhost log
 * @log_num     - returned number of log entries filled
 * @quota       - headcount quota, 1 for big buffer
 *      returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
                       struct vring_used_elem *heads,
                       int datalen,
                       unsigned *iovcount,
                       struct vhost_log *log,
                       unsigned *log_num,
                       unsigned int quota)
{
        unsigned int out, in;
        int seg = 0;
        int headcount = 0;
        unsigned d;
        int r, nlogs = 0;

        while (datalen > 0 && headcount < quota) {
                if (unlikely(seg >= UIO_MAXIOV)) {
                        r = -ENOBUFS;
                        goto err;
                }
                r = vhost_get_vq_desc(vq, vq->iov + seg,
                                      ARRAY_SIZE(vq->iov) - seg, &out,
                                      &in, log, log_num);
                if (unlikely(r < 0))
                        goto err;

                d = r;
                if (d == vq->num) {
                        r = 0;
                        goto err;
                }
                if (unlikely(out || in <= 0)) {
                        vq_err(vq, "unexpected descriptor format for RX: "
                                "out %d, in %d\n", out, in);
                        r = -EINVAL;
                        goto err;
                }
                if (unlikely(log)) {
                        nlogs += *log_num;
                        log += *log_num;
                }
                heads[headcount].id = d;
                heads[headcount].len = iov_length(vq->iov + seg, in);
                datalen -= heads[headcount].len;
                ++headcount;
                seg += in;
        }
        heads[headcount - 1].len += datalen;
        *iovcount = seg;
        if (unlikely(log))
                *log_num = nlogs;

        /* Detect overrun */
        if (unlikely(datalen > 0)) {
                r = UIO_MAXIOV + 1;
                goto err;
        }
        return headcount;
err:
        vhost_discard_vq_desc(vq, headcount);
        return r;
}
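
/*
 * Worked example: suppose vhost_len = 1612 bytes and the guest has posted
 * 4096-byte mergeable buffers. The first head covers the whole packet, so
 * the loop exits with headcount = 1, heads[0].len = 4096, and datalen =
 * 1612 - 4096 = -2484; the fixup "heads[headcount - 1].len += datalen"
 * then trims the used length back to the 1612 bytes actually consumed.
 * If datalen were still positive after exhausting the quota, the buffers
 * were too small, and the UIO_MAXIOV + 1 sentinel tells handle_rx() to
 * truncate and discard the packet.
 */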

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
        struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
        struct vhost_virtqueue *vq = &nvq->vq;
        unsigned uninitialized_var(in), log;
        struct vhost_log *vq_log;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL, /* FIXME: get and handle RX aux data. */
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };
        struct virtio_net_hdr_mrg_rxbuf hdr = {
                .hdr.flags = 0,
                .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        size_t total_len = 0;
        int err, mergeable;
        s16 headcount;
        size_t vhost_hlen, sock_hlen;
        size_t vhost_len, sock_len;
        struct socket *sock;

        mutex_lock(&vq->mutex);
        sock = vq->private_data;
        if (!sock)
                goto out;
        vhost_disable_notify(&net->dev, vq);

        vhost_hlen = nvq->vhost_hlen;
        sock_hlen = nvq->sock_hlen;

        vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
                vq->log : NULL;
        mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

        while ((sock_len = peek_head_len(sock->sk))) {
                sock_len += sock_hlen;
                vhost_len = sock_len + vhost_hlen;
                headcount = get_rx_bufs(vq, vq->heads, vhost_len,
                                        &in, vq_log, &log,
                                        likely(mergeable) ? UIO_MAXIOV : 1);
                /* On error, stop handling until the next kick. */
                if (unlikely(headcount < 0))
                        break;
                /* On overrun, truncate and discard */
                if (unlikely(headcount > UIO_MAXIOV)) {
                        msg.msg_iovlen = 1;
                        err = sock->ops->recvmsg(NULL, sock, &msg,
                                                 1, MSG_DONTWAIT | MSG_TRUNC);
                        pr_debug("Discarded rx packet: len %zd\n", sock_len);
                        continue;
                }
                /* OK, now we need to know about added descriptors. */
                if (!headcount) {
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                /* They have slipped one in as we were
                                 * doing that: check again. */
                                vhost_disable_notify(&net->dev, vq);
                                continue;
                        }
                        /* Nothing new?  Wait for eventfd to tell us
                         * they refilled. */
                        break;
                }
                /* We don't need to be notified again. */
                if (unlikely((vhost_hlen)))
                        /* Skip header. TODO: support TSO. */
                        move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
                else
                        /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
                         * needed because recvmsg can modify msg_iov. */
                        copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
                msg.msg_iovlen = in;
                err = sock->ops->recvmsg(NULL, sock, &msg,
                                         sock_len, MSG_DONTWAIT | MSG_TRUNC);
                /* Userspace might have consumed the packet meanwhile:
                 * it's not supposed to do this usually, but might be hard
                 * to prevent. Discard data we got (if any) and keep going. */
                if (unlikely(err != sock_len)) {
                        pr_debug("Discarded rx packet: "
                                 " len %d, expected %zd\n", err, sock_len);
                        vhost_discard_vq_desc(vq, headcount);
                        continue;
                }
                if (unlikely(vhost_hlen) &&
                    memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
                                      vhost_hlen)) {
                        vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
                               vq->iov->iov_base);
                        break;
                }
                /* TODO: Should check and handle checksum. */
                if (likely(mergeable) &&
                    memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
                                      offsetof(typeof(hdr), num_buffers),
                                      sizeof hdr.num_buffers)) {
                        vq_err(vq, "Failed num_buffers write");
                        vhost_discard_vq_desc(vq, headcount);
                        break;
                }
                vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
                                            headcount);
                if (unlikely(vq_log))
                        vhost_log_write(vq, vq_log, log, vhost_len);
                total_len += vhost_len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }
out:
        mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

        handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

        handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
        struct vhost_net *net = container_of(work, struct vhost_net,
                                             poll[VHOST_NET_VQ_TX].work);
        handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
        struct vhost_net *net = container_of(work, struct vhost_net,
                                             poll[VHOST_NET_VQ_RX].work);
        handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
        struct vhost_net *n;
        struct vhost_dev *dev;
        struct vhost_virtqueue **vqs;
        int i;

        n = kmalloc(sizeof *n, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!n) {
                n = vmalloc(sizeof *n);
                if (!n)
                        return -ENOMEM;
        }
        vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                kvfree(n);
                return -ENOMEM;
        }

        dev = &n->dev;
        vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
        vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
        n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
        n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
        for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
                n->vqs[i].ubufs = NULL;
                n->vqs[i].ubuf_info = NULL;
                n->vqs[i].upend_idx = 0;
                n->vqs[i].done_idx = 0;
                n->vqs[i].vhost_hlen = 0;
                n->vqs[i].sock_hlen = 0;
        }
        vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);

        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
        vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);

        f->private_data = n;

        return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
                                 struct vhost_virtqueue *vq)
{
        struct vhost_net_virtqueue *nvq =
                container_of(vq, struct vhost_net_virtqueue, vq);
        struct vhost_poll *poll = n->poll + (nvq - n->vqs);
        if (!vq->private_data)
                return;
        vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
                                struct vhost_virtqueue *vq)
{
        struct vhost_net_virtqueue *nvq =
                container_of(vq, struct vhost_net_virtqueue, vq);
        struct vhost_poll *poll = n->poll + (nvq - n->vqs);
        struct socket *sock;

        sock = vq->private_data;
        if (!sock)
                return 0;

        return vhost_poll_start(poll, sock->file);
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
                                        struct vhost_virtqueue *vq)
{
        struct socket *sock;

        mutex_lock(&vq->mutex);
        sock = vq->private_data;
        vhost_net_disable_vq(n, vq);
        vq->private_data = NULL;
        mutex_unlock(&vq->mutex);
        return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
                           struct socket **rx_sock)
{
        *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
        *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
        vhost_poll_flush(n->poll + index);
        vhost_poll_flush(&n->vqs[index].vq.poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
        vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
        vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
        if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
                mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                n->tx_flush = true;
                mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                /* Wait for all lower device DMAs done. */
                vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
                mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                n->tx_flush = false;
                atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
                mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
        }
}
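
/*
 * The TX flush protocol above, step by step: tx_flush is raised under the
 * TX vq mutex so vhost_net_tx_select_zcopy() stops starting new zerocopy
 * DMAs; vhost_net_ubuf_put_and_wait() drops the initial reference and
 * sleeps until every outstanding ubuf has completed; the refcount is then
 * re-armed to 1 so the same ubufs structure remains usable after the flush.
 */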

static int vhost_net_release(struct inode *inode, struct file *f)
{
        struct vhost_net *n = f->private_data;
        struct socket *tx_sock;
        struct socket *rx_sock;

        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        vhost_dev_stop(&n->dev);
        vhost_dev_cleanup(&n->dev, false);
        vhost_net_vq_reset(n);
        if (tx_sock)
                sockfd_put(tx_sock);
        if (rx_sock)
                sockfd_put(rx_sock);
        /* Make sure no callbacks are outstanding */
        synchronize_rcu_bh();
        /* We do an extra flush before freeing memory,
         * since jobs can re-queue themselves. */
        vhost_net_flush(n);
        kfree(n->dev.vqs);
        kvfree(n);
        return 0;
}

static struct socket *get_raw_socket(int fd)
{
        struct {
                struct sockaddr_ll sa;
                char  buf[MAX_ADDR_LEN];
        } uaddr;
        int uaddr_len = sizeof uaddr, r;
        struct socket *sock = sockfd_lookup(fd, &r);

        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        /* Parameter checking */
        if (sock->sk->sk_type != SOCK_RAW) {
                r = -ESOCKTNOSUPPORT;
                goto err;
        }

        r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
                               &uaddr_len, 0);
        if (r)
                goto err;

        if (uaddr.sa.sll_family != AF_PACKET) {
                r = -EPFNOSUPPORT;
                goto err;
        }
        return sock;
err:
        sockfd_put(sock);
        return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
        struct file *file = fget(fd);
        struct socket *sock;

        if (!file)
                return ERR_PTR(-EBADF);
        sock = tun_get_socket(file);
        if (!IS_ERR(sock))
                return sock;
        sock = macvtap_get_socket(file);
        if (IS_ERR(sock))
                fput(file);
        return sock;
}

static struct socket *get_socket(int fd)
{
        struct socket *sock;

        /* special case to disable backend */
        if (fd == -1)
                return NULL;
        sock = get_raw_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        sock = get_tap_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        return ERR_PTR(-ENOTSOCK);
}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
        struct socket *sock, *oldsock;
        struct vhost_virtqueue *vq;
        struct vhost_net_virtqueue *nvq;
        struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
        int r;

        mutex_lock(&n->dev.mutex);
        r = vhost_dev_check_owner(&n->dev);
        if (r)
                goto err;

        if (index >= VHOST_NET_VQ_MAX) {
                r = -ENOBUFS;
                goto err;
        }
        vq = &n->vqs[index].vq;
        nvq = &n->vqs[index];
        mutex_lock(&vq->mutex);

        /* Verify that ring has been setup correctly. */
        if (!vhost_vq_access_ok(vq)) {
                r = -EFAULT;
                goto err_vq;
        }
        sock = get_socket(fd);
        if (IS_ERR(sock)) {
                r = PTR_ERR(sock);
                goto err_vq;
        }

        /* start polling new socket */
        oldsock = vq->private_data;
        if (sock != oldsock) {
                ubufs = vhost_net_ubuf_alloc(vq,
                                             sock && vhost_sock_zcopy(sock));
                if (IS_ERR(ubufs)) {
                        r = PTR_ERR(ubufs);
                        goto err_ubufs;
                }

                vhost_net_disable_vq(n, vq);
                vq->private_data = sock;
                r = vhost_init_used(vq);
                if (r)
                        goto err_used;
                r = vhost_net_enable_vq(n, vq);
                if (r)
                        goto err_used;

                oldubufs = nvq->ubufs;
                nvq->ubufs = ubufs;

                n->tx_packets = 0;
                n->tx_zcopy_err = 0;
                n->tx_flush = false;
        }

        mutex_unlock(&vq->mutex);

        if (oldubufs) {
                vhost_net_ubuf_put_wait_and_free(oldubufs);
                mutex_lock(&vq->mutex);
                vhost_zerocopy_signal_used(n, vq);
                mutex_unlock(&vq->mutex);
        }

        if (oldsock) {
                vhost_net_flush_vq(n, index);
                sockfd_put(oldsock);
        }

        mutex_unlock(&n->dev.mutex);
        return 0;

err_used:
        vq->private_data = oldsock;
        vhost_net_enable_vq(n, vq);
        if (ubufs)
                vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
        sockfd_put(sock);
err_vq:
        mutex_unlock(&vq->mutex);
err:
        mutex_unlock(&n->dev.mutex);
        return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
        struct socket *tx_sock = NULL;
        struct socket *rx_sock = NULL;
        long err;
        struct vhost_memory *memory;

        mutex_lock(&n->dev.mutex);
        err = vhost_dev_check_owner(&n->dev);
        if (err)
                goto done;
        memory = vhost_dev_reset_owner_prepare();
        if (!memory) {
                err = -ENOMEM;
                goto done;
        }
        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        vhost_dev_reset_owner(&n->dev, memory);
        vhost_net_vq_reset(n);
done:
        mutex_unlock(&n->dev.mutex);
        if (tx_sock)
                sockfd_put(tx_sock);
        if (rx_sock)
                sockfd_put(rx_sock);
        return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
        size_t vhost_hlen, sock_hlen, hdr_len;
        int i;

        hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
                        sizeof(struct virtio_net_hdr_mrg_rxbuf) :
                        sizeof(struct virtio_net_hdr);
        if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
                /* vhost provides vnet_hdr */
                vhost_hlen = hdr_len;
                sock_hlen = 0;
        } else {
                /* socket provides vnet_hdr */
                vhost_hlen = 0;
                sock_hlen = hdr_len;
        }
        mutex_lock(&n->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&n->dev)) {
                mutex_unlock(&n->dev.mutex);
                return -EFAULT;
        }
        for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                mutex_lock(&n->vqs[i].vq.mutex);
                n->vqs[i].vq.acked_features = features;
                n->vqs[i].vhost_hlen = vhost_hlen;
                n->vqs[i].sock_hlen = sock_hlen;
                mutex_unlock(&n->vqs[i].vq.mutex);
        }
        mutex_unlock(&n->dev.mutex);
        return 0;
}
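
/*
 * Header length example: struct virtio_net_hdr is 10 bytes, and the
 * mergeable-rxbuf variant adds a 16-bit num_buffers field for 12 bytes
 * total. With VHOST_NET_F_VIRTIO_NET_HDR negotiated, vhost synthesizes
 * the header itself (vhost_hlen = 10 or 12, sock_hlen = 0); otherwise
 * the tap/macvtap socket is expected to prepend it (sock_hlen = 10 or 12,
 * vhost_hlen = 0).
 */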

static long vhost_net_set_owner(struct vhost_net *n)
{
        int r;

        mutex_lock(&n->dev.mutex);
        if (vhost_dev_has_owner(&n->dev)) {
                r = -EBUSY;
                goto out;
        }
        r = vhost_net_set_ubuf_info(n);
        if (r)
                goto out;
        r = vhost_dev_set_owner(&n->dev);
        if (r)
                vhost_net_clear_ubuf_info(n);
        vhost_net_flush(n);
out:
        mutex_unlock(&n->dev.mutex);
        return r;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
                            unsigned long arg)
{
        struct vhost_net *n = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        struct vhost_vring_file backend;
        u64 features;
        int r;

        switch (ioctl) {
        case VHOST_NET_SET_BACKEND:
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
                return vhost_net_set_backend(n, backend.index, backend.fd);
        case VHOST_GET_FEATURES:
                features = VHOST_NET_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, featurep, sizeof features))
                        return -EFAULT;
                if (features & ~VHOST_NET_FEATURES)
                        return -EOPNOTSUPP;
                return vhost_net_set_features(n, features);
        case VHOST_RESET_OWNER:
                return vhost_net_reset_owner(n);
        case VHOST_SET_OWNER:
                return vhost_net_set_owner(n);
        default:
                mutex_lock(&n->dev.mutex);
                r = vhost_dev_ioctl(&n->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&n->dev, ioctl, argp);
                else
                        vhost_net_flush(n);
                mutex_unlock(&n->dev.mutex);
                return r;
        }
}
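
/*
 * Minimal userspace setup sketch (abridged; error handling and the vring
 * configuration done through the generic VHOST_SET_MEM_TABLE and
 * VHOST_SET_VRING_* ioctls are omitted, and tap_fd is assumed to be an
 * already-open tun/macvtap descriptor):
 *
 *   int vhost_fd = open("/dev/vhost-net", O_RDWR);
 *   __u64 features;
 *
 *   ioctl(vhost_fd, VHOST_SET_OWNER, 0);
 *   ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *   ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 *   ... configure memory table and rings ...
 *   struct vhost_vring_file backend = { .index = 0, .fd = tap_fd };
 *   ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);  // index 0 = RX vq
 *   backend.index = 1;                                 // index 1 = TX vq
 *   ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
 */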

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
                                   unsigned long arg)
{
        return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
        .owner          = THIS_MODULE,
        .release        = vhost_net_release,
        .unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vhost_net_compat_ioctl,
#endif
        .open           = vhost_net_open,
        .llseek         = noop_llseek,
};

static struct miscdevice vhost_net_misc = {
        .minor = VHOST_NET_MINOR,
        .name = "vhost-net",
        .fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
        if (experimental_zcopytx)
                vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
        return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
        misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");