linux/drivers/net/virtio_net.c
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);
static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN   128

#define VIRTNET_SEND_COMMAND_SG_MAX    2

struct virtnet_info
{
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq, *cvq;
        struct net_device *dev;
        struct napi_struct napi;
        unsigned int status;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Receive & send queues. */
        struct sk_buff_head recv;
        struct sk_buff_head send;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Chain pages by the private ptr. */
        struct page *pages;
};

struct skb_vnet_hdr {
        union {
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mhdr;
        };
        unsigned int num_sg;
};

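/* Per-skb metadata lives in the skb control buffer (skb->cb), which has
 * room for the virtio_net header plus the scatterlist entry count.  This
 * avoids a separate allocation per packet. */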
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct skb_vnet_hdr *)skb->cb;
}

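/* We keep a simple freelist of receive pages, chained through each page's
 * private field, so big-packet receive buffers can be recycled without
 * hitting the page allocator on every refill. */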
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
        page->private = (unsigned long)vi->pages;
        vi->pages = page;
}

static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
        unsigned int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                give_a_page(vi, skb_shinfo(skb)->frags[i].page);
        skb_shinfo(skb)->nr_frags = 0;
        skb->data_len = 0;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
        struct page *p = vi->pages;

        if (p)
                vi->pages = (struct page *)p->private;
        else
                p = alloc_page(gfp_mask);
        return p;
}

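/* Callback from the send virtqueue: the host has consumed at least one
 * transmitted buffer, so there may be room to queue more packets. */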
static void skb_xmit_done(struct virtqueue *svq)
{
        struct virtnet_info *vi = svq->vdev->priv;

        /* Suppress further interrupts. */
        svq->vq_ops->disable_cb(svq);

        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);
}

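/* Turn a completed receive buffer into a proper skb and hand it to the
 * network stack.  In mergeable-buffer mode a single packet may span
 * several page-sized buffers, which are gathered here into one skb's
 * fragment list; otherwise the skb is simply trimmed to the packet's
 * true length. */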
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
                        unsigned len)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        int err;
        int i;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                goto drop;
        }

        if (vi->mergeable_rx_bufs) {
                unsigned int copy;
                char *p = page_address(skb_shinfo(skb)->frags[0].page);

                if (len > PAGE_SIZE)
                        len = PAGE_SIZE;
                len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

                memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
                p += sizeof(hdr->mhdr);

                copy = len;
                if (copy > skb_tailroom(skb))
                        copy = skb_tailroom(skb);

                memcpy(skb_put(skb, copy), p, copy);

                len -= copy;

                if (!len) {
                        give_a_page(vi, skb_shinfo(skb)->frags[0].page);
                        skb_shinfo(skb)->nr_frags--;
                } else {
                        skb_shinfo(skb)->frags[0].page_offset +=
                                sizeof(hdr->mhdr) + copy;
                        skb_shinfo(skb)->frags[0].size = len;
                        skb->data_len += len;
                        skb->len += len;
                }

                while (--hdr->mhdr.num_buffers) {
                        struct sk_buff *nskb;

                        i = skb_shinfo(skb)->nr_frags;
                        if (i >= MAX_SKB_FRAGS) {
                                pr_debug("%s: packet too long %d\n", dev->name,
                                         len);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
                        if (!nskb) {
                                pr_debug("%s: rx error: %d buffers missing\n",
                                         dev->name, hdr->mhdr.num_buffers);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        __skb_unlink(nskb, &vi->recv);
                        vi->num--;

                        skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
                        skb_shinfo(nskb)->nr_frags = 0;
                        kfree_skb(nskb);

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;

                        skb_shinfo(skb)->frags[i].size = len;
                        skb_shinfo(skb)->nr_frags++;
                        skb->data_len += len;
                        skb->len += len;
                }
        } else {
                len -= sizeof(hdr->hdr);

                if (len <= MAX_PACKET_LEN)
                        trim_pages(vi, skb);

                err = pskb_trim(skb, len);
                if (err) {
                        pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
                                 len, err);
                        dev->stats.rx_dropped++;
                        goto drop;
                }
        }

        skb->truesize += skb->data_len;
        dev->stats.rx_bytes += skb->len;
        dev->stats.rx_packets++;

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb,
                                          hdr->hdr.csum_start,
                                          hdr->hdr.csum_offset))
                        goto frame_err;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: bad gso type %u.\n",
                                       dev->name, hdr->hdr.gso_type);
                        goto frame_err;
                }

                if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: zero gso size.\n",
                                       dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
drop:
        dev_kfree_skb(skb);
}

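/* Post receive buffers for the non-mergeable case: each skb gets a
 * linear area of MAX_PACKET_LEN bytes and, when big packets are
 * negotiated, up to MAX_SKB_FRAGS recycled pages.  Returns false on
 * allocation failure (OOM) so the caller can schedule a retry. */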
static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        int num, err, i;
        bool oom = false;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);
        do {
                struct skb_vnet_hdr *hdr;

                skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }

                skb_reserve(skb, NET_IP_ALIGN);
                skb_put(skb, MAX_PACKET_LEN);

                hdr = skb_vnet_hdr(skb);
                sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

                if (vi->big_packets) {
                        for (i = 0; i < MAX_SKB_FRAGS; i++) {
                                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                                f->page = get_a_page(vi, gfp);
                                if (!f->page)
                                        break;

                                f->page_offset = 0;
                                f->size = PAGE_SIZE;

                                skb->data_len += PAGE_SIZE;
                                skb->len += PAGE_SIZE;

                                skb_shinfo(skb)->nr_frags++;
                        }
                }

                num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
                if (err < 0) {
                        skb_unlink(skb, &vi->recv);
                        trim_pages(vi, skb);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        } while (err >= num);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
        return !oom;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct scatterlist sg[1];
        int err;
        bool oom = false;

        if (!vi->mergeable_rx_bufs)
                return try_fill_recv_maxbufs(vi, gfp);

        do {
                skb_frag_t *f;

                skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }

                skb_reserve(skb, NET_IP_ALIGN);

                f = &skb_shinfo(skb)->frags[0];
                f->page = get_a_page(vi, gfp);
                if (!f->page) {
                        oom = true;
                        kfree_skb(skb);
                        break;
                }

                f->page_offset = 0;
                f->size = PAGE_SIZE;

                skb_shinfo(skb)->nr_frags++;

                sg_init_one(sg, page_address(f->page), PAGE_SIZE);
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
                if (err < 0) {
                        skb_unlink(skb, &vi->recv);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        } while (err > 0);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
        return !oom;
}

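/* Callback from the receive virtqueue: a buffer has been filled in by
 * the host.  All real work happens in NAPI context. */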
static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI; suppress further interrupts if successful. */
        if (napi_schedule_prep(&vi->napi)) {
                rvq->vq_ops->disable_cb(rvq);
                __napi_schedule(&vi->napi);
        }
}

static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi;
        bool still_empty;

        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
        try_fill_recv(vi, GFP_KERNEL);
        still_empty = (vi->num == 0);
        napi_enable(&vi->napi);

        /* In theory, this can happen: if we don't get any buffers in,
         * we will *never* try to fill again. */
        if (still_empty)
                schedule_delayed_work(&vi->refill, HZ/2);
}

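/* NAPI poll: drain up to @budget completed receive buffers, top the
 * ring back up if it has fallen below half full, and re-enable receive
 * interrupts only when we run out of completed buffers before
 * exhausting the budget.  The enable_cb/reschedule dance closes the
 * race where the host adds a buffer between our last get_buf and the
 * interrupt re-enable. */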
static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        struct sk_buff *skb = NULL;
        unsigned int len, received = 0;

again:
        while (received < budget &&
               (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
                __skb_unlink(skb, &vi->recv);
                receive_skb(vi->dev, skb, len);
                vi->num--;
                received++;
        }

        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        /* Out of packets? */
        if (received < budget) {
                napi_complete(napi);
                if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
                    && napi_schedule_prep(napi)) {
                        vi->rvq->vq_ops->disable_cb(vi->rvq);
                        __napi_schedule(napi);
                        goto again;
                }
        }

        return received;
}

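/* Reclaim transmitted skbs from the send virtqueue.  Returns the total
 * number of scatterlist entries freed, which is the unit the transmit
 * path uses to judge remaining ring capacity. */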
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        unsigned int len, tot_sgs = 0;

        while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
                __skb_unlink(skb, &vi->send);
                vi->dev->stats.tx_bytes += skb->len;
                vi->dev->stats.tx_packets++;
                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                dev_kfree_skb_any(skb);
        }
        return tot_sgs;
}

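/* Build the virtio_net header for an outgoing skb (checksum and GSO
 * metadata), prepend it to the scatterlist, and post the whole thing to
 * the send virtqueue.  add_buf here returns the ring capacity remaining,
 * or a negative errno if the buffer could not be added. */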
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
                hdr->hdr.csum_offset = skb->csum_offset;
        } else {
                hdr->hdr.flags = 0;
                hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr.hdr_len = skb_headlen(skb);
                hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
        }

        hdr->mhdr.num_buffers = 0;

        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
                sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
        else
                sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

        hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
        return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
}

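/* Transmit path.  Completed skbs are reclaimed lazily here rather than
 * in the interrupt callback, and the queue is stopped early when fewer
 * than 2+MAX_SKB_FRAGS slots remain, since the next packet might need
 * that many. */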
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int capacity;

again:
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);

        /* Try to transmit */
        capacity = xmit_skb(vi, skb);

        /* This can happen with OOM and indirect buffers. */
        if (unlikely(capacity < 0)) {
                netif_stop_queue(dev);
                dev_warn(&dev->dev, "Unexpected full queue\n");
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        vi->svq->vq_ops->disable_cb(vi->svq);
                        netif_start_queue(dev);
                        goto again;
                }
                return NETDEV_TX_BUSY;
        }
        vi->svq->vq_ops->kick(vi->svq);

        /*
         * Put new one in send queue.  You'd expect we'd need this before
         * xmit_skb calls add_buf(), since the callback can be triggered
         * immediately after that.  But since the callback just triggers
         * another call back here, normal network xmit locking prevents the
         * race.
         */
        __skb_queue_head(&vi->send, skb);

        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
        nf_reset(skb);

        /* Apparently nice girls don't return TX_BUSY; stop the queue
         * before it gets out of hand.  Naturally, this wastes entries. */
        if (capacity < 2+MAX_SKB_FRAGS) {
                netif_stop_queue(dev);
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        /* More just got used, free them then recheck. */
                        capacity += free_old_xmit_skbs(vi);
                        if (capacity >= 2+MAX_SKB_FRAGS) {
                                netif_start_queue(dev);
                                vi->svq->vq_ops->disable_cb(vi->svq);
                        }
                }
        }

        return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;
        int ret;

        ret = eth_mac_addr(dev, p);
        if (ret)
                return ret;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
                vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_schedule(&vi->napi);
}
#endif

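/* Bring the interface up.  The receive ring was already filled at probe
 * time, so all that is needed is to enable NAPI and catch up on any
 * buffers the host completed while we weren't listening. */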
static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_enable(&vi->napi);

        /* If all buffers were filled by the other side before we enabled
         * NAPI, we won't get another interrupt, so process any outstanding
         * packets now.  virtnet_poll wants to re-enable the queue, so we
         * disable it here.  We synchronize against interrupts via
         * NAPI_STATE_SCHED. */
        if (napi_schedule_prep(&vi->napi)) {
                vi->rvq->vq_ops->disable_cb(vi->rvq);
                __napi_schedule(&vi->napi);
        }
        return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *data, int out, int in)
{
        struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
        unsigned int tmp;
        int i;

        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
                (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

        out++; /* Add header */
        in++; /* Add return status */

        ctrl.class = class;
        ctrl.cmd = cmd;

        sg_init_table(sg, out + in);

        sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
        for_each_sg(data, s, out + in - 2, i)
                sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
        sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

        BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);

        vi->cvq->vq_ops->kick(vi->cvq);

        /*
         * Spin for a response; the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
                cpu_relax();

        return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_disable(&vi->napi);

        return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
                return -ENOSYS;

        return ethtool_op_set_tx_hw_csum(dev, data);
}

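/* Push the current promiscuous/allmulti flags and the unicast and
 * multicast address lists to the host over the control virtqueue.  Both
 * MAC lists travel in a single buffer: the unicast entries and their
 * count up front, the multicast entries and their count behind them. */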
static void virtnet_set_rx_mode(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
        struct dev_addr_list *addr;
        struct netdev_hw_addr *ha;
        void *buf;
        int i;

        /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;

        promisc = ((dev->flags & IFF_PROMISC) != 0);
        allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

        sg_init_one(sg, &promisc, sizeof(promisc));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");

        sg_init_one(sg, &allmulti, sizeof(allmulti));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");

        /* MAC filter - use one buffer for both lists */
        mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
                                 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        if (!buf) {
                dev_warn(&dev->dev, "No memory for MAC address buffer\n");
                return;
        }

        sg_init_table(sg, 2);

        /* Store the unicast list and count in the front of the buffer */
        mac_data->entries = dev->uc.count;
        i = 0;
        list_for_each_entry(ha, &dev->uc.list, list)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[0], mac_data,
                   sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));

        /* multicast list and count fill the end */
        mac_data = (void *)&mac_data->macs[dev->uc.count][0];

        mac_data->entries = dev->mc_count;
        addr = dev->mc_list;
        for (i = 0; i < dev->mc_count; i++, addr = addr->next)
                memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);

        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                  sg, 2, 0))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

        kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_tx_csum = virtnet_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .set_tso = ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops virtnet_netdev = {
        .ndo_open            = virtnet_open,
        .ndo_stop            = virtnet_close,
        .ndo_start_xmit      = start_xmit,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = virtnet_set_mac_address,
        .ndo_set_rx_mode     = virtnet_set_rx_mode,
        .ndo_change_mtu      = virtnet_change_mtu,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
#endif
};

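/* Re-read the link status from config space and propagate any change to
 * the core networking stack via the carrier state. */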
static void virtnet_update_status(struct virtnet_info *vi)
{
        u16 v;

        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
                return;

        vi->vdev->config->get(vi->vdev,
                              offsetof(struct virtio_net_config, status),
                              &v, sizeof(v));

        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;

        if (vi->status == v)
                return;

        vi->status = v;

        if (vi->status & VIRTIO_NET_S_LINK_UP) {
                netif_carrier_on(vi->dev);
                netif_wake_queue(vi->dev);
        } else {
                netif_carrier_off(vi->dev);
                netif_stop_queue(vi->dev);
        }
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_update_status(vi);
}

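/* Device probe: map negotiated virtio features onto netdev feature
 * flags, find the two (or three, with a control queue) virtqueues,
 * register the net device and pre-fill the receive ring.  Failure
 * paths unwind in reverse order. */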
static int virtnet_probe(struct virtio_device *vdev)
{
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
        const char *names[] = { "input", "output", "control" };
        int nvqs;

        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
        if (!dev)
                return -ENOMEM;

        /* Set up network device as normal. */
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;
        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);

        /* Do we support "hardware" checksums? */
        if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                        dev->features |= NETIF_F_TSO;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
                        dev->features |= NETIF_F_TSO6;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->features |= NETIF_F_TSO_ECN;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                        dev->features |= NETIF_F_UFO;
        }

        /* Configuration may specify what MAC to use.  Otherwise random. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
                vdev->config->get(vdev,
                                  offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);
        } else
                random_ether_addr(dev->dev_addr);

        /* Set up our device-specific information */
        vi = netdev_priv(dev);
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
        vdev->priv = vi;
        vi->pages = NULL;
        INIT_DELAYED_WORK(&vi->refill, refill_work);

        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
            || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;

        /* We expect two virtqueues, receive then send,
         * and optionally control. */
        nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

        err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
        if (err)
                goto free;

        vi->rvq = vqs[0];
        vi->svq = vqs[1];

        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
                vi->cvq = vqs[2];

                if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
                        dev->features |= NETIF_F_HW_VLAN_FILTER;
        }

        /* Initialize our empty receive and send queues. */
        skb_queue_head_init(&vi->recv);
        skb_queue_head_init(&vi->send);

        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
                goto free_vqs;
        }

        /* Last of all, set up some receive buffers. */
        try_fill_recv(vi, GFP_KERNEL);

        /* If we didn't even get one input buffer, we're useless. */
        if (vi->num == 0) {
                err = -ENOMEM;
                goto unregister;
        }

        vi->status = VIRTIO_NET_S_LINK_UP;
        virtnet_update_status(vi);
        netif_carrier_on(dev);

        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;

unregister:
        unregister_netdev(dev);
        cancel_delayed_work_sync(&vi->refill);
free_vqs:
        vdev->config->del_vqs(vdev);
free:
        free_netdev(dev);
        return err;
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        struct sk_buff *skb;

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        /* Free our skbs in send and recv queues, if any. */
        while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
                kfree_skb(skb);
                vi->num--;
        }
        __skb_queue_purge(&vi->send);

        BUG_ON(vi->num != 0);

        unregister_netdev(vi->dev);
        cancel_delayed_work_sync(&vi->refill);

        vdev->config->del_vqs(vi->vdev);

        while (vi->pages)
                __free_pages(get_a_page(vi, GFP_KERNEL), 0);

        free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
        .probe =        virtnet_probe,
        .remove =       __devexit_p(virtnet_remove),
        .config_changed = virtnet_config_changed,
};

static int __init init(void)
{
        return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");