linux/drivers/net/xen-netback/interface.c
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

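/* Stop the per-queue TX queue in the core network stack. This is a
 * no-op unless it is safe to queue packets and wait for the shared
 * ring to drain; vif->can_queue is presumably set during frontend
 * feature negotiation in xenbus.c.
 */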
static inline void xenvif_stop_queue(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;

        if (!queue->vif->can_queue)
                return;

        netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
}

int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
                napi_schedule(&queue->napi);

        return IRQ_HANDLED;
}

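/* NAPI poll handler. "tx" is named from the guest's point of view:
 * this processes requests the guest transmitted, via xenvif_tx_action()
 * (defined elsewhere in this driver, in netback.c). Returning less
 * than @budget tells NAPI we are done for now.
 */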
int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
        int work_done;

        /* This vif is rogue; we pretend there is nothing to do for
         * this vif to deschedule it from NAPI. The interface will be
         * turned off in thread context later.
         */
        if (unlikely(queue->vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                napi_complete(napi);
                xenvif_napi_schedule_or_enable_events(queue);
        }

        return work_done;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        xenvif_kick_thread(queue);

        return IRQ_HANDLED;
}

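/* Combined handler, used when the frontend did not negotiate
 * feature-split-event-channels and TX and RX share one event channel.
 */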
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

/* Callback to wake the queue and drain it on timeout */
static void xenvif_wake_queue_callback(unsigned long data)
{
        struct xenvif_queue *queue = (struct xenvif_queue *)data;

        if (xenvif_queue_stopped(queue)) {
                netdev_err(queue->vif->dev, "draining TX queue\n");
                queue->rx_queue_purge = true;
                xenvif_kick_thread(queue);
                xenvif_wake_queue(queue);
        }
}

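/* ndo_start_xmit: queue an skb for transmission to the guest. If the
 * shared ring may not have room for the skb, the queue is stopped and
 * a drain timer is armed so that a stuck frontend cannot block us
 * forever; the timer fires xenvif_wake_queue_callback() above.
 */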
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        u16 index;
        int min_slots_needed;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if queues are not set up */
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        /* At best we'll need one slot for the header and one for each
         * frag.
         */
        min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

        /* If the skb is GSO then we'll also need an extra slot for the
         * metadata.
         */
        if (skb_is_gso(skb))
                min_slots_needed++;

        /* If the skb can't possibly fit in the remaining slots
         * then turn off the queue to give the ring a chance to
         * drain.
         */
        if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
                queue->wake_queue.function = xenvif_wake_queue_callback;
                queue->wake_queue.data = (unsigned long)queue;
                xenvif_stop_queue(queue);
                mod_timer(&queue->wake_queue,
                        jiffies + rx_drain_timeout_jiffies);
        }

        skb_queue_tail(&queue->rx_queue, skb);
        xenvif_kick_thread(queue);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

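/* ndo_get_stats: fold the per-queue software counters into the single
 * struct net_device_stats that the core network stack reads. If no
 * queues have been set up yet, all counters read as zero.
 */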
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned long rx_bytes = 0;
        unsigned long rx_packets = 0;
        unsigned long tx_bytes = 0;
        unsigned long tx_packets = 0;
        unsigned int index;

        if (vif->queues == NULL)
                goto out;

        /* Aggregate tx and rx stats from each queue */
        for (index = 0; index < num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
                tx_bytes += queue->stats.tx_bytes;
                tx_packets += queue->stats.tx_packets;
        }

out:
        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
        vif->dev->stats.tx_packets = tx_packets;

        return &vif->dev->stats;
}

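/* Bring all queues up: enable NAPI and the per-queue interrupts, then
 * check for work that may have arrived while they were disabled.
 * Called under the rtnl lock, from xenvif_open() or
 * xenvif_carrier_on().
 */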
static void xenvif_up(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_enable(&queue->napi);
                enable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        enable_irq(queue->rx_irq);
                xenvif_napi_schedule_or_enable_events(queue);
        }
}

static void xenvif_down(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_disable(&queue->napi);
                disable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        disable_irq(queue->rx_irq);
                del_timer_sync(&queue->credit_timeout);
        }
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_up(vif);
        netif_tx_start_all_queues(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_down(vif);
        netif_tx_stop_all_queues(dev);
        return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

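/* Mask out features the frontend has not negotiated. The gso_mask and
 * gso_prefix_mask bits are presumably derived from the guest's
 * feature-gso-tcpv4/tcpv6 xenstore entries when it connects.
 */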
static netdev_features_t xenvif_fix_features(struct net_device *dev,
        netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif_stats, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif_stats, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif_stats, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS value.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif_stats, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

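/* Sum each named counter across all queues. The offsets in
 * xenvif_stats[] are byte offsets into struct xenvif_stats, so they
 * must be added to a byte pointer, not to a struct pointer.
 */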
static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int num_queues = vif->num_queues;
        int i;
        unsigned int queue_index;
        struct xenvif_stats *vif_stats;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;
                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                        vif_stats = &vif->queues[queue_index].stats;
                        /* Byte arithmetic: adding the offset to a
                         * struct pointer would scale it by the struct
                         * size and read past the stats.
                         */
                        accum += *(unsigned long *)
                                 ((void *)vif_stats + xenvif_stats[i].offset);
                }
                data[i] = accum;
        }
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link       = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats  = xenvif_get_stats,
        .ndo_open       = xenvif_open,
        .ndo_stop       = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr   = eth_validate_addr,
};

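/* Allocate and register the netdev for a new vif. The queues
 * themselves are allocated later, presumably by the xenbus code once
 * the frontend has said how many it wants; hence alloc_netdev_mq()
 * with the maximum and an initially empty vif->queues array.
 */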
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        /* Allocate a netdev with the max. supported number of queues.
         * When the guest selects the desired number, it will be updated
         * via netif_set_real_num_*_queues().
         */
        dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
                              xenvif_max_queues);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->domid  = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;
        vif->disabled = false;

        /* Start out with no queues. */
        vif->queues = NULL;
        vif->num_queues = 0;

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        dev->ethtool_ops = &xenvif_ethtool_ops;

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        memset(dev->dev_addr, 0xFF, ETH_ALEN);
        dev->dev_addr[0] &= ~0x01;

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}

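/* One-time initialisation of a queue's timers, locks, pending ring and
 * grant-mapping pages. The ~0UL credit means effectively unlimited
 * bandwidth until the toolstack configures rate limiting. Undone by
 * xenvif_deinit_queue().
 */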
int xenvif_init_queue(struct xenvif_queue *queue)
{
        int err, i;

        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec  = 0UL;
        init_timer(&queue->credit_timeout);
        queue->credit_window_start = get_jiffies_64();

        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);

        queue->pending_cons = 0;
        queue->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; ++i)
                queue->pending_ring[i] = i;

        spin_lock_init(&queue->callback_lock);
        spin_lock_init(&queue->response_lock);

        /* If ballooning is disabled, this will consume real memory, so
         * it is better to enable it. The long-term solution would be to
         * use a set of valid page descriptors, without depending on
         * ballooning.
         */
        err = alloc_xenballooned_pages(MAX_PENDING_REQS,
                                       queue->mmap_pages,
                                       false);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PENDING_REQS; i++) {
                queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        init_timer(&queue->wake_queue);

        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
                        XENVIF_NAPI_WEIGHT);

        return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        netif_carrier_on(vif->dev);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
}

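/* Connect a queue to the frontend: map the shared TX/RX rings, bind
 * the event channel(s) to IRQs (one shared channel, or a pair if
 * feature-split-event-channels was negotiated) and start the guest-rx
 * and dealloc kernel threads. The IRQs stay disabled until xenvif_up()
 * enables them.
 */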
int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
                   unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(queue->tx_irq);
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);

        err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = queue->rx_irq = err;
                disable_irq(queue->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = err;
                disable_irq(queue->tx_irq);

                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
                        goto err_tx_unbind;
                queue->rx_irq = err;
                disable_irq(queue->rx_irq);
        }

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)queue, "%s-guest-rx", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->task = task;

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)queue, "%s-dealloc", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->dealloc_task = task;

        wake_up_process(queue->task);
        wake_up_process(queue->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(queue->rx_irq, queue);
        queue->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(queue->tx_irq, queue);
        queue->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_rings(queue);
err:
        module_put(THIS_MODULE);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        netif_carrier_off(dev); /* discard queued packets */
        if (netif_running(dev))
                xenvif_down(vif);
        rtnl_unlock();
}

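/* Poll until every grant mapped for guest TX has been released. While
 * any handle is still live, the scan restarts from index 0 (i = -1
 * plus the loop increment) so earlier slots are re-checked; complain
 * if a page stays granted longer than the worst-case skb lifetime
 * computed in xenvif_free().
 */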
static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
                                      unsigned int worst_case_skb_lifetime)
{
        int i, unmap_timeout = 0;

        for (i = 0; i < MAX_PENDING_REQS; ++i) {
                if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
                        unmap_timeout++;
                        schedule_timeout(msecs_to_jiffies(1000));
                        if (unmap_timeout > worst_case_skb_lifetime &&
                            net_ratelimit())
                                netdev_err(queue->vif->dev,
                                           "Page still granted! Index: %x\n",
                                           i);
                        i = -1;
                }
        }
}

void xenvif_disconnect(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        if (netif_carrier_ok(vif->dev))
                xenvif_carrier_off(vif);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];

                if (queue->task) {
                        del_timer_sync(&queue->wake_queue);
                        kthread_stop(queue->task);
                        queue->task = NULL;
                }

                if (queue->dealloc_task) {
                        kthread_stop(queue->dealloc_task);
                        queue->dealloc_task = NULL;
                }

                if (queue->tx_irq) {
                        if (queue->tx_irq == queue->rx_irq) {
                                unbind_from_irqhandler(queue->tx_irq, queue);
                        } else {
                                unbind_from_irqhandler(queue->tx_irq, queue);
                                unbind_from_irqhandler(queue->rx_irq, queue);
                        }
                        queue->tx_irq = 0;
                }

                xenvif_unmap_frontend_rings(queue);
        }
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
        free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
        netif_napi_del(&queue->napi);
}

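/* Final teardown: wait for outstanding grants, then release the queues
 * and the netdev itself. Counterpart of xenvif_alloc().
 */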
void xenvif_free(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;
        /* Here we want to avoid timeout messages if an skb can be
         * legitimately stuck somewhere else. Realistically this could be
         * another vif's internal or QDisc queue. That other vif also has
         * this rx_drain_timeout_msecs timeout, but its timer only ditches
         * its internal queue. After that, the QDisc queue can put in worst
         * case XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that other
         * vif's internal queue, so we need several rounds of such timeouts
         * until we can be sure that no other vif still holds skbs from us.
         * We are not sending more skbs, so newly stuck packets are not
         * interesting for us here.
         */
 715         */
 716        unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
 717                DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
 718
 719        unregister_netdev(vif->dev);
 720
 721        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 722                queue = &vif->queues[queue_index];
 723                xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
 724                xenvif_deinit_queue(queue);
 725        }
 726
 727        vfree(vif->queues);
 728        vif->queues = NULL;
 729        vif->num_queues = 0;
 730
 731        free_netdev(vif->dev);
 732
 733        module_put(THIS_MODULE);
 734}
 735