linux/drivers/net/ethernet/intel/ixgb/ixgb_main.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2008 Intel Corporation. */
   3
   4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   5
   6#include <linux/prefetch.h>
   7#include "ixgb.h"
   8
   9char ixgb_driver_name[] = "ixgb";
  10static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
  11
  12static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
  13
  14#define IXGB_CB_LENGTH 256
  15static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
  16module_param(copybreak, uint, 0644);
  17MODULE_PARM_DESC(copybreak,
  18        "Maximum size of packet that is copied to a new buffer on receive");
  19
  20/* ixgb_pci_tbl - PCI Device ID Table
  21 *
  22 * Wildcard entries (PCI_ANY_ID) should come last
  23 * Last entry must be all 0s
  24 *
  25 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  26 *   Class, Class Mask, private data (not used) }
  27 */
  28static const struct pci_device_id ixgb_pci_tbl[] = {
  29        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
  30         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
  31        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
  32         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
  33        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
  34         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
  35        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
  36         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
  37
  38        /* required last entry */
  39        {0,}
  40};
  41
  42MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
  43
  44/* Local Function Prototypes */
  45static int ixgb_init_module(void);
  46static void ixgb_exit_module(void);
  47static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  48static void ixgb_remove(struct pci_dev *pdev);
  49static int ixgb_sw_init(struct ixgb_adapter *adapter);
  50static int ixgb_open(struct net_device *netdev);
  51static int ixgb_close(struct net_device *netdev);
  52static void ixgb_configure_tx(struct ixgb_adapter *adapter);
  53static void ixgb_configure_rx(struct ixgb_adapter *adapter);
  54static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
  55static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
  56static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
  57static void ixgb_set_multi(struct net_device *netdev);
  58static void ixgb_watchdog(struct timer_list *t);
  59static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
  60                                   struct net_device *netdev);
  61static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
  62static int ixgb_set_mac(struct net_device *netdev, void *p);
  63static irqreturn_t ixgb_intr(int irq, void *data);
  64static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
  65
  66static int ixgb_clean(struct napi_struct *, int);
  67static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
  68static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
  69
  70static void ixgb_tx_timeout(struct net_device *dev, unsigned int txqueue);
  71static void ixgb_tx_timeout_task(struct work_struct *work);
  72
  73static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
  74static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
  75static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
  76                                __be16 proto, u16 vid);
  77static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
  78                                 __be16 proto, u16 vid);
  79static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
  80
  81static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
  82                                               pci_channel_state_t state);
  83static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
  84static void ixgb_io_resume(struct pci_dev *pdev);
  85
  86static const struct pci_error_handlers ixgb_err_handler = {
  87        .error_detected = ixgb_io_error_detected,
  88        .slot_reset = ixgb_io_slot_reset,
  89        .resume = ixgb_io_resume,
  90};
  91
  92static struct pci_driver ixgb_driver = {
  93        .name     = ixgb_driver_name,
  94        .id_table = ixgb_pci_tbl,
  95        .probe    = ixgb_probe,
  96        .remove   = ixgb_remove,
  97        .err_handler = &ixgb_err_handler
  98};
  99
 100MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 101MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
 102MODULE_LICENSE("GPL v2");
 103
 104#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 105static int debug = -1;
 106module_param(debug, int, 0);
 107MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
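    /*
     * Example (for illustration): netif_msg_init() returns DEFAULT_MSG_ENABLE
     * for an out-of-range value such as the -1 default, 0 for debug=0, and
     * (1 << debug) - 1 otherwise, so "modprobe ixgb debug=3" enables
     * NETIF_MSG_DRV, NETIF_MSG_PROBE and NETIF_MSG_LINK.
     */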
 108
 109/**
 110 * ixgb_init_module - Driver Registration Routine
 111 *
 112 * ixgb_init_module is the first routine called when the driver is
 113 * loaded. All it does is register with the PCI subsystem.
 114 **/
 115
 116static int __init
 117ixgb_init_module(void)
 118{
 119        pr_info("%s\n", ixgb_driver_string);
 120        pr_info("%s\n", ixgb_copyright);
 121
 122        return pci_register_driver(&ixgb_driver);
 123}
 124
 125module_init(ixgb_init_module);
 126
 127/**
 128 * ixgb_exit_module - Driver Exit Cleanup Routine
 129 *
 130 * ixgb_exit_module is called just before the driver is removed
 131 * from memory.
 132 **/
 133
 134static void __exit
 135ixgb_exit_module(void)
 136{
 137        pci_unregister_driver(&ixgb_driver);
 138}
 139
 140module_exit(ixgb_exit_module);
 141
 142/**
 143 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 144 * @adapter: board private structure
 145 **/
 146
 147static void
 148ixgb_irq_disable(struct ixgb_adapter *adapter)
 149{
 150        IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
 151        IXGB_WRITE_FLUSH(&adapter->hw);
 152        synchronize_irq(adapter->pdev->irq);
 153}
 154
 155/**
 156 * ixgb_irq_enable - Enable default interrupt generation settings
 157 * @adapter: board private structure
 158 **/
 159
 160static void
 161ixgb_irq_enable(struct ixgb_adapter *adapter)
 162{
 163        u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
 164                  IXGB_INT_TXDW | IXGB_INT_LSC;
 165        if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
 166                val |= IXGB_INT_GPI0;
 167        IXGB_WRITE_REG(&adapter->hw, IMS, val);
 168        IXGB_WRITE_FLUSH(&adapter->hw);
 169}
 170
 171int
 172ixgb_up(struct ixgb_adapter *adapter)
 173{
 174        struct net_device *netdev = adapter->netdev;
 175        int err, irq_flags = IRQF_SHARED;
 176        int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
 177        struct ixgb_hw *hw = &adapter->hw;
 178
 179        /* hardware has been reset, we need to reload some things */
 180
 181        ixgb_rar_set(hw, netdev->dev_addr, 0);
 182        ixgb_set_multi(netdev);
 183
 184        ixgb_restore_vlan(adapter);
 185
 186        ixgb_configure_tx(adapter);
 187        ixgb_setup_rctl(adapter);
 188        ixgb_configure_rx(adapter);
 189        ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));
 190
 191        /* disable interrupts and get the hardware into a known state */
 192        IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
 193
 194        /* only enable MSI if bus is in PCI-X mode */
 195        if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
 196                err = pci_enable_msi(adapter->pdev);
 197                if (!err) {
 198                        adapter->have_msi = true;
 199                        irq_flags = 0;
 200                }
 201                /* proceed to try to request regular interrupt */
 202        }
 203
 204        err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
 205                          netdev->name, netdev);
 206        if (err) {
 207                if (adapter->have_msi)
 208                        pci_disable_msi(adapter->pdev);
 209                netif_err(adapter, probe, adapter->netdev,
 210                  "Unable to allocate interrupt, Error: %d\n", err);
 211                return err;
 212        }
 213
 214        if ((hw->max_frame_size != max_frame) ||
 215                (hw->max_frame_size !=
 216                (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
 217
 218                hw->max_frame_size = max_frame;
 219
 220                IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
 221
 222                if (hw->max_frame_size >
 223                   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
 224                        u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
 225
 226                        if (!(ctrl0 & IXGB_CTRL0_JFE)) {
 227                                ctrl0 |= IXGB_CTRL0_JFE;
 228                                IXGB_WRITE_REG(hw, CTRL0, ctrl0);
 229                        }
 230                }
 231        }
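            /* Worked example for the block above (illustrative): with the
             * default 1500-byte MTU, max_frame = 1500 + ENET_HEADER_SIZE (14)
             * + ENET_FCS_LENGTH (4) = 1518, which does not exceed 1514 + 4,
             * so jumbo-frame mode (CTRL0.JFE) stays off; a 9000-byte MTU
             * gives 9018 and turns it on. */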
 232
 233        clear_bit(__IXGB_DOWN, &adapter->flags);
 234
 235        napi_enable(&adapter->napi);
 236        ixgb_irq_enable(adapter);
 237
 238        netif_wake_queue(netdev);
 239
 240        mod_timer(&adapter->watchdog_timer, jiffies);
 241
 242        return 0;
 243}
 244
 245void
 246ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
 247{
 248        struct net_device *netdev = adapter->netdev;
 249
 250        /* prevent the interrupt handler from restarting watchdog */
 251        set_bit(__IXGB_DOWN, &adapter->flags);
 252
 253        netif_carrier_off(netdev);
 254
 255        napi_disable(&adapter->napi);
 256        /* waiting for NAPI to complete can re-enable interrupts */
 257        ixgb_irq_disable(adapter);
 258        free_irq(adapter->pdev->irq, netdev);
 259
 260        if (adapter->have_msi)
 261                pci_disable_msi(adapter->pdev);
 262
 263        if (kill_watchdog)
 264                del_timer_sync(&adapter->watchdog_timer);
 265
 266        adapter->link_speed = 0;
 267        adapter->link_duplex = 0;
 268        netif_stop_queue(netdev);
 269
 270        ixgb_reset(adapter);
 271        ixgb_clean_tx_ring(adapter);
 272        ixgb_clean_rx_ring(adapter);
 273}
 274
 275void
 276ixgb_reset(struct ixgb_adapter *adapter)
 277{
 278        struct ixgb_hw *hw = &adapter->hw;
 279
 280        ixgb_adapter_stop(hw);
 281        if (!ixgb_init_hw(hw))
 282                netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");
 283
 284        /* restore frame size information */
 285        IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
 286        if (hw->max_frame_size >
 287            IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
 288                u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
 289                if (!(ctrl0 & IXGB_CTRL0_JFE)) {
 290                        ctrl0 |= IXGB_CTRL0_JFE;
 291                        IXGB_WRITE_REG(hw, CTRL0, ctrl0);
 292                }
 293        }
 294}
 295
 296static netdev_features_t
 297ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
 298{
 299        /*
 300         * Tx VLAN insertion does not work per HW design when Rx stripping is
 301         * disabled.
 302         */
 303        if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
 304                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 305
 306        return features;
 307}
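    /*
     * Illustrative effect of the dependency above (hypothetical interface
     * name):
     *
     *   ethtool -K eth0 rxvlan off
     *
     * also turns off tx-vlan-offload, because the core calls
     * ndo_fix_features() before committing the new feature set.
     */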
 308
 309static int
 310ixgb_set_features(struct net_device *netdev, netdev_features_t features)
 311{
 312        struct ixgb_adapter *adapter = netdev_priv(netdev);
 313        netdev_features_t changed = features ^ netdev->features;
 314
 315        if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
 316                return 0;
 317
 318        adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 319
 320        if (netif_running(netdev)) {
 321                ixgb_down(adapter, true);
 322                ixgb_up(adapter);
 323                ixgb_set_speed_duplex(netdev);
 324        } else
 325                ixgb_reset(adapter);
 326
 327        return 0;
 328}
 329
 330
 331static const struct net_device_ops ixgb_netdev_ops = {
 332        .ndo_open               = ixgb_open,
 333        .ndo_stop               = ixgb_close,
 334        .ndo_start_xmit         = ixgb_xmit_frame,
 335        .ndo_set_rx_mode        = ixgb_set_multi,
 336        .ndo_validate_addr      = eth_validate_addr,
 337        .ndo_set_mac_address    = ixgb_set_mac,
 338        .ndo_change_mtu         = ixgb_change_mtu,
 339        .ndo_tx_timeout         = ixgb_tx_timeout,
 340        .ndo_vlan_rx_add_vid    = ixgb_vlan_rx_add_vid,
 341        .ndo_vlan_rx_kill_vid   = ixgb_vlan_rx_kill_vid,
 342        .ndo_fix_features       = ixgb_fix_features,
 343        .ndo_set_features       = ixgb_set_features,
 344};
 345
 346/**
 347 * ixgb_probe - Device Initialization Routine
 348 * @pdev: PCI device information struct
 349 * @ent: entry in ixgb_pci_tbl
 350 *
 351 * Returns 0 on success, negative on failure
 352 *
 353 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 354 * The OS initialization, configuring of the adapter private structure,
 355 * and a hardware reset occur.
 356 **/
 357
 358static int
 359ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 360{
 361        struct net_device *netdev = NULL;
 362        struct ixgb_adapter *adapter;
 363        static int cards_found;
 364        int pci_using_dac;
 365        int i;
 366        int err;
 367
 368        err = pci_enable_device(pdev);
 369        if (err)
 370                return err;
 371
 372        pci_using_dac = 0;
 373        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 374        if (!err) {
 375                pci_using_dac = 1;
 376        } else {
 377                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 378                if (err) {
 379                        pr_err("No usable DMA configuration, aborting\n");
 380                        goto err_dma_mask;
 381                }
 382        }
 383
 384        err = pci_request_regions(pdev, ixgb_driver_name);
 385        if (err)
 386                goto err_request_regions;
 387
 388        pci_set_master(pdev);
 389
 390        netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
 391        if (!netdev) {
 392                err = -ENOMEM;
 393                goto err_alloc_etherdev;
 394        }
 395
 396        SET_NETDEV_DEV(netdev, &pdev->dev);
 397
 398        pci_set_drvdata(pdev, netdev);
 399        adapter = netdev_priv(netdev);
 400        adapter->netdev = netdev;
 401        adapter->pdev = pdev;
 402        adapter->hw.back = adapter;
 403        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 404
 405        adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
 406        if (!adapter->hw.hw_addr) {
 407                err = -EIO;
 408                goto err_ioremap;
 409        }
 410
 411        for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
 412                if (pci_resource_len(pdev, i) == 0)
 413                        continue;
 414                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
 415                        adapter->hw.io_base = pci_resource_start(pdev, i);
 416                        break;
 417                }
 418        }
 419
 420        netdev->netdev_ops = &ixgb_netdev_ops;
 421        ixgb_set_ethtool_ops(netdev);
 422        netdev->watchdog_timeo = 5 * HZ;
 423        netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
 424
 425        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 426
 427        adapter->bd_number = cards_found;
 428        adapter->link_speed = 0;
 429        adapter->link_duplex = 0;
 430
 431        /* setup the private structure */
 432
 433        err = ixgb_sw_init(adapter);
 434        if (err)
 435                goto err_sw_init;
 436
 437        netdev->hw_features = NETIF_F_SG |
 438                           NETIF_F_TSO |
 439                           NETIF_F_HW_CSUM |
 440                           NETIF_F_HW_VLAN_CTAG_TX |
 441                           NETIF_F_HW_VLAN_CTAG_RX;
 442        netdev->features = netdev->hw_features |
 443                           NETIF_F_HW_VLAN_CTAG_FILTER;
 444        netdev->hw_features |= NETIF_F_RXCSUM;
 445
 446        if (pci_using_dac) {
 447                netdev->features |= NETIF_F_HIGHDMA;
 448                netdev->vlan_features |= NETIF_F_HIGHDMA;
 449        }
 450
 451        /* MTU range: 68 - 16114 */
 452        netdev->min_mtu = ETH_MIN_MTU;
 453        netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
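            /* i.e. IXGB_MAX_JUMBO_FRAME_SIZE (0x3F00 = 16128) - ETH_HLEN (14)
             * = 16114, matching the range noted above */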
 454
 455        /* make sure the EEPROM is good */
 456
 457        if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
 458                netif_err(adapter, probe, adapter->netdev,
 459                          "The EEPROM Checksum Is Not Valid\n");
 460                err = -EIO;
 461                goto err_eeprom;
 462        }
 463
 464        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
 465
 466        if (!is_valid_ether_addr(netdev->dev_addr)) {
 467                netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
 468                err = -EIO;
 469                goto err_eeprom;
 470        }
 471
 472        adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
 473
 474        timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0);
 475
 476        INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
 477
 478        strcpy(netdev->name, "eth%d");
 479        err = register_netdev(netdev);
 480        if (err)
 481                goto err_register;
 482
 483        /* carrier off reporting is important to ethtool even BEFORE open */
 484        netif_carrier_off(netdev);
 485
 486        netif_info(adapter, probe, adapter->netdev,
 487                   "Intel(R) PRO/10GbE Network Connection\n");
 488        ixgb_check_options(adapter);
 489        /* reset the hardware with the new settings */
 490
 491        ixgb_reset(adapter);
 492
 493        cards_found++;
 494        return 0;
 495
 496err_register:
 497err_sw_init:
 498err_eeprom:
 499        iounmap(adapter->hw.hw_addr);
 500err_ioremap:
 501        free_netdev(netdev);
 502err_alloc_etherdev:
 503        pci_release_regions(pdev);
 504err_request_regions:
 505err_dma_mask:
 506        pci_disable_device(pdev);
 507        return err;
 508}
 509
 510/**
 511 * ixgb_remove - Device Removal Routine
 512 * @pdev: PCI device information struct
 513 *
 514 * ixgb_remove is called by the PCI subsystem to alert the driver
 516 * that it should release a PCI device.  This could be caused by a
 516 * Hot-Plug event, or because the driver is going to be removed from
 517 * memory.
 518 **/
 519
 520static void
 521ixgb_remove(struct pci_dev *pdev)
 522{
 523        struct net_device *netdev = pci_get_drvdata(pdev);
 524        struct ixgb_adapter *adapter = netdev_priv(netdev);
 525
 526        cancel_work_sync(&adapter->tx_timeout_task);
 527
 528        unregister_netdev(netdev);
 529
 530        iounmap(adapter->hw.hw_addr);
 531        pci_release_regions(pdev);
 532
 533        free_netdev(netdev);
 534        pci_disable_device(pdev);
 535}
 536
 537/**
 538 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 539 * @adapter: board private structure to initialize
 540 *
 541 * ixgb_sw_init initializes the Adapter private data structure.
 542 * Fields are initialized based on PCI device information and
 543 * OS network device settings (MTU size).
 544 **/
 545
 546static int
 547ixgb_sw_init(struct ixgb_adapter *adapter)
 548{
 549        struct ixgb_hw *hw = &adapter->hw;
 550        struct net_device *netdev = adapter->netdev;
 551        struct pci_dev *pdev = adapter->pdev;
 552
 553        /* PCI config space info */
 554
 555        hw->vendor_id = pdev->vendor;
 556        hw->device_id = pdev->device;
 557        hw->subsystem_vendor_id = pdev->subsystem_vendor;
 558        hw->subsystem_id = pdev->subsystem_device;
 559
 560        hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
 561        adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
 562
 563        if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
 564            (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
 565            (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
 566            (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
 567                hw->mac_type = ixgb_82597;
 568        else {
 569                /* should never have loaded on this device */
 570                netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
 571        }
 572
 573        /* enable flow control to be programmed */
 574        hw->fc.send_xon = 1;
 575
 576        set_bit(__IXGB_DOWN, &adapter->flags);
 577        return 0;
 578}
 579
 580/**
 581 * ixgb_open - Called when a network interface is made active
 582 * @netdev: network interface device structure
 583 *
 584 * Returns 0 on success, negative value on failure
 585 *
 586 * The open entry point is called when a network interface is made
 587 * active by the system (IFF_UP).  At this point all resources needed
 588 * for transmit and receive operations are allocated, the interrupt
 589 * handler is registered with the OS, the watchdog timer is started,
 590 * and the stack is notified that the interface is ready.
 591 **/
 592
 593static int
 594ixgb_open(struct net_device *netdev)
 595{
 596        struct ixgb_adapter *adapter = netdev_priv(netdev);
 597        int err;
 598
 599        /* allocate transmit descriptors */
 600        err = ixgb_setup_tx_resources(adapter);
 601        if (err)
 602                goto err_setup_tx;
 603
 604        netif_carrier_off(netdev);
 605
 606        /* allocate receive descriptors */
 607
 608        err = ixgb_setup_rx_resources(adapter);
 609        if (err)
 610                goto err_setup_rx;
 611
 612        err = ixgb_up(adapter);
 613        if (err)
 614                goto err_up;
 615
 616        netif_start_queue(netdev);
 617
 618        return 0;
 619
 620err_up:
 621        ixgb_free_rx_resources(adapter);
 622err_setup_rx:
 623        ixgb_free_tx_resources(adapter);
 624err_setup_tx:
 625        ixgb_reset(adapter);
 626
 627        return err;
 628}
 629
 630/**
 631 * ixgb_close - Disables a network interface
 632 * @netdev: network interface device structure
 633 *
 634 * Returns 0, this is not allowed to fail
 635 *
 636 * The close entry point is called when an interface is de-activated
 637 * by the OS.  The hardware is still under the driver's control, but
 638 * needs to be disabled.  A global MAC reset is issued to stop the
 639 * hardware, and all transmit and receive resources are freed.
 640 **/
 641
 642static int
 643ixgb_close(struct net_device *netdev)
 644{
 645        struct ixgb_adapter *adapter = netdev_priv(netdev);
 646
 647        ixgb_down(adapter, true);
 648
 649        ixgb_free_tx_resources(adapter);
 650        ixgb_free_rx_resources(adapter);
 651
 652        return 0;
 653}
 654
 655/**
 656 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 657 * @adapter: board private structure
 658 *
 659 * Return 0 on success, negative on failure
 660 **/
 661
 662int
 663ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
 664{
 665        struct ixgb_desc_ring *txdr = &adapter->tx_ring;
 666        struct pci_dev *pdev = adapter->pdev;
 667        int size;
 668
 669        size = sizeof(struct ixgb_buffer) * txdr->count;
 670        txdr->buffer_info = vzalloc(size);
 671        if (!txdr->buffer_info)
 672                return -ENOMEM;
 673
 674        /* round up to nearest 4K */
 675
 676        txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
 677        txdr->size = ALIGN(txdr->size, 4096);
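            /* Illustrative arithmetic: each ixgb_tx_desc is 16 bytes, so the
             * default 256-entry ring needs exactly 4096 bytes and the ALIGN()
             * above is a no-op; a hypothetical 80-entry ring (1280 bytes)
             * would be padded up to 4096. */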
 678
 679        txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
 680                                        GFP_KERNEL);
 681        if (!txdr->desc) {
 682                vfree(txdr->buffer_info);
 683                return -ENOMEM;
 684        }
 685
 686        txdr->next_to_use = 0;
 687        txdr->next_to_clean = 0;
 688
 689        return 0;
 690}
 691
 692/**
 693 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 694 * @adapter: board private structure
 695 *
 696 * Configure the Tx unit of the MAC after a reset.
 697 **/
 698
 699static void
 700ixgb_configure_tx(struct ixgb_adapter *adapter)
 701{
 702        u64 tdba = adapter->tx_ring.dma;
 703        u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
 704        u32 tctl;
 705        struct ixgb_hw *hw = &adapter->hw;
 706
 707        /* Setup the Base and Length of the Tx Descriptor Ring
 708         * tx_ring.dma can be either a 32 or 64 bit value
 709         */
 710
 711        IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
 712        IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
 713
 714        IXGB_WRITE_REG(hw, TDLEN, tdlen);
 715
 716        /* Setup the HW Tx Head and Tail descriptor pointers */
 717
 718        IXGB_WRITE_REG(hw, TDH, 0);
 719        IXGB_WRITE_REG(hw, TDT, 0);
 720
 721        /* don't set up txdctl, it induces performance problems if configured
 722         * incorrectly */
 723        /* Set the Tx Interrupt Delay register */
 724
 725        IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
 726
 727        /* Program the Transmit Control Register */
 728
 729        tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
 730        IXGB_WRITE_REG(hw, TCTL, tctl);
 731
 732        /* Setup Transmit Descriptor Settings for this adapter */
 733        adapter->tx_cmd_type =
 734                IXGB_TX_DESC_TYPE |
 735                (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
 736}
 737
 738/**
 739 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 740 * @adapter: board private structure
 741 *
 742 * Returns 0 on success, negative on failure
 743 **/
 744
 745int
 746ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 747{
 748        struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
 749        struct pci_dev *pdev = adapter->pdev;
 750        int size;
 751
 752        size = sizeof(struct ixgb_buffer) * rxdr->count;
 753        rxdr->buffer_info = vzalloc(size);
 754        if (!rxdr->buffer_info)
 755                return -ENOMEM;
 756
 757        /* Round up to nearest 4K */
 758
 759        rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
 760        rxdr->size = ALIGN(rxdr->size, 4096);
 761
 762        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
 763                                        GFP_KERNEL);
 764
 765        if (!rxdr->desc) {
 766                vfree(rxdr->buffer_info);
 767                return -ENOMEM;
 768        }
 769
 770        rxdr->next_to_clean = 0;
 771        rxdr->next_to_use = 0;
 772
 773        return 0;
 774}
 775
 776/**
 777 * ixgb_setup_rctl - configure the receive control register
 778 * @adapter: Board private structure
 779 **/
 780
 781static void
 782ixgb_setup_rctl(struct ixgb_adapter *adapter)
 783{
 784        u32 rctl;
 785
 786        rctl = IXGB_READ_REG(&adapter->hw, RCTL);
 787
 788        rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
 789
 790        rctl |=
 791                IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
 792                IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
 793                (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
 794
 795        rctl |= IXGB_RCTL_SECRC;
 796
 797        if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
 798                rctl |= IXGB_RCTL_BSIZE_2048;
 799        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
 800                rctl |= IXGB_RCTL_BSIZE_4096;
 801        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
 802                rctl |= IXGB_RCTL_BSIZE_8192;
 803        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
 804                rctl |= IXGB_RCTL_BSIZE_16384;
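            /* Example: with the default 1500-byte MTU, rx_buffer_len is
             * 1518 + 8 = 1526 (see ixgb_sw_init()), so BSIZE_2048 is used. */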
 805
 806        IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
 807}
 808
 809/**
 810 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 811 * @adapter: board private structure
 812 *
 813 * Configure the Rx unit of the MAC after a reset.
 814 **/
 815
 816static void
 817ixgb_configure_rx(struct ixgb_adapter *adapter)
 818{
 819        u64 rdba = adapter->rx_ring.dma;
 820        u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
 821        struct ixgb_hw *hw = &adapter->hw;
 822        u32 rctl;
 823        u32 rxcsum;
 824
 825        /* make sure receives are disabled while setting up the descriptors */
 826
 827        rctl = IXGB_READ_REG(hw, RCTL);
 828        IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
 829
 830        /* set the Receive Delay Timer Register */
 831
 832        IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
 833
 834        /* Setup the Base and Length of the Rx Descriptor Ring */
 835
 836        IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
 837        IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
 838
 839        IXGB_WRITE_REG(hw, RDLEN, rdlen);
 840
 841        /* Setup the HW Rx Head and Tail Descriptor Pointers */
 842        IXGB_WRITE_REG(hw, RDH, 0);
 843        IXGB_WRITE_REG(hw, RDT, 0);
 844
 845        /* due to the hardware errata with RXDCTL, we are unable to use any of
 846         * the performance enhancing features of it without causing other
 847         * subtle bugs, some of the bugs could include receive length
 848         * corruption at high data rates (WTHRESH > 0) and/or receive
 849 * descriptor ring irregularities (particularly in hardware cache) */
 850        IXGB_WRITE_REG(hw, RXDCTL, 0);
 851
 852        /* Enable Receive Checksum Offload for TCP and UDP */
 853        if (adapter->rx_csum) {
 854                rxcsum = IXGB_READ_REG(hw, RXCSUM);
 855                rxcsum |= IXGB_RXCSUM_TUOFL;
 856                IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
 857        }
 858
 859        /* Enable Receives */
 860
 861        IXGB_WRITE_REG(hw, RCTL, rctl);
 862}
 863
 864/**
 865 * ixgb_free_tx_resources - Free Tx Resources
 866 * @adapter: board private structure
 867 *
 868 * Free all transmit software resources
 869 **/
 870
 871void
 872ixgb_free_tx_resources(struct ixgb_adapter *adapter)
 873{
 874        struct pci_dev *pdev = adapter->pdev;
 875
 876        ixgb_clean_tx_ring(adapter);
 877
 878        vfree(adapter->tx_ring.buffer_info);
 879        adapter->tx_ring.buffer_info = NULL;
 880
 881        dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
 882                          adapter->tx_ring.desc, adapter->tx_ring.dma);
 883
 884        adapter->tx_ring.desc = NULL;
 885}
 886
 887static void
 888ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
 889                                struct ixgb_buffer *buffer_info)
 890{
 891        if (buffer_info->dma) {
 892                if (buffer_info->mapped_as_page)
 893                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
 894                                       buffer_info->length, DMA_TO_DEVICE);
 895                else
 896                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 897                                         buffer_info->length, DMA_TO_DEVICE);
 898                buffer_info->dma = 0;
 899        }
 900
 901        if (buffer_info->skb) {
 902                dev_kfree_skb_any(buffer_info->skb);
 903                buffer_info->skb = NULL;
 904        }
 905        buffer_info->time_stamp = 0;
 906        /* length and next_to_watch are always re-initialized on the tx
 907         * path, so they need not be cleared here:
 908         * buffer_info->length = 0; buffer_info->next_to_watch = 0; */
 909}
 910
 911/**
 912 * ixgb_clean_tx_ring - Free Tx Buffers
 913 * @adapter: board private structure
 914 **/
 915
 916static void
 917ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
 918{
 919        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
 920        struct ixgb_buffer *buffer_info;
 921        unsigned long size;
 922        unsigned int i;
 923
 924        /* Free all the Tx ring sk_buffs */
 925
 926        for (i = 0; i < tx_ring->count; i++) {
 927                buffer_info = &tx_ring->buffer_info[i];
 928                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
 929        }
 930
 931        size = sizeof(struct ixgb_buffer) * tx_ring->count;
 932        memset(tx_ring->buffer_info, 0, size);
 933
 934        /* Zero out the descriptor ring */
 935
 936        memset(tx_ring->desc, 0, tx_ring->size);
 937
 938        tx_ring->next_to_use = 0;
 939        tx_ring->next_to_clean = 0;
 940
 941        IXGB_WRITE_REG(&adapter->hw, TDH, 0);
 942        IXGB_WRITE_REG(&adapter->hw, TDT, 0);
 943}
 944
 945/**
 946 * ixgb_free_rx_resources - Free Rx Resources
 947 * @adapter: board private structure
 948 *
 949 * Free all receive software resources
 950 **/
 951
 952void
 953ixgb_free_rx_resources(struct ixgb_adapter *adapter)
 954{
 955        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
 956        struct pci_dev *pdev = adapter->pdev;
 957
 958        ixgb_clean_rx_ring(adapter);
 959
 960        vfree(rx_ring->buffer_info);
 961        rx_ring->buffer_info = NULL;
 962
 963        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
 964                          rx_ring->dma);
 965
 966        rx_ring->desc = NULL;
 967}
 968
 969/**
 970 * ixgb_clean_rx_ring - Free Rx Buffers
 971 * @adapter: board private structure
 972 **/
 973
 974static void
 975ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
 976{
 977        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
 978        struct ixgb_buffer *buffer_info;
 979        struct pci_dev *pdev = adapter->pdev;
 980        unsigned long size;
 981        unsigned int i;
 982
 983        /* Free all the Rx ring sk_buffs */
 984
 985        for (i = 0; i < rx_ring->count; i++) {
 986                buffer_info = &rx_ring->buffer_info[i];
 987                if (buffer_info->dma) {
 988                        dma_unmap_single(&pdev->dev,
 989                                         buffer_info->dma,
 990                                         buffer_info->length,
 991                                         DMA_FROM_DEVICE);
 992                        buffer_info->dma = 0;
 993                        buffer_info->length = 0;
 994                }
 995
 996                if (buffer_info->skb) {
 997                        dev_kfree_skb(buffer_info->skb);
 998                        buffer_info->skb = NULL;
 999                }
1000        }
1001
1002        size = sizeof(struct ixgb_buffer) * rx_ring->count;
1003        memset(rx_ring->buffer_info, 0, size);
1004
1005        /* Zero out the descriptor ring */
1006
1007        memset(rx_ring->desc, 0, rx_ring->size);
1008
1009        rx_ring->next_to_clean = 0;
1010        rx_ring->next_to_use = 0;
1011
1012        IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1013        IXGB_WRITE_REG(&adapter->hw, RDT, 0);
1014}
1015
1016/**
1017 * ixgb_set_mac - Change the Ethernet Address of the NIC
1018 * @netdev: network interface device structure
1019 * @p: pointer to an address structure
1020 *
1021 * Returns 0 on success, negative on failure
1022 **/
1023
1024static int
1025ixgb_set_mac(struct net_device *netdev, void *p)
1026{
1027        struct ixgb_adapter *adapter = netdev_priv(netdev);
1028        struct sockaddr *addr = p;
1029
1030        if (!is_valid_ether_addr(addr->sa_data))
1031                return -EADDRNOTAVAIL;
1032
1033        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1034
1035        ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
1036
1037        return 0;
1038}
1039
1040/**
1041 * ixgb_set_multi - Multicast and Promiscuous mode set
1042 * @netdev: network interface device structure
1043 *
1044 * The set_multi entry point is called whenever the multicast address
1045 * list or the network interface flags are updated.  This routine is
1046 * responsible for configuring the hardware for proper multicast,
1047 * promiscuous mode, and all-multi behavior.
1048 **/
1049
1050static void
1051ixgb_set_multi(struct net_device *netdev)
1052{
1053        struct ixgb_adapter *adapter = netdev_priv(netdev);
1054        struct ixgb_hw *hw = &adapter->hw;
1055        struct netdev_hw_addr *ha;
1056        u32 rctl;
1057
1058        /* Check for Promiscuous and All Multicast modes */
1059
1060        rctl = IXGB_READ_REG(hw, RCTL);
1061
1062        if (netdev->flags & IFF_PROMISC) {
1063                rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1064                /* disable VLAN filtering */
1065                rctl &= ~IXGB_RCTL_CFIEN;
1066                rctl &= ~IXGB_RCTL_VFE;
1067        } else {
1068                if (netdev->flags & IFF_ALLMULTI) {
1069                        rctl |= IXGB_RCTL_MPE;
1070                        rctl &= ~IXGB_RCTL_UPE;
1071                } else {
1072                        rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1073                }
1074                /* enable VLAN filtering */
1075                rctl |= IXGB_RCTL_VFE;
1076                rctl &= ~IXGB_RCTL_CFIEN;
1077        }
1078
1079        if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1080                rctl |= IXGB_RCTL_MPE;
1081                IXGB_WRITE_REG(hw, RCTL, rctl);
1082        } else {
1083                u8 *mta = kmalloc_array(ETH_ALEN,
1084                                        IXGB_MAX_NUM_MULTICAST_ADDRESSES,
1085                                        GFP_ATOMIC);
1086                u8 *addr;
1087                if (!mta)
1088                        goto alloc_failed;
1089
1090                IXGB_WRITE_REG(hw, RCTL, rctl);
1091
1092                addr = mta;
1093                netdev_for_each_mc_addr(ha, netdev) {
1094                        memcpy(addr, ha->addr, ETH_ALEN);
1095                        addr += ETH_ALEN;
1096                }
1097
1098                ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
1099                kfree(mta);
1100        }
1101
1102alloc_failed:
1103        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
1104                ixgb_vlan_strip_enable(adapter);
1105        else
1106                ixgb_vlan_strip_disable(adapter);
1107
1108}
1109
1110/**
1111 * ixgb_watchdog - Timer Call-back
1112 * @t: pointer to timer_list containing our private info pointer
1113 **/
1114
1115static void
1116ixgb_watchdog(struct timer_list *t)
1117{
1118        struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
1119        struct net_device *netdev = adapter->netdev;
1120        struct ixgb_desc_ring *txdr = &adapter->tx_ring;
1121
1122        ixgb_check_for_link(&adapter->hw);
1123
1124        if (ixgb_check_for_bad_link(&adapter->hw)) {
1125                /* force the reset path */
1126                netif_stop_queue(netdev);
1127        }
1128
1129        if (adapter->hw.link_up) {
1130                if (!netif_carrier_ok(netdev)) {
1131                        netdev_info(netdev,
1132                                    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
1133                                    (adapter->hw.fc.type == ixgb_fc_full) ?
1134                                    "RX/TX" :
1135                                    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
1136                                     "RX" :
1137                                    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
1138                                    "TX" : "None");
1139                        adapter->link_speed = 10000;
1140                        adapter->link_duplex = FULL_DUPLEX;
1141                        netif_carrier_on(netdev);
1142                }
1143        } else {
1144                if (netif_carrier_ok(netdev)) {
1145                        adapter->link_speed = 0;
1146                        adapter->link_duplex = 0;
1147                        netdev_info(netdev, "NIC Link is Down\n");
1148                        netif_carrier_off(netdev);
1149                }
1150        }
1151
1152        ixgb_update_stats(adapter);
1153
1154        if (!netif_carrier_ok(netdev)) {
1155                if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
1156                        /* We've lost link, so the controller stops DMA,
1157                         * but we've got queued Tx work that's never going
1158                         * to get done, so reset controller to flush Tx.
1159                         * (Do the reset outside of interrupt context). */
1160                        schedule_work(&adapter->tx_timeout_task);
1161                        /* return immediately since reset is imminent */
1162                        return;
1163                }
1164        }
1165
1166        /* Force detection of hung controller every watchdog period */
1167        adapter->detect_tx_hung = true;
1168
1169        /* generate an interrupt to force clean up of any stragglers */
1170        IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
1171
1172        /* Reset the timer */
1173        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1174}
1175
1176#define IXGB_TX_FLAGS_CSUM              0x00000001
1177#define IXGB_TX_FLAGS_VLAN              0x00000002
1178#define IXGB_TX_FLAGS_TSO               0x00000004
1179
1180static int
1181ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1182{
1183        struct ixgb_context_desc *context_desc;
1184        unsigned int i;
1185        u8 ipcss, ipcso, tucss, tucso, hdr_len;
1186        u16 ipcse, tucse, mss;
1187
1188        if (likely(skb_is_gso(skb))) {
1189                struct ixgb_buffer *buffer_info;
1190                struct iphdr *iph;
1191                int err;
1192
1193                err = skb_cow_head(skb, 0);
1194                if (err < 0)
1195                        return err;
1196
1197                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1198                mss = skb_shinfo(skb)->gso_size;
1199                iph = ip_hdr(skb);
1200                iph->tot_len = 0;
1201                iph->check = 0;
1202                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1203                                                         iph->daddr, 0,
1204                                                         IPPROTO_TCP, 0);
1205                ipcss = skb_network_offset(skb);
1206                ipcso = (void *)&(iph->check) - (void *)skb->data;
1207                ipcse = skb_transport_offset(skb) - 1;
1208                tucss = skb_transport_offset(skb);
1209                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
1210                tucse = 0;
1211
1212                i = adapter->tx_ring.next_to_use;
1213                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1214                buffer_info = &adapter->tx_ring.buffer_info[i];
1215                WARN_ON(buffer_info->dma != 0);
1216
1217                context_desc->ipcss = ipcss;
1218                context_desc->ipcso = ipcso;
1219                context_desc->ipcse = cpu_to_le16(ipcse);
1220                context_desc->tucss = tucss;
1221                context_desc->tucso = tucso;
1222                context_desc->tucse = cpu_to_le16(tucse);
1223                context_desc->mss = cpu_to_le16(mss);
1224                context_desc->hdr_len = hdr_len;
1225                context_desc->status = 0;
1226                context_desc->cmd_type_len = cpu_to_le32(
1227                                                  IXGB_CONTEXT_DESC_TYPE
1228                                                | IXGB_CONTEXT_DESC_CMD_TSE
1229                                                | IXGB_CONTEXT_DESC_CMD_IP
1230                                                | IXGB_CONTEXT_DESC_CMD_TCP
1231                                                | IXGB_CONTEXT_DESC_CMD_IDE
1232                                                | (skb->len - (hdr_len)));
1233
1234
1235                if (++i == adapter->tx_ring.count) i = 0;
1236                adapter->tx_ring.next_to_use = i;
1237
1238                return 1;
1239        }
1240
1241        return 0;
1242}
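    /*
     * Worked example (illustrative) for an untagged TCP/IPv4 frame: the IP
     * header starts at byte 14, so ipcss = 14, ipcso = 24 (iph->check sits
     * 10 bytes into the header) and ipcse = 33 (last byte of a 20-byte
     * header); the TCP header starts at byte 34, so tucss = 34, tucso = 50
     * (the TCP checksum lives at offset 16) and tucse = 0, meaning
     * "checksum to the end of the packet".
     */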
1243
1244static bool
1245ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1246{
1247        struct ixgb_context_desc *context_desc;
1248        unsigned int i;
1249        u8 css, cso;
1250
1251        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1252                struct ixgb_buffer *buffer_info;
1253                css = skb_checksum_start_offset(skb);
1254                cso = css + skb->csum_offset;
1255
1256                i = adapter->tx_ring.next_to_use;
1257                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1258                buffer_info = &adapter->tx_ring.buffer_info[i];
1259                WARN_ON(buffer_info->dma != 0);
1260
1261                context_desc->tucss = css;
1262                context_desc->tucso = cso;
1263                context_desc->tucse = 0;
1264                /* zero out any previously existing data in one instruction */
1265                *(u32 *)&(context_desc->ipcss) = 0;
1266                context_desc->status = 0;
1267                context_desc->hdr_len = 0;
1268                context_desc->mss = 0;
1269                context_desc->cmd_type_len =
1270                        cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1271                                    | IXGB_TX_DESC_CMD_IDE);
1272
1273                if (++i == adapter->tx_ring.count) i = 0;
1274                adapter->tx_ring.next_to_use = i;
1275
1276                return true;
1277        }
1278
1279        return false;
1280}
1281
1282#define IXGB_MAX_TXD_PWR        14
1283#define IXGB_MAX_DATA_PER_TXD   (1<<IXGB_MAX_TXD_PWR)
1284
1285static int
1286ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1287            unsigned int first)
1288{
1289        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1290        struct pci_dev *pdev = adapter->pdev;
1291        struct ixgb_buffer *buffer_info;
1292        int len = skb_headlen(skb);
1293        unsigned int offset = 0, size, count = 0, i;
1294        unsigned int mss = skb_shinfo(skb)->gso_size;
1295        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1296        unsigned int f;
1297
1298        i = tx_ring->next_to_use;
1299
1300        while (len) {
1301                buffer_info = &tx_ring->buffer_info[i];
1302                size = min(len, IXGB_MAX_DATA_PER_TXD);
1303                /* Workaround for premature desc write-backs
1304                 * in TSO mode.  Append 4-byte sentinel desc */
1305                if (unlikely(mss && !nr_frags && size == len && size > 8))
1306                        size -= 4;
1307
1308                buffer_info->length = size;
1309                WARN_ON(buffer_info->dma != 0);
1310                buffer_info->time_stamp = jiffies;
1311                buffer_info->mapped_as_page = false;
1312                buffer_info->dma = dma_map_single(&pdev->dev,
1313                                                  skb->data + offset,
1314                                                  size, DMA_TO_DEVICE);
1315                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1316                        goto dma_error;
1317                buffer_info->next_to_watch = 0;
1318
1319                len -= size;
1320                offset += size;
1321                count++;
1322                if (len) {
1323                        i++;
1324                        if (i == tx_ring->count)
1325                                i = 0;
1326                }
1327        }
1328
1329        for (f = 0; f < nr_frags; f++) {
1330                const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1331                len = skb_frag_size(frag);
1332                offset = 0;
1333
1334                while (len) {
1335                        i++;
1336                        if (i == tx_ring->count)
1337                                i = 0;
1338
1339                        buffer_info = &tx_ring->buffer_info[i];
1340                        size = min(len, IXGB_MAX_DATA_PER_TXD);
1341
1342                        /* Workaround for premature desc write-backs
1343                         * in TSO mode.  Append 4-byte sentinel desc */
1344                        if (unlikely(mss && (f == (nr_frags - 1))
1345                                     && size == len && size > 8))
1346                                size -= 4;
1347
1348                        buffer_info->length = size;
1349                        buffer_info->time_stamp = jiffies;
1350                        buffer_info->mapped_as_page = true;
1351                        buffer_info->dma =
1352                                skb_frag_dma_map(&pdev->dev, frag, offset, size,
1353                                                 DMA_TO_DEVICE);
1354                        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1355                                goto dma_error;
1356                        buffer_info->next_to_watch = 0;
1357
1358                        len -= size;
1359                        offset += size;
1360                        count++;
1361                }
1362        }
1363        tx_ring->buffer_info[i].skb = skb;
1364        tx_ring->buffer_info[first].next_to_watch = i;
1365
1366        return count;
1367
1368dma_error:
1369        dev_err(&pdev->dev, "TX DMA map failed\n");
1370        buffer_info->dma = 0;
1371        if (count)
1372                count--;
1373
1374        while (count--) {
 1375                if (i == 0)
1376                        i += tx_ring->count;
1377                i--;
1378                buffer_info = &tx_ring->buffer_info[i];
1379                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1380        }
1381
1382        return 0;
1383}
1384
1385static void
 1386ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
1387{
1388        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1389        struct ixgb_tx_desc *tx_desc = NULL;
1390        struct ixgb_buffer *buffer_info;
1391        u32 cmd_type_len = adapter->tx_cmd_type;
1392        u8 status = 0;
1393        u8 popts = 0;
1394        unsigned int i;
1395
1396        if (tx_flags & IXGB_TX_FLAGS_TSO) {
1397                cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
1398                popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
1399        }
1400
1401        if (tx_flags & IXGB_TX_FLAGS_CSUM)
1402                popts |= IXGB_TX_DESC_POPTS_TXSM;
1403
1404        if (tx_flags & IXGB_TX_FLAGS_VLAN)
1405                cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1406
1407        i = tx_ring->next_to_use;
1408
1409        while (count--) {
1410                buffer_info = &tx_ring->buffer_info[i];
1411                tx_desc = IXGB_TX_DESC(*tx_ring, i);
1412                tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1413                tx_desc->cmd_type_len =
1414                        cpu_to_le32(cmd_type_len | buffer_info->length);
1415                tx_desc->status = status;
1416                tx_desc->popts = popts;
1417                tx_desc->vlan = cpu_to_le16(vlan_id);
1418
1419                if (++i == tx_ring->count) i = 0;
1420        }
1421
1422        tx_desc->cmd_type_len |=
1423                cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);
1424
1425        /* Force memory writes to complete before letting h/w
1426         * know there are new descriptors to fetch.  (Only
1427         * applicable for weak-ordered memory model archs,
1428         * such as IA-64). */
1429        wmb();
1430
1431        tx_ring->next_to_use = i;
1432        IXGB_WRITE_REG(&adapter->hw, TDT, i);
1433}
1434
1435static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
1436{
1437        struct ixgb_adapter *adapter = netdev_priv(netdev);
1438        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1439
1440        netif_stop_queue(netdev);
1441        /* Herbert's original patch had:
1442         *  smp_mb__after_netif_stop_queue();
1443         * but since that doesn't exist yet, just open code it. */
1444        smp_mb();
1445
1446        /* We need to check again in a case another CPU has just
1447         * made room available. */
1448        if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
1449                return -EBUSY;
1450
1451        /* A reprieve! */
1452        netif_start_queue(netdev);
1453        ++adapter->restart_queue;
1454        return 0;
1455}
1456
1457static int ixgb_maybe_stop_tx(struct net_device *netdev,
1458                              struct ixgb_desc_ring *tx_ring, int size)
1459{
1460        if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
1461                return 0;
1462        return __ixgb_maybe_stop_tx(netdev, size);
1463}
1464
1465
1466/* Tx Descriptors needed, worst case */
1467#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1468                         (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
 1469#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
1470        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
1471        + 1 /* one more needed for sentinel TSO workaround */
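    /*
     * Illustrative worst case on a 4K-page system (where MAX_SKB_FRAGS is
     * typically 17): 1 descriptor for the linear data, 17 for the fragments,
     * 1 for the context descriptor and 1 sentinel slot, giving
     * DESC_NEEDED = 20.
     */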
1472
1473static netdev_tx_t
1474ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1475{
1476        struct ixgb_adapter *adapter = netdev_priv(netdev);
1477        unsigned int first;
1478        unsigned int tx_flags = 0;
1479        int vlan_id = 0;
1480        int count = 0;
1481        int tso;
1482
1483        if (test_bit(__IXGB_DOWN, &adapter->flags)) {
1484                dev_kfree_skb_any(skb);
1485                return NETDEV_TX_OK;
1486        }
1487
1488        if (skb->len <= 0) {
1489                dev_kfree_skb_any(skb);
1490                return NETDEV_TX_OK;
1491        }
1492
1493        if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
1494                     DESC_NEEDED)))
1495                return NETDEV_TX_BUSY;
1496
1497        if (skb_vlan_tag_present(skb)) {
1498                tx_flags |= IXGB_TX_FLAGS_VLAN;
1499                vlan_id = skb_vlan_tag_get(skb);
1500        }
1501
1502        first = adapter->tx_ring.next_to_use;
1503
1504        tso = ixgb_tso(adapter, skb);
1505        if (tso < 0) {
1506                dev_kfree_skb_any(skb);
1507                return NETDEV_TX_OK;
1508        }
1509
1510        if (likely(tso))
1511                tx_flags |= IXGB_TX_FLAGS_TSO;
1512        else if (ixgb_tx_csum(adapter, skb))
1513                tx_flags |= IXGB_TX_FLAGS_CSUM;
1514
1515        count = ixgb_tx_map(adapter, skb, first);
1516
1517        if (count) {
1518                ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
1519                /* Make sure there is space in the ring for the next send. */
1520                ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
1521
1522        } else {
1523                dev_kfree_skb_any(skb);
1524                adapter->tx_ring.buffer_info[first].time_stamp = 0;
1525                adapter->tx_ring.next_to_use = first;
1526        }
1527
1528        return NETDEV_TX_OK;
1529}
1530
1531/**
1532 * ixgb_tx_timeout - Respond to a Tx Hang
1533 * @netdev: network interface device structure
1534 * @txqueue: queue hanging (unused)
1535 **/
1536
1537static void
1538ixgb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
1539{
1540        struct ixgb_adapter *adapter = netdev_priv(netdev);
1541
1542        /* Do the reset outside of interrupt context */
1543        schedule_work(&adapter->tx_timeout_task);
1544}
1545
1546static void
1547ixgb_tx_timeout_task(struct work_struct *work)
1548{
1549        struct ixgb_adapter *adapter =
1550                container_of(work, struct ixgb_adapter, tx_timeout_task);
1551
1552        adapter->tx_timeout_count++;
1553        ixgb_down(adapter, true);
1554        ixgb_up(adapter);
1555}
1556
1557/**
1558 * ixgb_change_mtu - Change the Maximum Transfer Unit
1559 * @netdev: network interface device structure
1560 * @new_mtu: new value for maximum frame size
1561 *
1562 * Returns 0 on success, negative on failure
1563 **/
1564
1565static int
1566ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1567{
1568        struct ixgb_adapter *adapter = netdev_priv(netdev);
1569        int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1570
1571        if (netif_running(netdev))
1572                ixgb_down(adapter, true);
1573
1574        adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
1575
1576        netdev->mtu = new_mtu;
1577
1578        if (netif_running(netdev))
1579                ixgb_up(adapter);
1580
1581        return 0;
1582}
1583
1584/**
1585 * ixgb_update_stats - Update the board statistics counters.
1586 * @adapter: board private structure
1587 **/
1588
1589void
1590ixgb_update_stats(struct ixgb_adapter *adapter)
1591{
1592        struct net_device *netdev = adapter->netdev;
1593        struct pci_dev *pdev = adapter->pdev;
1594
1595        /* Prevent stats update while adapter is being reset */
1596        if (pci_channel_offline(pdev))
1597                return;
1598
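        /* In promiscuous/all-multicast mode, or when the multicast filter
         * overflows, the hardware apparently counts broadcasts within the
         * multicast counters too, so they are subtracted back out below.
         */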
1599        if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1600           (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1601                u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1602                u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1603                u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1604                u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1605
1606                multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1607                /* fix up multicast stats by removing broadcasts */
1608                if (multi >= bcast)
1609                        multi -= bcast;
1610
1611                adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1612                adapter->stats.mprch += (multi >> 32);
1613                adapter->stats.bprcl += bcast_l;
1614                adapter->stats.bprch += bcast_h;
1615        } else {
1616                adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1617                adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1618                adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1619                adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1620        }
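        /* Accumulate the remaining counters.  Each 64-bit statistic is kept
         * as a low/high register pair; the += accumulation suggests the
         * registers clear on read.
         */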
1621        adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1622        adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1623        adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1624        adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1625        adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1626        adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1627        adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1628        adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1629        adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1630        adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1631        adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1632        adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1633        adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1634        adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1635        adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1636        adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1637        adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1638        adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1639        adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1640        adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1641        adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1642        adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1643        adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1644        adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1645        adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1646        adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1647        adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1648        adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1649        adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1650        adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1651        adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1652        adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1653        adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1654        adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1655        adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1656        adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1657        adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1658        adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1659        adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1660        adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1661        adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1662        adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1663        adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1664        adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1665        adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1666        adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1667        adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1668        adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1669        adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1670        adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1671        adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1672        adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1673        adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1674        adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1675        adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1676        adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1677
1678        /* Fill out the OS statistics structure */
1679
1680        netdev->stats.rx_packets = adapter->stats.gprcl;
1681        netdev->stats.tx_packets = adapter->stats.gptcl;
1682        netdev->stats.rx_bytes = adapter->stats.gorcl;
1683        netdev->stats.tx_bytes = adapter->stats.gotcl;
1684        netdev->stats.multicast = adapter->stats.mprcl;
1685        netdev->stats.collisions = 0;
1686
1687        /* ignore RLEC as it reports errors for padded (< 64 bytes) frames
1688         * with a length in the type/len field */
1689        netdev->stats.rx_errors =
1690            /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1691            adapter->stats.ruc +
1692            adapter->stats.roc /*+ adapter->stats.rlec */  +
1693            adapter->stats.icbc +
1694            adapter->stats.ecbc + adapter->stats.mpc;
1695
1696        /* see above
1697         * netdev->stats.rx_length_errors = adapter->stats.rlec;
1698         */
1699
1700        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
1701        netdev->stats.rx_fifo_errors = adapter->stats.mpc;
1702        netdev->stats.rx_missed_errors = adapter->stats.mpc;
1703        netdev->stats.rx_over_errors = adapter->stats.mpc;
1704
1705        netdev->stats.tx_errors = 0;
1706        netdev->stats.rx_frame_errors = 0;
1707        netdev->stats.tx_aborted_errors = 0;
1708        netdev->stats.tx_carrier_errors = 0;
1709        netdev->stats.tx_fifo_errors = 0;
1710        netdev->stats.tx_heartbeat_errors = 0;
1711        netdev->stats.tx_window_errors = 0;
1712}
1713
1714#define IXGB_MAX_INTR 10
1715/**
1716 * ixgb_intr - Interrupt Handler
1717 * @irq: interrupt number
1718 * @data: pointer to a network interface device structure
1719 **/
1720
1721static irqreturn_t
1722ixgb_intr(int irq, void *data)
1723{
1724        struct net_device *netdev = data;
1725        struct ixgb_adapter *adapter = netdev_priv(netdev);
1726        struct ixgb_hw *hw = &adapter->hw;
1727        u32 icr = IXGB_READ_REG(hw, ICR);
1728
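        /* A zero ICR means the interrupt was raised by another device
         * sharing the line; reading ICR is presumed to acknowledge and
         * clear the cause bits, since nothing is written back here.
         */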
1729        if (unlikely(!icr))
1730                return IRQ_NONE;  /* Not our interrupt */
1731
1732        if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1733                if (!test_bit(__IXGB_DOWN, &adapter->flags))
1734                        mod_timer(&adapter->watchdog_timer, jiffies);
1735
1736        if (napi_schedule_prep(&adapter->napi)) {
1738                /* Disable interrupts and register for poll. The flush
1739                 * of the posted write is intentionally left out.
1740                 */
1742                IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1743                __napi_schedule(&adapter->napi);
1744        }
1745        return IRQ_HANDLED;
1746}
1747
1748/**
1749 * ixgb_clean - NAPI Rx polling callback
1750 * @napi: napi struct pointer
1751 * @budget: max number of receives to clean
1752 **/
1753
1754static int
1755ixgb_clean(struct napi_struct *napi, int budget)
1756{
1757        struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
1758        int work_done = 0;
1759
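        /* Note that Tx completions are always cleaned in full; only Rx
         * work counts against the NAPI budget.
         */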
1760        ixgb_clean_tx_irq(adapter);
1761        ixgb_clean_rx_irq(adapter, &work_done, budget);
1762
1763        /* If budget not fully consumed, exit the polling mode */
1764        if (work_done < budget) {
1765                napi_complete_done(napi, work_done);
1766                if (!test_bit(__IXGB_DOWN, &adapter->flags))
1767                        ixgb_irq_enable(adapter);
1768        }
1769
1770        return work_done;
1771}
1772
1773/**
1774 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1775 * @adapter: board private structure
1776 **/
1777
1778static bool
1779ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1780{
1781        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1782        struct net_device *netdev = adapter->netdev;
1783        struct ixgb_tx_desc *tx_desc, *eop_desc;
1784        struct ixgb_buffer *buffer_info;
1785        unsigned int i, eop;
1786        bool cleaned = false;
1787
1788        i = tx_ring->next_to_clean;
1789        eop = tx_ring->buffer_info[i].next_to_watch;
1790        eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1791
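        /* next_to_watch holds the index of the last descriptor of a packet;
         * once hardware sets DD there, every descriptor up to and including
         * it can be reclaimed.
         */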
1792        while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1793
1794                rmb(); /* read buffer_info after eop_desc */
1795                for (cleaned = false; !cleaned; ) {
1796                        tx_desc = IXGB_TX_DESC(*tx_ring, i);
1797                        buffer_info = &tx_ring->buffer_info[i];
1798
1799                        if (tx_desc->popts &
1800                           (IXGB_TX_DESC_POPTS_TXSM |
1801                            IXGB_TX_DESC_POPTS_IXSM))
1802                                adapter->hw_csum_tx_good++;
1803
1804                        ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1805
1806                        *(u32 *)&(tx_desc->status) = 0;
1807
1808                        cleaned = (i == eop);
1809                        if (++i == tx_ring->count)
                                i = 0;
1810                }
1811
1812                eop = tx_ring->buffer_info[i].next_to_watch;
1813                eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1814        }
1815
1816        tx_ring->next_to_clean = i;
1817
1818        if (unlikely(cleaned && netif_carrier_ok(netdev) &&
1819                     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
1820                /* Make sure that anybody stopping the queue after this
1821                 * sees the new next_to_clean. */
1822                smp_mb();
1823
1824                if (netif_queue_stopped(netdev) &&
1825                    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
1826                        netif_wake_queue(netdev);
1827                        ++adapter->restart_queue;
1828                }
1829        }
1830
1831        if (adapter->detect_tx_hung) {
1832                /* detect a transmit hang in hardware; this serializes the
1833                 * check with the clearing of time_stamp and movement of i */
1834                adapter->detect_tx_hung = false;
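                /* A descriptor still outstanding one second (HZ jiffies)
                 * after queueing, while transmit is not paused by flow
                 * control (TXOFF clear), is treated as a Tx unit hang.
                 */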
1835                if (tx_ring->buffer_info[eop].time_stamp &&
1836                   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
1837                   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
1838                        IXGB_STATUS_TXOFF)) {
1839                        /* detected Tx unit hang */
1840                        netif_err(adapter, drv, adapter->netdev,
1841                                  "Detected Tx Unit Hang\n"
1842                                  "  TDH                  <%x>\n"
1843                                  "  TDT                  <%x>\n"
1844                                  "  next_to_use          <%x>\n"
1845                                  "  next_to_clean        <%x>\n"
1846                                  "buffer_info[next_to_clean]\n"
1847                                  "  time_stamp           <%lx>\n"
1848                                  "  next_to_watch        <%x>\n"
1849                                  "  jiffies              <%lx>\n"
1850                                  "  next_to_watch.status <%x>\n",
1851                                  IXGB_READ_REG(&adapter->hw, TDH),
1852                                  IXGB_READ_REG(&adapter->hw, TDT),
1853                                  tx_ring->next_to_use,
1854                                  tx_ring->next_to_clean,
1855                                  tx_ring->buffer_info[eop].time_stamp,
1856                                  eop,
1857                                  jiffies,
1858                                  eop_desc->status);
1859                        netif_stop_queue(netdev);
1860                }
1861        }
1862
1863        return cleaned;
1864}
1865
1866/**
1867 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1868 * @adapter: board private structure
1869 * @rx_desc: receive descriptor
1870 * @skb: socket buffer with received data
1871 **/
1872
1873static void
1874ixgb_rx_checksum(struct ixgb_adapter *adapter,
1875                 struct ixgb_rx_desc *rx_desc,
1876                 struct sk_buff *skb)
1877{
1878        /* Bail out if the Ignore Checksum bit is set or the
1879         * TCP checksum has not been calculated by hardware.
1880         */
1881        if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1882           (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1883                skb_checksum_none_assert(skb);
1884                return;
1885        }
1886
1887        /* At this point we know the hardware did the TCP checksum */
1888        /* now look at the TCP checksum error bit */
1889        if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1890                /* let the stack verify checksum errors */
1891                skb_checksum_none_assert(skb);
1892                adapter->hw_csum_rx_error++;
1893        } else {
1894                /* TCP checksum is good */
1895                skb->ip_summed = CHECKSUM_UNNECESSARY;
1896                adapter->hw_csum_rx_good++;
1897        }
1898}
1899
1900/*
1901 * Copying small packets into a fresh skb lets the full-sized receive
1902 * buffer be recycled; this should help when the stack does heavy reassembly.
1903 */
1904static void ixgb_check_copybreak(struct napi_struct *napi,
1905                                 struct ixgb_buffer *buffer_info,
1906                                 u32 length, struct sk_buff **skb)
1907{
1908        struct sk_buff *new_skb;
1909
1910        if (length > copybreak)
1911                return;
1912
1913        new_skb = napi_alloc_skb(napi, length);
1914        if (!new_skb)
1915                return;
1916
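        /* Copy from NET_IP_ALIGN bytes before ->data so the new skb keeps
         * the same IP header alignment as the original buffer.
         */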
1917        skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
1918                                       (*skb)->data - NET_IP_ALIGN,
1919                                       length + NET_IP_ALIGN);
1920        /* save the original skb in buffer_info so it can be recycled */
1921        buffer_info->skb = *skb;
1922        *skb = new_skb;
1923}
1924
1925/**
1926 * ixgb_clean_rx_irq - Send received data up the network stack
1927 * @adapter: board private structure
1928 * @work_done: output pointer to the number of packets cleaned
1929 * @work_to_do: how much work we can complete
1930 **/
1931
1932static bool
1933ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1934{
1935        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1936        struct net_device *netdev = adapter->netdev;
1937        struct pci_dev *pdev = adapter->pdev;
1938        struct ixgb_rx_desc *rx_desc, *next_rxd;
1939        struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1940        u32 length;
1941        unsigned int i, j;
1942        int cleaned_count = 0;
1943        bool cleaned = false;
1944
1945        i = rx_ring->next_to_clean;
1946        rx_desc = IXGB_RX_DESC(*rx_ring, i);
1947        buffer_info = &rx_ring->buffer_info[i];
1948
1949        while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1950                struct sk_buff *skb;
1951                u8 status;
1952
1953                if (*work_done >= work_to_do)
1954                        break;
1955
1956                (*work_done)++;
1957                rmb();  /* read descriptor and rx_buffer_info after status DD */
1958                status = rx_desc->status;
1959                skb = buffer_info->skb;
1960                buffer_info->skb = NULL;
1961
1962                prefetch(skb->data - NET_IP_ALIGN);
1963
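                /* Advance to the next descriptor, prefetching the descriptor
                 * and buffer_info entries the next iteration will touch.
                 */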
1964                if (++i == rx_ring->count)
1965                        i = 0;
1966                next_rxd = IXGB_RX_DESC(*rx_ring, i);
1967                prefetch(next_rxd);
1968
1969                j = i + 1;
1970                if (j == rx_ring->count)
1971                        j = 0;
1972                next2_buffer = &rx_ring->buffer_info[j];
1973                prefetch(next2_buffer);
1974
1975                next_buffer = &rx_ring->buffer_info[i];
1976
1977                cleaned = true;
1978                cleaned_count++;
1979
1980                dma_unmap_single(&pdev->dev,
1981                                 buffer_info->dma,
1982                                 buffer_info->length,
1983                                 DMA_FROM_DEVICE);
1984                buffer_info->dma = 0;
1985
1986                length = le16_to_cpu(rx_desc->length);
1987                rx_desc->length = 0;
1988
1989                if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
1990
1991                        /* All receives must fit into a single buffer */
1992
1993                        pr_debug("Receive packet consumed multiple buffers length<%x>\n",
1994                                 length);
1995
1996                        dev_kfree_skb_irq(skb);
1997                        goto rxdesc_done;
1998                }
1999
2000                if (unlikely(rx_desc->errors &
2001                    (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
2002                     IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
2003                        dev_kfree_skb_irq(skb);
2004                        goto rxdesc_done;
2005                }
2006
2007                ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
2008
2009                /* Good Receive */
2010                skb_put(skb, length);
2011
2012                /* Receive Checksum Offload */
2013                ixgb_rx_checksum(adapter, rx_desc, skb);
2014
2015                skb->protocol = eth_type_trans(skb, netdev);
2016                if (status & IXGB_RX_DESC_STATUS_VP)
2017                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2018                                       le16_to_cpu(rx_desc->special));
2019
2020                netif_receive_skb(skb);
2021
2022rxdesc_done:
2023                /* clean up descriptor, might be written over by hw */
2024                rx_desc->status = 0;
2025
2026                /* return some buffers to hardware; one at a time is too slow */
2027                if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
2028                        ixgb_alloc_rx_buffers(adapter, cleaned_count);
2029                        cleaned_count = 0;
2030                }
2031
2032                /* use prefetched values */
2033                rx_desc = next_rxd;
2034                buffer_info = next_buffer;
2035        }
2036
2037        rx_ring->next_to_clean = i;
2038
2039        cleaned_count = IXGB_DESC_UNUSED(rx_ring);
2040        if (cleaned_count)
2041                ixgb_alloc_rx_buffers(adapter, cleaned_count);
2042
2043        return cleaned;
2044}
2045
2046/**
2047 * ixgb_alloc_rx_buffers - Replace used receive buffers
2048 * @adapter: address of board private structure
2049 * @cleaned_count: how many buffers to allocate
2050 **/
2051
2052static void
2053ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2054{
2055        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2056        struct net_device *netdev = adapter->netdev;
2057        struct pci_dev *pdev = adapter->pdev;
2058        struct ixgb_rx_desc *rx_desc;
2059        struct ixgb_buffer *buffer_info;
2060        struct sk_buff *skb;
2061        unsigned int i;
2062        long cleancount;
2063
2064        i = rx_ring->next_to_use;
2065        buffer_info = &rx_ring->buffer_info[i];
2066        cleancount = IXGB_DESC_UNUSED(rx_ring);
2067
2069        /* leave three descriptors unused */
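        /* (presumably to keep a safety gap between the tail handed back to
         * hardware and the head it is still writing)
         */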
2070        while (--cleancount > 2 && cleaned_count--) {
2071                /* recycle! it's good for you */
2072                skb = buffer_info->skb;
2073                if (skb) {
2074                        skb_trim(skb, 0);
2075                        goto map_skb;
2076                }
2077
2078                skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
2079                if (unlikely(!skb)) {
2080                        /* Better luck next round */
2081                        adapter->alloc_rx_buff_failed++;
2082                        break;
2083                }
2084
2085                buffer_info->skb = skb;
2086                buffer_info->length = adapter->rx_buffer_len;
2087map_skb:
2088                buffer_info->dma = dma_map_single(&pdev->dev,
2089                                                  skb->data,
2090                                                  adapter->rx_buffer_len,
2091                                                  DMA_FROM_DEVICE);
2092                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
2093                        adapter->alloc_rx_buff_failed++;
2094                        break;
2095                }
2096
2097                rx_desc = IXGB_RX_DESC(*rx_ring, i);
2098                rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
2099                /* guarantee the DD bit is not set before h/w gets the
2100                 * descriptor; this is the rest of the workaround for h/w
2101                 * double writeback. */
2102                rx_desc->status = 0;
2103
2105                if (++i == rx_ring->count)
2106                        i = 0;
2107                buffer_info = &rx_ring->buffer_info[i];
2108        }
2109
2110        if (likely(rx_ring->next_to_use != i)) {
2111                rx_ring->next_to_use = i;
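                /* The tail register is written to point one entry behind
                 * next_to_use, i.e. at the last descriptor actually
                 * initialized, hence the decrement with wraparound below.
                 */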
2112                if (unlikely(i-- == 0))
2113                        i = (rx_ring->count - 1);
2114
2115                /* Force memory writes to complete before letting h/w
2116                 * know there are new descriptors to fetch.  (Only
2117                 * applicable for weak-ordered memory model archs, such
2118                 * as IA-64). */
2119                wmb();
2120                IXGB_WRITE_REG(&adapter->hw, RDT, i);
2121        }
2122}
2123
2124static void
2125ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2126{
2127        u32 ctrl;
2128
2129        /* enable VLAN tag insert/strip */
2130        ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2131        ctrl |= IXGB_CTRL0_VME;
2132        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2133}
2134
2135static void
2136ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
2137{
2138        u32 ctrl;
2139
2140        /* disable VLAN tag insert/strip */
2141        ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2142        ctrl &= ~IXGB_CTRL0_VME;
2143        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2144}
2145
2146static int
2147ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2148{
2149        struct ixgb_adapter *adapter = netdev_priv(netdev);
2150        u32 vfta, index;
2151
2152        /* add VID to filter table */
2153
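        /* The VFTA is 128 32-bit registers covering all 4096 VLAN IDs:
         * bits 11:5 of the VID select the register, bits 4:0 the bit
         * within it; e.g. VID 100 lands in register 3, bit 4.
         */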
2154        index = (vid >> 5) & 0x7F;
2155        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2156        vfta |= (1 << (vid & 0x1F));
2157        ixgb_write_vfta(&adapter->hw, index, vfta);
2158        set_bit(vid, adapter->active_vlans);
2159
2160        return 0;
2161}
2162
2163static int
2164ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2165{
2166        struct ixgb_adapter *adapter = netdev_priv(netdev);
2167        u32 vfta, index;
2168
2169        /* remove VID from filter table */
2170
2171        index = (vid >> 5) & 0x7F;
2172        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2173        vfta &= ~(1 << (vid & 0x1F));
2174        ixgb_write_vfta(&adapter->hw, index, vfta);
2175        clear_bit(vid, adapter->active_vlans);
2176
2177        return 0;
2178}
2179
2180static void
2181ixgb_restore_vlan(struct ixgb_adapter *adapter)
2182{
2183        u16 vid;
2184
2185        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2186                ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2187}
2188
2189/**
2190 * ixgb_io_error_detected - called when PCI error is detected
2191 * @pdev:    pointer to pci device with error
2192 * @state:   pci channel state after error
2193 *
2194 * This callback is called by the PCI subsystem whenever
2195 * a PCI bus error is detected.
2196 */
2197static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
2198                                               pci_channel_state_t state)
2199{
2200        struct net_device *netdev = pci_get_drvdata(pdev);
2201        struct ixgb_adapter *adapter = netdev_priv(netdev);
2202
2203        netif_device_detach(netdev);
2204
2205        if (state == pci_channel_io_perm_failure)
2206                return PCI_ERS_RESULT_DISCONNECT;
2207
2208        if (netif_running(netdev))
2209                ixgb_down(adapter, true);
2210
2211        pci_disable_device(pdev);
2212
2213        /* Request a slot reset. */
2214        return PCI_ERS_RESULT_NEED_RESET;
2215}
2216
2217/**
2218 * ixgb_io_slot_reset - called after the pci bus has been reset.
2219 * @pdev: pointer to pci device with error
2220 *
2221 * This callback is called after the PCI bus has been reset.
2222 * Basically, this tries to restart the card from scratch.
2223 * This is a shortened version of the device probe/discovery code;
2224 * it resembles the first half of the ixgb_probe() routine.
2225 */
2226static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2227{
2228        struct net_device *netdev = pci_get_drvdata(pdev);
2229        struct ixgb_adapter *adapter = netdev_priv(netdev);
2230
2231        if (pci_enable_device(pdev)) {
2232                netif_err(adapter, probe, adapter->netdev,
2233                          "Cannot re-enable PCI device after reset\n");
2234                return PCI_ERS_RESULT_DISCONNECT;
2235        }
2236
2237        /* Perform card reset only on one instance of the card */
2238        if (PCI_FUNC(pdev->devfn) != 0)
2239                return PCI_ERS_RESULT_RECOVERED;
2240
2241        pci_set_master(pdev);
2242
2243        netif_carrier_off(netdev);
2244        netif_stop_queue(netdev);
2245        ixgb_reset(adapter);
2246
2247        /* Make sure the EEPROM is good */
2248        if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2249                netif_err(adapter, probe, adapter->netdev,
2250                          "After reset, the EEPROM checksum is not valid\n");
2251                return PCI_ERS_RESULT_DISCONNECT;
2252        }
2253        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2254        memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2255
2256        if (!is_valid_ether_addr(netdev->perm_addr)) {
2257                netif_err(adapter, probe, adapter->netdev,
2258                          "After reset, invalid MAC address\n");
2259                return PCI_ERS_RESULT_DISCONNECT;
2260        }
2261
2262        return PCI_ERS_RESULT_RECOVERED;
2263}
2264
2265/**
2266 * ixgb_io_resume - called when it's OK to resume normal operations
2267 * @pdev: pointer to pci device with error
2268 *
2269 * The error recovery driver tells us that it's OK to resume
2270 * normal operation. The implementation resembles the second half
2271 * of the ixgb_probe() routine.
2272 */
2273static void ixgb_io_resume(struct pci_dev *pdev)
2274{
2275        struct net_device *netdev = pci_get_drvdata(pdev);
2276        struct ixgb_adapter *adapter = netdev_priv(netdev);
2277
2278        pci_set_master(pdev);
2279
2280        if (netif_running(netdev)) {
2281                if (ixgb_up(adapter)) {
2282                        pr_err("can't bring device back up after reset\n");
2283                        return;
2284                }
2285        }
2286
2287        netif_device_attach(netdev);
2288        mod_timer(&adapter->watchdog_timer, jiffies);
2289}
2290
2291/* ixgb_main.c */
2292