linux/drivers/net/ethernet/intel/ixgb/ixgb_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2008 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/prefetch.h>
#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#define DRIVERNAPI "-NAPI"
#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";

#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");

/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgb_pci_tbl[] = {
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);

/* Local Function Prototypes */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(struct timer_list *t);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
                                   struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

static void ixgb_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void ixgb_tx_timeout_task(struct work_struct *work);

static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
                                __be16 proto, u16 vid);
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
                                 __be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers ixgb_err_handler = {
        .error_detected = ixgb_io_error_detected,
        .slot_reset = ixgb_io_slot_reset,
        .resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
        .name     = ixgb_driver_name,
        .id_table = ixgb_pci_tbl,
        .probe    = ixgb_probe,
        .remove   = ixgb_remove,
        .err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
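/* A debug value of -1 (the default) keeps DEFAULT_MSG_ENABLE; any other
 * value enables the lowest <debug> netif message-level bits (see the
 * netif_msg_init() call in ixgb_probe() below).
 */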

/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
        pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
        pr_info("%s\n", ixgb_copyright);

        return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
        pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
        IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
        IXGB_WRITE_FLUSH(&adapter->hw);
        synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
        u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
                  IXGB_INT_TXDW | IXGB_INT_LSC;
        if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
                val |= IXGB_INT_GPI0;
        IXGB_WRITE_REG(&adapter->hw, IMS, val);
        IXGB_WRITE_FLUSH(&adapter->hw);
}

int
ixgb_up(struct ixgb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err, irq_flags = IRQF_SHARED;
        int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        struct ixgb_hw *hw = &adapter->hw;

        /* hardware has been reset, we need to reload some things */

        ixgb_rar_set(hw, netdev->dev_addr, 0);
        ixgb_set_multi(netdev);

        ixgb_restore_vlan(adapter);

        ixgb_configure_tx(adapter);
        ixgb_setup_rctl(adapter);
        ixgb_configure_rx(adapter);
        ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

        /* disable interrupts and get the hardware into a known state */
        IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

        /* only enable MSI if bus is in PCI-X mode */
        if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
                err = pci_enable_msi(adapter->pdev);
                if (!err) {
                        adapter->have_msi = true;
                        irq_flags = 0;
                }
                /* proceed to try to request regular interrupt */
        }

        err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
                          netdev->name, netdev);
        if (err) {
                if (adapter->have_msi)
                        pci_disable_msi(adapter->pdev);
                netif_err(adapter, probe, adapter->netdev,
                          "Unable to allocate interrupt, Error: %d\n", err);
                return err;
        }

        if ((hw->max_frame_size != max_frame) ||
                (hw->max_frame_size !=
                (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

                hw->max_frame_size = max_frame;

                IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

                if (hw->max_frame_size >
                   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
                        u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

                        if (!(ctrl0 & IXGB_CTRL0_JFE)) {
                                ctrl0 |= IXGB_CTRL0_JFE;
                                IXGB_WRITE_REG(hw, CTRL0, ctrl0);
                        }
                }
        }

        clear_bit(__IXGB_DOWN, &adapter->flags);

        napi_enable(&adapter->napi);
        ixgb_irq_enable(adapter);

        netif_wake_queue(netdev);

        mod_timer(&adapter->watchdog_timer, jiffies);

        return 0;
}

void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
        struct net_device *netdev = adapter->netdev;

        /* prevent the interrupt handler from restarting watchdog */
        set_bit(__IXGB_DOWN, &adapter->flags);

        netif_carrier_off(netdev);

        napi_disable(&adapter->napi);
        /* waiting for NAPI to complete can re-enable interrupts */
        ixgb_irq_disable(adapter);
        free_irq(adapter->pdev->irq, netdev);

        if (adapter->have_msi)
                pci_disable_msi(adapter->pdev);

        if (kill_watchdog)
                del_timer_sync(&adapter->watchdog_timer);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_stop_queue(netdev);

        ixgb_reset(adapter);
        ixgb_clean_tx_ring(adapter);
        ixgb_clean_rx_ring(adapter);
}

void
ixgb_reset(struct ixgb_adapter *adapter)
{
        struct ixgb_hw *hw = &adapter->hw;

        ixgb_adapter_stop(hw);
        if (!ixgb_init_hw(hw))
                netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");

        /* restore frame size information */
        IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
        if (hw->max_frame_size >
            IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
                u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
                if (!(ctrl0 & IXGB_CTRL0_JFE)) {
                        ctrl0 |= IXGB_CTRL0_JFE;
                        IXGB_WRITE_REG(hw, CTRL0, ctrl0);
                }
        }
}

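/**
 * ixgb_fix_features - fix up a requested feature set
 * @netdev: network interface device structure
 * @features: features the stack is asking for
 *
 * Keeps the requested features consistent with the hardware limitation
 * noted below: CTAG Tx insertion is dropped whenever CTAG Rx stripping
 * is disabled.
 **/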
static netdev_features_t
ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
{
        /*
         * Tx VLAN insertion does not work per HW design when Rx stripping is
         * disabled.
         */
        if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;

        return features;
}

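/**
 * ixgb_set_features - apply a changed feature set
 * @netdev: network interface device structure
 * @features: new feature set
 *
 * Only NETIF_F_RXCSUM and NETIF_F_HW_VLAN_CTAG_RX changes matter here;
 * either one requires a restart (or a reset when the interface is down)
 * so the new settings reach the hardware.
 **/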
static int
ixgb_set_features(struct net_device *netdev, netdev_features_t features)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        netdev_features_t changed = features ^ netdev->features;

        if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
                return 0;

        adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

        if (netif_running(netdev)) {
                ixgb_down(adapter, true);
                ixgb_up(adapter);
                ixgb_set_speed_duplex(netdev);
        } else
                ixgb_reset(adapter);

        return 0;
}


static const struct net_device_ops ixgb_netdev_ops = {
        .ndo_open               = ixgb_open,
        .ndo_stop               = ixgb_close,
        .ndo_start_xmit         = ixgb_xmit_frame,
        .ndo_set_rx_mode        = ixgb_set_multi,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgb_set_mac,
        .ndo_change_mtu         = ixgb_change_mtu,
        .ndo_tx_timeout         = ixgb_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgb_vlan_rx_kill_vid,
        .ndo_fix_features       = ixgb_fix_features,
        .ndo_set_features       = ixgb_set_features,
};

/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *netdev = NULL;
        struct ixgb_adapter *adapter;
        static int cards_found = 0;
        int pci_using_dac;
        int i;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        pci_using_dac = 0;
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
                pci_using_dac = 1;
        } else {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        pr_err("No usable DMA configuration, aborting\n");
                        goto err_dma_mask;
                }
        }

        err = pci_request_regions(pdev, ixgb_driver_name);
        if (err)
                goto err_request_regions;

        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->hw.back = adapter;
        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

        adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
        if (!adapter->hw.hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }

        for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
                if (pci_resource_len(pdev, i) == 0)
                        continue;
                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                        adapter->hw.io_base = pci_resource_start(pdev, i);
                        break;
                }
        }

        netdev->netdev_ops = &ixgb_netdev_ops;
        ixgb_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
        netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);

        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        adapter->bd_number = cards_found;
        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        /* setup the private structure */

        err = ixgb_sw_init(adapter);
        if (err)
                goto err_sw_init;

        netdev->hw_features = NETIF_F_SG |
                           NETIF_F_TSO |
                           NETIF_F_HW_CSUM |
                           NETIF_F_HW_VLAN_CTAG_TX |
                           NETIF_F_HW_VLAN_CTAG_RX;
        netdev->features = netdev->hw_features |
                           NETIF_F_HW_VLAN_CTAG_FILTER;
        netdev->hw_features |= NETIF_F_RXCSUM;

        if (pci_using_dac) {
                netdev->features |= NETIF_F_HIGHDMA;
                netdev->vlan_features |= NETIF_F_HIGHDMA;
        }

        /* MTU range: 68 - 16114 */
        netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;

        /* make sure the EEPROM is good */

        if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
                netif_err(adapter, probe, adapter->netdev,
                          "The EEPROM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);

        if (!is_valid_ether_addr(netdev->dev_addr)) {
                netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

        timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0);

        INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;

        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);

        netif_info(adapter, probe, adapter->netdev,
                   "Intel(R) PRO/10GbE Network Connection\n");
        ixgb_check_options(adapter);
        /* reset the hardware with the new settings */

        ixgb_reset(adapter);

        cards_found++;
        return 0;

err_register:
err_sw_init:
err_eeprom:
        iounmap(adapter->hw.hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
        pci_disable_device(pdev);
        return err;
}

/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void
ixgb_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        cancel_work_sync(&adapter->tx_timeout_task);

        unregister_netdev(netdev);

        iounmap(adapter->hw.hw_addr);
        pci_release_regions(pdev);

        free_netdev(netdev);
        pci_disable_device(pdev);
}

/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int
ixgb_sw_init(struct ixgb_adapter *adapter)
{
        struct ixgb_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        /* PCI config space info */

        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;

        hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */

        if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
                hw->mac_type = ixgb_82597;
        else {
                /* should never have loaded on this device */
                netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
        }

        /* enable flow control to be programmed */
        hw->fc.send_xon = 1;

        set_bit(__IXGB_DOWN, &adapter->flags);
        return 0;
}

/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        int err;

        /* allocate transmit descriptors */
        err = ixgb_setup_tx_resources(adapter);
        if (err)
                goto err_setup_tx;

        netif_carrier_off(netdev);

        /* allocate receive descriptors */

        err = ixgb_setup_rx_resources(adapter);
        if (err)
                goto err_setup_rx;

        err = ixgb_up(adapter);
        if (err)
                goto err_up;

        netif_start_queue(netdev);

        return 0;

err_up:
        ixgb_free_rx_resources(adapter);
err_setup_rx:
        ixgb_free_tx_resources(adapter);
err_setup_tx:
        ixgb_reset(adapter);

        return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        ixgb_down(adapter, true);

        ixgb_free_tx_resources(adapter);
        ixgb_free_rx_resources(adapter);

        return 0;
}

/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *txdr = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgb_buffer) * txdr->count;
        txdr->buffer_info = vzalloc(size);
        if (!txdr->buffer_info)
                return -ENOMEM;

        /* round up to nearest 4K */

        txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
        txdr->size = ALIGN(txdr->size, 4096);

        txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
                                        GFP_KERNEL);
        if (!txdr->desc) {
                vfree(txdr->buffer_info);
                return -ENOMEM;
        }

        txdr->next_to_use = 0;
        txdr->next_to_clean = 0;

        return 0;
}

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
        u64 tdba = adapter->tx_ring.dma;
        u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
        u32 tctl;
        struct ixgb_hw *hw = &adapter->hw;

        /* Setup the Base and Length of the Tx Descriptor Ring
         * tx_ring.dma can be either a 32 or 64 bit value
         */

        IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

        IXGB_WRITE_REG(hw, TDLEN, tdlen);

        /* Setup the HW Tx Head and Tail descriptor pointers */

        IXGB_WRITE_REG(hw, TDH, 0);
        IXGB_WRITE_REG(hw, TDT, 0);

        /* don't set up txdctl, it induces performance problems if configured
         * incorrectly */
        /* Set the Tx Interrupt Delay register */

        IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

        /* Program the Transmit Control Register */

        tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
        IXGB_WRITE_REG(hw, TCTL, tctl);

        /* Setup Transmit Descriptor Settings for this adapter */
        adapter->tx_cmd_type =
                IXGB_TX_DESC_TYPE |
                (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}

/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgb_buffer) * rxdr->count;
        rxdr->buffer_info = vzalloc(size);
        if (!rxdr->buffer_info)
                return -ENOMEM;

        /* Round up to nearest 4K */

        rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
        rxdr->size = ALIGN(rxdr->size, 4096);

        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
                                        GFP_KERNEL);

        if (!rxdr->desc) {
                vfree(rxdr->buffer_info);
                return -ENOMEM;
        }

        rxdr->next_to_clean = 0;
        rxdr->next_to_use = 0;

        return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
        u32 rctl;

        rctl = IXGB_READ_REG(&adapter->hw, RCTL);

        rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

        rctl |=
                IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
                IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
                (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

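        /* strip the Ethernet CRC: the hardware removes the 4-byte FCS from
         * received frames before they are DMA'd to the buffers */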
        rctl |= IXGB_RCTL_SECRC;

        if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
                rctl |= IXGB_RCTL_BSIZE_2048;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
                rctl |= IXGB_RCTL_BSIZE_4096;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
                rctl |= IXGB_RCTL_BSIZE_8192;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
                rctl |= IXGB_RCTL_BSIZE_16384;

        IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
        u64 rdba = adapter->rx_ring.dma;
        u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
        struct ixgb_hw *hw = &adapter->hw;
        u32 rctl;
        u32 rxcsum;

        /* make sure receives are disabled while setting up the descriptors */

        rctl = IXGB_READ_REG(hw, RCTL);
        IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

        /* set the Receive Delay Timer Register */

        IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

        /* Setup the Base and Length of the Rx Descriptor Ring */

        IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

        IXGB_WRITE_REG(hw, RDLEN, rdlen);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGB_WRITE_REG(hw, RDH, 0);
        IXGB_WRITE_REG(hw, RDT, 0);

        /* due to the hardware errata with RXDCTL, we are unable to use any of
         * the performance enhancing features of it without causing other
         * subtle bugs, some of the bugs could include receive length
         * corruption at high data rates (WTHRESH > 0) and/or receive
         * descriptor ring irregularities (particularly in the hardware cache) */
        IXGB_WRITE_REG(hw, RXDCTL, 0);

        /* Enable Receive Checksum Offload for TCP and UDP */
        if (adapter->rx_csum) {
                rxcsum = IXGB_READ_REG(hw, RXCSUM);
                rxcsum |= IXGB_RXCSUM_TUOFL;
                IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
        }

        /* Enable Receives */

        IXGB_WRITE_REG(hw, RCTL, rctl);
}

/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;

        ixgb_clean_tx_ring(adapter);

        vfree(adapter->tx_ring.buffer_info);
        adapter->tx_ring.buffer_info = NULL;

        dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
                          adapter->tx_ring.desc, adapter->tx_ring.dma);

        adapter->tx_ring.desc = NULL;
}

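/**
 * ixgb_unmap_and_free_tx_resource - release one Tx buffer_info entry
 * @adapter: board private structure
 * @buffer_info: buffer to unmap and free
 *
 * Undoes the DMA mapping (page or single, as recorded at map time) and
 * frees any attached skb.
 **/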
static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
                                struct ixgb_buffer *buffer_info)
{
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
                                       buffer_info->length, DMA_TO_DEVICE);
                else
                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
                                         buffer_info->length, DMA_TO_DEVICE);
                buffer_info->dma = 0;
        }

        if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
        /* these fields must always be initialized in tx
         * buffer_info->length = 0;
         * buffer_info->next_to_watch = 0; */
}

/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_buffer *buffer_info;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs */

        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        size = sizeof(struct ixgb_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        IXGB_WRITE_REG(&adapter->hw, TDH, 0);
        IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;

        ixgb_clean_rx_ring(adapter);

        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;

        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
                          rx_ring->dma);

        rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct ixgb_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs */

        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
                        dma_unmap_single(&pdev->dev,
                                         buffer_info->dma,
                                         buffer_info->length,
                                         DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                        buffer_info->length = 0;
                }

                if (buffer_info->skb) {
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct ixgb_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        IXGB_WRITE_REG(&adapter->hw, RDH, 0);
        IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}

/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

        return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_hw *hw = &adapter->hw;
        struct netdev_hw_addr *ha;
        u32 rctl;

        /* Check for Promiscuous and All Multicast modes */

        rctl = IXGB_READ_REG(hw, RCTL);

        if (netdev->flags & IFF_PROMISC) {
                rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
                /* disable VLAN filtering */
                rctl &= ~IXGB_RCTL_CFIEN;
                rctl &= ~IXGB_RCTL_VFE;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        rctl |= IXGB_RCTL_MPE;
                        rctl &= ~IXGB_RCTL_UPE;
                } else {
                        rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
                }
                /* enable VLAN filtering */
                rctl |= IXGB_RCTL_VFE;
                rctl &= ~IXGB_RCTL_CFIEN;
        }

        if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
                rctl |= IXGB_RCTL_MPE;
                IXGB_WRITE_REG(hw, RCTL, rctl);
        } else {
                u8 *mta = kmalloc_array(ETH_ALEN,
                                        IXGB_MAX_NUM_MULTICAST_ADDRESSES,
                                        GFP_ATOMIC);
                u8 *addr;
                if (!mta)
                        goto alloc_failed;

                IXGB_WRITE_REG(hw, RCTL, rctl);

                addr = mta;
                netdev_for_each_mc_addr(ha, netdev) {
                        memcpy(addr, ha->addr, ETH_ALEN);
                        addr += ETH_ALEN;
                }

                ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
                kfree(mta);
        }

alloc_failed:
        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
                ixgb_vlan_strip_enable(adapter);
        else
                ixgb_vlan_strip_disable(adapter);

}

/**
 * ixgb_watchdog - Timer Call-back
 * @t: pointer to timer_list containing our private info pointer
 **/

static void
ixgb_watchdog(struct timer_list *t)
{
        struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
        struct net_device *netdev = adapter->netdev;
        struct ixgb_desc_ring *txdr = &adapter->tx_ring;

        ixgb_check_for_link(&adapter->hw);

        if (ixgb_check_for_bad_link(&adapter->hw)) {
                /* force the reset path */
                netif_stop_queue(netdev);
        }

        if (adapter->hw.link_up) {
                if (!netif_carrier_ok(netdev)) {
                        netdev_info(netdev,
                                    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
                                    (adapter->hw.fc.type == ixgb_fc_full) ?
                                    "RX/TX" :
                                    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
                                     "RX" :
                                    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
                                    "TX" : "None");
                        adapter->link_speed = 10000;
                        adapter->link_duplex = FULL_DUPLEX;
                        netif_carrier_on(netdev);
                }
        } else {
                if (netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
                        netdev_info(netdev, "NIC Link is Down\n");
                        netif_carrier_off(netdev);
                }
        }

        ixgb_update_stats(adapter);

        if (!netif_carrier_ok(netdev)) {
                if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context). */
                        schedule_work(&adapter->tx_timeout_task);
                        /* return immediately since reset is imminent */
                        return;
                }
        }

        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = true;

        /* generate an interrupt to force clean up of any stragglers */
        IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

        /* Reset the timer */
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define IXGB_TX_FLAGS_CSUM              0x00000001
#define IXGB_TX_FLAGS_VLAN              0x00000002
#define IXGB_TX_FLAGS_TSO               0x00000004

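/**
 * ixgb_tso - queue a TSO context descriptor if the skb needs one
 * @adapter: board private structure
 * @skb: packet to segment
 *
 * Returns 1 when a TSO context descriptor was queued, 0 when the skb is
 * not GSO, or a negative errno if the header could not be made writable.
 **/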
static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
        u16 ipcse, tucse, mss;

        if (likely(skb_is_gso(skb))) {
                struct ixgb_buffer *buffer_info;
                struct iphdr *iph;
                int err;

                err = skb_cow_head(skb, 0);
                if (err < 0)
                        return err;

                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                mss = skb_shinfo(skb)->gso_size;
                iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP, 0);
                ipcss = skb_network_offset(skb);
                ipcso = (void *)&(iph->check) - (void *)skb->data;
                ipcse = skb_transport_offset(skb) - 1;
                tucss = skb_transport_offset(skb);
                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
                tucse = 0;

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->ipcss = ipcss;
                context_desc->ipcso = ipcso;
                context_desc->ipcse = cpu_to_le16(ipcse);
                context_desc->tucss = tucss;
                context_desc->tucso = tucso;
                context_desc->tucse = cpu_to_le16(tucse);
                context_desc->mss = cpu_to_le16(mss);
                context_desc->hdr_len = hdr_len;
                context_desc->status = 0;
                context_desc->cmd_type_len = cpu_to_le32(
                                                  IXGB_CONTEXT_DESC_TYPE
                                                | IXGB_CONTEXT_DESC_CMD_TSE
                                                | IXGB_CONTEXT_DESC_CMD_IP
                                                | IXGB_CONTEXT_DESC_CMD_TCP
                                                | IXGB_CONTEXT_DESC_CMD_IDE
                                                | (skb->len - (hdr_len)));


                if (++i == adapter->tx_ring.count)
                        i = 0;
                adapter->tx_ring.next_to_use = i;

                return 1;
        }

        return 0;
}

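/**
 * ixgb_tx_csum - queue a checksum-offload context descriptor if needed
 * @adapter: board private structure
 * @skb: packet whose checksum should be inserted by hardware
 *
 * Returns true when a context descriptor was queued for a
 * CHECKSUM_PARTIAL skb, false otherwise.
 **/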
static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 css, cso;

        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                struct ixgb_buffer *buffer_info;
                css = skb_checksum_start_offset(skb);
                cso = css + skb->csum_offset;

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->tucss = css;
                context_desc->tucso = cso;
                context_desc->tucse = 0;
                /* zero out any previously existing data in one instruction */
                *(u32 *)&(context_desc->ipcss) = 0;
                context_desc->status = 0;
                context_desc->hdr_len = 0;
                context_desc->mss = 0;
                context_desc->cmd_type_len =
                        cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
                                    | IXGB_TX_DESC_CMD_IDE);

                if (++i == adapter->tx_ring.count)
                        i = 0;
                adapter->tx_ring.next_to_use = i;

                return true;
        }

        return false;
}

#define IXGB_MAX_TXD_PWR        14
#define IXGB_MAX_DATA_PER_TXD   (1<<IXGB_MAX_TXD_PWR)

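/**
 * ixgb_tx_map - DMA-map an skb into the Tx ring
 * @adapter: board private structure
 * @skb: packet to map
 * @first: ring index of the first descriptor used for this skb
 *
 * Maps the linear head and each page fragment, splitting at
 * IXGB_MAX_DATA_PER_TXD (16 KiB) per descriptor, and trims 4 bytes off a
 * final full-length TSO buffer as the sentinel-descriptor workaround.
 * Returns the number of descriptors used, or 0 if a DMA mapping failed
 * (in which case everything mapped so far is unwound).
 **/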
static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
            unsigned int first)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        struct ixgb_buffer *buffer_info;
        int len = skb_headlen(skb);
        unsigned int offset = 0, size, count = 0, i;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;

        i = tx_ring->next_to_use;

        while (len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, IXGB_MAX_DATA_PER_TXD);
                /* Workaround for premature desc write-backs
                 * in TSO mode.  Append 4-byte sentinel desc */
                if (unlikely(mss && !nr_frags && size == len && size > 8))
                        size -= 4;

                buffer_info->length = size;
                WARN_ON(buffer_info->dma != 0);
                buffer_info->time_stamp = jiffies;
                buffer_info->mapped_as_page = false;
                buffer_info->dma = dma_map_single(&pdev->dev,
                                                  skb->data + offset,
                                                  size, DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                        goto dma_error;
                buffer_info->next_to_watch = 0;

                len -= size;
                offset += size;
                count++;
                if (len) {
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }
        }

        for (f = 0; f < nr_frags; f++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
                len = skb_frag_size(frag);
                offset = 0;

                while (len) {
                        i++;
                        if (i == tx_ring->count)
                                i = 0;

                        buffer_info = &tx_ring->buffer_info[i];
                        size = min(len, IXGB_MAX_DATA_PER_TXD);

                        /* Workaround for premature desc write-backs
                         * in TSO mode.  Append 4-byte sentinel desc */
                        if (unlikely(mss && (f == (nr_frags - 1))
                                     && size == len && size > 8))
                                size -= 4;

                        buffer_info->length = size;
                        buffer_info->time_stamp = jiffies;
                        buffer_info->mapped_as_page = true;
                        buffer_info->dma =
                                skb_frag_dma_map(&pdev->dev, frag, offset, size,
                                                 DMA_TO_DEVICE);
                        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                                goto dma_error;
                        buffer_info->next_to_watch = 0;

                        len -= size;
                        offset += size;
                        count++;
                }
        }
        tx_ring->buffer_info[i].skb = skb;
        tx_ring->buffer_info[first].next_to_watch = i;

        return count;

dma_error:
        dev_err(&pdev->dev, "TX DMA map failed\n");
        buffer_info->dma = 0;
        if (count)
                count--;

        while (count--) {
                if (i == 0)
                        i += tx_ring->count;
                i--;
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        return 0;
}

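/**
 * ixgb_tx_queue - write the descriptors for a mapped skb and bump the tail
 * @adapter: board private structure
 * @count: number of descriptors ixgb_tx_map() used
 * @vlan_id: VLAN tag to insert when IXGB_TX_FLAGS_VLAN is set
 * @tx_flags: IXGB_TX_FLAGS_* for this packet
 *
 * Fills @count Tx descriptors from the buffer_info entries, marks the
 * last one EOP/RS, and writes the new tail to TDT after a write barrier
 * so the device only sees fully written descriptors.
 **/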
static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_tx_desc *tx_desc = NULL;
        struct ixgb_buffer *buffer_info;
        u32 cmd_type_len = adapter->tx_cmd_type;
        u8 status = 0;
        u8 popts = 0;
        unsigned int i;

        if (tx_flags & IXGB_TX_FLAGS_TSO) {
                cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
                popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
        }

        if (tx_flags & IXGB_TX_FLAGS_CSUM)
                popts |= IXGB_TX_DESC_POPTS_TXSM;

        if (tx_flags & IXGB_TX_FLAGS_VLAN)
                cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

        i = tx_ring->next_to_use;

        while (count--) {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = IXGB_TX_DESC(*tx_ring, i);
                tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->cmd_type_len =
                        cpu_to_le32(cmd_type_len | buffer_info->length);
                tx_desc->status = status;
                tx_desc->popts = popts;
                tx_desc->vlan = cpu_to_le16(vlan_id);

                if (++i == tx_ring->count)
                        i = 0;
        }

        tx_desc->cmd_type_len |=
                cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64). */
        wmb();

        tx_ring->next_to_use = i;
        IXGB_WRITE_REG(&adapter->hw, TDT, i);
}

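/**
 * __ixgb_maybe_stop_tx - stop the queue, then re-check for a late wakeup
 * @netdev: network interface device structure
 * @size: number of free descriptors required
 *
 * Slow path of ixgb_maybe_stop_tx(): stops the queue, then re-checks
 * IXGB_DESC_UNUSED() after the smp_mb() so a concurrent Tx clean cannot
 * leave the queue stopped while room is actually available.
 **/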
static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

        netif_stop_queue(netdev);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
        smp_mb();

        /* We need to check again in case another CPU has just
         * made room available. */
        if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;

        /* A reprieve! */
        netif_start_queue(netdev);
        ++adapter->restart_queue;
        return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
                              struct ixgb_desc_ring *tx_ring, int size)
{
        if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
                return 0;
        return __ixgb_maybe_stop_tx(netdev, size);
}


/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
                         (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
        + 1 /* one more needed for sentinel TSO workaround */
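/* Worked example (assuming 4 KiB pages): TXD_USE_COUNT(PAGE_SIZE) and
 * TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) both evaluate to 1, so DESC_NEEDED
 * comes to MAX_SKB_FRAGS + 3 descriptors for a worst-case frame.
 */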

static netdev_tx_t
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        unsigned int first;
        unsigned int tx_flags = 0;
        int vlan_id = 0;
        int count = 0;
        int tso;

        if (test_bit(__IXGB_DOWN, &adapter->flags)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (skb->len <= 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
                     DESC_NEEDED)))
                return NETDEV_TX_BUSY;

        if (skb_vlan_tag_present(skb)) {
                tx_flags |= IXGB_TX_FLAGS_VLAN;
                vlan_id = skb_vlan_tag_get(skb);
        }

        first = adapter->tx_ring.next_to_use;

        tso = ixgb_tso(adapter, skb);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (likely(tso))
                tx_flags |= IXGB_TX_FLAGS_TSO;
        else if (ixgb_tx_csum(adapter, skb))
                tx_flags |= IXGB_TX_FLAGS_CSUM;

        count = ixgb_tx_map(adapter, skb, first);

        if (count) {
                ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
                /* Make sure there is space in the ring for the next send. */
                ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);

        } else {
                dev_kfree_skb_any(skb);
                adapter->tx_ring.buffer_info[first].time_stamp = 0;
                adapter->tx_ring.next_to_use = first;
        }

        return NETDEV_TX_OK;
}

/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: index of the hanging queue (unused)
 **/
1539
1540static void
1541ixgb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1542{
1543        struct ixgb_adapter *adapter = netdev_priv(netdev);
1544
1545        /* Do the reset outside of interrupt context */
1546        schedule_work(&adapter->tx_timeout_task);
1547}
1548
1549static void
1550ixgb_tx_timeout_task(struct work_struct *work)
1551{
1552        struct ixgb_adapter *adapter =
1553                container_of(work, struct ixgb_adapter, tx_timeout_task);
1554
1555        adapter->tx_timeout_count++;
1556        ixgb_down(adapter, true);
1557        ixgb_up(adapter);
1558}
1559
1560/**
1561 * ixgb_change_mtu - Change the Maximum Transfer Unit
1562 * @netdev: network interface device structure
1563 * @new_mtu: new value for maximum frame size
1564 *
1565 * Returns 0 on success, negative on failure
1566 **/
1567
1568static int
1569ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1570{
1571        struct ixgb_adapter *adapter = netdev_priv(netdev);
1572        int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1573
1574        if (netif_running(netdev))
1575                ixgb_down(adapter, true);
1576
1577        adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
1578
1579        netdev->mtu = new_mtu;
1580
1581        if (netif_running(netdev))
1582                ixgb_up(adapter);
1583
1584        return 0;
1585}
1586
1587/**
1588 * ixgb_update_stats - Update the board statistics counters.
1589 * @adapter: board private structure
1590 **/
1591
1592void
1593ixgb_update_stats(struct ixgb_adapter *adapter)
1594{
1595        struct net_device *netdev = adapter->netdev;
1596        struct pci_dev *pdev = adapter->pdev;
1597
1598        /* Prevent stats update while adapter is being reset */
1599        if (pci_channel_offline(pdev))
1600                return;
1601
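        /* in promiscuous/allmulti mode (or with an overflowing multicast
         * list) the hardware appears to count broadcasts as multicasts
         * too, so recombine the 64-bit counters and subtract broadcasts */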
1602        if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1603           (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1604                u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1605                u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1606                u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1607                u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1608
1609                multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1610                /* fix up multicast stats by removing broadcasts */
1611                if (multi >= bcast)
1612                        multi -= bcast;
1613
1614                adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1615                adapter->stats.mprch += (multi >> 32);
1616                adapter->stats.bprcl += bcast_l;
1617                adapter->stats.bprch += bcast_h;
1618        } else {
1619                adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1620                adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1621                adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1622                adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1623        }
1624        adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1625        adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1626        adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1627        adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1628        adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1629        adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1630        adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1631        adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1632        adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1633        adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1634        adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1635        adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1636        adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1637        adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1638        adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1639        adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1640        adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1641        adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1642        adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1643        adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1644        adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1645        adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1646        adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1647        adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1648        adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1649        adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1650        adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1651        adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1652        adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1653        adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1654        adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1655        adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1656        adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1657        adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1658        adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1659        adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1660        adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1661        adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1662        adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1663        adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1664        adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1665        adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1666        adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1667        adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1668        adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1669        adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1670        adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1671        adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1672        adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1673        adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1674        adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1675        adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1676        adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1677        adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1678        adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1679        adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1680
1681        /* Fill out the OS statistics structure */
1682
1683        netdev->stats.rx_packets = adapter->stats.gprcl;
1684        netdev->stats.tx_packets = adapter->stats.gptcl;
1685        netdev->stats.rx_bytes = adapter->stats.gorcl;
1686        netdev->stats.tx_bytes = adapter->stats.gotcl;
1687        netdev->stats.multicast = adapter->stats.mprcl;
1688        netdev->stats.collisions = 0;
1689
        /* ignore RLEC as it reports errors for padded (<64 bytes) frames
1691         * with a length in the type/len field */
1692        netdev->stats.rx_errors =
1693            /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1694            adapter->stats.ruc +
1695            adapter->stats.roc /*+ adapter->stats.rlec */  +
1696            adapter->stats.icbc +
1697            adapter->stats.ecbc + adapter->stats.mpc;
1698
1699        /* see above
1700         * netdev->stats.rx_length_errors = adapter->stats.rlec;
1701         */
1702
1703        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
1704        netdev->stats.rx_fifo_errors = adapter->stats.mpc;
1705        netdev->stats.rx_missed_errors = adapter->stats.mpc;
1706        netdev->stats.rx_over_errors = adapter->stats.mpc;
1707
1708        netdev->stats.tx_errors = 0;
1709        netdev->stats.rx_frame_errors = 0;
1710        netdev->stats.tx_aborted_errors = 0;
1711        netdev->stats.tx_carrier_errors = 0;
1712        netdev->stats.tx_fifo_errors = 0;
1713        netdev->stats.tx_heartbeat_errors = 0;
1714        netdev->stats.tx_window_errors = 0;
1715}
1716
1717#define IXGB_MAX_INTR 10
1718/**
1719 * ixgb_intr - Interrupt Handler
1720 * @irq: interrupt number
1721 * @data: pointer to a network interface device structure
1722 **/
1723
1724static irqreturn_t
1725ixgb_intr(int irq, void *data)
1726{
1727        struct net_device *netdev = data;
1728        struct ixgb_adapter *adapter = netdev_priv(netdev);
1729        struct ixgb_hw *hw = &adapter->hw;
1730        u32 icr = IXGB_READ_REG(hw, ICR);
1731
1732        if (unlikely(!icr))
1733                return IRQ_NONE;  /* Not our interrupt */
1734
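        /* on a receive sequence error or link status change, run the
         * watchdog immediately so the link state is re-evaluated */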
1735        if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1736                if (!test_bit(__IXGB_DOWN, &adapter->flags))
1737                        mod_timer(&adapter->watchdog_timer, jiffies);
1738
1739        if (napi_schedule_prep(&adapter->napi)) {
1740
                /* Disable interrupts and register for poll. The flush
                 * of the posted write is intentionally left out.
                 */
1744
1745                IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1746                __napi_schedule(&adapter->napi);
1747        }
1748        return IRQ_HANDLED;
1749}
1750
1751/**
1752 * ixgb_clean - NAPI Rx polling callback
 * @napi: napi struct pointer
 * @budget: max number of packets this poll may clean
1754 **/
1755
1756static int
1757ixgb_clean(struct napi_struct *napi, int budget)
1758{
1759        struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
1760        int work_done = 0;
1761
1762        ixgb_clean_tx_irq(adapter);
1763        ixgb_clean_rx_irq(adapter, &work_done, budget);
1764
1765        /* If budget not fully consumed, exit the polling mode */
1766        if (work_done < budget) {
1767                napi_complete_done(napi, work_done);
1768                if (!test_bit(__IXGB_DOWN, &adapter->flags))
1769                        ixgb_irq_enable(adapter);
1770        }
1771
1772        return work_done;
1773}
1774
1775/**
1776 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1777 * @adapter: board private structure
1778 **/
1779
1780static bool
1781ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1782{
1783        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1784        struct net_device *netdev = adapter->netdev;
1785        struct ixgb_tx_desc *tx_desc, *eop_desc;
1786        struct ixgb_buffer *buffer_info;
1787        unsigned int i, eop;
1788        bool cleaned = false;
1789
1790        i = tx_ring->next_to_clean;
1791        eop = tx_ring->buffer_info[i].next_to_watch;
1792        eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1793
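        /* the outer loop walks from one completed end-of-packet descriptor
         * to the next; the inner loop frees every descriptor belonging to
         * that packet */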
1794        while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1795
1796                rmb(); /* read buffer_info after eop_desc */
1797                for (cleaned = false; !cleaned; ) {
1798                        tx_desc = IXGB_TX_DESC(*tx_ring, i);
1799                        buffer_info = &tx_ring->buffer_info[i];
1800
1801                        if (tx_desc->popts &
1802                           (IXGB_TX_DESC_POPTS_TXSM |
1803                            IXGB_TX_DESC_POPTS_IXSM))
1804                                adapter->hw_csum_tx_good++;
1805
1806                        ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1807
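                        /* clear status, popts and vlan with one 32-bit store */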
1808                        *(u32 *)&(tx_desc->status) = 0;
1809
1810                        cleaned = (i == eop);
                        if (++i == tx_ring->count)
                                i = 0;
1812                }
1813
1814                eop = tx_ring->buffer_info[i].next_to_watch;
1815                eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1816        }
1817
1818        tx_ring->next_to_clean = i;
1819
1820        if (unlikely(cleaned && netif_carrier_ok(netdev) &&
1821                     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
1822                /* Make sure that anybody stopping the queue after this
1823                 * sees the new next_to_clean. */
1824                smp_mb();
1825
1826                if (netif_queue_stopped(netdev) &&
1827                    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
1828                        netif_wake_queue(netdev);
1829                        ++adapter->restart_queue;
1830                }
1831        }
1832
1833        if (adapter->detect_tx_hung) {
1834                /* detect a transmit hang in hardware, this serializes the
1835                 * check with the clearing of time_stamp and movement of i */
1836                adapter->detect_tx_hung = false;
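                /* detect_tx_hung is re-armed by each watchdog run; a hang is
                 * declared only if the oldest pending descriptor is over HZ
                 * old and the transmitter is not paused by flow control
                 * (TXOFF) */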
1837                if (tx_ring->buffer_info[eop].time_stamp &&
1838                   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
1839                   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
1840                        IXGB_STATUS_TXOFF)) {
1841                        /* detected Tx unit hang */
1842                        netif_err(adapter, drv, adapter->netdev,
1843                                  "Detected Tx Unit Hang\n"
1844                                  "  TDH                  <%x>\n"
1845                                  "  TDT                  <%x>\n"
1846                                  "  next_to_use          <%x>\n"
1847                                  "  next_to_clean        <%x>\n"
1848                                  "buffer_info[next_to_clean]\n"
1849                                  "  time_stamp           <%lx>\n"
1850                                  "  next_to_watch        <%x>\n"
1851                                  "  jiffies              <%lx>\n"
1852                                  "  next_to_watch.status <%x>\n",
1853                                  IXGB_READ_REG(&adapter->hw, TDH),
1854                                  IXGB_READ_REG(&adapter->hw, TDT),
1855                                  tx_ring->next_to_use,
1856                                  tx_ring->next_to_clean,
1857                                  tx_ring->buffer_info[eop].time_stamp,
1858                                  eop,
1859                                  jiffies,
1860                                  eop_desc->status);
1861                        netif_stop_queue(netdev);
1862                }
1863        }
1864
1865        return cleaned;
1866}
1867
1868/**
1869 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1870 * @adapter: board private structure
1871 * @rx_desc: receive descriptor
 * @skb: socket buffer with received data
1873 **/
1874
1875static void
1876ixgb_rx_checksum(struct ixgb_adapter *adapter,
1877                 struct ixgb_rx_desc *rx_desc,
1878                 struct sk_buff *skb)
1879{
1880        /* Ignore Checksum bit is set OR
1881         * TCP Checksum has not been calculated
1882         */
1883        if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1884           (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1885                skb_checksum_none_assert(skb);
1886                return;
1887        }
1888
1889        /* At this point we know the hardware did the TCP checksum */
1890        /* now look at the TCP checksum error bit */
1891        if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1892                /* let the stack verify checksum errors */
1893                skb_checksum_none_assert(skb);
1894                adapter->hw_csum_rx_error++;
1895        } else {
1896                /* TCP checksum is good */
1897                skb->ip_summed = CHECKSUM_UNNECESSARY;
1898                adapter->hw_csum_rx_good++;
1899        }
1900}
1901
/*
 * Copy small packets into a freshly allocated skb and recycle the original
 * receive buffer; this should improve performance for small packets with
 * large amounts of reassembly being done in the stack.
 */
1906static void ixgb_check_copybreak(struct napi_struct *napi,
1907                                 struct ixgb_buffer *buffer_info,
1908                                 u32 length, struct sk_buff **skb)
1909{
1910        struct sk_buff *new_skb;
1911
1912        if (length > copybreak)
1913                return;
1914
1915        new_skb = napi_alloc_skb(napi, length);
1916        if (!new_skb)
1917                return;
1918
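        /* napi_alloc_skb() already reserved NET_IP_ALIGN headroom, so copy
         * from -NET_IP_ALIGN to preserve the IP header alignment */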
1919        skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
1920                                       (*skb)->data - NET_IP_ALIGN,
1921                                       length + NET_IP_ALIGN);
1922        /* save the skb in buffer_info as good */
1923        buffer_info->skb = *skb;
1924        *skb = new_skb;
1925}
1926
1927/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 * @work_done: output: number of packets cleaned by this call
 * @work_to_do: NAPI budget; the maximum number of packets to clean
1930 **/
1931
1932static bool
1933ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1934{
1935        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1936        struct net_device *netdev = adapter->netdev;
1937        struct pci_dev *pdev = adapter->pdev;
1938        struct ixgb_rx_desc *rx_desc, *next_rxd;
1939        struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1940        u32 length;
1941        unsigned int i, j;
1942        int cleaned_count = 0;
1943        bool cleaned = false;
1944
1945        i = rx_ring->next_to_clean;
1946        rx_desc = IXGB_RX_DESC(*rx_ring, i);
1947        buffer_info = &rx_ring->buffer_info[i];
1948
1949        while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1950                struct sk_buff *skb;
1951                u8 status;
1952
1953                if (*work_done >= work_to_do)
1954                        break;
1955
1956                (*work_done)++;
1957                rmb();  /* read descriptor and rx_buffer_info after status DD */
1958                status = rx_desc->status;
1959                skb = buffer_info->skb;
1960                buffer_info->skb = NULL;
1961
1962                prefetch(skb->data - NET_IP_ALIGN);
1963
1964                if (++i == rx_ring->count)
1965                        i = 0;
1966                next_rxd = IXGB_RX_DESC(*rx_ring, i);
1967                prefetch(next_rxd);
1968
1969                j = i + 1;
1970                if (j == rx_ring->count)
1971                        j = 0;
1972                next2_buffer = &rx_ring->buffer_info[j];
1973                prefetch(next2_buffer);
1974
1975                next_buffer = &rx_ring->buffer_info[i];
1976
1977                cleaned = true;
1978                cleaned_count++;
1979
1980                dma_unmap_single(&pdev->dev,
1981                                 buffer_info->dma,
1982                                 buffer_info->length,
1983                                 DMA_FROM_DEVICE);
1984                buffer_info->dma = 0;
1985
1986                length = le16_to_cpu(rx_desc->length);
1987                rx_desc->length = 0;
1988
1989                if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
1990
1991                        /* All receives must fit into a single buffer */
1992
1993                        pr_debug("Receive packet consumed multiple buffers length<%x>\n",
1994                                 length);
1995
1996                        dev_kfree_skb_irq(skb);
1997                        goto rxdesc_done;
1998                }
1999
2000                if (unlikely(rx_desc->errors &
2001                    (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
2002                     IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
2003                        dev_kfree_skb_irq(skb);
2004                        goto rxdesc_done;
2005                }
2006
2007                ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
2008
2009                /* Good Receive */
2010                skb_put(skb, length);
2011
2012                /* Receive Checksum Offload */
2013                ixgb_rx_checksum(adapter, rx_desc, skb);
2014
2015                skb->protocol = eth_type_trans(skb, netdev);
2016                if (status & IXGB_RX_DESC_STATUS_VP)
2017                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2018                                       le16_to_cpu(rx_desc->special));
2019
2020                netif_receive_skb(skb);
2021
2022rxdesc_done:
                /* clean up the descriptor; it might be written over by hw */
2024                rx_desc->status = 0;
2025
                /* return some buffers to hardware; one at a time is too slow */
2027                if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
2028                        ixgb_alloc_rx_buffers(adapter, cleaned_count);
2029                        cleaned_count = 0;
2030                }
2031
2032                /* use prefetched values */
2033                rx_desc = next_rxd;
2034                buffer_info = next_buffer;
2035        }
2036
2037        rx_ring->next_to_clean = i;
2038
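        /* restock everything the hardware has consumed, not just the
         * descriptors cleaned on this pass */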
2039        cleaned_count = IXGB_DESC_UNUSED(rx_ring);
2040        if (cleaned_count)
2041                ixgb_alloc_rx_buffers(adapter, cleaned_count);
2042
2043        return cleaned;
2044}
2045
2046/**
2047 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: how many buffers to try to allocate
2049 **/
2050
2051static void
2052ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2053{
2054        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2055        struct net_device *netdev = adapter->netdev;
2056        struct pci_dev *pdev = adapter->pdev;
2057        struct ixgb_rx_desc *rx_desc;
2058        struct ixgb_buffer *buffer_info;
2059        struct sk_buff *skb;
2060        unsigned int i;
2061        long cleancount;
2062
2063        i = rx_ring->next_to_use;
2064        buffer_info = &rx_ring->buffer_info[i];
        cleancount = IXGB_DESC_UNUSED(rx_ring);

2068        /* leave three descriptors unused */
2069        while (--cleancount > 2 && cleaned_count--) {
                /* recycle! it's good for you */
2071                skb = buffer_info->skb;
2072                if (skb) {
2073                        skb_trim(skb, 0);
2074                        goto map_skb;
2075                }
2076
2077                skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
2078                if (unlikely(!skb)) {
2079                        /* Better luck next round */
2080                        adapter->alloc_rx_buff_failed++;
2081                        break;
2082                }
2083
2084                buffer_info->skb = skb;
2085                buffer_info->length = adapter->rx_buffer_len;
2086map_skb:
2087                buffer_info->dma = dma_map_single(&pdev->dev,
2088                                                  skb->data,
2089                                                  adapter->rx_buffer_len,
2090                                                  DMA_FROM_DEVICE);
2091                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
2092                        adapter->alloc_rx_buff_failed++;
2093                        break;
2094                }
2095
2096                rx_desc = IXGB_RX_DESC(*rx_ring, i);
2097                rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
                /* guarantee the DD bit is clear before the h/w gets the
                 * descriptor; this is the rest of the workaround for the
                 * h/w double-writeback erratum */
                rx_desc->status = 0;

2104                if (++i == rx_ring->count)
2105                        i = 0;
2106                buffer_info = &rx_ring->buffer_info[i];
2107        }
2108
2109        if (likely(rx_ring->next_to_use != i)) {
2110                rx_ring->next_to_use = i;
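                /* back the tail up by one: like the slack left in the fill
                 * loop above, this appears to keep the hardware from ever
                 * owning every descriptor in the ring */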
2111                if (unlikely(i-- == 0))
2112                        i = (rx_ring->count - 1);
2113
2114                /* Force memory writes to complete before letting h/w
2115                 * know there are new descriptors to fetch.  (Only
2116                 * applicable for weak-ordered memory model archs, such
2117                 * as IA-64). */
2118                wmb();
2119                IXGB_WRITE_REG(&adapter->hw, RDT, i);
2120        }
2121}
2122
2123static void
2124ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2125{
2126        u32 ctrl;
2127
2128        /* enable VLAN tag insert/strip */
2129        ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2130        ctrl |= IXGB_CTRL0_VME;
2131        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2132}
2133
2134static void
2135ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
2136{
2137        u32 ctrl;
2138
2139        /* disable VLAN tag insert/strip */
2140        ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2141        ctrl &= ~IXGB_CTRL0_VME;
2142        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2143}
2144
2145static int
2146ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2147{
2148        struct ixgb_adapter *adapter = netdev_priv(netdev);
2149        u32 vfta, index;
2150
2151        /* add VID to filter table */
2152
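        /* the VFTA is an array of 128 32-bit registers; bits 11:5 of the
         * VID select the register and bits 4:0 the bit within it */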
2153        index = (vid >> 5) & 0x7F;
2154        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2155        vfta |= (1 << (vid & 0x1F));
2156        ixgb_write_vfta(&adapter->hw, index, vfta);
2157        set_bit(vid, adapter->active_vlans);
2158
2159        return 0;
2160}
2161
2162static int
2163ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2164{
2165        struct ixgb_adapter *adapter = netdev_priv(netdev);
2166        u32 vfta, index;
2167
2168        /* remove VID from filter table */
2169
2170        index = (vid >> 5) & 0x7F;
2171        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2172        vfta &= ~(1 << (vid & 0x1F));
2173        ixgb_write_vfta(&adapter->hw, index, vfta);
2174        clear_bit(vid, adapter->active_vlans);
2175
2176        return 0;
2177}
2178
2179static void
2180ixgb_restore_vlan(struct ixgb_adapter *adapter)
2181{
2182        u16 vid;
2183
2184        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2185                ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2186}
2187
2188/**
2189 * ixgb_io_error_detected - called when PCI error is detected
2190 * @pdev:    pointer to pci device with error
2191 * @state:   pci channel state after error
2192 *
2193 * This callback is called by the PCI subsystem whenever
2194 * a PCI bus error is detected.
2195 */
2196static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
2197                                               enum pci_channel_state state)
2198{
2199        struct net_device *netdev = pci_get_drvdata(pdev);
2200        struct ixgb_adapter *adapter = netdev_priv(netdev);
2201
2202        netif_device_detach(netdev);
2203
2204        if (state == pci_channel_io_perm_failure)
2205                return PCI_ERS_RESULT_DISCONNECT;
2206
2207        if (netif_running(netdev))
2208                ixgb_down(adapter, true);
2209
2210        pci_disable_device(pdev);
2211
2212        /* Request a slot reset. */
2213        return PCI_ERS_RESULT_NEED_RESET;
2214}
2215
2216/**
2217 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: pointer to pci device with error
2219 *
2220 * This callback is called after the PCI bus has been reset.
2221 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the ixgb_probe() routine.
2224 */
2225static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2226{
2227        struct net_device *netdev = pci_get_drvdata(pdev);
2228        struct ixgb_adapter *adapter = netdev_priv(netdev);
2229
2230        if (pci_enable_device(pdev)) {
2231                netif_err(adapter, probe, adapter->netdev,
2232                          "Cannot re-enable PCI device after reset\n");
2233                return PCI_ERS_RESULT_DISCONNECT;
2234        }
2235
2236        /* Perform card reset only on one instance of the card */
        if (PCI_FUNC(pdev->devfn) != 0)
2238                return PCI_ERS_RESULT_RECOVERED;
2239
2240        pci_set_master(pdev);
2241
2242        netif_carrier_off(netdev);
2243        netif_stop_queue(netdev);
2244        ixgb_reset(adapter);
2245
2246        /* Make sure the EEPROM is good */
2247        if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2248                netif_err(adapter, probe, adapter->netdev,
2249                          "After reset, the EEPROM checksum is not valid\n");
2250                return PCI_ERS_RESULT_DISCONNECT;
2251        }
2252        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2253        memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2254
2255        if (!is_valid_ether_addr(netdev->perm_addr)) {
2256                netif_err(adapter, probe, adapter->netdev,
2257                          "After reset, invalid MAC address\n");
2258                return PCI_ERS_RESULT_DISCONNECT;
2259        }
2260
2261        return PCI_ERS_RESULT_RECOVERED;
2262}
2263
2264/**
 * ixgb_io_resume - called when it's OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that it's OK to resume
 * normal operation. The implementation resembles the second half
 * of the ixgb_probe() routine.
2271 */
2272static void ixgb_io_resume(struct pci_dev *pdev)
2273{
2274        struct net_device *netdev = pci_get_drvdata(pdev);
2275        struct ixgb_adapter *adapter = netdev_priv(netdev);
2276
2277        pci_set_master(pdev);
2278
2279        if (netif_running(netdev)) {
2280                if (ixgb_up(adapter)) {
2281                        pr_err("can't bring device back up after reset\n");
2282                        return;
2283                }
2284        }
2285
2286        netif_device_attach(netdev);
2287        mod_timer(&adapter->watchdog_timer, jiffies);
2288}
2289
2290/* ixgb_main.c */
2291