linux/drivers/net/ethernet/intel/ixgb/ixgb_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2008 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/prefetch.h>
#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#define DRIVERNAPI "-NAPI"
#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";

#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");
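
/* Frames no longer than copybreak bytes are copied into a freshly
 * allocated skb on receive so the large Rx buffer does not have to be
 * handed up the stack. Registered with mode 0644, so it can also be
 * tuned at runtime via /sys/module/ixgb/parameters/copybreak.
 */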

/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgb_pci_tbl[] = {
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);

/* Local Function Prototypes */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(struct timer_list *t);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
                                   struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

static void ixgb_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void ixgb_tx_timeout_task(struct work_struct *work);

static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
                                __be16 proto, u16 vid);
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
                                 __be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers ixgb_err_handler = {
        .error_detected = ixgb_io_error_detected,
        .slot_reset = ixgb_io_slot_reset,
        .resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
        .name     = ixgb_driver_name,
        .id_table = ixgb_pci_tbl,
        .probe    = ixgb_probe,
        .remove   = ixgb_remove,
        .err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
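
/* debug == -1 makes netif_msg_init() fall back to DEFAULT_MSG_ENABLE
 * (driver, probe and link messages); otherwise it is interpreted as
 * the number of low-order NETIF_MSG_* bits to turn on.
 */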

/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
        pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
        pr_info("%s\n", ixgb_copyright);

        return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
        pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
        IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
        IXGB_WRITE_FLUSH(&adapter->hw);
        synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
        u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
                  IXGB_INT_TXDW | IXGB_INT_LSC;
        if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
                val |= IXGB_INT_GPI0;
        IXGB_WRITE_REG(&adapter->hw, IMS, val);
        IXGB_WRITE_FLUSH(&adapter->hw);
}
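
/* Note on the two helpers above: writing IMC masks (disables) the
 * given interrupt causes while writing IMS unmasks them, and the
 * IXGB_WRITE_FLUSH() read forces the posted PCI write out to the
 * device before the caller continues.
 */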

int
ixgb_up(struct ixgb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err, irq_flags = IRQF_SHARED;
        int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        struct ixgb_hw *hw = &adapter->hw;

        /* hardware has been reset, we need to reload some things */

        ixgb_rar_set(hw, netdev->dev_addr, 0);
        ixgb_set_multi(netdev);

        ixgb_restore_vlan(adapter);

        ixgb_configure_tx(adapter);
        ixgb_setup_rctl(adapter);
        ixgb_configure_rx(adapter);
        ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

        /* disable interrupts and get the hardware into a known state */
        IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

        /* only enable MSI if bus is in PCI-X mode */
        if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
                err = pci_enable_msi(adapter->pdev);
                if (!err) {
                        adapter->have_msi = true;
                        irq_flags = 0;
                }
                /* proceed to try to request regular interrupt */
        }

        err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
                          netdev->name, netdev);
        if (err) {
                if (adapter->have_msi)
                        pci_disable_msi(adapter->pdev);
                netif_err(adapter, probe, adapter->netdev,
                          "Unable to allocate interrupt, error %d\n", err);
                return err;
        }

        if ((hw->max_frame_size != max_frame) ||
                (hw->max_frame_size !=
                (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

                hw->max_frame_size = max_frame;

                IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

                if (hw->max_frame_size >
                   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
                        u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

                        if (!(ctrl0 & IXGB_CTRL0_JFE)) {
                                ctrl0 |= IXGB_CTRL0_JFE;
                                IXGB_WRITE_REG(hw, CTRL0, ctrl0);
                        }
                }
        }

        clear_bit(__IXGB_DOWN, &adapter->flags);

        napi_enable(&adapter->napi);
        ixgb_irq_enable(adapter);

        netif_wake_queue(netdev);

        mod_timer(&adapter->watchdog_timer, jiffies);

        return 0;
}

void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
        struct net_device *netdev = adapter->netdev;

        /* prevent the interrupt handler from restarting watchdog */
        set_bit(__IXGB_DOWN, &adapter->flags);

        netif_carrier_off(netdev);

        napi_disable(&adapter->napi);
        /* waiting for NAPI to complete can re-enable interrupts */
        ixgb_irq_disable(adapter);
        free_irq(adapter->pdev->irq, netdev);

        if (adapter->have_msi)
                pci_disable_msi(adapter->pdev);

        if (kill_watchdog)
                del_timer_sync(&adapter->watchdog_timer);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_stop_queue(netdev);

        ixgb_reset(adapter);
        ixgb_clean_tx_ring(adapter);
        ixgb_clean_rx_ring(adapter);
}

void
ixgb_reset(struct ixgb_adapter *adapter)
{
        struct ixgb_hw *hw = &adapter->hw;

        ixgb_adapter_stop(hw);
        if (!ixgb_init_hw(hw))
                netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");

        /* restore frame size information */
        IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
        if (hw->max_frame_size >
            IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
                u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
                if (!(ctrl0 & IXGB_CTRL0_JFE)) {
                        ctrl0 |= IXGB_CTRL0_JFE;
                        IXGB_WRITE_REG(hw, CTRL0, ctrl0);
                }
        }
}

static netdev_features_t
ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
{
        /*
         * Tx VLAN insertion does not work per HW design when Rx stripping is
         * disabled.
         */
        if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;

        return features;
}

static int
ixgb_set_features(struct net_device *netdev, netdev_features_t features)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        netdev_features_t changed = features ^ netdev->features;

        if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
                return 0;

        adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

        if (netif_running(netdev)) {
                ixgb_down(adapter, true);
                ixgb_up(adapter);
                ixgb_set_speed_duplex(netdev);
        } else
                ixgb_reset(adapter);

        return 0;
}

static const struct net_device_ops ixgb_netdev_ops = {
        .ndo_open               = ixgb_open,
        .ndo_stop               = ixgb_close,
        .ndo_start_xmit         = ixgb_xmit_frame,
        .ndo_set_rx_mode        = ixgb_set_multi,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgb_set_mac,
        .ndo_change_mtu         = ixgb_change_mtu,
        .ndo_tx_timeout         = ixgb_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgb_netpoll,
#endif
        .ndo_fix_features       = ixgb_fix_features,
        .ndo_set_features       = ixgb_set_features,
};

/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *netdev = NULL;
        struct ixgb_adapter *adapter;
        static int cards_found;
        int pci_using_dac;
        int i;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        pci_using_dac = 0;
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
                pci_using_dac = 1;
        } else {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        pr_err("No usable DMA configuration, aborting\n");
                        goto err_dma_mask;
                }
        }

        err = pci_request_regions(pdev, ixgb_driver_name);
        if (err)
                goto err_request_regions;

        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->hw.back = adapter;
        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

        adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
        if (!adapter->hw.hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }

        for (i = BAR_1; i <= BAR_5; i++) {
                if (pci_resource_len(pdev, i) == 0)
                        continue;
                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                        adapter->hw.io_base = pci_resource_start(pdev, i);
                        break;
                }
        }

        netdev->netdev_ops = &ixgb_netdev_ops;
        ixgb_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
        netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);

        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        adapter->bd_number = cards_found;
        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        /* setup the private structure */

        err = ixgb_sw_init(adapter);
        if (err)
                goto err_sw_init;

        netdev->hw_features = NETIF_F_SG |
                           NETIF_F_TSO |
                           NETIF_F_HW_CSUM |
                           NETIF_F_HW_VLAN_CTAG_TX |
                           NETIF_F_HW_VLAN_CTAG_RX;
        netdev->features = netdev->hw_features |
                           NETIF_F_HW_VLAN_CTAG_FILTER;
        netdev->hw_features |= NETIF_F_RXCSUM;
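
        /* Everything in hw_features can be toggled via ethtool, with
         * ixgb_fix_features() keeping Tx VLAN insertion tied to Rx
         * stripping; NETIF_F_HW_VLAN_CTAG_FILTER is only in features,
         * so user space cannot turn VLAN filtering off.
         */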

        if (pci_using_dac) {
                netdev->features |= NETIF_F_HIGHDMA;
                netdev->vlan_features |= NETIF_F_HIGHDMA;
        }

        /* MTU range: 68 - 16114 */
        netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;

        /* make sure the EEPROM is good */

        if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
                netif_err(adapter, probe, adapter->netdev,
                          "The EEPROM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);

        if (!is_valid_ether_addr(netdev->dev_addr)) {
                netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

        timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0);

        INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;

        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);

        netif_info(adapter, probe, adapter->netdev,
                   "Intel(R) PRO/10GbE Network Connection\n");
        ixgb_check_options(adapter);
        /* reset the hardware with the new settings */

        ixgb_reset(adapter);

        cards_found++;
        return 0;

err_register:
err_sw_init:
err_eeprom:
        iounmap(adapter->hw.hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
        pci_disable_device(pdev);
        return err;
}

/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void
ixgb_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        cancel_work_sync(&adapter->tx_timeout_task);

        unregister_netdev(netdev);

        iounmap(adapter->hw.hw_addr);
        pci_release_regions(pdev);

        free_netdev(netdev);
        pci_disable_device(pdev);
}

/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int
ixgb_sw_init(struct ixgb_adapter *adapter)
{
        struct ixgb_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        /* PCI config space info */

        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;

        hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */

        if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
                hw->mac_type = ixgb_82597;
        else {
                /* should never have loaded on this device */
                netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
        }

        /* enable flow control to be programmed */
        hw->fc.send_xon = 1;

        set_bit(__IXGB_DOWN, &adapter->flags);
        return 0;
}

/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        int err;

        /* allocate transmit descriptors */
        err = ixgb_setup_tx_resources(adapter);
        if (err)
                goto err_setup_tx;

        netif_carrier_off(netdev);

        /* allocate receive descriptors */

        err = ixgb_setup_rx_resources(adapter);
        if (err)
                goto err_setup_rx;

        err = ixgb_up(adapter);
        if (err)
                goto err_up;

        netif_start_queue(netdev);

        return 0;

err_up:
        ixgb_free_rx_resources(adapter);
err_setup_rx:
        ixgb_free_tx_resources(adapter);
err_setup_tx:
        ixgb_reset(adapter);

        return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        ixgb_down(adapter, true);

        ixgb_free_tx_resources(adapter);
        ixgb_free_rx_resources(adapter);

        return 0;
}

/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *txdr = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgb_buffer) * txdr->count;
        txdr->buffer_info = vzalloc(size);
        if (!txdr->buffer_info)
                return -ENOMEM;

        /* round up to nearest 4K */

        txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
        txdr->size = ALIGN(txdr->size, 4096);

        /* dma_alloc_coherent() returns zeroed memory on kernels that
         * dropped dma_zalloc_coherent() (v5.0+) */
        txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
                                        GFP_KERNEL);
        if (!txdr->desc) {
                vfree(txdr->buffer_info);
                return -ENOMEM;
        }

        txdr->next_to_use = 0;
        txdr->next_to_clean = 0;

        return 0;
}

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
        u64 tdba = adapter->tx_ring.dma;
        u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
        u32 tctl;
        struct ixgb_hw *hw = &adapter->hw;

        /* Setup the Base and Length of the Tx Descriptor Ring
         * tx_ring.dma can be either a 32 or 64 bit value
         */

        IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

        IXGB_WRITE_REG(hw, TDLEN, tdlen);

        /* Setup the HW Tx Head and Tail descriptor pointers */

        IXGB_WRITE_REG(hw, TDH, 0);
        IXGB_WRITE_REG(hw, TDT, 0);

        /* don't set up txdctl, it induces performance problems if configured
         * incorrectly */
        /* Set the Tx Interrupt Delay register */

        IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

        /* Program the Transmit Control Register */

        tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
        IXGB_WRITE_REG(hw, TCTL, tctl);

        /* Setup Transmit Descriptor Settings for this adapter */
        adapter->tx_cmd_type =
                IXGB_TX_DESC_TYPE |
                (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}

/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgb_buffer) * rxdr->count;
        rxdr->buffer_info = vzalloc(size);
        if (!rxdr->buffer_info)
                return -ENOMEM;

        /* Round up to nearest 4K */

        rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
        rxdr->size = ALIGN(rxdr->size, 4096);

        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
                                        GFP_KERNEL);

        if (!rxdr->desc) {
                vfree(rxdr->buffer_info);
                return -ENOMEM;
        }
        memset(rxdr->desc, 0, rxdr->size);

        rxdr->next_to_clean = 0;
        rxdr->next_to_use = 0;

        return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
        u32 rctl;

        rctl = IXGB_READ_REG(&adapter->hw, RCTL);

        rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

        rctl |=
                IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
                IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
                (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

        rctl |= IXGB_RCTL_SECRC;

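        /* Pick the smallest of the four receive buffer sizes the
         * hardware supports (2K/4K/8K/16K) that fits rx_buffer_len;
         * SECRC above tells the MAC to strip the Ethernet CRC, so the
         * buffer only needs to hold the frame itself.
         */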
        if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
                rctl |= IXGB_RCTL_BSIZE_2048;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
                rctl |= IXGB_RCTL_BSIZE_4096;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
                rctl |= IXGB_RCTL_BSIZE_8192;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
                rctl |= IXGB_RCTL_BSIZE_16384;

        IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
        u64 rdba = adapter->rx_ring.dma;
        u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
        struct ixgb_hw *hw = &adapter->hw;
        u32 rctl;
        u32 rxcsum;

        /* make sure receives are disabled while setting up the descriptors */

        rctl = IXGB_READ_REG(hw, RCTL);
        IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

        /* set the Receive Delay Timer Register */

        IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

        /* Setup the Base and Length of the Rx Descriptor Ring */

        IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

        IXGB_WRITE_REG(hw, RDLEN, rdlen);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGB_WRITE_REG(hw, RDH, 0);
        IXGB_WRITE_REG(hw, RDT, 0);

        /* due to the hardware errata with RXDCTL, we are unable to use any of
         * the performance enhancing features of it without causing other
         * subtle bugs, some of the bugs could include receive length
         * corruption at high data rates (WTHRESH > 0) and/or receive
         * descriptor ring irregularities (particularly in hardware cache) */
        IXGB_WRITE_REG(hw, RXDCTL, 0);

        /* Enable Receive Checksum Offload for TCP and UDP */
        if (adapter->rx_csum) {
                rxcsum = IXGB_READ_REG(hw, RXCSUM);
                rxcsum |= IXGB_RXCSUM_TUOFL;
                IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
        }

        /* Enable Receives */

        IXGB_WRITE_REG(hw, RCTL, rctl);
}

/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;

        ixgb_clean_tx_ring(adapter);

        vfree(adapter->tx_ring.buffer_info);
        adapter->tx_ring.buffer_info = NULL;

        dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
                          adapter->tx_ring.desc, adapter->tx_ring.dma);

        adapter->tx_ring.desc = NULL;
}

static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
                                struct ixgb_buffer *buffer_info)
{
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
                                       buffer_info->length, DMA_TO_DEVICE);
                else
                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
                                         buffer_info->length, DMA_TO_DEVICE);
                buffer_info->dma = 0;
        }

        if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
        /* these fields must always be initialized in tx
         * buffer_info->length = 0;
         * buffer_info->next_to_watch = 0; */
}

/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_buffer *buffer_info;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs */

        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        size = sizeof(struct ixgb_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        IXGB_WRITE_REG(&adapter->hw, TDH, 0);
        IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;

        ixgb_clean_rx_ring(adapter);

        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;

        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
                          rx_ring->dma);

        rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct ixgb_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs */

        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
                        dma_unmap_single(&pdev->dev,
                                         buffer_info->dma,
                                         buffer_info->length,
                                         DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                        buffer_info->length = 0;
                }

                if (buffer_info->skb) {
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct ixgb_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        IXGB_WRITE_REG(&adapter->hw, RDH, 0);
        IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}

/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

        return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_hw *hw = &adapter->hw;
        struct netdev_hw_addr *ha;
        u32 rctl;

        /* Check for Promiscuous and All Multicast modes */

        rctl = IXGB_READ_REG(hw, RCTL);

        if (netdev->flags & IFF_PROMISC) {
                rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
                /* disable VLAN filtering */
                rctl &= ~IXGB_RCTL_CFIEN;
                rctl &= ~IXGB_RCTL_VFE;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        rctl |= IXGB_RCTL_MPE;
                        rctl &= ~IXGB_RCTL_UPE;
                } else {
                        rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
                }
                /* enable VLAN filtering */
                rctl |= IXGB_RCTL_VFE;
                rctl &= ~IXGB_RCTL_CFIEN;
        }

        if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
                rctl |= IXGB_RCTL_MPE;
                IXGB_WRITE_REG(hw, RCTL, rctl);
        } else {
                u8 *mta = kmalloc_array(ETH_ALEN,
                                        IXGB_MAX_NUM_MULTICAST_ADDRESSES,
                                        GFP_ATOMIC);
                u8 *addr;
                if (!mta)
                        goto alloc_failed;

                IXGB_WRITE_REG(hw, RCTL, rctl);

                addr = mta;
                netdev_for_each_mc_addr(ha, netdev) {
                        memcpy(addr, ha->addr, ETH_ALEN);
                        addr += ETH_ALEN;
                }

                ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
                kfree(mta);
        }

alloc_failed:
        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
                ixgb_vlan_strip_enable(adapter);
        else
                ixgb_vlan_strip_disable(adapter);
}

/**
 * ixgb_watchdog - Timer Call-back
 * @t: pointer to the adapter's watchdog timer_list
 **/

static void
ixgb_watchdog(struct timer_list *t)
{
        struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
        struct net_device *netdev = adapter->netdev;
        struct ixgb_desc_ring *txdr = &adapter->tx_ring;

        ixgb_check_for_link(&adapter->hw);

        if (ixgb_check_for_bad_link(&adapter->hw)) {
                /* force the reset path */
                netif_stop_queue(netdev);
        }

        if (adapter->hw.link_up) {
                if (!netif_carrier_ok(netdev)) {
                        netdev_info(netdev,
                                    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
                                    (adapter->hw.fc.type == ixgb_fc_full) ?
                                    "RX/TX" :
                                    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
                                    "RX" :
                                    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
                                    "TX" : "None");
                        adapter->link_speed = 10000;
                        adapter->link_duplex = FULL_DUPLEX;
                        netif_carrier_on(netdev);
                }
        } else {
                if (netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
                        netdev_info(netdev, "NIC Link is Down\n");
                        netif_carrier_off(netdev);
                }
        }

        ixgb_update_stats(adapter);

        if (!netif_carrier_ok(netdev)) {
                if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context). */
                        schedule_work(&adapter->tx_timeout_task);
                        /* return immediately since reset is imminent */
                        return;
                }
        }

        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = true;

        /* generate an interrupt to force clean up of any stragglers */
        IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

        /* Reset the timer */
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define IXGB_TX_FLAGS_CSUM              0x00000001
#define IXGB_TX_FLAGS_VLAN              0x00000002
#define IXGB_TX_FLAGS_TSO               0x00000004
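
/* The IXGB_TX_FLAGS_* bits are collected in ixgb_xmit_frame() and
 * translated into descriptor command/option bits in ixgb_tx_queue().
 */

/* ixgb_tso - queue a TSO context descriptor when the skb is GSO.
 * Returns 1 if a context descriptor was written, 0 if none was needed,
 * or a negative errno if the header could not be brought into the
 * linear area.
 */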
static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
        u16 ipcse, tucse, mss;

        if (likely(skb_is_gso(skb))) {
                struct ixgb_buffer *buffer_info;
                struct iphdr *iph;
                int err;

                err = skb_cow_head(skb, 0);
                if (err < 0)
                        return err;

                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                mss = skb_shinfo(skb)->gso_size;
                iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP, 0);
                ipcss = skb_network_offset(skb);
                ipcso = (void *)&(iph->check) - (void *)skb->data;
                ipcse = skb_transport_offset(skb) - 1;
                tucss = skb_transport_offset(skb);
                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
                tucse = 0;

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->ipcss = ipcss;
                context_desc->ipcso = ipcso;
                context_desc->ipcse = cpu_to_le16(ipcse);
                context_desc->tucss = tucss;
                context_desc->tucso = tucso;
                context_desc->tucse = cpu_to_le16(tucse);
                context_desc->mss = cpu_to_le16(mss);
                context_desc->hdr_len = hdr_len;
                context_desc->status = 0;
                context_desc->cmd_type_len = cpu_to_le32(
                                                  IXGB_CONTEXT_DESC_TYPE
                                                | IXGB_CONTEXT_DESC_CMD_TSE
                                                | IXGB_CONTEXT_DESC_CMD_IP
                                                | IXGB_CONTEXT_DESC_CMD_TCP
                                                | IXGB_CONTEXT_DESC_CMD_IDE
                                                | (skb->len - (hdr_len)));

                if (++i == adapter->tx_ring.count)
                        i = 0;
                adapter->tx_ring.next_to_use = i;

                return 1;
        }

        return 0;
}
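
/* ixgb_tx_csum - queue a checksum-offload context descriptor.
 * Returns true when one was written, false when the skb does not ask
 * for hardware checksumming.
 */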
static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 css, cso;

        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                struct ixgb_buffer *buffer_info;
                css = skb_checksum_start_offset(skb);
                cso = css + skb->csum_offset;

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->tucss = css;
                context_desc->tucso = cso;
                context_desc->tucse = 0;
                /* zero out any previously existing data in one instruction */
                *(u32 *)&(context_desc->ipcss) = 0;
                context_desc->status = 0;
                context_desc->hdr_len = 0;
                context_desc->mss = 0;
                context_desc->cmd_type_len =
                        cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
                                    | IXGB_TX_DESC_CMD_IDE);

                if (++i == adapter->tx_ring.count)
                        i = 0;
                adapter->tx_ring.next_to_use = i;

                return true;
        }

        return false;
}

#define IXGB_MAX_TXD_PWR        14
#define IXGB_MAX_DATA_PER_TXD   (1<<IXGB_MAX_TXD_PWR)
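/* One data descriptor carries at most 2^14 = 16384 bytes, so
 * ixgb_tx_map() below slices the skb head and each page fragment into
 * IXGB_MAX_DATA_PER_TXD-sized pieces.
 */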

static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
            unsigned int first)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        struct ixgb_buffer *buffer_info;
        int len = skb_headlen(skb);
        unsigned int offset = 0, size, count = 0, i;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;

        i = tx_ring->next_to_use;

        while (len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, IXGB_MAX_DATA_PER_TXD);
                /* Workaround for premature desc write-backs
                 * in TSO mode.  Append 4-byte sentinel desc */
                if (unlikely(mss && !nr_frags && size == len && size > 8))
                        size -= 4;

                buffer_info->length = size;
                WARN_ON(buffer_info->dma != 0);
                buffer_info->time_stamp = jiffies;
                buffer_info->mapped_as_page = false;
                buffer_info->dma = dma_map_single(&pdev->dev,
                                                  skb->data + offset,
                                                  size, DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                        goto dma_error;
                buffer_info->next_to_watch = 0;

                len -= size;
                offset += size;
                count++;
                if (len) {
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }
        }

        for (f = 0; f < nr_frags; f++) {
                const skb_frag_t *frag;

                frag = &skb_shinfo(skb)->frags[f];
                len = skb_frag_size(frag);
                offset = 0;

                while (len) {
                        i++;
                        if (i == tx_ring->count)
                                i = 0;

                        buffer_info = &tx_ring->buffer_info[i];
                        size = min(len, IXGB_MAX_DATA_PER_TXD);

                        /* Workaround for premature desc write-backs
                         * in TSO mode.  Append 4-byte sentinel desc */
                        if (unlikely(mss && (f == (nr_frags - 1))
                                     && size == len && size > 8))
                                size -= 4;

                        buffer_info->length = size;
                        buffer_info->time_stamp = jiffies;
                        buffer_info->mapped_as_page = true;
                        buffer_info->dma =
                                skb_frag_dma_map(&pdev->dev, frag, offset, size,
                                                 DMA_TO_DEVICE);
                        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                                goto dma_error;
                        buffer_info->next_to_watch = 0;

                        len -= size;
                        offset += size;
                        count++;
                }
        }
        tx_ring->buffer_info[i].skb = skb;
        tx_ring->buffer_info[first].next_to_watch = i;

        return count;

dma_error:
        dev_err(&pdev->dev, "TX DMA map failed\n");
        buffer_info->dma = 0;
        if (count)
                count--;

        while (count--) {
                if (i == 0)
                        i += tx_ring->count;
                i--;
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        return 0;
}
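
/* ixgb_tx_queue - write hardware descriptors for an already-mapped skb.
 * Every descriptor carries the shared command bits; the last one also
 * gets EOP (end of packet) and RS (report status), and the TDT tail
 * write hands the chain to the hardware.
 */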
static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_tx_desc *tx_desc = NULL;
        struct ixgb_buffer *buffer_info;
        u32 cmd_type_len = adapter->tx_cmd_type;
        u8 status = 0;
        u8 popts = 0;
        unsigned int i;

        if (tx_flags & IXGB_TX_FLAGS_TSO) {
                cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
                popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
        }

        if (tx_flags & IXGB_TX_FLAGS_CSUM)
                popts |= IXGB_TX_DESC_POPTS_TXSM;

        if (tx_flags & IXGB_TX_FLAGS_VLAN)
                cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

        i = tx_ring->next_to_use;

        while (count--) {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = IXGB_TX_DESC(*tx_ring, i);
                tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->cmd_type_len =
                        cpu_to_le32(cmd_type_len | buffer_info->length);
                tx_desc->status = status;
                tx_desc->popts = popts;
                tx_desc->vlan = cpu_to_le16(vlan_id);

                if (++i == tx_ring->count)
                        i = 0;
        }

        tx_desc->cmd_type_len |=
                cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64). */
        wmb();

        tx_ring->next_to_use = i;
        IXGB_WRITE_REG(&adapter->hw, TDT, i);
}

static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

        netif_stop_queue(netdev);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
        smp_mb();

        /* We need to check again in case another CPU has just
         * made room available. */
        if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;

        /* A reprieve! */
        netif_start_queue(netdev);
        ++adapter->restart_queue;
        return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
                              struct ixgb_desc_ring *tx_ring, int size)
{
        if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
                return 0;
        return __ixgb_maybe_stop_tx(netdev, size);
}

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
                         (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
        + 1 /* one more needed for sentinel TSO workaround */
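
/* Worked example, assuming 4 KiB pages and MAX_SKB_FRAGS == 17:
 * TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) = 1 for the linear head,
 * 17 * TXD_USE_COUNT(4096) = 17 for the page fragments, plus one
 * context descriptor and one TSO sentinel: 20 descriptors worst case.
 */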

static netdev_tx_t
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        unsigned int first;
        unsigned int tx_flags = 0;
        int vlan_id = 0;
        int count = 0;
        int tso;

        if (test_bit(__IXGB_DOWN, &adapter->flags)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (skb->len <= 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
                     DESC_NEEDED)))
                return NETDEV_TX_BUSY;

        if (skb_vlan_tag_present(skb)) {
                tx_flags |= IXGB_TX_FLAGS_VLAN;
                vlan_id = skb_vlan_tag_get(skb);
        }

        first = adapter->tx_ring.next_to_use;

        tso = ixgb_tso(adapter, skb);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (likely(tso))
                tx_flags |= IXGB_TX_FLAGS_TSO;
        else if (ixgb_tx_csum(adapter, skb))
                tx_flags |= IXGB_TX_FLAGS_CSUM;

        count = ixgb_tx_map(adapter, skb, first);

        if (count) {
                ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
                /* Make sure there is space in the ring for the next send. */
                ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
        } else {
                dev_kfree_skb_any(skb);
                adapter->tx_ring.buffer_info[first].time_stamp = 0;
                adapter->tx_ring.next_to_use = first;
        }

        return NETDEV_TX_OK;
}

/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: index of the hung queue (unused)
 **/
1550
1551static void
1552ixgb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1553{
1554        struct ixgb_adapter *adapter = netdev_priv(netdev);
1555
1556        /* Do the reset outside of interrupt context */
1557        schedule_work(&adapter->tx_timeout_task);
1558}
1559
1560static void
1561ixgb_tx_timeout_task(struct work_struct *work)
1562{
1563        struct ixgb_adapter *adapter =
1564                container_of(work, struct ixgb_adapter, tx_timeout_task);
1565
1566        adapter->tx_timeout_count++;
1567        ixgb_down(adapter, true);
1568        ixgb_up(adapter);
1569}
1570
1571/**
1572 * ixgb_change_mtu - Change the Maximum Transfer Unit
1573 * @netdev: network interface device structure
1574 * @new_mtu: new value for maximum frame size
1575 *
1576 * Returns 0 on success, negative on failure
1577 **/
1578
1579static int
1580ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1581{
1582        struct ixgb_adapter *adapter = netdev_priv(netdev);
1583        int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1584
1585        if (netif_running(netdev))
1586                ixgb_down(adapter, true);
1587
1588        adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
1589
1590        netdev->mtu = new_mtu;
1591
1592        if (netif_running(netdev))
1593                ixgb_up(adapter);
1594
1595        return 0;
1596}
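
/*
 * Editorial worked example: assuming ENET_HEADER_SIZE == 14 and
 * ENET_FCS_LENGTH == 4 (as defined in the ixgb headers), a standard
 * 1500-byte MTU gives
 *
 *	max_frame     = 1500 + 14 + 4 = 1518
 *	rx_buffer_len = 1518 + 8      = 1526
 *
 * while a 9000-byte jumbo MTU gives max_frame 9018 and rx_buffer_len
 * 9026. The extra 8 bytes absorb the receive overrun described by the
 * errata noted above.
 */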
1597
1598/**
1599 * ixgb_update_stats - Update the board statistics counters.
1600 * @adapter: board private structure
1601 **/
1602
1603void
1604ixgb_update_stats(struct ixgb_adapter *adapter)
1605{
1606        struct net_device *netdev = adapter->netdev;
1607        struct pci_dev *pdev = adapter->pdev;
1608
1609        /* Prevent stats update while adapter is being reset */
1610        if (pci_channel_offline(pdev))
1611                return;
1612
1613        if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1614           (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1615                u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1616                u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1617                u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1618                u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1619
1620                multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1621                /* fix up multicast stats by removing broadcasts */
1622                if (multi >= bcast)
1623                        multi -= bcast;
1624
1625                adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1626                adapter->stats.mprch += (multi >> 32);
1627                adapter->stats.bprcl += bcast_l;
1628                adapter->stats.bprch += bcast_h;
1629        } else {
1630                adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1631                adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1632                adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1633                adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1634        }
1635        adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1636        adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1637        adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1638        adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1639        adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1640        adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1641        adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1642        adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1643        adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1644        adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1645        adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1646        adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1647        adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1648        adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1649        adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1650        adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1651        adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1652        adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1653        adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1654        adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1655        adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1656        adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1657        adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1658        adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1659        adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1660        adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1661        adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1662        adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1663        adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1664        adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1665        adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1666        adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1667        adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1668        adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1669        adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1670        adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1671        adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1672        adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1673        adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1674        adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1675        adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1676        adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1677        adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1678        adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1679        adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1680        adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1681        adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1682        adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1683        adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1684        adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1685        adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1686        adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1687        adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1688        adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1689        adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1690        adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1691
1692        /* Fill out the OS statistics structure */
1693
1694        netdev->stats.rx_packets = adapter->stats.gprcl;
1695        netdev->stats.tx_packets = adapter->stats.gptcl;
1696        netdev->stats.rx_bytes = adapter->stats.gorcl;
1697        netdev->stats.tx_bytes = adapter->stats.gotcl;
1698        netdev->stats.multicast = adapter->stats.mprcl;
1699        netdev->stats.collisions = 0;
1700
1701        /* ignore RLEC as it reports errors for padded (<64 bytes) frames
1702         * with a length in the type/len field */
1703        netdev->stats.rx_errors =
1704            /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1705            adapter->stats.ruc +
1706            adapter->stats.roc /*+ adapter->stats.rlec */  +
1707            adapter->stats.icbc +
1708            adapter->stats.ecbc + adapter->stats.mpc;
1709
1710        /* see above
1711         * netdev->stats.rx_length_errors = adapter->stats.rlec;
1712         */
1713
1714        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
1715        netdev->stats.rx_fifo_errors = adapter->stats.mpc;
1716        netdev->stats.rx_missed_errors = adapter->stats.mpc;
1717        netdev->stats.rx_over_errors = adapter->stats.mpc;
1718
1719        netdev->stats.tx_errors = 0;
1720        netdev->stats.rx_frame_errors = 0;
1721        netdev->stats.tx_aborted_errors = 0;
1722        netdev->stats.tx_carrier_errors = 0;
1723        netdev->stats.tx_fifo_errors = 0;
1724        netdev->stats.tx_heartbeat_errors = 0;
1725        netdev->stats.tx_window_errors = 0;
1726}
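
/*
 * Editorial sketch (hypothetical helper, not part of the driver): each
 * hardware counter above is a 64-bit value exposed as a low/high
 * register pair, combined with the same pattern every time, e.g.:
 */
static u64 example_read_mprc64(struct ixgb_hw *hw)
{
	u64 lo = IXGB_READ_REG(hw, MPRCL);	/* low 32 bits */
	u64 hi = IXGB_READ_REG(hw, MPRCH);	/* high 32 bits */

	return (hi << 32) | lo;
}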
1727
1728#define IXGB_MAX_INTR 10
1729/**
1730 * ixgb_intr - Interrupt Handler
1731 * @irq: interrupt number
1732 * @data: pointer to a network interface device structure
1733 **/
1734
1735static irqreturn_t
1736ixgb_intr(int irq, void *data)
1737{
1738        struct net_device *netdev = data;
1739        struct ixgb_adapter *adapter = netdev_priv(netdev);
1740        struct ixgb_hw *hw = &adapter->hw;
1741        u32 icr = IXGB_READ_REG(hw, ICR);
1742
1743        if (unlikely(!icr))
1744                return IRQ_NONE;  /* Not our interrupt */
1745
1746        if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1747                if (!test_bit(__IXGB_DOWN, &adapter->flags))
1748                        mod_timer(&adapter->watchdog_timer, jiffies);
1749
1750        if (napi_schedule_prep(&adapter->napi)) {
1751
1752                /* Disable interrupts and register for poll. The flush
1753                 * of the posted write is intentionally left out.
1754                 */
1755
1756                IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1757                __napi_schedule(&adapter->napi);
1758        }
1759        return IRQ_HANDLED;
1760}
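
/*
 * Editorial note: this handler implements the usual NAPI hand-off:
 * mask further interrupts (the IMC write), then defer all work to the
 * softirq poll loop. The handler itself is registered at open time
 * (outside this excerpt), roughly as:
 *
 *	err = request_irq(adapter->pdev->irq, ixgb_intr, IRQF_SHARED,
 *			  netdev->name, netdev);
 */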
1761
1762/**
1763 * ixgb_clean - NAPI Rx polling callback
1764 * @napi: NAPI context, embedded in the board private structure
     * @budget: maximum number of receive packets to process
1765 **/
1766
1767static int
1768ixgb_clean(struct napi_struct *napi, int budget)
1769{
1770        struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
1771        int work_done = 0;
1772
1773        ixgb_clean_tx_irq(adapter);
1774        ixgb_clean_rx_irq(adapter, &work_done, budget);
1775
1776        /* If budget not fully consumed, exit the polling mode */
1777        if (work_done < budget) {
1778                napi_complete_done(napi, work_done);
1779                if (!test_bit(__IXGB_DOWN, &adapter->flags))
1780                        ixgb_irq_enable(adapter);
1781        }
1782
1783        return work_done;
1784}
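
/*
 * Editorial note: the poll callback contract is to process at most
 * @budget receive packets, return the number actually processed, and
 * call napi_complete_done() plus re-enable interrupts only when under
 * budget. The callback is hooked up once at probe time; with the NAPI
 * API of this era that is roughly:
 *
 *	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
 */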
1785
1786/**
1787 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1788 * @adapter: board private structure
1789 **/
1790
1791static bool
1792ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1793{
1794        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1795        struct net_device *netdev = adapter->netdev;
1796        struct ixgb_tx_desc *tx_desc, *eop_desc;
1797        struct ixgb_buffer *buffer_info;
1798        unsigned int i, eop;
1799        bool cleaned = false;
1800
1801        i = tx_ring->next_to_clean;
1802        eop = tx_ring->buffer_info[i].next_to_watch;
1803        eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1804
1805        while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1806
1807                rmb(); /* read buffer_info after eop_desc */
1808                for (cleaned = false; !cleaned; ) {
1809                        tx_desc = IXGB_TX_DESC(*tx_ring, i);
1810                        buffer_info = &tx_ring->buffer_info[i];
1811
1812                        if (tx_desc->popts &
1813                           (IXGB_TX_DESC_POPTS_TXSM |
1814                            IXGB_TX_DESC_POPTS_IXSM))
1815                                adapter->hw_csum_tx_good++;
1816
1817                        ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1818
1819                        *(u32 *)&(tx_desc->status) = 0;
1820
1821                        cleaned = (i == eop);
1822                        if (++i == tx_ring->count)
                                    i = 0;
1823                }
1824
1825                eop = tx_ring->buffer_info[i].next_to_watch;
1826                eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1827        }
1828
1829        tx_ring->next_to_clean = i;
1830
1831        if (unlikely(cleaned && netif_carrier_ok(netdev) &&
1832                     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
1833                /* Make sure that anybody stopping the queue after this
1834                 * sees the new next_to_clean. */
1835                smp_mb();
1836
1837                if (netif_queue_stopped(netdev) &&
1838                    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
1839                        netif_wake_queue(netdev);
1840                        ++adapter->restart_queue;
1841                }
1842        }
1843
1844        if (adapter->detect_tx_hung) {
1845                /* detect a transmit hang in hardware; this serializes the
1846                 * check with the clearing of time_stamp and movement of i */
1847                adapter->detect_tx_hung = false;
1848                if (tx_ring->buffer_info[eop].time_stamp &&
1849                   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
1850                   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
1851                        IXGB_STATUS_TXOFF)) {
1852                        /* detected Tx unit hang */
1853                        netif_err(adapter, drv, adapter->netdev,
1854                                  "Detected Tx Unit Hang\n"
1855                                  "  TDH                  <%x>\n"
1856                                  "  TDT                  <%x>\n"
1857                                  "  next_to_use          <%x>\n"
1858                                  "  next_to_clean        <%x>\n"
1859                                  "buffer_info[next_to_clean]\n"
1860                                  "  time_stamp           <%lx>\n"
1861                                  "  next_to_watch        <%x>\n"
1862                                  "  jiffies              <%lx>\n"
1863                                  "  next_to_watch.status <%x>\n",
1864                                  IXGB_READ_REG(&adapter->hw, TDH),
1865                                  IXGB_READ_REG(&adapter->hw, TDT),
1866                                  tx_ring->next_to_use,
1867                                  tx_ring->next_to_clean,
1868                                  tx_ring->buffer_info[eop].time_stamp,
1869                                  eop,
1870                                  jiffies,
1871                                  eop_desc->status);
1872                        netif_stop_queue(netdev);
1873                }
1874        }
1875
1876        return cleaned;
1877}
1878
1879/**
1880 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1881 * @adapter: board private structure
1882 * @rx_desc: receive descriptor
1883 * @skb: socket buffer with received data
1884 **/
1885
1886static void
1887ixgb_rx_checksum(struct ixgb_adapter *adapter,
1888                 struct ixgb_rx_desc *rx_desc,
1889                 struct sk_buff *skb)
1890{
1891        /* Ignore Checksum bit is set OR
1892         * TCP Checksum has not been calculated
1893         */
1894        if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1895           (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1896                skb_checksum_none_assert(skb);
1897                return;
1898        }
1899
1900        /* At this point we know the hardware did the TCP checksum */
1901        /* now look at the TCP checksum error bit */
1902        if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1903                /* let the stack verify checksum errors */
1904                skb_checksum_none_assert(skb);
1905                adapter->hw_csum_rx_error++;
1906        } else {
1907                /* TCP checksum is good */
1908                skb->ip_summed = CHECKSUM_UNNECESSARY;
1909                adapter->hw_csum_rx_good++;
1910        }
1911}
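
/*
 * Editorial worked example: the logic above reduces to a small truth
 * table over the descriptor bits (IXSM = "ignore checksum", TCPCS =
 * "TCP checksum evaluated", TCPE = "TCP checksum error"):
 *
 *	IXSM  TCPCS  TCPE	skb->ip_summed
 *	  1     x     x		CHECKSUM_NONE (hw said ignore)
 *	  0     0     x		CHECKSUM_NONE (hw did not check)
 *	  0     1     1		CHECKSUM_NONE (stack re-verifies)
 *	  0     1     0		CHECKSUM_UNNECESSARY
 */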
1912
1913/*
1914 * this should improve performance for small packets with large amounts
1915 * of reassembly being done in the stack
1916 */
1917static void ixgb_check_copybreak(struct napi_struct *napi,
1918                                 struct ixgb_buffer *buffer_info,
1919                                 u32 length, struct sk_buff **skb)
1920{
1921        struct sk_buff *new_skb;
1922
1923        if (length > copybreak)
1924                return;
1925
1926        new_skb = napi_alloc_skb(napi, length);
1927        if (!new_skb)
1928                return;
1929
1930        skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
1931                                       (*skb)->data - NET_IP_ALIGN,
1932                                       length + NET_IP_ALIGN);
1933        /* keep the original skb in buffer_info so it can be recycled */
1934        buffer_info->skb = *skb;
1935        *skb = new_skb;
1936}
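
/*
 * Editorial note: because copybreak is declared as a 0644 module
 * parameter, the threshold can be tuned at runtime without reloading
 * the driver, e.g.:
 *
 *	echo 128 > /sys/module/ixgb/parameters/copybreak
 *
 * Packets at or below the threshold are copied into a small freshly
 * allocated skb so the full-sized receive buffer can be recycled.
 */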
1937
1938/**
1939 * ixgb_clean_rx_irq - Send received data up the network stack
1940 * @adapter: board private structure
     * @work_done: incremented once per packet processed
     * @work_to_do: NAPI budget; maximum number of packets to process
1941 **/
1942
1943static bool
1944ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1945{
1946        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1947        struct net_device *netdev = adapter->netdev;
1948        struct pci_dev *pdev = adapter->pdev;
1949        struct ixgb_rx_desc *rx_desc, *next_rxd;
1950        struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1951        u32 length;
1952        unsigned int i, j;
1953        int cleaned_count = 0;
1954        bool cleaned = false;
1955
1956        i = rx_ring->next_to_clean;
1957        rx_desc = IXGB_RX_DESC(*rx_ring, i);
1958        buffer_info = &rx_ring->buffer_info[i];
1959
1960        while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1961                struct sk_buff *skb;
1962                u8 status;
1963
1964                if (*work_done >= work_to_do)
1965                        break;
1966
1967                (*work_done)++;
1968                rmb();  /* read descriptor and rx_buffer_info after status DD */
1969                status = rx_desc->status;
1970                skb = buffer_info->skb;
1971                buffer_info->skb = NULL;
1972
1973                prefetch(skb->data - NET_IP_ALIGN);
1974
1975                if (++i == rx_ring->count)
1976                        i = 0;
1977                next_rxd = IXGB_RX_DESC(*rx_ring, i);
1978                prefetch(next_rxd);
1979
1980                j = i + 1;
1981                if (j == rx_ring->count)
1982                        j = 0;
1983                next2_buffer = &rx_ring->buffer_info[j];
1984                prefetch(next2_buffer);
1985
1986                next_buffer = &rx_ring->buffer_info[i];
1987
1988                cleaned = true;
1989                cleaned_count++;
1990
1991                dma_unmap_single(&pdev->dev,
1992                                 buffer_info->dma,
1993                                 buffer_info->length,
1994                                 DMA_FROM_DEVICE);
1995                buffer_info->dma = 0;
1996
1997                length = le16_to_cpu(rx_desc->length);
1998                rx_desc->length = 0;
1999
2000                if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
2001
2002                        /* All receives must fit into a single buffer */
2003
2004                        pr_debug("Receive packet consumed multiple buffers length<%x>\n",
2005                                 length);
2006
2007                        dev_kfree_skb_irq(skb);
2008                        goto rxdesc_done;
2009                }
2010
2011                if (unlikely(rx_desc->errors &
2012                    (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
2013                     IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
2014                        dev_kfree_skb_irq(skb);
2015                        goto rxdesc_done;
2016                }
2017
2018                ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
2019
2020                /* Good Receive */
2021                skb_put(skb, length);
2022
2023                /* Receive Checksum Offload */
2024                ixgb_rx_checksum(adapter, rx_desc, skb);
2025
2026                skb->protocol = eth_type_trans(skb, netdev);
2027                if (status & IXGB_RX_DESC_STATUS_VP)
2028                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2029                                       le16_to_cpu(rx_desc->special));
2030
2031                netif_receive_skb(skb);
2032
2033rxdesc_done:
2034                /* clean up descriptor, might be written over by hw */
2035                rx_desc->status = 0;
2036
2037                /* return some buffers to hardware; one at a time is too slow */
2038                if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
2039                        ixgb_alloc_rx_buffers(adapter, cleaned_count);
2040                        cleaned_count = 0;
2041                }
2042
2043                /* use prefetched values */
2044                rx_desc = next_rxd;
2045                buffer_info = next_buffer;
2046        }
2047
2048        rx_ring->next_to_clean = i;
2049
2050        cleaned_count = IXGB_DESC_UNUSED(rx_ring);
2051        if (cleaned_count)
2052                ixgb_alloc_rx_buffers(adapter, cleaned_count);
2053
2054        return cleaned;
2055}
2056
2057/**
2058 * ixgb_alloc_rx_buffers - Replace used receive buffers
2059 * @adapter: address of board private structure
     * @cleaned_count: number of receive buffers to refill
2060 **/
2061
2062static void
2063ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2064{
2065        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2066        struct net_device *netdev = adapter->netdev;
2067        struct pci_dev *pdev = adapter->pdev;
2068        struct ixgb_rx_desc *rx_desc;
2069        struct ixgb_buffer *buffer_info;
2070        struct sk_buff *skb;
2071        unsigned int i;
2072        long cleancount;
2073
2074        i = rx_ring->next_to_use;
2075        buffer_info = &rx_ring->buffer_info[i];
2076        cleancount = IXGB_DESC_UNUSED(rx_ring);
2077
2078
2079        /* leave three descriptors unused */
2080        while (--cleancount > 2 && cleaned_count--) {
2081                /* recycle! it's good for you */
2082                skb = buffer_info->skb;
2083                if (skb) {
2084                        skb_trim(skb, 0);
2085                        goto map_skb;
2086                }
2087
2088                skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
2089                if (unlikely(!skb)) {
2090                        /* Better luck next round */
2091                        adapter->alloc_rx_buff_failed++;
2092                        break;
2093                }
2094
2095                buffer_info->skb = skb;
2096                buffer_info->length = adapter->rx_buffer_len;
2097map_skb:
2098                buffer_info->dma = dma_map_single(&pdev->dev,
2099                                                  skb->data,
2100                                                  adapter->rx_buffer_len,
2101                                                  DMA_FROM_DEVICE);
2102                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
2103                        adapter->alloc_rx_buff_failed++;
2104                        break;
2105                }
2106
2107                rx_desc = IXGB_RX_DESC(*rx_ring, i);
2108                rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
2109                /* guarantee DD bit not set now before h/w gets descriptor
2110                 * this is the rest of the workaround for h/w double
2111                 * writeback. */
2112                rx_desc->status = 0;
2113
2114
2115                if (++i == rx_ring->count)
2116                        i = 0;
2117                buffer_info = &rx_ring->buffer_info[i];
2118        }
2119
2120        if (likely(rx_ring->next_to_use != i)) {
2121                rx_ring->next_to_use = i;
2122                if (unlikely(i-- == 0))
2123                        i = (rx_ring->count - 1);
2124
2125                /* Force memory writes to complete before letting h/w
2126                 * know there are new descriptors to fetch.  (Only
2127                 * applicable for weak-ordered memory model archs, such
2128                 * as IA-64). */
2129                wmb();
2130                IXGB_WRITE_REG(&adapter->hw, RDT, i);
2131        }
2132}
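
/*
 * Editorial worked example: the tail write above points RDT one entry
 * behind next_to_use. If eight buffers were just mapped starting at
 * index 10 of a 256-entry ring:
 *
 *	rx_ring->next_to_use == 18	(first slot not yet filled)
 *	IXGB_WRITE_REG(hw, RDT, 17)	(last filled slot handed to hw)
 *
 * Backing the tail off by one keeps the hardware away from the
 * descriptor the driver will touch next, as part of the double
 * write-back workaround noted earlier.
 */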
2133
2134static void
2135ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2136{
2137        u32 ctrl;
2138
2139        /* enable VLAN tag insert/strip */
2140        ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2141        ctrl |= IXGB_CTRL0_VME;
2142        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2143}
2144
2145static void
2146ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
2147{
2148        u32 ctrl;
2149
2150        /* disable VLAN tag insert/strip */
2151        ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2152        ctrl &= ~IXGB_CTRL0_VME;
2153        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2154}
2155
2156static int
2157ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2158{
2159        struct ixgb_adapter *adapter = netdev_priv(netdev);
2160        u32 vfta, index;
2161
2162        /* add VID to filter table */
2163
2164        index = (vid >> 5) & 0x7F;
2165        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2166        vfta |= (1 << (vid & 0x1F));
2167        ixgb_write_vfta(&adapter->hw, index, vfta);
2168        set_bit(vid, adapter->active_vlans);
2169
2170        return 0;
2171}
2172
2173static int
2174ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2175{
2176        struct ixgb_adapter *adapter = netdev_priv(netdev);
2177        u32 vfta, index;
2178
2179        /* remove VID from filter table */
2180
2181        index = (vid >> 5) & 0x7F;
2182        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2183        vfta &= ~(1 << (vid & 0x1F));
2184        ixgb_write_vfta(&adapter->hw, index, vfta);
2185        clear_bit(vid, adapter->active_vlans);
2186
2187        return 0;
2188}
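
/*
 * Editorial worked example: the VLAN filter table is addressed as 128
 * 32-bit words with one bit per VID (128 * 32 == 4096 == VLAN_N_VID).
 * For vid 291 (0x123):
 *
 *	index = (291 >> 5) & 0x7F == 9
 *	bit   =  291 & 0x1F       == 3
 *
 * so both helpers above toggle bit 3 of VFTA word 9.
 */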
2189
2190static void
2191ixgb_restore_vlan(struct ixgb_adapter *adapter)
2192{
2193        u16 vid;
2194
2195        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2196                ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2197}
2198
2199#ifdef CONFIG_NET_POLL_CONTROLLER
2200/*
2201 * Polling 'interrupt' - used by things like netconsole to send skbs
2202 * without having to re-enable interrupts. It's not called while
2203 * the interrupt routine is executing.
2204 */
2205
2206static void ixgb_netpoll(struct net_device *dev)
2207{
2208        struct ixgb_adapter *adapter = netdev_priv(dev);
2209
2210        disable_irq(adapter->pdev->irq);
2211        ixgb_intr(adapter->pdev->irq, dev);
2212        enable_irq(adapter->pdev->irq);
2213}
2214#endif
2215
2216/**
2217 * ixgb_io_error_detected - called when PCI error is detected
2218 * @pdev:    pointer to pci device with error
2219 * @state:   pci channel state after error
2220 *
2221 * This callback is called by the PCI subsystem whenever
2222 * a PCI bus error is detected.
2223 */
2224static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
2225                                               enum pci_channel_state state)
2226{
2227        struct net_device *netdev = pci_get_drvdata(pdev);
2228        struct ixgb_adapter *adapter = netdev_priv(netdev);
2229
2230        netif_device_detach(netdev);
2231
2232        if (state == pci_channel_io_perm_failure)
2233                return PCI_ERS_RESULT_DISCONNECT;
2234
2235        if (netif_running(netdev))
2236                ixgb_down(adapter, true);
2237
2238        pci_disable_device(pdev);
2239
2240        /* Request a slot reset. */
2241        return PCI_ERS_RESULT_NEED_RESET;
2242}
2243
2244/**
2245 * ixgb_io_slot_reset - called after the pci bus has been reset.
2246 * @pdev: pointer to pci device with error
2247 *
2248 * This callback is called after the PCI bus has been reset.
2249 * Basically, this tries to restart the card from scratch.
2250 * This is a shortened version of the device probe/discovery code;
2251 * it resembles the first half of the ixgb_probe() routine.
2252 */
2253static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2254{
2255        struct net_device *netdev = pci_get_drvdata(pdev);
2256        struct ixgb_adapter *adapter = netdev_priv(netdev);
2257
2258        if (pci_enable_device(pdev)) {
2259                netif_err(adapter, probe, adapter->netdev,
2260                          "Cannot re-enable PCI device after reset\n");
2261                return PCI_ERS_RESULT_DISCONNECT;
2262        }
2263
2264        /* Perform card reset only on one instance of the card */
2265        if (PCI_FUNC(pdev->devfn) != 0)
2266                return PCI_ERS_RESULT_RECOVERED;
2267
2268        pci_set_master(pdev);
2269
2270        netif_carrier_off(netdev);
2271        netif_stop_queue(netdev);
2272        ixgb_reset(adapter);
2273
2274        /* Make sure the EEPROM is good */
2275        if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2276                netif_err(adapter, probe, adapter->netdev,
2277                          "After reset, the EEPROM checksum is not valid\n");
2278                return PCI_ERS_RESULT_DISCONNECT;
2279        }
2280        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2281        memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2282
2283        if (!is_valid_ether_addr(netdev->perm_addr)) {
2284                netif_err(adapter, probe, adapter->netdev,
2285                          "After reset, invalid MAC address\n");
2286                return PCI_ERS_RESULT_DISCONNECT;
2287        }
2288
2289        return PCI_ERS_RESULT_RECOVERED;
2290}
2291
2292/**
2293 * ixgb_io_resume - called when it's OK to resume normal operations
2294 * @pdev: pointer to pci device with error
2295 *
2296 * The error recovery driver tells us that it's OK to resume
2297 * normal operation. The implementation resembles the second half
2298 * of the ixgb_probe() routine.
2299 */
2300static void ixgb_io_resume(struct pci_dev *pdev)
2301{
2302        struct net_device *netdev = pci_get_drvdata(pdev);
2303        struct ixgb_adapter *adapter = netdev_priv(netdev);
2304
2305        pci_set_master(pdev);
2306
2307        if (netif_running(netdev)) {
2308                if (ixgb_up(adapter)) {
2309                        pr_err("can't bring device back up after reset\n");
2310                        return;
2311                }
2312        }
2313
2314        netif_device_attach(netdev);
2315        mod_timer(&adapter->watchdog_timer, jiffies);
2316}
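
/*
 * Editorial note: these three callbacks implement the PCI error
 * recovery sequence error_detected -> slot_reset -> resume. They are
 * wired into the driver's struct pci_driver earlier in this file
 * (outside this excerpt), roughly as:
 *
 *	static const struct pci_error_handlers ixgb_err_handler = {
 *		.error_detected = ixgb_io_error_detected,
 *		.slot_reset     = ixgb_io_slot_reset,
 *		.resume         = ixgb_io_resume,
 *	};
 */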
2317
2318/* ixgb_main.c */
2319