linux/drivers/net/ixgbe/ixgbe_ethtool.c
   1/*******************************************************************************
   2
   3  Intel 10 Gigabit PCI Express Linux driver
   4  Copyright(c) 1999 - 2009 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  25
  26*******************************************************************************/
  27
  28/* ethtool support for ixgbe */
  29
  30#include <linux/types.h>
  31#include <linux/module.h>
  32#include <linux/pci.h>
  33#include <linux/netdevice.h>
  34#include <linux/ethtool.h>
  35#include <linux/vmalloc.h>
  36#include <linux/uaccess.h>
  37
  38#include "ixgbe.h"
  39
  40
  41#define IXGBE_ALL_RAR_ENTRIES 16
  42
  43struct ixgbe_stats {
  44        char stat_string[ETH_GSTRING_LEN];
  45        int sizeof_stat;
  46        int stat_offset;
  47};
  48
  49#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
  50                             offsetof(struct ixgbe_adapter, m)
  51static struct ixgbe_stats ixgbe_gstrings_stats[] = {
  52        {"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
  53        {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
  54        {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
  55        {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
  56        {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
  57        {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
  58        {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
  59        {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
  60        {"lsc_int", IXGBE_STAT(lsc_int)},
  61        {"tx_busy", IXGBE_STAT(tx_busy)},
  62        {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
  63        {"rx_errors", IXGBE_STAT(net_stats.rx_errors)},
  64        {"tx_errors", IXGBE_STAT(net_stats.tx_errors)},
  65        {"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)},
  66        {"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)},
  67        {"multicast", IXGBE_STAT(net_stats.multicast)},
  68        {"broadcast", IXGBE_STAT(stats.bprc)},
  69        {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
  70        {"collisions", IXGBE_STAT(net_stats.collisions)},
  71        {"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
  72        {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
  73        {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
  74        {"hw_rsc_count", IXGBE_STAT(rsc_count)},
  75        {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
  76        {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
  77        {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
  78        {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
  79        {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
  80        {"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)},
  81        {"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)},
  82        {"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)},
  83        {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
  84        {"tx_restart_queue", IXGBE_STAT(restart_queue)},
  85        {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
  86        {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
  87        {"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)},
  88        {"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)},
  89        {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
  90        {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
  91        {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
  92        {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
  93        {"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)},
  94        {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
  95        {"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)},
  96        {"rx_header_split", IXGBE_STAT(rx_hdr_split)},
  97        {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
  98        {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
  99        {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
 100#ifdef IXGBE_FCOE
 101        {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
 102        {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
 103        {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
 104        {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
 105        {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
 106        {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
 107#endif /* IXGBE_FCOE */
 108};
 109
 110#define IXGBE_QUEUE_STATS_LEN \
 111        ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
 112        ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
 113        (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
 114#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
 115#define IXGBE_PB_STATS_LEN ( \
 116                 (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
 117                 IXGBE_FLAG_DCB_ENABLED) ? \
 118                 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
 119                  sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
 120                  sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
 121                  sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
 122                  / sizeof(u64) : 0)
 123#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
 124                         IXGBE_PB_STATS_LEN + \
 125                         IXGBE_QUEUE_STATS_LEN)
 126
 127static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 128        "Register test  (offline)", "Eeprom test    (offline)",
 129        "Interrupt test (offline)", "Loopback test  (offline)",
 130        "Link test   (on/offline)"
 131};
 132#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
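/*
 * Illustrative user-space sketch, not part of the driver: how the string
 * table and statistics defined above are consumed through the ethtool
 * ioctl.  The interface name "eth0" and the shape of main() are assumptions
 * for the example; the two SIOCETHTOOL calls land in ixgbe_get_strings()
 * and ixgbe_get_ethtool_stats() further down this file.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
        struct ethtool_gstrings *strings;
        struct ethtool_stats *stats;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        unsigned int i;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */

        /* ETHTOOL_GDRVINFO reports n_stats, i.e. IXGBE_STATS_LEN */
        ifr.ifr_data = (char *)&drvinfo;
        if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        strings = calloc(1, sizeof(*strings) +
                            drvinfo.n_stats * ETH_GSTRING_LEN);
        stats = calloc(1, sizeof(*stats) + drvinfo.n_stats * sizeof(__u64));
        if (!strings || !stats)
                return 1;

        strings->cmd = ETHTOOL_GSTRINGS;
        strings->string_set = ETH_SS_STATS;
        strings->len = drvinfo.n_stats;
        ifr.ifr_data = (char *)strings;
        ioctl(fd, SIOCETHTOOL, &ifr);          /* -> ixgbe_get_strings() */

        stats->cmd = ETHTOOL_GSTATS;
        stats->n_stats = drvinfo.n_stats;
        ifr.ifr_data = (char *)stats;
        ioctl(fd, SIOCETHTOOL, &ifr);          /* -> ixgbe_get_ethtool_stats() */

        for (i = 0; i < drvinfo.n_stats; i++)
                printf("%-32.32s %llu\n",
                       (char *)&strings->data[i * ETH_GSTRING_LEN],
                       (unsigned long long)stats->data[i]);

        free(strings);
        free(stats);
        return 0;
}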
 133
 134static int ixgbe_get_settings(struct net_device *netdev,
 135                              struct ethtool_cmd *ecmd)
 136{
 137        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 138        struct ixgbe_hw *hw = &adapter->hw;
 139        u32 link_speed = 0;
 140        bool link_up;
 141
 142        ecmd->supported = SUPPORTED_10000baseT_Full;
 143        ecmd->autoneg = AUTONEG_ENABLE;
 144        ecmd->transceiver = XCVR_EXTERNAL;
 145        if ((hw->phy.media_type == ixgbe_media_type_copper) ||
 146            (hw->phy.multispeed_fiber)) {
 147                ecmd->supported |= (SUPPORTED_1000baseT_Full |
 148                                    SUPPORTED_Autoneg);
 149
 150                ecmd->advertising = ADVERTISED_Autoneg;
 151                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
 152                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
 153                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
 154                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
 155                /*
 156                 * It's possible that phy.autoneg_advertised may not be
 157                 * set yet.  If so display what the default would be -
 158                 * both 1G and 10G supported.
 159                 */
 160                if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
 161                                           ADVERTISED_10000baseT_Full)))
 162                        ecmd->advertising |= (ADVERTISED_10000baseT_Full |
 163                                              ADVERTISED_1000baseT_Full);
 164
 165                if (hw->phy.media_type == ixgbe_media_type_copper) {
 166                        ecmd->supported |= SUPPORTED_TP;
 167                        ecmd->advertising |= ADVERTISED_TP;
 168                        ecmd->port = PORT_TP;
 169                } else {
 170                        ecmd->supported |= SUPPORTED_FIBRE;
 171                        ecmd->advertising |= ADVERTISED_FIBRE;
 172                        ecmd->port = PORT_FIBRE;
 173                }
 174        } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
 175                /* Set as FIBRE until SERDES defined in kernel */
 176                if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
 177                        ecmd->supported = (SUPPORTED_1000baseT_Full |
 178                                           SUPPORTED_FIBRE);
 179                        ecmd->advertising = (ADVERTISED_1000baseT_Full |
 180                                             ADVERTISED_FIBRE);
 181                        ecmd->port = PORT_FIBRE;
 182                        ecmd->autoneg = AUTONEG_DISABLE;
 183                } else {
 184                        ecmd->supported |= (SUPPORTED_1000baseT_Full |
 185                                            SUPPORTED_FIBRE);
 186                        ecmd->advertising = (ADVERTISED_10000baseT_Full |
 187                                             ADVERTISED_1000baseT_Full |
 188                                             ADVERTISED_FIBRE);
 189                        ecmd->port = PORT_FIBRE;
 190                }
 191        } else {
 192                ecmd->supported |= SUPPORTED_FIBRE;
 193                ecmd->advertising = (ADVERTISED_10000baseT_Full |
 194                                     ADVERTISED_FIBRE);
 195                ecmd->port = PORT_FIBRE;
 196                ecmd->autoneg = AUTONEG_DISABLE;
 197        }
 198
 199        hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 200        if (link_up) {
 201                ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
 202                               SPEED_10000 : SPEED_1000;
 203                ecmd->duplex = DUPLEX_FULL;
 204        } else {
 205                ecmd->speed = -1;
 206                ecmd->duplex = -1;
 207        }
 208
 209        return 0;
 210}
 211
 212static int ixgbe_set_settings(struct net_device *netdev,
 213                              struct ethtool_cmd *ecmd)
 214{
 215        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 216        struct ixgbe_hw *hw = &adapter->hw;
 217        u32 advertised, old;
 218        s32 err = 0;
 219
 220        if ((hw->phy.media_type == ixgbe_media_type_copper) ||
 221            (hw->phy.multispeed_fiber)) {
 222                /* 10000/copper and 1000/copper must autoneg
 223                 * this function does not support any duplex forcing, but can
 224                 * limit the advertising of the adapter to only 10000 or 1000 */
 225                if (ecmd->autoneg == AUTONEG_DISABLE)
 226                        return -EINVAL;
 227
 228                old = hw->phy.autoneg_advertised;
 229                advertised = 0;
 230                if (ecmd->advertising & ADVERTISED_10000baseT_Full)
 231                        advertised |= IXGBE_LINK_SPEED_10GB_FULL;
 232
 233                if (ecmd->advertising & ADVERTISED_1000baseT_Full)
 234                        advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 235
 236                if (old == advertised)
 237                        return err;
 238                /* this sets the link speed and restarts auto-neg */
 239                hw->mac.autotry_restart = true;
 240                err = hw->mac.ops.setup_link(hw, advertised, true, true);
 241                if (err) {
 242                        DPRINTK(PROBE, INFO,
 243                                "setup link failed with code %d\n", err);
 244                        hw->mac.ops.setup_link(hw, old, true, true);
 245                }
 246        } else {
 247                /* in this case we currently only support 10Gb/FULL */
 248                if ((ecmd->autoneg == AUTONEG_ENABLE) ||
 249                    (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
 250                    (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
 251                        return -EINVAL;
 252        }
 253
 254        return err;
 255}
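/*
 * Illustrative sketch (an assumption, not driver code), reusing the headers
 * from the statistics example above: restricting the advertised speeds via
 * ETHTOOL_SSET.  As ixgbe_set_settings() above shows, copper and multispeed
 * fiber parts reject AUTONEG_DISABLE, so the advertising mask is trimmed
 * while autoneg stays enabled.  The helper name limit_to_1g() is made up.
 */
static int limit_to_1g(int fd, struct ifreq *ifr)
{
        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

        ifr->ifr_data = (char *)&ecmd;
        if (ioctl(fd, SIOCETHTOOL, ifr) < 0)    /* -> ixgbe_get_settings() */
                return -1;

        ecmd.cmd = ETHTOOL_SSET;
        ecmd.autoneg = AUTONEG_ENABLE;          /* mandatory on these media */
        ecmd.advertising &= ~ADVERTISED_10000baseT_Full;
        ecmd.advertising |= ADVERTISED_1000baseT_Full;

        return ioctl(fd, SIOCETHTOOL, ifr);     /* -> ixgbe_set_settings() */
}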
 256
 257static void ixgbe_get_pauseparam(struct net_device *netdev,
 258                                 struct ethtool_pauseparam *pause)
 259{
 260        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 261        struct ixgbe_hw *hw = &adapter->hw;
 262
 263        /*
 264         * Flow Control Autoneg isn't on if
 265         *  - we didn't ask for it OR
 266         *  - it failed, we know this by tx & rx being off
 267         */
 268        if (hw->fc.disable_fc_autoneg ||
 269            (hw->fc.current_mode == ixgbe_fc_none))
 270                pause->autoneg = 0;
 271        else
 272                pause->autoneg = 1;
 273
 274#ifdef CONFIG_DCB
 275        if (hw->fc.current_mode == ixgbe_fc_pfc) {
 276                pause->rx_pause = 0;
 277                pause->tx_pause = 0;
 278        }
 279
 280#endif
 281        if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
 282                pause->rx_pause = 1;
 283        } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
 284                pause->tx_pause = 1;
 285        } else if (hw->fc.current_mode == ixgbe_fc_full) {
 286                pause->rx_pause = 1;
 287                pause->tx_pause = 1;
 288        }
 289}
 290
 291static int ixgbe_set_pauseparam(struct net_device *netdev,
 292                                struct ethtool_pauseparam *pause)
 293{
 294        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 295        struct ixgbe_hw *hw = &adapter->hw;
 296        struct ixgbe_fc_info fc;
 297
 298#ifdef CONFIG_DCB
 299        if (adapter->dcb_cfg.pfc_mode_enable ||
 300                ((hw->mac.type == ixgbe_mac_82598EB) &&
 301                (adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
 302                return -EINVAL;
 303
 304#endif
 305
 306        fc = hw->fc;
 307
 308        if (pause->autoneg != AUTONEG_ENABLE)
 309                fc.disable_fc_autoneg = true;
 310        else
 311                fc.disable_fc_autoneg = false;
 312
 313        if (pause->rx_pause && pause->tx_pause)
 314                fc.requested_mode = ixgbe_fc_full;
 315        else if (pause->rx_pause && !pause->tx_pause)
 316                fc.requested_mode = ixgbe_fc_rx_pause;
 317        else if (!pause->rx_pause && pause->tx_pause)
 318                fc.requested_mode = ixgbe_fc_tx_pause;
 319        else if (!pause->rx_pause && !pause->tx_pause)
 320                fc.requested_mode = ixgbe_fc_none;
 321        else
 322                return -EINVAL;
 323
 324#ifdef CONFIG_DCB
 325        adapter->last_lfc_mode = fc.requested_mode;
 326#endif
 327
  328        /* if the requested flow control settings changed, apply them */
 329        if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
 330                hw->fc = fc;
 331                if (netif_running(netdev))
 332                        ixgbe_reinit_locked(adapter);
 333                else
 334                        ixgbe_reset(adapter);
 335        }
 336
 337        return 0;
 338}
 339
 340static u32 ixgbe_get_rx_csum(struct net_device *netdev)
 341{
 342        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 343        return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED);
 344}
 345
 346static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
 347{
 348        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 349        if (data)
 350                adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
 351        else
 352                adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
 353
 354        if (netif_running(netdev))
 355                ixgbe_reinit_locked(adapter);
 356        else
 357                ixgbe_reset(adapter);
 358
 359        return 0;
 360}
 361
 362static u32 ixgbe_get_tx_csum(struct net_device *netdev)
 363{
 364        return (netdev->features & NETIF_F_IP_CSUM) != 0;
 365}
 366
 367static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
 368{
 369        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 370
 371        if (data) {
 372                netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
 373                if (adapter->hw.mac.type == ixgbe_mac_82599EB)
 374                        netdev->features |= NETIF_F_SCTP_CSUM;
 375        } else {
 376                netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
 377                if (adapter->hw.mac.type == ixgbe_mac_82599EB)
 378                        netdev->features &= ~NETIF_F_SCTP_CSUM;
 379        }
 380
 381        return 0;
 382}
 383
 384static int ixgbe_set_tso(struct net_device *netdev, u32 data)
 385{
 386        if (data) {
 387                netdev->features |= NETIF_F_TSO;
 388                netdev->features |= NETIF_F_TSO6;
 389        } else {
 390                netif_tx_stop_all_queues(netdev);
 391                netdev->features &= ~NETIF_F_TSO;
 392                netdev->features &= ~NETIF_F_TSO6;
 393                netif_tx_start_all_queues(netdev);
 394        }
 395        return 0;
 396}
 397
 398static u32 ixgbe_get_msglevel(struct net_device *netdev)
 399{
 400        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 401        return adapter->msg_enable;
 402}
 403
 404static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
 405{
 406        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 407        adapter->msg_enable = data;
 408}
 409
 410static int ixgbe_get_regs_len(struct net_device *netdev)
 411{
 412#define IXGBE_REGS_LEN  1128
 413        return IXGBE_REGS_LEN * sizeof(u32);
 414}
 415
 416#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
 417
 418static void ixgbe_get_regs(struct net_device *netdev,
 419                           struct ethtool_regs *regs, void *p)
 420{
 421        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 422        struct ixgbe_hw *hw = &adapter->hw;
 423        u32 *regs_buff = p;
 424        u8 i;
 425
 426        memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
 427
 428        regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
 429
 430        /* General Registers */
 431        regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
 432        regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
 433        regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
 434        regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
 435        regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
 436        regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 437        regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
 438        regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
 439
 440        /* NVM Register */
 441        regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
 442        regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
 443        regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
 444        regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
 445        regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
 446        regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
 447        regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
 448        regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
 449        regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
 450        regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
 451
 452        /* Interrupt */
 453        /* don't read EICR because it can clear interrupt causes, instead
 454         * read EICS which is a shadow but doesn't clear EICR */
 455        regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
 456        regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
 457        regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
 458        regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
 459        regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
 460        regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
 461        regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
 462        regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
 463        regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
 464        regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
 465        regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
 466        regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
 467
 468        /* Flow Control */
 469        regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
 470        regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
 471        regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
 472        regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
 473        regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
 474        for (i = 0; i < 8; i++)
 475                regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
 476        for (i = 0; i < 8; i++)
 477                regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
 478        regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
 479        regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
 480
 481        /* Receive DMA */
 482        for (i = 0; i < 64; i++)
 483                regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
 484        for (i = 0; i < 64; i++)
 485                regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
 486        for (i = 0; i < 64; i++)
 487                regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
 488        for (i = 0; i < 64; i++)
 489                regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
 490        for (i = 0; i < 64; i++)
 491                regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
 492        for (i = 0; i < 64; i++)
 493                regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
 494        for (i = 0; i < 16; i++)
 495                regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
 496        for (i = 0; i < 16; i++)
 497                regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
 498        regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
 499        for (i = 0; i < 8; i++)
 500                regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 501        regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 502        regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
 503
 504        /* Receive */
 505        regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 506        regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
 507        for (i = 0; i < 16; i++)
 508                regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
 509        for (i = 0; i < 16; i++)
 510                regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
 511        regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
 512        regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 513        regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 514        regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
 515        regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
 516        regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
 517        for (i = 0; i < 8; i++)
 518                regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
 519        for (i = 0; i < 8; i++)
 520                regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
 521        regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
 522
 523        /* Transmit */
 524        for (i = 0; i < 32; i++)
 525                regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
 526        for (i = 0; i < 32; i++)
 527                regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
 528        for (i = 0; i < 32; i++)
 529                regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
 530        for (i = 0; i < 32; i++)
 531                regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
 532        for (i = 0; i < 32; i++)
 533                regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
 534        for (i = 0; i < 32; i++)
 535                regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
 536        for (i = 0; i < 32; i++)
 537                regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
 538        for (i = 0; i < 32; i++)
 539                regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
 540        regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
 541        for (i = 0; i < 16; i++)
 542                regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
 543        regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
 544        for (i = 0; i < 8; i++)
 545                regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
 546        regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
 547
 548        /* Wake Up */
 549        regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
 550        regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
 551        regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
 552        regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
 553        regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
 554        regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
 555        regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
 556        regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
 557        regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
 558
 559        regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
 560        regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
 561        regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
 562        regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
 563        for (i = 0; i < 8; i++)
 564                regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
 565        for (i = 0; i < 8; i++)
 566                regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
 567        for (i = 0; i < 8; i++)
 568                regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
 569        for (i = 0; i < 8; i++)
 570                regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
 571        for (i = 0; i < 8; i++)
 572                regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
 573        for (i = 0; i < 8; i++)
 574                regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
 575
 576        /* Statistics */
 577        regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
 578        regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
 579        regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
 580        regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
 581        for (i = 0; i < 8; i++)
 582                regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
 583        regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
 584        regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
 585        regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
 586        regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
 587        regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
 588        regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
 589        regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
 590        for (i = 0; i < 8; i++)
 591                regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
 592        for (i = 0; i < 8; i++)
 593                regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
 594        for (i = 0; i < 8; i++)
 595                regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
 596        for (i = 0; i < 8; i++)
 597                regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
 598        regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
 599        regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
 600        regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
 601        regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
 602        regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
 603        regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
 604        regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
 605        regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
 606        regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
 607        regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
 608        regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
 609        regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
 610        for (i = 0; i < 8; i++)
 611                regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
 612        regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
 613        regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
 614        regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
 615        regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
 616        regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
 617        regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
 618        regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
 619        regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
 620        regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
 621        regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
 622        regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
 623        regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
 624        regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
 625        regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
 626        regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
 627        regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
 628        regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
 629        regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
 630        regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
 631        for (i = 0; i < 16; i++)
 632                regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
 633        for (i = 0; i < 16; i++)
 634                regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
 635        for (i = 0; i < 16; i++)
 636                regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
 637        for (i = 0; i < 16; i++)
 638                regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
 639
 640        /* MAC */
 641        regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
 642        regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
 643        regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
 644        regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
 645        regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
 646        regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
 647        regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
 648        regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
 649        regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
 650        regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 651        regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
 652        regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
 653        regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
 654        regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
 655        regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
 656        regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
 657        regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
 658        regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
 659        regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
 660        regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
 661        regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
 662        regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
 663        regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
 664        regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
 665        regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
 666        regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
 667        regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 668        regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
 669        regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
 670        regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
 671        regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
 672        regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
 673        regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
 674
 675        /* Diagnostic */
 676        regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
 677        for (i = 0; i < 8; i++)
 678                regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
 679        regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
 680        for (i = 0; i < 4; i++)
 681                regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
 682        regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
 683        regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
 684        for (i = 0; i < 8; i++)
 685                regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
 686        regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
 687        for (i = 0; i < 4; i++)
 688                regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
 689        regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
 690        regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
 691        regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
 692        regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
 693        regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
 694        regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
 695        regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
 696        regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
 697        regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
 698        regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
 699        regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
 700        for (i = 0; i < 8; i++)
 701                regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
 702        regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
 703        regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
 704        regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
 705        regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
 706        regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
 707        regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
 708        regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
 709        regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
 710        regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
 711}
 712
 713static int ixgbe_get_eeprom_len(struct net_device *netdev)
 714{
 715        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 716        return adapter->hw.eeprom.word_size * 2;
 717}
 718
 719static int ixgbe_get_eeprom(struct net_device *netdev,
 720                            struct ethtool_eeprom *eeprom, u8 *bytes)
 721{
 722        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 723        struct ixgbe_hw *hw = &adapter->hw;
 724        u16 *eeprom_buff;
 725        int first_word, last_word, eeprom_len;
 726        int ret_val = 0;
 727        u16 i;
 728
 729        if (eeprom->len == 0)
 730                return -EINVAL;
 731
 732        eeprom->magic = hw->vendor_id | (hw->device_id << 16);
 733
 734        first_word = eeprom->offset >> 1;
 735        last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 736        eeprom_len = last_word - first_word + 1;
 737
 738        eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
 739        if (!eeprom_buff)
 740                return -ENOMEM;
 741
 742        for (i = 0; i < eeprom_len; i++) {
 743                if ((ret_val = hw->eeprom.ops.read(hw, first_word + i,
 744                    &eeprom_buff[i])))
 745                        break;
 746        }
 747
 748        /* Device's eeprom is always little-endian, word addressable */
 749        for (i = 0; i < eeprom_len; i++)
 750                le16_to_cpus(&eeprom_buff[i]);
 751
 752        memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
 753        kfree(eeprom_buff);
 754
 755        return ret_val;
 756}
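/*
 * Illustrative sketch (an assumption, not driver code), reusing the headers
 * from the statistics example: dumping the first 16 bytes of the NVM through
 * ETHTOOL_GEEPROM.  ixgbe_get_eeprom() above fills in eeprom->magic with
 * vendor_id | (device_id << 16).  The helper name dump_eeprom_head() is
 * made up.
 */
static int dump_eeprom_head(int fd, struct ifreq *ifr)
{
        struct ethtool_eeprom *ee;
        unsigned int i, len = 16;

        ee = calloc(1, sizeof(*ee) + len);
        if (!ee)
                return -1;
        ee->cmd = ETHTOOL_GEEPROM;
        ee->offset = 0;
        ee->len = len;

        ifr->ifr_data = (char *)ee;
        if (ioctl(fd, SIOCETHTOOL, ifr) < 0) {  /* -> ixgbe_get_eeprom() */
                free(ee);
                return -1;
        }

        for (i = 0; i < ee->len; i++)
                printf("%02x ", ee->data[i]);
        printf(" (magic 0x%08x)\n", ee->magic);
        free(ee);
        return 0;
}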
 757
 758static void ixgbe_get_drvinfo(struct net_device *netdev,
 759                              struct ethtool_drvinfo *drvinfo)
 760{
 761        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 762        char firmware_version[32];
 763
 764        strncpy(drvinfo->driver, ixgbe_driver_name, 32);
 765        strncpy(drvinfo->version, ixgbe_driver_version, 32);
 766
 767        sprintf(firmware_version, "%d.%d-%d",
 768                (adapter->eeprom_version & 0xF000) >> 12,
 769                (adapter->eeprom_version & 0x0FF0) >> 4,
 770                adapter->eeprom_version & 0x000F);
 771
 772        strncpy(drvinfo->fw_version, firmware_version, 32);
 773        strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
 774        drvinfo->n_stats = IXGBE_STATS_LEN;
 775        drvinfo->testinfo_len = IXGBE_TEST_LEN;
 776        drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
 777}
 778
 779static void ixgbe_get_ringparam(struct net_device *netdev,
 780                                struct ethtool_ringparam *ring)
 781{
 782        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 783        struct ixgbe_ring *tx_ring = adapter->tx_ring;
 784        struct ixgbe_ring *rx_ring = adapter->rx_ring;
 785
 786        ring->rx_max_pending = IXGBE_MAX_RXD;
 787        ring->tx_max_pending = IXGBE_MAX_TXD;
 788        ring->rx_mini_max_pending = 0;
 789        ring->rx_jumbo_max_pending = 0;
 790        ring->rx_pending = rx_ring->count;
 791        ring->tx_pending = tx_ring->count;
 792        ring->rx_mini_pending = 0;
 793        ring->rx_jumbo_pending = 0;
 794}
 795
 796static int ixgbe_set_ringparam(struct net_device *netdev,
 797                               struct ethtool_ringparam *ring)
 798{
 799        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 800        struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
 801        int i, err = 0;
 802        u32 new_rx_count, new_tx_count;
 803        bool need_update = false;
 804
 805        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 806                return -EINVAL;
 807
 808        new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
 809        new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
 810        new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
 811
 812        new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
 813        new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
 814        new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
 815
 816        if ((new_tx_count == adapter->tx_ring->count) &&
 817            (new_rx_count == adapter->rx_ring->count)) {
 818                /* nothing to do */
 819                return 0;
 820        }
 821
 822        while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 823                msleep(1);
 824
 825        if (!netif_running(adapter->netdev)) {
 826                for (i = 0; i < adapter->num_tx_queues; i++)
 827                        adapter->tx_ring[i].count = new_tx_count;
 828                for (i = 0; i < adapter->num_rx_queues; i++)
 829                        adapter->rx_ring[i].count = new_rx_count;
 830                adapter->tx_ring_count = new_tx_count;
 831                adapter->rx_ring_count = new_rx_count;
 832                goto err_setup;
 833        }
 834
 835        temp_tx_ring = kcalloc(adapter->num_tx_queues,
 836                               sizeof(struct ixgbe_ring), GFP_KERNEL);
 837        if (!temp_tx_ring) {
 838                err = -ENOMEM;
 839                goto err_setup;
 840        }
 841
 842        if (new_tx_count != adapter->tx_ring_count) {
 843                memcpy(temp_tx_ring, adapter->tx_ring,
 844                       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
 845                for (i = 0; i < adapter->num_tx_queues; i++) {
 846                        temp_tx_ring[i].count = new_tx_count;
 847                        err = ixgbe_setup_tx_resources(adapter,
 848                                                       &temp_tx_ring[i]);
 849                        if (err) {
 850                                while (i) {
 851                                        i--;
 852                                        ixgbe_free_tx_resources(adapter,
 853                                                                &temp_tx_ring[i]);
 854                                }
 855                                goto err_setup;
 856                        }
 857                }
 858                need_update = true;
 859        }
 860
 861        temp_rx_ring = kcalloc(adapter->num_rx_queues,
 862                               sizeof(struct ixgbe_ring), GFP_KERNEL);
 863        if ((!temp_rx_ring) && (need_update)) {
 864                for (i = 0; i < adapter->num_tx_queues; i++)
 865                        ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
 866                kfree(temp_tx_ring);
 867                err = -ENOMEM;
 868                goto err_setup;
 869        }
 870
 871        if (new_rx_count != adapter->rx_ring_count) {
 872                memcpy(temp_rx_ring, adapter->rx_ring,
 873                       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
 874                for (i = 0; i < adapter->num_rx_queues; i++) {
 875                        temp_rx_ring[i].count = new_rx_count;
 876                        err = ixgbe_setup_rx_resources(adapter,
 877                                                       &temp_rx_ring[i]);
 878                        if (err) {
 879                                while (i) {
 880                                        i--;
 881                                        ixgbe_free_rx_resources(adapter,
 882                                                              &temp_rx_ring[i]);
 883                                }
 884                                goto err_setup;
 885                        }
 886                }
 887                need_update = true;
 888        }
 889
 890        /* if rings need to be updated, here's the place to do it in one shot */
 891        if (need_update) {
 892                ixgbe_down(adapter);
 893
 894                /* tx */
 895                if (new_tx_count != adapter->tx_ring_count) {
 896                        kfree(adapter->tx_ring);
 897                        adapter->tx_ring = temp_tx_ring;
 898                        temp_tx_ring = NULL;
 899                        adapter->tx_ring_count = new_tx_count;
 900                }
 901
 902                /* rx */
 903                if (new_rx_count != adapter->rx_ring_count) {
 904                        kfree(adapter->rx_ring);
 905                        adapter->rx_ring = temp_rx_ring;
 906                        temp_rx_ring = NULL;
 907                        adapter->rx_ring_count = new_rx_count;
 908                }
 909                ixgbe_up(adapter);
 910        }
 911err_setup:
 912        clear_bit(__IXGBE_RESETTING, &adapter->state);
 913        return err;
 914}
 915
 916static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
 917{
 918        switch (sset) {
 919        case ETH_SS_TEST:
 920                return IXGBE_TEST_LEN;
 921        case ETH_SS_STATS:
 922                return IXGBE_STATS_LEN;
 923        default:
 924                return -EOPNOTSUPP;
 925        }
 926}
 927
 928static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 929                                    struct ethtool_stats *stats, u64 *data)
 930{
 931        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 932        u64 *queue_stat;
 933        int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
 934        int j, k;
 935        int i;
 936
 937        ixgbe_update_stats(adapter);
 938        for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
 939                char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
 940                data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
 941                           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 942        }
 943        for (j = 0; j < adapter->num_tx_queues; j++) {
 944                queue_stat = (u64 *)&adapter->tx_ring[j].stats;
 945                for (k = 0; k < stat_count; k++)
 946                        data[i + k] = queue_stat[k];
 947                i += k;
 948        }
 949        for (j = 0; j < adapter->num_rx_queues; j++) {
 950                queue_stat = (u64 *)&adapter->rx_ring[j].stats;
 951                for (k = 0; k < stat_count; k++)
 952                        data[i + k] = queue_stat[k];
 953                i += k;
 954        }
 955        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 956                for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
 957                        data[i++] = adapter->stats.pxontxc[j];
 958                        data[i++] = adapter->stats.pxofftxc[j];
 959                }
 960                for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
 961                        data[i++] = adapter->stats.pxonrxc[j];
 962                        data[i++] = adapter->stats.pxoffrxc[j];
 963                }
 964        }
 965}
 966
 967static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 968                              u8 *data)
 969{
 970        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 971        char *p = (char *)data;
 972        int i;
 973
 974        switch (stringset) {
 975        case ETH_SS_TEST:
 976                memcpy(data, *ixgbe_gstrings_test,
 977                       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
 978                break;
 979        case ETH_SS_STATS:
 980                for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
 981                        memcpy(p, ixgbe_gstrings_stats[i].stat_string,
 982                               ETH_GSTRING_LEN);
 983                        p += ETH_GSTRING_LEN;
 984                }
 985                for (i = 0; i < adapter->num_tx_queues; i++) {
 986                        sprintf(p, "tx_queue_%u_packets", i);
 987                        p += ETH_GSTRING_LEN;
 988                        sprintf(p, "tx_queue_%u_bytes", i);
 989                        p += ETH_GSTRING_LEN;
 990                }
 991                for (i = 0; i < adapter->num_rx_queues; i++) {
 992                        sprintf(p, "rx_queue_%u_packets", i);
 993                        p += ETH_GSTRING_LEN;
 994                        sprintf(p, "rx_queue_%u_bytes", i);
 995                        p += ETH_GSTRING_LEN;
 996                }
 997                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 998                        for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
 999                                sprintf(p, "tx_pb_%u_pxon", i);
1000                                p += ETH_GSTRING_LEN;
1001                                sprintf(p, "tx_pb_%u_pxoff", i);
1002                                p += ETH_GSTRING_LEN;
1003                        }
1004                        for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
1005                                sprintf(p, "rx_pb_%u_pxon", i);
1006                                p += ETH_GSTRING_LEN;
1007                                sprintf(p, "rx_pb_%u_pxoff", i);
1008                                p += ETH_GSTRING_LEN;
1009                        }
1010                }
1011                /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1012                break;
1013        }
1014}
1015
1016static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1017{
1018        struct ixgbe_hw *hw = &adapter->hw;
1019        bool link_up;
1020        u32 link_speed = 0;
1021        *data = 0;
1022
1023        hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1024        if (link_up)
1025                return *data;
1026        else
1027                *data = 1;
1028        return *data;
1029}
1030
1031/* ethtool register test data */
1032struct ixgbe_reg_test {
1033        u16 reg;
1034        u8  array_len;
1035        u8  test_type;
1036        u32 mask;
1037        u32 write;
1038};
1039
1040/* In the hardware, registers are laid out either singly, in arrays
1041 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
1042 * most tests take place on arrays or single registers (handled
1043 * as a single-element array) and special-case the tables.
1044 * Table tests are always pattern tests.
1045 *
1046 * We also make provision for some required setup steps by specifying
1047 * registers to be written without any read-back testing.
1048 */
1049
1050#define PATTERN_TEST    1
1051#define SET_READ_TEST   2
1052#define WRITE_NO_TEST   3
1053#define TABLE32_TEST    4
1054#define TABLE64_TEST_LO 5
1055#define TABLE64_TEST_HI 6
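
/*
 * Illustrative helper (an assumption, not part of the driver; the name
 * ixgbe_test_reg_offset() is invented): how ixgbe_reg_test() below turns one
 * test-table entry with array_len > 1 into MMIO offsets.  Per-queue
 * registers are spaced 0x40 bytes apart, 32-bit table entries 4 bytes
 * apart, and the two halves of 64-bit table entries 8 bytes apart.
 */
static inline u32 ixgbe_test_reg_offset(const struct ixgbe_reg_test *t, u8 i)
{
        switch (t->test_type) {
        case TABLE32_TEST:
                return t->reg + (i * 4);        /* contiguous 32-bit table  */
        case TABLE64_TEST_LO:
                return t->reg + (i * 8);        /* low dword of 64-bit pair */
        case TABLE64_TEST_HI:
                return (t->reg + 4) + (i * 8);  /* high dword               */
        default:
                return t->reg + (i * 0x40);     /* per-queue register array */
        }
}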
1056
1057/* default 82599 register test */
1058static struct ixgbe_reg_test reg_test_82599[] = {
1059        { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1060        { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1061        { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1062        { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1063        { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1064        { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1065        { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1066        { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1067        { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1068        { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1069        { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1070        { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1071        { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1072        { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1073        { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1074        { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1075        { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1076        { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1077        { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1078        { 0, 0, 0, 0 }
1079};
1080
1081/* default 82598 register test */
1082static struct ixgbe_reg_test reg_test_82598[] = {
1083        { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1084        { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1085        { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1086        { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1087        { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1088        { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1089        { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1090        /* Enable all four RX queues before testing. */
1091        { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1092        /* RDH is read-only for 82598, only test RDT. */
1093        { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1094        { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1095        { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1096        { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1097        { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1098        { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1099        { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1100        { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1101        { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1102        { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1103        { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1104        { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1105        { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1106        { 0, 0, 0, 0 }
1107};
1108
1109#define REG_PATTERN_TEST(R, M, W)                                             \
1110{                                                                             \
1111        u32 pat, val, before;                                                 \
1112        const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
1113        for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {                       \
1114                before = readl(adapter->hw.hw_addr + R);                      \
1115                writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
1116                val = readl(adapter->hw.hw_addr + R);                         \
1117                if (val != (_test[pat] & W & M)) {                            \
1118                        DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\
1119                                          "0x%08X expected 0x%08X\n",         \
1120                                R, val, (_test[pat] & W & M));                \
1121                        *data = R;                                            \
1122                        writel(before, adapter->hw.hw_addr + R);              \
1123                        return 1;                                             \
1124                }                                                             \
1125                writel(before, adapter->hw.hw_addr + R);                      \
1126        }                                                                     \
1127}
1128
1129#define REG_SET_AND_CHECK(R, M, W)                                            \
1130{                                                                             \
1131        u32 val, before;                                                      \
1132        before = readl(adapter->hw.hw_addr + R);                              \
1133        writel((W & M), (adapter->hw.hw_addr + R));                           \
1134        val = readl(adapter->hw.hw_addr + R);                                 \
1135        if ((W & M) != (val & M)) {                                           \
1136                DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
1137                                 "expected 0x%08X\n", R, (val & M), (W & M)); \
1138                *data = R;                                                    \
1139                writel(before, (adapter->hw.hw_addr + R));                    \
1140                return 1;                                                     \
1141        }                                                                     \
1142        writel(before, (adapter->hw.hw_addr + R));                            \
1143}
1144
1145static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1146{
1147        struct ixgbe_reg_test *test;
1148        u32 value, before, after;
1149        u32 i, toggle;
1150
1151        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1152                toggle = 0x7FFFF30F;
1153                test = reg_test_82599;
1154        } else {
1155                toggle = 0x7FFFF3FF;
1156                test = reg_test_82598;
1157        }
1158
1159        /*
1160         * Because the status register is such a special case,
1161         * we handle it separately from the rest of the register
1162         * tests.  Some bits are read-only, some toggle, and some
1163         * are writeable on newer MACs.
1164         */
1165        before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
1166        value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
1167        IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
1168        after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
1169        if (value != after) {
1170                DPRINTK(DRV, ERR, "failed STATUS register test got: "
1171                        "0x%08X expected: 0x%08X\n", after, value);
1172                *data = 1;
1173                return 1;
1174        }
1175        /* restore previous status */
1176        IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
1177
1178        /*
1179         * Perform the remainder of the register test, looping through
1180         * the test table until we either fail or reach the null entry.
1181         */
1182        while (test->reg) {
1183                for (i = 0; i < test->array_len; i++) {
1184                        switch (test->test_type) {
1185                        case PATTERN_TEST:
1186                                REG_PATTERN_TEST(test->reg + (i * 0x40),
1187                                                test->mask,
1188                                                test->write);
1189                                break;
1190                        case SET_READ_TEST:
1191                                REG_SET_AND_CHECK(test->reg + (i * 0x40),
1192                                                test->mask,
1193                                                test->write);
1194                                break;
1195                        case WRITE_NO_TEST:
1196                                writel(test->write,
1197                                       (adapter->hw.hw_addr + test->reg)
1198                                       + (i * 0x40));
1199                                break;
1200                        case TABLE32_TEST:
1201                                REG_PATTERN_TEST(test->reg + (i * 4),
1202                                                test->mask,
1203                                                test->write);
1204                                break;
1205                        case TABLE64_TEST_LO:
1206                                REG_PATTERN_TEST(test->reg + (i * 8),
1207                                                test->mask,
1208                                                test->write);
1209                                break;
1210                        case TABLE64_TEST_HI:
1211                                REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1212                                                test->mask,
1213                                                test->write);
1214                                break;
1215                        }
1216                }
1217                test++;
1218        }
1219
1220        *data = 0;
1221        return 0;
1222}
1223
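    /* EEPROM self-test: fails if the EEPROM checksum does not validate */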
1224static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1225{
1226        struct ixgbe_hw *hw = &adapter->hw;
1227        if (hw->eeprom.ops.validate_checksum(hw, NULL))
1228                *data = 1;
1229        else
1230                *data = 0;
1231        return *data;
1232}
1233
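    /*
     * Interrupt handler used only by the interrupt self-test; latches the
     * asserted causes from EICR into adapter->test_icr for inspection.
     */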
1234static irqreturn_t ixgbe_test_intr(int irq, void *data)
1235{
1236        struct net_device *netdev = (struct net_device *) data;
1237        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1238
1239        adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1240
1241        return IRQ_HANDLED;
1242}
1243
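    /*
     * Interrupt self-test: hook a temporary handler on the legacy/MSI vector
     * (MSI-X is not tested yet), then force each of the first ten interrupt
     * causes and verify that masked causes stay quiet while unmasked causes
     * are actually delivered.
     */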
1244static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1245{
1246        struct net_device *netdev = adapter->netdev;
1247        u32 mask, i = 0, shared_int = true;
1248        u32 irq = adapter->pdev->irq;
1249
1250        *data = 0;
1251
1252        /* Hook up test interrupt handler just for this test */
1253        if (adapter->msix_entries) {
1254                /* NOTE: we don't test MSI-X interrupts here, yet */
1255                return 0;
1256        } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1257                shared_int = false;
1258                if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
1259                                netdev)) {
1260                        *data = 1;
1261                        return -1;
1262                }
1263        } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
1264                                netdev->name, netdev)) {
1265                shared_int = false;
1266        } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
1267                               netdev->name, netdev)) {
1268                *data = 1;
1269                return -1;
1270        }
1271        DPRINTK(HW, INFO, "testing %s interrupt\n",
1272                (shared_int ? "shared" : "unshared"));
1273
1274        /* Disable all the interrupts */
1275        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1276        msleep(10);
1277
1278        /* Test each interrupt */
1279        for (; i < 10; i++) {
1280                /* Interrupt to test */
1281                mask = 1 << i;
1282
1283                if (!shared_int) {
1284                        /*
1285                         * Disable the interrupts to be reported in
1286                         * the cause register and then force the same
1287                         * interrupt and see if one gets posted.  If
1288                         * an interrupt was posted to the bus, the
1289                         * test failed.
1290                         */
1291                        adapter->test_icr = 0;
1292                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1293                                        ~mask & 0x00007FFF);
1294                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1295                                        ~mask & 0x00007FFF);
1296                        msleep(10);
1297
1298                        if (adapter->test_icr & mask) {
1299                                *data = 3;
1300                                break;
1301                        }
1302                }
1303
1304                /*
1305                 * Enable the interrupt to be reported in the cause
1306                 * register and then force the same interrupt and see
1307                 * if one gets posted.  If an interrupt was not posted
1308                 * to the bus, the test failed.
1309                 */
1310                adapter->test_icr = 0;
1311                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1312                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1313                msleep(10);
1314
1315                if (!(adapter->test_icr & mask)) {
1316                        *data = 4;
1317                        break;
1318                }
1319
1320                if (!shared_int) {
1321                        /*
1322                         * Disable the other interrupts to be reported in
1323                         * the cause register and then force the other
1324                         * interrupts and see if any get posted.  If
1325                         * an interrupt was posted to the bus, the
1326                         * test failed.
1327                         */
1328                        adapter->test_icr = 0;
1329                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1330                                        ~mask & 0x00007FFF);
1331                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1332                                        ~mask & 0x00007FFF);
1333                        msleep(10);
1334
1335                        if (adapter->test_icr) {
1336                                *data = 5;
1337                                break;
1338                        }
1339                }
1340        }
1341
1342        /* Disable all the interrupts */
1343        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1344        msleep(10);
1345
1346        /* Unhook test interrupt handler */
1347        free_irq(irq, netdev);
1348
1349        return *data;
1350}
1351
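    /*
     * Tear down the loopback-test rings: stop the Rx/Tx DMA engines, reset
     * the hardware, then unmap and free every test buffer and both
     * descriptor rings.
     */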
1352static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1353{
1354        struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1355        struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1356        struct ixgbe_hw *hw = &adapter->hw;
1357        struct pci_dev *pdev = adapter->pdev;
1358        u32 reg_ctl;
1359        int i;
1360
1361        /* shut down the DMA engines now so they can be reinitialized later */
1362
1363        /* first Rx */
1364        reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1365        reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1366        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1367        reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0));
1368        reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
1369        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl);
1370
1371        /* now Tx */
1372        reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0));
1373        reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1374        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl);
1375        if (hw->mac.type == ixgbe_mac_82599EB) {
1376                reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1377                reg_ctl &= ~IXGBE_DMATXCTL_TE;
1378                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1379        }
1380
1381        ixgbe_reset(adapter);
1382
1383        if (tx_ring->desc && tx_ring->tx_buffer_info) {
1384                for (i = 0; i < tx_ring->count; i++) {
1385                        struct ixgbe_tx_buffer *buf =
1386                                        &(tx_ring->tx_buffer_info[i]);
1387                        if (buf->dma)
1388                                pci_unmap_single(pdev, buf->dma, buf->length,
1389                                                 PCI_DMA_TODEVICE);
1390                        if (buf->skb)
1391                                dev_kfree_skb(buf->skb);
1392                }
1393        }
1394
1395        if (rx_ring->desc && rx_ring->rx_buffer_info) {
1396                for (i = 0; i < rx_ring->count; i++) {
1397                        struct ixgbe_rx_buffer *buf =
1398                                        &(rx_ring->rx_buffer_info[i]);
1399                        if (buf->dma)
1400                                pci_unmap_single(pdev, buf->dma,
1401                                                 IXGBE_RXBUFFER_2048,
1402                                                 PCI_DMA_FROMDEVICE);
1403                        if (buf->skb)
1404                                dev_kfree_skb(buf->skb);
1405                }
1406        }
1407
1408        if (tx_ring->desc) {
1409                pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
1410                                    tx_ring->dma);
1411                tx_ring->desc = NULL;
1412        }
1413        if (rx_ring->desc) {
1414                pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
1415                                    rx_ring->dma);
1416                rx_ring->desc = NULL;
1417        }
1418
1419        kfree(tx_ring->tx_buffer_info);
1420        tx_ring->tx_buffer_info = NULL;
1421        kfree(rx_ring->rx_buffer_info);
1422        rx_ring->rx_buffer_info = NULL;
1423
1424        return;
1425}
1426
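    /*
     * Build minimal Tx and Rx descriptor rings on queue 0 for the loopback
     * test: DMA-map 1KB transmit frames and 2KB receive buffers, program the
     * ring registers directly, relax the receive filters (broadcast accept,
     * store bad packets, multicast promiscuous) and enable both queues.
     */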
1427static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1428{
1429        struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1430        struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1431        struct pci_dev *pdev = adapter->pdev;
1432        u32 rctl, reg_data;
1433        int i, ret_val;
1434
1435        /* Setup Tx descriptor ring and Tx buffers */
1436
1437        if (!tx_ring->count)
1438                tx_ring->count = IXGBE_DEFAULT_TXD;
1439
1440        tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
1441                                          sizeof(struct ixgbe_tx_buffer),
1442                                          GFP_KERNEL);
1443        if (!(tx_ring->tx_buffer_info)) {
1444                ret_val = 1;
1445                goto err_nomem;
1446        }
1447
1448        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
1449        tx_ring->size = ALIGN(tx_ring->size, 4096);
1450        if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1451                                                   &tx_ring->dma))) {
1452                ret_val = 2;
1453                goto err_nomem;
1454        }
1455        tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1456
1457        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
1458                        ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1459        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
1460                        ((u64) tx_ring->dma >> 32));
1461        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
1462                        tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
1463        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
1464        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
1465
1466        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1467        reg_data |= IXGBE_HLREG0_TXPADEN;
1468        IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1469
1470        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1471                reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1472                reg_data |= IXGBE_DMATXCTL_TE;
1473                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1474        }
1475        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
1476        reg_data |= IXGBE_TXDCTL_ENABLE;
1477        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
1478
1479        for (i = 0; i < tx_ring->count; i++) {
1480                union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
1481                struct sk_buff *skb;
1482                unsigned int size = 1024;
1483
1484                skb = alloc_skb(size, GFP_KERNEL);
1485                if (!skb) {
1486                        ret_val = 3;
1487                        goto err_nomem;
1488                }
1489                skb_put(skb, size);
1490                tx_ring->tx_buffer_info[i].skb = skb;
1491                tx_ring->tx_buffer_info[i].length = skb->len;
1492                tx_ring->tx_buffer_info[i].dma =
1493                        pci_map_single(pdev, skb->data, skb->len,
1494                                       PCI_DMA_TODEVICE);
1495                desc->read.buffer_addr =
1496                                    cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
1497                desc->read.cmd_type_len = cpu_to_le32(skb->len);
1498                desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
1499                                                       IXGBE_TXD_CMD_IFCS |
1500                                                       IXGBE_TXD_CMD_RS);
1501                desc->read.olinfo_status = 0;
1502                if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1503                        desc->read.olinfo_status |=
1504                                cpu_to_le32(skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
1505
1506        }
1507
1508        /* Setup Rx Descriptor ring and Rx buffers */
1509
1510        if (!rx_ring->count)
1511                rx_ring->count = IXGBE_DEFAULT_RXD;
1512
1513        rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
1514                                          sizeof(struct ixgbe_rx_buffer),
1515                                          GFP_KERNEL);
1516        if (!(rx_ring->rx_buffer_info)) {
1517                ret_val = 4;
1518                goto err_nomem;
1519        }
1520
1521        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
1522        rx_ring->size = ALIGN(rx_ring->size, 4096);
1523        if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
1524                                                   &rx_ring->dma))) {
1525                ret_val = 5;
1526                goto err_nomem;
1527        }
1528        rx_ring->next_to_use = rx_ring->next_to_clean = 0;
1529
1530        rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1531        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
1532        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
1533                        ((u64)rx_ring->dma & 0xFFFFFFFF));
1534        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
1535                        ((u64) rx_ring->dma >> 32));
1536        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
1537        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
1538        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
1539
1540        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1541        reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1542        IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
1543
1544        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1545        reg_data &= ~IXGBE_HLREG0_LPBK;
1546        IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1547
1548        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
1549#define IXGBE_RDRXCTL_RDMTS_MASK    0x00000003 /* Receive Descriptor Minimum
1550                                                  Threshold Size mask */
1551        reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
1552        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
1553
1554        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
1555#define IXGBE_MCSTCTRL_MO_MASK      0x00000003 /* Multicast Offset mask */
1556        reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
1557        reg_data |= adapter->hw.mac.mc_filter_type;
1558        IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
1559
1560        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
1561        reg_data |= IXGBE_RXDCTL_ENABLE;
1562        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
1563        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1564                int j = adapter->rx_ring[0].reg_idx;
1565                u32 k;
1566                for (k = 0; k < 10; k++) {
1567                        if (IXGBE_READ_REG(&adapter->hw,
1568                                           IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1569                                break;
1570                        else
1571                                msleep(1);
1572                }
1573        }
1574
1575        rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
1576        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1577
1578        for (i = 0; i < rx_ring->count; i++) {
1579                union ixgbe_adv_rx_desc *rx_desc =
1580                                                 IXGBE_RX_DESC_ADV(*rx_ring, i);
1581                struct sk_buff *skb;
1582
1583                skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
1584                if (!skb) {
1585                        ret_val = 6;
1586                        goto err_nomem;
1587                }
1588                skb_reserve(skb, NET_IP_ALIGN);
1589                rx_ring->rx_buffer_info[i].skb = skb;
1590                rx_ring->rx_buffer_info[i].dma =
1591                        pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
1592                                       PCI_DMA_FROMDEVICE);
1593                rx_desc->read.pkt_addr =
1594                                cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
1595                memset(skb->data, 0x00, skb->len);
1596        }
1597
1598        return 0;
1599
1600err_nomem:
1601        ixgbe_free_desc_rings(adapter);
1602        return ret_val;
1603}
1604
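    /*
     * Put the MAC into loopback: set the HLREG0 loopback bit, force the link
     * up at 10G via AUTOC, and on 82598 power down the Atlas Tx lanes so the
     * test frames never reach the wire.
     */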
1605static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1606{
1607        struct ixgbe_hw *hw = &adapter->hw;
1608        u32 reg_data;
1609
1610        /* right now we only support MAC loopback in the driver */
1611
1612        /* Setup MAC loopback */
1613        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1614        reg_data |= IXGBE_HLREG0_LPBK;
1615        IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1616
1617        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
1618        reg_data &= ~IXGBE_AUTOC_LMS_MASK;
1619        reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1620        IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
1621
1622        /* Disable Atlas Tx lanes; re-enabled in reset path */
1623        if (hw->mac.type == ixgbe_mac_82598EB) {
1624                u8 atlas;
1625
1626                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1627                atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1628                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1629
1630                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1631                atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1632                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1633
1634                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1635                atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1636                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1637
1638                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1639                atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1640                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1641        }
1642
1643        return 0;
1644}
1645
1646static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1647{
1648        u32 reg_data;
1649
1650        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1651        reg_data &= ~IXGBE_HLREG0_LPBK;
1652        IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1653}
1654
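    /*
     * Fill a test frame: 0xFF in the first half, 0xAA in the second half,
     * plus 0xBE/0xAF marker bytes at fixed offsets that the receive check
     * looks for.
     */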
1655static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1656                                      unsigned int frame_size)
1657{
1658        memset(skb->data, 0xFF, frame_size);
1659        frame_size &= ~1;
1660        memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1661        memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
1662        memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
1663}
1664
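    /*
     * Verify a received test frame by its 0xFF fill byte and the 0xBE/0xAF
     * markers; returns 0 on a match, 13 (mis-compare) otherwise.
     */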
1665static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
1666                                    unsigned int frame_size)
1667{
1668        frame_size &= ~1;
1669        if (*(skb->data + 3) == 0xFF) {
1670                if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1671                    (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
1672                        return 0;
1673                }
1674        }
1675        return 13;
1676}
1677
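    /*
     * Send bursts of 64 looped-back frames and confirm each burst is received
     * intact in time; returns 0 on success, 13 on a frame mis-compare or 14
     * on a receive timeout.
     */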
1678static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1679{
1680        struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1681        struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1682        struct pci_dev *pdev = adapter->pdev;
1683        int i, j, k, l, lc, good_cnt, ret_val = 0;
1684        unsigned long time;
1685
1686        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);
1687
1688        /*
1689         * Calculate the loop count based on the largest descriptor ring.
1690         * The idea is to wrap the largest ring a number of times using 64
1691         * send/receive pairs during each loop.
1692         */
1693
1694        if (rx_ring->count <= tx_ring->count)
1695                lc = ((tx_ring->count / 64) * 2) + 1;
1696        else
1697                lc = ((rx_ring->count / 64) * 2) + 1;
1698
1699        k = l = 0;
1700        for (j = 0; j <= lc; j++) {
1701                for (i = 0; i < 64; i++) {
1702                        ixgbe_create_lbtest_frame(
1703                                        tx_ring->tx_buffer_info[k].skb,
1704                                        1024);
1705                        pci_dma_sync_single_for_device(pdev,
1706                                tx_ring->tx_buffer_info[k].dma,
1707                                tx_ring->tx_buffer_info[k].length,
1708                                PCI_DMA_TODEVICE);
1709                        if (unlikely(++k == tx_ring->count))
1710                                k = 0;
1711                }
1712                IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
1713                msleep(200);
1714                /* set the start time for the receive */
1715                time = jiffies;
1716                good_cnt = 0;
1717                do {
1718                        /* receive the sent packets */
1719                        pci_dma_sync_single_for_cpu(pdev,
1720                                        rx_ring->rx_buffer_info[l].dma,
1721                                        IXGBE_RXBUFFER_2048,
1722                                        PCI_DMA_FROMDEVICE);
1723                        ret_val = ixgbe_check_lbtest_frame(
1724                                        rx_ring->rx_buffer_info[l].skb, 1024);
1725                        if (!ret_val)
1726                                good_cnt++;
1727                        if (++l == rx_ring->count)
1728                                l = 0;
1729                        /*
1730                         * time + 20 msecs (200 msecs on 2.4) is more than
1731                         * enough time to complete the receives; if it's
1732                         * exceeded, break out and flag an error
1733                         */
1734                } while (good_cnt < 64 && jiffies < (time + 20));
1735                if (good_cnt != 64) {
1736                        /* ret_val is the same as the mis-compare error code */
1737                        ret_val = 13;
1738                        break;
1739                }
1740                if (jiffies >= (time + 20)) {
1741                        /* Error code for a timeout */
1742                        ret_val = 14;
1743                        break;
1744                }
1745        }
1746
1747        return ret_val;
1748}
1749
1750static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
1751{
1752        *data = ixgbe_setup_desc_rings(adapter);
1753        if (*data)
1754                goto out;
1755        *data = ixgbe_setup_loopback_test(adapter);
1756        if (*data)
1757                goto err_loopback;
1758        *data = ixgbe_run_loopback_test(adapter);
1759        ixgbe_loopback_cleanup(adapter);
1760
1761err_loopback:
1762        ixgbe_free_desc_rings(adapter);
1763out:
1764        return *data;
1765}
1766
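    /*
     * ethtool self-test entry point (e.g. "ethtool -t ethX offline"): offline
     * mode runs the link, register, EEPROM, interrupt and loopback tests with
     * the interface brought down; online mode only checks link and passes the
     * remaining tests by default.
     */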
1767static void ixgbe_diag_test(struct net_device *netdev,
1768                            struct ethtool_test *eth_test, u64 *data)
1769{
1770        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1771        bool if_running = netif_running(netdev);
1772
1773        set_bit(__IXGBE_TESTING, &adapter->state);
1774        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1775                /* Offline tests */
1776
1777                DPRINTK(HW, INFO, "offline testing starting\n");
1778
1779                /* Link test performed before hardware reset so autoneg doesn't
1780                 * interfere with test result */
1781                if (ixgbe_link_test(adapter, &data[4]))
1782                        eth_test->flags |= ETH_TEST_FL_FAILED;
1783
1784                if (if_running)
1785                        /* indicate we're in test mode */
1786                        dev_close(netdev);
1787                else
1788                        ixgbe_reset(adapter);
1789
1790                DPRINTK(HW, INFO, "register testing starting\n");
1791                if (ixgbe_reg_test(adapter, &data[0]))
1792                        eth_test->flags |= ETH_TEST_FL_FAILED;
1793
1794                ixgbe_reset(adapter);
1795                DPRINTK(HW, INFO, "eeprom testing starting\n");
1796                if (ixgbe_eeprom_test(adapter, &data[1]))
1797                        eth_test->flags |= ETH_TEST_FL_FAILED;
1798
1799                ixgbe_reset(adapter);
1800                DPRINTK(HW, INFO, "interrupt testing starting\n");
1801                if (ixgbe_intr_test(adapter, &data[2]))
1802                        eth_test->flags |= ETH_TEST_FL_FAILED;
1803
1804                ixgbe_reset(adapter);
1805                DPRINTK(HW, INFO, "loopback testing starting\n");
1806                if (ixgbe_loopback_test(adapter, &data[3]))
1807                        eth_test->flags |= ETH_TEST_FL_FAILED;
1808
1809                ixgbe_reset(adapter);
1810
1811                clear_bit(__IXGBE_TESTING, &adapter->state);
1812                if (if_running)
1813                        dev_open(netdev);
1814        } else {
1815                DPRINTK(HW, INFO, "online testing starting\n");
1816                /* Online tests */
1817                if (ixgbe_link_test(adapter, &data[4]))
1818                        eth_test->flags |= ETH_TEST_FL_FAILED;
1819
1820                /* Online tests aren't run; pass by default */
1821                data[0] = 0;
1822                data[1] = 0;
1823                data[2] = 0;
1824                data[3] = 0;
1825
1826                clear_bit(__IXGBE_TESTING, &adapter->state);
1827        }
1828        msleep_interruptible(4 * 1000);
1829}
1830
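    /*
     * Wake-on-LAN is only advertised on devices known to support it
     * (currently the 82599 KX4); returns 0 if WoL is allowed, otherwise
     * clears wol->supported and returns 1.
     */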
1831static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1832                               struct ethtool_wolinfo *wol)
1833{
1834        struct ixgbe_hw *hw = &adapter->hw;
1835        int retval = 1;
1836
1837        switch (hw->device_id) {
1838        case IXGBE_DEV_ID_82599_KX4:
1839                retval = 0;
1840                break;
1841        default:
1842                wol->supported = 0;
1843        }
1844
1845        return retval;
1846}
1847
1848static void ixgbe_get_wol(struct net_device *netdev,
1849                          struct ethtool_wolinfo *wol)
1850{
1851        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1852
1853        wol->supported = WAKE_UCAST | WAKE_MCAST |
1854                         WAKE_BCAST | WAKE_MAGIC;
1855        wol->wolopts = 0;
1856
1857        if (ixgbe_wol_exclusion(adapter, wol) ||
1858            !device_can_wakeup(&adapter->pdev->dev))
1859                return;
1860
1861        if (adapter->wol & IXGBE_WUFC_EX)
1862                wol->wolopts |= WAKE_UCAST;
1863        if (adapter->wol & IXGBE_WUFC_MC)
1864                wol->wolopts |= WAKE_MCAST;
1865        if (adapter->wol & IXGBE_WUFC_BC)
1866                wol->wolopts |= WAKE_BCAST;
1867        if (adapter->wol & IXGBE_WUFC_MAG)
1868                wol->wolopts |= WAKE_MAGIC;
1869
1870        return;
1871}
1872
1873static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1874{
1875        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1876
1877        if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1878                return -EOPNOTSUPP;
1879
1880        if (ixgbe_wol_exclusion(adapter, wol))
1881                return wol->wolopts ? -EOPNOTSUPP : 0;
1882
1883        adapter->wol = 0;
1884
1885        if (wol->wolopts & WAKE_UCAST)
1886                adapter->wol |= IXGBE_WUFC_EX;
1887        if (wol->wolopts & WAKE_MCAST)
1888                adapter->wol |= IXGBE_WUFC_MC;
1889        if (wol->wolopts & WAKE_BCAST)
1890                adapter->wol |= IXGBE_WUFC_BC;
1891        if (wol->wolopts & WAKE_MAGIC)
1892                adapter->wol |= IXGBE_WUFC_MAG;
1893
1894        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1895
1896        return 0;
1897}
1898
1899static int ixgbe_nway_reset(struct net_device *netdev)
1900{
1901        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1902
1903        if (netif_running(netdev))
1904                ixgbe_reinit_locked(adapter);
1905
1906        return 0;
1907}
1908
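    /*
     * Identify the adapter (ethtool -p): blink the LED for the requested
     * number of seconds (capped at 300), then restore the original LEDCTL
     * value.
     */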
1909static int ixgbe_phys_id(struct net_device *netdev, u32 data)
1910{
1911        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1912        struct ixgbe_hw *hw = &adapter->hw;
1913        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1914        u32 i;
1915
1916        if (!data || data > 300)
1917                data = 300;
1918
1919        for (i = 0; i < (data * 1000); i += 400) {
1920                hw->mac.ops.led_on(hw, IXGBE_LED_ON);
1921                msleep_interruptible(200);
1922                hw->mac.ops.led_off(hw, IXGBE_LED_ON);
1923                msleep_interruptible(200);
1924        }
1925
1926        /* Restore LED settings */
1927        IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg);
1928
1929        return 0;
1930}
1931
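    /*
     * Report interrupt coalescing (ethtool -c): 0 means throttling disabled,
     * 1 means dynamic ITR, any other value is the fixed interrupt rate
     * converted back to microseconds.
     */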
1932static int ixgbe_get_coalesce(struct net_device *netdev,
1933                              struct ethtool_coalesce *ec)
1934{
1935        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1936
1937        ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
1938
1939        /* only valid if in constant ITR mode */
1940        switch (adapter->rx_itr_setting) {
1941        case 0:
1942                /* throttling disabled */
1943                ec->rx_coalesce_usecs = 0;
1944                break;
1945        case 1:
1946                /* dynamic ITR mode */
1947                ec->rx_coalesce_usecs = 1;
1948                break;
1949        default:
1950                /* fixed interrupt rate mode */
1951                ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
1952                break;
1953        }
1954
1955        /* only valid if in constant ITR mode */
1956        switch (adapter->tx_itr_setting) {
1957        case 0:
1958                /* throttling disabled */
1959                ec->tx_coalesce_usecs = 0;
1960                break;
1961        case 1:
1962                /* dynamic ITR mode */
1963                ec->tx_coalesce_usecs = 1;
1964                break;
1965        default:
1966                ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
1967                break;
1968        }
1969
1970        return 0;
1971}
1972
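    /*
     * Apply interrupt coalescing (ethtool -C): convert the requested
     * microsecond values to interrupts/second, range-check them against
     * IXGBE_MIN/MAX_INT_RATE and write the new EITR value to every queue
     * vector.
     */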
1973static int ixgbe_set_coalesce(struct net_device *netdev,
1974                              struct ethtool_coalesce *ec)
1975{
1976        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1977        struct ixgbe_q_vector *q_vector;
1978        int i;
1979
1980        /*
1981         * don't accept Tx-specific changes if we've got mixed Rx/Tx vectors;
1982         * test and jump out here if needed before changing the Rx numbers
1983         */
1984        if ((1000000/ec->tx_coalesce_usecs) != adapter->tx_eitr_param &&
1985            adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
1986                return -EINVAL;
1987
1988        if (ec->tx_max_coalesced_frames_irq)
1989                adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
1990
1991        if (ec->rx_coalesce_usecs > 1) {
1992                /* check the limits */
1993                if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
1994                    (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
1995                        return -EINVAL;
1996
1997                /* store the value in ints/second */
1998                adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
1999
2000                /* static value of interrupt rate */
2001                adapter->rx_itr_setting = adapter->rx_eitr_param;
2002                /* clear the lower bit as its used for dynamic state */
2003                /* clear the lower bit as it's used for dynamic state */
2004        } else if (ec->rx_coalesce_usecs == 1) {
2005                /* 1 means dynamic mode */
2006                adapter->rx_eitr_param = 20000;
2007                adapter->rx_itr_setting = 1;
2008        } else {
2009                /*
2010                 * any other value means disable eitr, which is best
2011                 * served by setting the interrupt rate very high
2012                 */
2013                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
2014                        adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
2015                else
2016                        adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2017                adapter->rx_itr_setting = 0;
2018        }
2019
2020        if (ec->tx_coalesce_usecs > 1) {
2021                /* check the limits */
2022                if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
2023                    (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2024                        return -EINVAL;
2025
2026                /* store the value in ints/second */
2027                adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;
2028
2029                /* static value of interrupt rate */
2030                adapter->tx_itr_setting = adapter->tx_eitr_param;
2031
2032                /* clear the lower bit as its used for dynamic state */
2033                /* clear the lower bit as it's used for dynamic state */
2034        } else if (ec->tx_coalesce_usecs == 1) {
2035                /* 1 means dynamic mode */
2036                adapter->tx_eitr_param = 10000;
2037                adapter->tx_itr_setting = 1;
2038        } else {
2039                adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
2040                adapter->tx_itr_setting = 0;
2041        }
2042
2043        /* MSI/MSI-X Interrupt Mode */
2044        if (adapter->flags &
2045            (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
2046                int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2047                for (i = 0; i < num_vectors; i++) {
2048                        q_vector = adapter->q_vector[i];
2049                        if (q_vector->txr_count && !q_vector->rxr_count)
2050                                /* tx only */
2051                                q_vector->eitr = adapter->tx_eitr_param;
2052                        else
2053                                /* rx only or mixed */
2054                                q_vector->eitr = adapter->rx_eitr_param;
2055                        ixgbe_write_eitr(q_vector);
2056                }
2057        /* Legacy Interrupt Mode */
2058        } else {
2059                q_vector = adapter->q_vector[0];
2060                q_vector->eitr = adapter->rx_eitr_param;
2061                ixgbe_write_eitr(q_vector);
2062        }
2063
2064        return 0;
2065}
2066
2067static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2068{
2069        struct ixgbe_adapter *adapter = netdev_priv(netdev);
2070
2071        ethtool_op_set_flags(netdev, data);
2072
2073        if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
2074                return 0;
2075
2076        /* if state changes we need to update adapter->flags and reset */
2077        if ((!!(data & ETH_FLAG_LRO)) !=
2078            (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
2079                adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2080                if (netif_running(netdev))
2081                        ixgbe_reinit_locked(adapter);
2082                else
2083                        ixgbe_reset(adapter);
2084        }
2085        return 0;
2086
2087}
2088
2089static const struct ethtool_ops ixgbe_ethtool_ops = {
2090        .get_settings           = ixgbe_get_settings,
2091        .set_settings           = ixgbe_set_settings,
2092        .get_drvinfo            = ixgbe_get_drvinfo,
2093        .get_regs_len           = ixgbe_get_regs_len,
2094        .get_regs               = ixgbe_get_regs,
2095        .get_wol                = ixgbe_get_wol,
2096        .set_wol                = ixgbe_set_wol,
2097        .nway_reset             = ixgbe_nway_reset,
2098        .get_link               = ethtool_op_get_link,
2099        .get_eeprom_len         = ixgbe_get_eeprom_len,
2100        .get_eeprom             = ixgbe_get_eeprom,
2101        .get_ringparam          = ixgbe_get_ringparam,
2102        .set_ringparam          = ixgbe_set_ringparam,
2103        .get_pauseparam         = ixgbe_get_pauseparam,
2104        .set_pauseparam         = ixgbe_set_pauseparam,
2105        .get_rx_csum            = ixgbe_get_rx_csum,
2106        .set_rx_csum            = ixgbe_set_rx_csum,
2107        .get_tx_csum            = ixgbe_get_tx_csum,
2108        .set_tx_csum            = ixgbe_set_tx_csum,
2109        .get_sg                 = ethtool_op_get_sg,
2110        .set_sg                 = ethtool_op_set_sg,
2111        .get_msglevel           = ixgbe_get_msglevel,
2112        .set_msglevel           = ixgbe_set_msglevel,
2113        .get_tso                = ethtool_op_get_tso,
2114        .set_tso                = ixgbe_set_tso,
2115        .self_test              = ixgbe_diag_test,
2116        .get_strings            = ixgbe_get_strings,
2117        .phys_id                = ixgbe_phys_id,
2118        .get_sset_count         = ixgbe_get_sset_count,
2119        .get_ethtool_stats      = ixgbe_get_ethtool_stats,
2120        .get_coalesce           = ixgbe_get_coalesce,
2121        .set_coalesce           = ixgbe_set_coalesce,
2122        .get_flags              = ethtool_op_get_flags,
2123        .set_flags              = ixgbe_set_flags,
2124};
2125
2126void ixgbe_set_ethtool_ops(struct net_device *netdev)
2127{
2128        SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
2129}
2130