linux/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"


#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
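
/*
 * These macros use the standard null-pointer sizeof idiom: the expression
 * ((struct ixgbe_adapter *)0)->m only appears as an operand of sizeof(),
 * which is evaluated at compile time, so the null pointer is never
 * dereferenced.  For example, IXGBE_STAT(lsc_int) expands to
 *
 *	IXGBE_STATS,
 *	sizeof(((struct ixgbe_adapter *)0)->lsc_int),
 *	offsetof(struct ixgbe_adapter, lsc_int)
 *
 * which supplies the .type, .sizeof_stat and .stat_offset initializers of
 * one struct ixgbe_stats entry in a single macro invocation.
 */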

static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0])},
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};

/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically, so we
 * define IXGBE_NUM_RX_QUEUES to evaluate to num_tx_queues.  This is
 * used because there is no good way to get the maximum number of rx
 * queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
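
/*
 * Rough sketch of the ethtool stats buffer this describes; it matches the
 * order in which ixgbe_get_ethtool_stats() and ixgbe_get_strings() below
 * walk the counters (actual queue counts are per-device):
 *
 *	data[0 .. IXGBE_GLOBAL_STATS_LEN - 1]   ixgbe_gstrings_stats entries
 *	tx_queue_N_packets / tx_queue_N_bytes   one pair per Tx queue
 *	rx_queue_N_packets / rx_queue_N_bytes   one pair per Rx queue
 *	tx_pb_N_pxon / tx_pb_N_pxoff            per packet buffer, with the
 *	rx_pb_N_pxon / rx_pb_N_pxoff            Rx equivalents last
 */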

static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static int ixgbe_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	u32 link_speed = 0;
	bool autoneg = false;
	bool link_up;

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		ecmd->supported |= SUPPORTED_10000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		ecmd->supported |= SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		ecmd->supported |= SUPPORTED_100baseT_Full;

	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			ecmd->advertising |= ADVERTISED_10000baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;
	} else {
		/* default modes in case phy.autoneg_advertised isn't set */
		if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
			ecmd->advertising |= ADVERTISED_10000baseT_Full;
		if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;
		if (supported_link & IXGBE_LINK_SPEED_100_FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
	}

	if (autoneg) {
		ecmd->supported |= SUPPORTED_Autoneg;
		ecmd->advertising |= ADVERTISED_Autoneg;
		ecmd->autoneg = AUTONEG_ENABLE;
	} else
		ecmd->autoneg = AUTONEG_DISABLE;

	ecmd->transceiver = XCVR_EXTERNAL;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_cu_unknown:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ecmd->supported |= SUPPORTED_TP;
			ecmd->advertising |= ADVERTISED_TP;
			ecmd->port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_OTHER;
		break;
	}

	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up) {
		switch (link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	return 0;
}

static int ixgbe_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (ecmd->autoneg == AUTONEG_DISABLE)
			return -EINVAL;

		if (ecmd->advertising & ~ecmd->supported)
			return -EINVAL;

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = ethtool_cmd_speed(ecmd);
		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
		    (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
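
/*
 * Usage sketch for the path above, assuming a copper or multispeed-fiber
 * port named ethX (0x1000 is ADVERTISED_10000baseT_Full):
 *
 *	ethtool -s ethX advertise 0x1000    # advertise only 10GbaseT/Full
 *	ethtool -s ethX autoneg off         # rejected here with -EINVAL
 *
 * Duplex forcing is never honored; the function only narrows what
 * autonegotiation may advertise.
 */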

static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->fc.disable_fc_autoneg)
		pause->autoneg = 0;
	else
		pause->autoneg = 1;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    (ixgbe_device_supports_autoneg_fc(hw) != 0))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if anything changed, update the flow control config and reapply */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
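
/*
 * The requested_mode selection above maps straight onto the ethtool -A
 * knobs, roughly:
 *
 *	ethtool -A ethX autoneg off rx on tx off   ->  ixgbe_fc_rx_pause
 *	ethtool -A ethX autoneg off rx off tx on   ->  ixgbe_fc_tx_pause
 *	ethtool -A ethX autoneg on                 ->  ixgbe_fc_full requested,
 *	                                               final mode negotiated
 *
 * Any change reinitializes (or resets) the adapter to apply it.
 */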

static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN	1129
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
	for (i = 0; i < 8; i++)
		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
	for (i = 0; i < 8; i++)
		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
	for (i = 0; i < 8; i++)
		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
	for (i = 0; i < 8; i++)
		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
}
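
/*
 * The dump above is what userspace sees via the register-dump ioctl; with
 * the stock ethtool binary that is, for example:
 *
 *	ethtool -d ethX raw on > regs.bin   # 1129 * 4 bytes, per regs_len
 *
 * regs->version packs mac.type, revision_id and device_id so a decoder
 * can tell the different register layouts apart.
 */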

static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}

static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
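
/*
 * Worked example for the word math above: a read of len = 3 bytes at
 * offset = 1 gives first_word = 0 and last_word = (1 + 3 - 1) >> 1 = 1,
 * so two 16-bit words are fetched; the memcpy() then skips the first
 * byte of the buffer ((eeprom->offset & 1) == 1) and copies out exactly
 * the 3 requested bytes.
 */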

static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					  &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
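
/*
 * Worked example for the read/modify/write handling above: writing 4
 * bytes at offset = 3 gives first_word = 1 and last_word = 3.  The odd
 * offset means word 1 is read back and only its second byte replaced;
 * offset + len = 7 is odd too, so word 3 is read back and only its first
 * byte replaced.  Word 2 is overwritten outright, and the EEPROM
 * checksum is recomputed once the buffered write succeeds.
 */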

static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 nvm_track_id;

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	nvm_track_id = (adapter->eeprom_verh << 16) |
			adapter->eeprom_verl;
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
		 nvm_track_id);

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = IXGBE_STATS_LEN;
	drvinfo->testinfo_len = IXGBE_TEST_LEN;
	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}

static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}

static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
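
/*
 * Worked example for the clamping above (the descriptor limits and the
 * required multiple come from ixgbe.h; typically 64..4096 in steps of 8):
 * a request such as
 *
 *	ethtool -G ethX tx 1021
 *
 * becomes clamp_t(u32, 1021, 64, 4096) = 1021, then ALIGN(1021, 8) =
 * 1024 descriptors.  If the interface is down, only the ring counts are
 * updated and the resources are allocated on the next open.
 */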

static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}

static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++) {
			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}

static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}

/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
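
/*
 * Reading an entry from the tables below, for example
 *
 *	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }
 *
 * means: for 4 copies of RDBAL spaced 0x40 apart, write each test
 * pattern masked by 0xFFFFFF80 and expect to read the same value back
 * under that mask.  WRITE_NO_TEST entries are pure setup steps (such as
 * enabling the Rx queues) and are never read back.
 */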
1182
1183/* default 82599 register test */
1184static const struct ixgbe_reg_test reg_test_82599[] = {
1185        { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1186        { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1187        { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1188        { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1189        { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1190        { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1191        { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1192        { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1193        { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1194        { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1195        { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1196        { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1197        { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1198        { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1199        { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1200        { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1201        { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1202        { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1203        { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1204        { 0, 0, 0, 0 }
1205};
1206
1207/* default 82598 register test */
1208static const struct ixgbe_reg_test reg_test_82598[] = {
1209        { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1210        { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1211        { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1212        { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1213        { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1214        { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1215        { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1216        /* Enable all four RX queues before testing. */
1217        { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1218        /* RDH is read-only for 82598, only test RDT. */
1219        { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1220        { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1221        { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1222        { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1223        { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1224        { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1225        { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1226        { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1227        { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1228        { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1229        { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1230        { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1231        { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1232        { 0, 0, 0, 0 }
1233};
1234
1235static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1236                             u32 mask, u32 write)
1237{
1238        u32 pat, val, before;
1239        static const u32 test_pattern[] = {
1240                0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1241
1242        for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1243                before = readl(adapter->hw.hw_addr + reg);
1244                writel((test_pattern[pat] & write),
1245                       (adapter->hw.hw_addr + reg));
1246                val = readl(adapter->hw.hw_addr + reg);
1247                if (val != (test_pattern[pat] & write & mask)) {
1248                        e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1249                              reg, val, (test_pattern[pat] & write & mask));
1251                        *data = reg;
1252                        writel(before, adapter->hw.hw_addr + reg);
1253                        return 1;
1254                }
1255                writel(before, adapter->hw.hw_addr + reg);
1256        }
1257        return 0;
1258}
1259
1260static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1261                              u32 mask, u32 write)
1262{
1263        u32 val, before;
1264        before = readl(adapter->hw.hw_addr + reg);
1265        writel((write & mask), (adapter->hw.hw_addr + reg));
1266        val = readl(adapter->hw.hw_addr + reg);
1267        if ((write & mask) != (val & mask)) {
1268                e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1269                      reg, (val & mask), (write & mask));
1270                *data = reg;
1271                writel(before, (adapter->hw.hw_addr + reg));
1272                return 1;
1273        }
1274        writel(before, (adapter->hw.hw_addr + reg));
1275        return 0;
1276}
1277
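/* These macros wrap reg_pattern_test()/reg_set_and_check() and return from
 * the *calling* function on the first failure; they rely on 'adapter' and
 * 'data' being in scope at the call site.
 */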
1278#define REG_PATTERN_TEST(reg, mask, write)                                    \
1279        do {                                                                  \
1280                if (reg_pattern_test(adapter, data, reg, mask, write))        \
1281                        return 1;                                             \
1282        } while (0)
1283
1284#define REG_SET_AND_CHECK(reg, mask, write)                                   \
1285        do {                                                                  \
1286                if (reg_set_and_check(adapter, data, reg, mask, write))       \
1287                        return 1;                                             \
1288        } while (0)
1289
1291static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1292{
1293        const struct ixgbe_reg_test *test;
1294        u32 value, before, after;
1295        u32 i, toggle;
1296
1297        switch (adapter->hw.mac.type) {
1298        case ixgbe_mac_82598EB:
1299                toggle = 0x7FFFF3FF;
1300                test = reg_test_82598;
1301                break;
1302        case ixgbe_mac_82599EB:
1303        case ixgbe_mac_X540:
1304                toggle = 0x7FFFF30F;
1305                test = reg_test_82599;
1306                break;
1307        default:
1308                *data = 1;
1309                return 1;
1311        }
1312
1313        /*
1314         * Because the status register is such a special case,
1315         * we handle it separately from the rest of the register
1316         * tests.  Some bits are read-only, some toggle, and some
1317         * are writeable on newer MACs.
1318         */
1319        before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
1320        value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
1321        IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
1322        after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
1323        if (value != after) {
1324                e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1325                      after, value);
1326                *data = 1;
1327                return 1;
1328        }
1329        /* restore previous status */
1330        IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
1331
1332        /*
1333         * Perform the remainder of the register test, looping through
1334         * the test table until we either fail or reach the null entry.
1335         */
1336        while (test->reg) {
1337                for (i = 0; i < test->array_len; i++) {
1338                        switch (test->test_type) {
1339                        case PATTERN_TEST:
1340                                REG_PATTERN_TEST(test->reg + (i * 0x40),
1341                                                 test->mask,
1342                                                 test->write);
1343                                break;
1344                        case SET_READ_TEST:
1345                                REG_SET_AND_CHECK(test->reg + (i * 0x40),
1346                                                  test->mask,
1347                                                  test->write);
1348                                break;
1349                        case WRITE_NO_TEST:
1350                                writel(test->write,
1351                                       (adapter->hw.hw_addr + test->reg)
1352                                       + (i * 0x40));
1353                                break;
1354                        case TABLE32_TEST:
1355                                REG_PATTERN_TEST(test->reg + (i * 4),
1356                                                 test->mask,
1357                                                 test->write);
1358                                break;
1359                        case TABLE64_TEST_LO:
1360                                REG_PATTERN_TEST(test->reg + (i * 8),
1361                                                 test->mask,
1362                                                 test->write);
1363                                break;
1364                        case TABLE64_TEST_HI:
1365                                REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1366                                                 test->mask,
1367                                                 test->write);
1368                                break;
1369                        }
1370                }
1371                test++;
1372        }
1373
1374        *data = 0;
1375        return 0;
1376}
1377
1378static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1379{
1380        struct ixgbe_hw *hw = &adapter->hw;
1381        if (hw->eeprom.ops.validate_checksum(hw, NULL))
1382                *data = 1;
1383        else
1384                *data = 0;
1385        return *data;
1386}
1387
1388static irqreturn_t ixgbe_test_intr(int irq, void *data)
1389{
1390        struct net_device *netdev = (struct net_device *) data;
1391        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1392
1393        adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1394
1395        return IRQ_HANDLED;
1396}
1397
1398static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1399{
1400        struct net_device *netdev = adapter->netdev;
1401        u32 mask, i = 0, shared_int = true;
1402        u32 irq = adapter->pdev->irq;
1403
1404        *data = 0;
1405
1406        /* Hook up test interrupt handler just for this test */
1407        if (adapter->msix_entries) {
1408                /* NOTE: we don't test MSI-X interrupts here, yet */
1409                return 0;
1410        } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1411                shared_int = false;
1412                if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1413                                netdev)) {
1414                        *data = 1;
1415                        return -1;
1416                }
1417        } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1418                                netdev->name, netdev)) {
1419                shared_int = false;
1420        } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1421                               netdev->name, netdev)) {
1422                *data = 1;
1423                return -1;
1424        }
1425        e_info(hw, "testing %s interrupt\n", shared_int ?
1426               "shared" : "unshared");
1427
1428        /* Disable all the interrupts */
1429        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1430        IXGBE_WRITE_FLUSH(&adapter->hw);
1431        usleep_range(10000, 20000);
1432
1433        /* Test each interrupt */
1434        for (; i < 10; i++) {
1435                /* Interrupt to test */
1436                mask = 1 << i;
1437
1438                if (!shared_int) {
1439                        /*
1440                         * Disable the interrupts to be reported in
1441                         * the cause register and then force the same
1442                         * interrupt and see if one gets posted.  If
1443                         * an interrupt was posted to the bus, the
1444                         * test failed.
1445                         */
1446                        adapter->test_icr = 0;
1447                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1448                                        ~mask & 0x00007FFF);
1449                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1450                                        ~mask & 0x00007FFF);
1451                        IXGBE_WRITE_FLUSH(&adapter->hw);
1452                        usleep_range(10000, 20000);
1453
1454                        if (adapter->test_icr & mask) {
1455                                *data = 3;
1456                                break;
1457                        }
1458                }
1459
1460                /*
1461                 * Enable the interrupt to be reported in the cause
1462                 * register and then force the same interrupt and see
1463                 * if one gets posted.  If an interrupt was not posted
1464                 * to the bus, the test failed.
1465                 */
1466                adapter->test_icr = 0;
1467                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1468                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1469                IXGBE_WRITE_FLUSH(&adapter->hw);
1470                usleep_range(10000, 20000);
1471
1472                if (!(adapter->test_icr & mask)) {
1473                        *data = 4;
1474                        break;
1475                }
1476
1477                if (!shared_int) {
1478                        /*
1479                         * Disable the other interrupts to be reported in
1480                         * the cause register and then force the other
1481                         * interrupts and see if any get posted.  If
1482                         * an interrupt was posted to the bus, the
1483                         * test failed.
1484                         */
1485                        adapter->test_icr = 0;
1486                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1487                                        ~mask & 0x00007FFF);
1488                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1489                                        ~mask & 0x00007FFF);
1490                        IXGBE_WRITE_FLUSH(&adapter->hw);
1491                        usleep_range(10000, 20000);
1492
1493                        if (adapter->test_icr) {
1494                                *data = 5;
1495                                break;
1496                        }
1497                }
1498        }
1499
1500        /* Disable all the interrupts */
1501        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1502        IXGBE_WRITE_FLUSH(&adapter->hw);
1503        usleep_range(10000, 20000);
1504
1505        /* Unhook test interrupt handler */
1506        free_irq(irq, netdev);
1507
1508        return *data;
1509}
1510
1511static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1512{
1513        struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1514        struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1515        struct ixgbe_hw *hw = &adapter->hw;
1516        u32 reg_ctl;
1517
1518        /* shut down the DMA engines now so they can be reinitialized later */
1519
1520        /* first Rx */
1521        reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1522        reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1523        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1524        ixgbe_disable_rx_queue(adapter, rx_ring);
1525
1526        /* now Tx */
1527        reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1528        reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1529        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1530
1531        switch (hw->mac.type) {
1532        case ixgbe_mac_82599EB:
1533        case ixgbe_mac_X540:
1534                reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1535                reg_ctl &= ~IXGBE_DMATXCTL_TE;
1536                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1537                break;
1538        default:
1539                break;
1540        }
1541
1542        ixgbe_reset(adapter);
1543
1544        ixgbe_free_tx_resources(&adapter->test_tx_ring);
1545        ixgbe_free_rx_resources(&adapter->test_rx_ring);
1546}
1547
1548static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1549{
1550        struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1551        struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1552        u32 rctl, reg_data;
1553        int ret_val;
1554        int err;
1555
1556        /* Setup Tx descriptor ring and Tx buffers */
1557        tx_ring->count = IXGBE_DEFAULT_TXD;
1558        tx_ring->queue_index = 0;
1559        tx_ring->dev = &adapter->pdev->dev;
1560        tx_ring->netdev = adapter->netdev;
1561        tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1562
1563        err = ixgbe_setup_tx_resources(tx_ring);
1564        if (err)
1565                return 1;
1566
1567        switch (adapter->hw.mac.type) {
1568        case ixgbe_mac_82599EB:
1569        case ixgbe_mac_X540:
1570                reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1571                reg_data |= IXGBE_DMATXCTL_TE;
1572                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1573                break;
1574        default:
1575                break;
1576        }
1577
1578        ixgbe_configure_tx_ring(adapter, tx_ring);
1579
1580        /* Setup Rx Descriptor ring and Rx buffers */
1581        rx_ring->count = IXGBE_DEFAULT_RXD;
1582        rx_ring->queue_index = 0;
1583        rx_ring->dev = &adapter->pdev->dev;
1584        rx_ring->netdev = adapter->netdev;
1585        rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1586
1587        err = ixgbe_setup_rx_resources(rx_ring);
1588        if (err) {
1589                ret_val = 4;
1590                goto err_nomem;
1591        }
1592
1593        rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1594        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
1595
1596        ixgbe_configure_rx_ring(adapter, rx_ring);
1597
1598        rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
1599        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1600
1601        return 0;
1602
1603err_nomem:
1604        ixgbe_free_desc_rings(adapter);
1605        return ret_val;
1606}
1607
1608static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1609{
1610        struct ixgbe_hw *hw = &adapter->hw;
1611        u32 reg_data;
1612
1614        /* Setup MAC loopback */
1615        reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1616        reg_data |= IXGBE_HLREG0_LPBK;
1617        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1618
1619        reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1620        reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1621        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1622
1623        /* X540 needs to set the MACC.FLU bit to force link up */
1624        if (adapter->hw.mac.type == ixgbe_mac_X540) {
1625                reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1626                reg_data |= IXGBE_MACC_FLU;
1627                IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1628        } else {
1629                if (hw->mac.orig_autoc) {
1630                        reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1631                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1632                } else {
1633                        return 10;
1634                }
1635        }
1636        IXGBE_WRITE_FLUSH(hw);
1637        usleep_range(10000, 20000);
1638
1639        /* Disable Atlas Tx lanes; re-enabled in reset path */
1640        if (hw->mac.type == ixgbe_mac_82598EB) {
1641                u8 atlas;
1642
1643                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1644                atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1645                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1646
1647                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1648                atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1649                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1650
1651                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1652                atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1653                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1654
1655                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1656                atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1657                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1658        }
1659
1660        return 0;
1661}
1662
1663static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1664{
1665        u32 reg_data;
1666
1667        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1668        reg_data &= ~IXGBE_HLREG0_LPBK;
1669        IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1670}
1671
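/* The loopback test frame is 0xFF-filled, then roughly the third quarter is
 * overwritten with 0xAA and single 0xBE/0xAF marker bytes are planted just
 * past the midpoint.  ixgbe_check_lbtest_frame() only samples data[3] and
 * the two marker bytes, which is enough to catch corrupted or misrouted
 * frames without scanning the whole buffer.
 */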
1672static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1673                                      unsigned int frame_size)
1674{
1675        memset(skb->data, 0xFF, frame_size);
1676        frame_size >>= 1;
1677        memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1678        memset(&skb->data[frame_size + 10], 0xBE, 1);
1679        memset(&skb->data[frame_size + 12], 0xAF, 1);
1680}
1681
1682static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1683                                     unsigned int frame_size)
1684{
1685        unsigned char *data;
1686        bool match = true;
1687
1688        frame_size >>= 1;
1689
1690        data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1691
1692        if (data[3] != 0xFF ||
1693            data[frame_size + 10] != 0xBE ||
1694            data[frame_size + 12] != 0xAF)
1695                match = false;
1696
1697        kunmap(rx_buffer->page);
1698
1699        return match;
1700}
1701
1702static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1703                                  struct ixgbe_ring *tx_ring,
1704                                  unsigned int size)
1705{
1706        union ixgbe_adv_rx_desc *rx_desc;
1707        struct ixgbe_rx_buffer *rx_buffer;
1708        struct ixgbe_tx_buffer *tx_buffer;
1709        u16 rx_ntc, tx_ntc, count = 0;
1710
1711        /* initialize next to clean and descriptor values */
1712        rx_ntc = rx_ring->next_to_clean;
1713        tx_ntc = tx_ring->next_to_clean;
1714        rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1715
1716        while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
1717                /* check Rx buffer */
1718                rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
1719
1720                /* sync Rx buffer for CPU read */
1721                dma_sync_single_for_cpu(rx_ring->dev,
1722                                        rx_buffer->dma,
1723                                        ixgbe_rx_bufsz(rx_ring),
1724                                        DMA_FROM_DEVICE);
1725
1726                /* verify contents of skb */
1727                if (ixgbe_check_lbtest_frame(rx_buffer, size))
1728                        count++;
1729
1730                /* sync Rx buffer for device write */
1731                dma_sync_single_for_device(rx_ring->dev,
1732                                           rx_buffer->dma,
1733                                           ixgbe_rx_bufsz(rx_ring),
1734                                           DMA_FROM_DEVICE);
1735
1736                /* unmap buffer on Tx side */
1737                tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
1738                ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1739
1740                /* increment Rx/Tx next to clean counters */
1741                rx_ntc++;
1742                if (rx_ntc == rx_ring->count)
1743                        rx_ntc = 0;
1744                tx_ntc++;
1745                if (tx_ntc == tx_ring->count)
1746                        tx_ntc = 0;
1747
1748                /* fetch next descriptor */
1749                rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1750        }
1751
1752        netdev_tx_reset_queue(txring_txq(tx_ring));
1753
1754        /* re-map buffers to ring, store next to clean values */
1755        ixgbe_alloc_rx_buffers(rx_ring, count);
1756        rx_ring->next_to_clean = rx_ntc;
1757        tx_ring->next_to_clean = tx_ntc;
1758
1759        return count;
1760}
1761
1762static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1763{
1764        struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1765        struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1766        int i, j, lc, good_cnt, ret_val = 0;
1767        unsigned int size = 1024;
1768        netdev_tx_t tx_ret_val;
1769        struct sk_buff *skb;
1770
1771        /* allocate test skb */
1772        skb = alloc_skb(size, GFP_KERNEL);
1773        if (!skb)
1774                return 11;
1775
1776        /* place data into test skb */
1777        ixgbe_create_lbtest_frame(skb, size);
1778        skb_put(skb, size);
1779
1780        /*
1781         * Calculate the loop count based on the largest descriptor ring.
1782         * The idea is to wrap the largest ring a number of times using 64
1783         * send/receive pairs during each loop.
1784         */
1785
1786        if (rx_ring->count <= tx_ring->count)
1787                lc = ((tx_ring->count / 64) * 2) + 1;
1788        else
1789                lc = ((rx_ring->count / 64) * 2) + 1;
1790
1791        for (j = 0; j <= lc; j++) {
1792                /* reset count of good packets */
1793                good_cnt = 0;
1794
1795                /* place 64 packets on the transmit queue */
1796                for (i = 0; i < 64; i++) {
1797                        skb_get(skb);
1798                        tx_ret_val = ixgbe_xmit_frame_ring(skb,
1799                                                           adapter,
1800                                                           tx_ring);
1801                        if (tx_ret_val == NETDEV_TX_OK)
1802                                good_cnt++;
1803                }
1804
1805                if (good_cnt != 64) {
1806                        ret_val = 12;
1807                        break;
1808                }
1809
1810                /* allow 200 milliseconds for packets to go from Tx to Rx */
1811                msleep(200);
1812
1813                good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
1814                if (good_cnt != 64) {
1815                        ret_val = 13;
1816                        break;
1817                }
1818        }
1819
1820        /* free the original skb */
1821        kfree_skb(skb);
1822
1823        return ret_val;
1824}
1825
1826static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
1827{
1828        *data = ixgbe_setup_desc_rings(adapter);
1829        if (*data)
1830                goto out;
1831        *data = ixgbe_setup_loopback_test(adapter);
1832        if (*data)
1833                goto err_loopback;
1834        *data = ixgbe_run_loopback_test(adapter);
1835        ixgbe_loopback_cleanup(adapter);
1836
1837err_loopback:
1838        ixgbe_free_desc_rings(adapter);
1839out:
1840        return *data;
1841}
1842
1843static void ixgbe_diag_test(struct net_device *netdev,
1844                            struct ethtool_test *eth_test, u64 *data)
1845{
1846        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1847        struct ixgbe_hw *hw = &adapter->hw;
1848        bool if_running = netif_running(netdev);
1849
1850        set_bit(__IXGBE_TESTING, &adapter->state);
1851        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1852                if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
1853                        int i;
1854                        for (i = 0; i < adapter->num_vfs; i++) {
1855                                if (adapter->vfinfo[i].clear_to_send) {
1856                                        netdev_warn(netdev,
1857                                                    "offline diagnostic is not supported when VFs are present\n");
1860                                        data[0] = 1;
1861                                        data[1] = 1;
1862                                        data[2] = 1;
1863                                        data[3] = 1;
1864                                        eth_test->flags |= ETH_TEST_FL_FAILED;
1865                                        clear_bit(__IXGBE_TESTING,
1866                                                  &adapter->state);
1867                                        goto skip_ol_tests;
1868                                }
1869                        }
1870                }
1871
1872                /* Offline tests */
1873                e_info(hw, "offline testing starting\n");
1874
1875                if (if_running)
1876                        /* indicate we're in test mode */
1877                        dev_close(netdev);
1878
1879                /* bringing adapter down disables SFP+ optics */
1880                if (hw->mac.ops.enable_tx_laser)
1881                        hw->mac.ops.enable_tx_laser(hw);
1882
1883                /* Link test performed before hardware reset so autoneg doesn't
1884                 * interfere with test result
1885                 */
1886                if (ixgbe_link_test(adapter, &data[4]))
1887                        eth_test->flags |= ETH_TEST_FL_FAILED;
1888
1889                ixgbe_reset(adapter);
1890                e_info(hw, "register testing starting\n");
1891                if (ixgbe_reg_test(adapter, &data[0]))
1892                        eth_test->flags |= ETH_TEST_FL_FAILED;
1893
1894                ixgbe_reset(adapter);
1895                e_info(hw, "eeprom testing starting\n");
1896                if (ixgbe_eeprom_test(adapter, &data[1]))
1897                        eth_test->flags |= ETH_TEST_FL_FAILED;
1898
1899                ixgbe_reset(adapter);
1900                e_info(hw, "interrupt testing starting\n");
1901                if (ixgbe_intr_test(adapter, &data[2]))
1902                        eth_test->flags |= ETH_TEST_FL_FAILED;
1903
1904                /* If SRIOV or VMDq is enabled then skip MAC
1905                 * loopback diagnostic.
1906                 */
1907                if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
1908                                      IXGBE_FLAG_VMDQ_ENABLED)) {
1909                        e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
1910                        data[3] = 0;
1911                        goto skip_loopback;
1912                }
1913
1914                ixgbe_reset(adapter);
1915                e_info(hw, "loopback testing starting\n");
1916                if (ixgbe_loopback_test(adapter, &data[3]))
1917                        eth_test->flags |= ETH_TEST_FL_FAILED;
1918
1919skip_loopback:
1920                ixgbe_reset(adapter);
1921
1922                /* clear testing bit and return adapter to previous state */
1923                clear_bit(__IXGBE_TESTING, &adapter->state);
1924                if (if_running)
1925                        dev_open(netdev);
1926        } else {
1927                e_info(hw, "online testing starting\n");
1928
1929                /* if adapter is down, SFP+ optics will be disabled */
1930                if (!if_running && hw->mac.ops.enable_tx_laser)
1931                        hw->mac.ops.enable_tx_laser(hw);
1932
1933                /* Online tests */
1934                if (ixgbe_link_test(adapter, &data[4]))
1935                        eth_test->flags |= ETH_TEST_FL_FAILED;
1936
1937                /* Offline tests aren't run; pass by default */
1938                data[0] = 0;
1939                data[1] = 0;
1940                data[2] = 0;
1941                data[3] = 0;
1942
1943                clear_bit(__IXGBE_TESTING, &adapter->state);
1944        }
1945
1946        /* if adapter was down, ensure SFP+ optics are disabled again */
1947        if (!if_running && hw->mac.ops.disable_tx_laser)
1948                hw->mac.ops.disable_tx_laser(hw);
1949skip_ol_tests:
1950        msleep_interruptible(4 * 1000);
1951}
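
/* For reference, these diagnostics are driven from userspace with, e.g.:
 *
 *     ethtool -t eth0 offline   # all five tests; the interface is closed
 *     ethtool -t eth0 online    # link test only, data[0..3] pass by default
 *
 * The five u64 results map to data[0] register, data[1] eeprom,
 * data[2] interrupt, data[3] loopback and data[4] link; non-zero means the
 * corresponding test failed.
 */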
1952
1953static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1954                               struct ethtool_wolinfo *wol)
1955{
1956        struct ixgbe_hw *hw = &adapter->hw;
1957        int retval = 0;
1958
1959        /* WOL not supported for all devices */
1960        if (!ixgbe_wol_supported(adapter, hw->device_id,
1961                                 hw->subsystem_device_id)) {
1962                retval = 1;
1963                wol->supported = 0;
1964        }
1965
1966        return retval;
1967}
1968
1969static void ixgbe_get_wol(struct net_device *netdev,
1970                          struct ethtool_wolinfo *wol)
1971{
1972        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1973
1974        wol->supported = WAKE_UCAST | WAKE_MCAST |
1975                         WAKE_BCAST | WAKE_MAGIC;
1976        wol->wolopts = 0;
1977
1978        if (ixgbe_wol_exclusion(adapter, wol) ||
1979            !device_can_wakeup(&adapter->pdev->dev))
1980                return;
1981
1982        if (adapter->wol & IXGBE_WUFC_EX)
1983                wol->wolopts |= WAKE_UCAST;
1984        if (adapter->wol & IXGBE_WUFC_MC)
1985                wol->wolopts |= WAKE_MCAST;
1986        if (adapter->wol & IXGBE_WUFC_BC)
1987                wol->wolopts |= WAKE_BCAST;
1988        if (adapter->wol & IXGBE_WUFC_MAG)
1989                wol->wolopts |= WAKE_MAGIC;
1990}
1991
1992static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1993{
1994        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1995
1996        if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1997                return -EOPNOTSUPP;
1998
1999        if (ixgbe_wol_exclusion(adapter, wol))
2000                return wol->wolopts ? -EOPNOTSUPP : 0;
2001
2002        adapter->wol = 0;
2003
2004        if (wol->wolopts & WAKE_UCAST)
2005                adapter->wol |= IXGBE_WUFC_EX;
2006        if (wol->wolopts & WAKE_MCAST)
2007                adapter->wol |= IXGBE_WUFC_MC;
2008        if (wol->wolopts & WAKE_BCAST)
2009                adapter->wol |= IXGBE_WUFC_BC;
2010        if (wol->wolopts & WAKE_MAGIC)
2011                adapter->wol |= IXGBE_WUFC_MAG;
2012
2013        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2014
2015        return 0;
2016}
2017
2018static int ixgbe_nway_reset(struct net_device *netdev)
2019{
2020        struct ixgbe_adapter *adapter = netdev_priv(netdev);
2021
2022        if (netif_running(netdev))
2023                ixgbe_reinit_locked(adapter);
2024
2025        return 0;
2026}
2027
2028static int ixgbe_set_phys_id(struct net_device *netdev,
2029                             enum ethtool_phys_id_state state)
2030{
2031        struct ixgbe_adapter *adapter = netdev_priv(netdev);
2032        struct ixgbe_hw *hw = &adapter->hw;
2033
2034        switch (state) {
2035        case ETHTOOL_ID_ACTIVE:
2036                adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2037                return 2;
2038
2039        case ETHTOOL_ID_ON:
2040                hw->mac.ops.led_on(hw, IXGBE_LED_ON);
2041                break;
2042
2043        case ETHTOOL_ID_OFF:
2044                hw->mac.ops.led_off(hw, IXGBE_LED_ON);
2045                break;
2046
2047        case ETHTOOL_ID_INACTIVE:
2048                /* Restore LED settings */
2049                IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, adapter->led_reg);
2050                break;
2051        }
2052
2053        return 0;
2054}
2055
2056static int ixgbe_get_coalesce(struct net_device *netdev,
2057                              struct ethtool_coalesce *ec)
2058{
2059        struct ixgbe_adapter *adapter = netdev_priv(netdev);
2060
2061        /* only valid if in constant ITR mode */
2062        if (adapter->rx_itr_setting <= 1)
2063                ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2064        else
2065                ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2066
2067        /* if in mixed tx/rx queues per vector mode, report only rx settings */
2068        if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2069                return 0;
2070
2071        /* only valid if in constant ITR mode */
2072        if (adapter->tx_itr_setting <= 1)
2073                ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2074        else
2075                ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2076
2077        return 0;
2078}
2079
2080/*
2081 * This function must be called before setting the new value of
2082 * rx_itr_setting.
2083 */
2084static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2085{
2086        struct net_device *netdev = adapter->netdev;
2087
2088        /* nothing to do if LRO or RSC are not enabled */
2089        if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2090            !(netdev->features & NETIF_F_LRO))
2091                return false;
2092
2093        /* check the feature flag value and enable RSC if necessary */
2094        if (adapter->rx_itr_setting == 1 ||
2095            adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2096                if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2097                        adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2098                        e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2100                        return true;
2101                }
2102        /* if interrupt rate is too high then disable RSC */
2103        } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2104                adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2105                e_info(probe, "rx-usecs set too low, disabling RSC\n");
2106                return true;
2107        }
2108        return false;
2109}
2110
2111static int ixgbe_set_coalesce(struct net_device *netdev,
2112                              struct ethtool_coalesce *ec)
2113{
2114        struct ixgbe_adapter *adapter = netdev_priv(netdev);
2115        struct ixgbe_q_vector *q_vector;
2116        int i;
2117        u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2118        bool need_reset = false;
2119
2120        if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
2121                /* reject Tx specific changes in case of mixed RxTx vectors */
2122                if (ec->tx_coalesce_usecs)
2123                        return -EINVAL;
2124                tx_itr_prev = adapter->rx_itr_setting;
2125        } else {
2126                tx_itr_prev = adapter->tx_itr_setting;
2127        }
2128
2129        if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2130            (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2131                return -EINVAL;
2132
2133        if (ec->rx_coalesce_usecs > 1)
2134                adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2135        else
2136                adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2137
2138        if (adapter->rx_itr_setting == 1)
2139                rx_itr_param = IXGBE_20K_ITR;
2140        else
2141                rx_itr_param = adapter->rx_itr_setting;
2142
2143        if (ec->tx_coalesce_usecs > 1)
2144                adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2145        else
2146                adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2147
2148        if (adapter->tx_itr_setting == 1)
2149                tx_itr_param = IXGBE_10K_ITR;
2150        else
2151                tx_itr_param = adapter->tx_itr_setting;
2152
2153        /* mixed Rx/Tx */
2154        if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2155                adapter->tx_itr_setting = adapter->rx_itr_setting;
2156
2157#if IS_ENABLED(CONFIG_BQL)
2158        /* detect ITR changes that require update of TXDCTL.WTHRESH */
2159        if ((adapter->tx_itr_setting > 1) &&
2160            (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
2161                if ((tx_itr_prev == 1) ||
2162                    (tx_itr_prev > IXGBE_100K_ITR))
2163                        need_reset = true;
2164        } else {
2165                if ((tx_itr_prev > 1) &&
2166                    (tx_itr_prev < IXGBE_100K_ITR))
2167                        need_reset = true;
2168        }
2169#endif
2170        /* check the old value and enable RSC if necessary */
2171        need_reset |= ixgbe_update_rsc(adapter);
2172
2173        for (i = 0; i < adapter->num_q_vectors; i++) {
2174                q_vector = adapter->q_vector[i];
2175                if (q_vector->tx.count && !q_vector->rx.count)
2176                        /* tx only */
2177                        q_vector->itr = tx_itr_param;
2178                else
2179                        /* rx only or mixed */
2180                        q_vector->itr = rx_itr_param;
2181                ixgbe_write_eitr(q_vector);
2182        }
2183
2184        /*
2185         * do reset here at the end to make sure EITR==0 case is handled
2186         * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2187         * also locks in RSC enable/disable which requires reset
2188         */
2189        if (need_reset)
2190                ixgbe_do_reset(netdev);
2191
2192        return 0;
2193}
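
/* Units round trip (a minimal illustration; the device name is an example):
 *
 *     ethtool -C eth0 rx-usecs 100    # stored as rx_itr_setting = 100 << 2
 *     ethtool -c eth0                 # reports rx-usecs: 100 (setting >> 2)
 *
 * Values of 0 and 1 pass through untranslated; a setting of 1 keeps the
 * driver's default rates (IXGBE_20K_ITR for Rx, IXGBE_10K_ITR for Tx).
 */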
2194
2195static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2196                                        struct ethtool_rxnfc *cmd)
2197{
2198        union ixgbe_atr_input *mask = &adapter->fdir_mask;
2199        struct ethtool_rx_flow_spec *fsp =
2200                (struct ethtool_rx_flow_spec *)&cmd->fs;
2201        struct hlist_node *node2;
2202        struct ixgbe_fdir_filter *rule = NULL;
2203
2204        /* report total rule count */
2205        cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2206
2207        hlist_for_each_entry_safe(rule, node2,
2208                                  &adapter->fdir_filter_list, fdir_node) {
2209                if (fsp->location <= rule->sw_idx)
2210                        break;
2211        }
2212
2213        if (!rule || fsp->location != rule->sw_idx)
2214                return -EINVAL;
2215
2216        /* fill out the flow spec entry */
2217
2218        /* set flow type field */
2219        switch (rule->filter.formatted.flow_type) {
2220        case IXGBE_ATR_FLOW_TYPE_TCPV4:
2221                fsp->flow_type = TCP_V4_FLOW;
2222                break;
2223        case IXGBE_ATR_FLOW_TYPE_UDPV4:
2224                fsp->flow_type = UDP_V4_FLOW;
2225                break;
2226        case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2227                fsp->flow_type = SCTP_V4_FLOW;
2228                break;
2229        case IXGBE_ATR_FLOW_TYPE_IPV4:
2230                fsp->flow_type = IP_USER_FLOW;
2231                fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2232                fsp->h_u.usr_ip4_spec.proto = 0;
2233                fsp->m_u.usr_ip4_spec.proto = 0;
2234                break;
2235        default:
2236                return -EINVAL;
2237        }
2238
2239        fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2240        fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2241        fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2242        fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2243        fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2244        fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2245        fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2246        fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2247        fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2248        fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2249        fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2250        fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2251        fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2252        fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2253        fsp->flow_type |= FLOW_EXT;
2254
2255        /* record action */
2256        if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2257                fsp->ring_cookie = RX_CLS_FLOW_DISC;
2258        else
2259                fsp->ring_cookie = rule->action;
2260
2261        return 0;
2262}
2263
2264static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2265                                      struct ethtool_rxnfc *cmd,
2266                                      u32 *rule_locs)
2267{
2268        struct hlist_node *node2;
2269        struct ixgbe_fdir_filter *rule;
2270        int cnt = 0;
2271
2272        /* report total rule count */
2273        cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2274
2275        hlist_for_each_entry_safe(rule, node2,
2276                                  &adapter->fdir_filter_list, fdir_node) {
2277                if (cnt == cmd->rule_cnt)
2278                        return -EMSGSIZE;
2279                rule_locs[cnt] = rule->sw_idx;
2280                cnt++;
2281        }
2282
2283        cmd->rule_cnt = cnt;
2284
2285        return 0;
2286}
2287
2288static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2289                                   struct ethtool_rxnfc *cmd)
2290{
2291        cmd->data = 0;
2292
2293        /* Report default options for RSS on ixgbe */
2294        switch (cmd->flow_type) {
2295        case TCP_V4_FLOW:
2296                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
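                /* fall through */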
2297        case UDP_V4_FLOW:
2298                if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2299                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
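                /* fall through */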
2300        case SCTP_V4_FLOW:
2301        case AH_ESP_V4_FLOW:
2302        case AH_V4_FLOW:
2303        case ESP_V4_FLOW:
2304        case IPV4_FLOW:
2305                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2306                break;
2307        case TCP_V6_FLOW:
2308                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
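                /* fall through */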
2309        case UDP_V6_FLOW:
2310                if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2311                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
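                /* fall through */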
2312        case SCTP_V6_FLOW:
2313        case AH_ESP_V6_FLOW:
2314        case AH_V6_FLOW:
2315        case ESP_V6_FLOW:
2316        case IPV6_FLOW:
2317                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2318                break;
2319        default:
2320                return -EINVAL;
2321        }
2322
2323        return 0;
2324}
2325
2326static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2327                           u32 *rule_locs)
2328{
2329        struct ixgbe_adapter *adapter = netdev_priv(dev);
2330        int ret = -EOPNOTSUPP;
2331
2332        switch (cmd->cmd) {
2333        case ETHTOOL_GRXRINGS:
2334                cmd->data = adapter->num_rx_queues;
2335                ret = 0;
2336                break;
2337        case ETHTOOL_GRXCLSRLCNT:
2338                cmd->rule_cnt = adapter->fdir_filter_count;
2339                ret = 0;
2340                break;
2341        case ETHTOOL_GRXCLSRULE:
2342                ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2343                break;
2344        case ETHTOOL_GRXCLSRLALL:
2345                ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2346                break;
2347        case ETHTOOL_GRXFH:
2348                ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2349                break;
2350        default:
2351                break;
2352        }
2353
2354        return ret;
2355}
2356
2357static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2358                                           struct ixgbe_fdir_filter *input,
2359                                           u16 sw_idx)
2360{
2361        struct ixgbe_hw *hw = &adapter->hw;
2362        struct hlist_node *node2;
2363        struct ixgbe_fdir_filter *rule, *parent;
2364        int err = -EINVAL;
2365
2366        parent = NULL;
2367        rule = NULL;
2368
2369        hlist_for_each_entry_safe(rule, node2,
2370                                  &adapter->fdir_filter_list, fdir_node) {
2371                /* hash found, or no matching entry */
2372                if (rule->sw_idx >= sw_idx)
2373                        break;
2374                parent = rule;
2375        }
2376
2377        /* if there is an old rule occupying our place remove it */
2378        if (rule && (rule->sw_idx == sw_idx)) {
2379                if (!input || (rule->filter.formatted.bkt_hash !=
2380                               input->filter.formatted.bkt_hash)) {
2381                        err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2382                                                                &rule->filter,
2383                                                                sw_idx);
2384                }
2385
2386                hlist_del(&rule->fdir_node);
2387                kfree(rule);
2388                adapter->fdir_filter_count--;
2389        }
2390
2391        /*
2392         * If no input was given this was a delete request; err is 0 if a
2393         * rule was found and removed from the list, else -EINVAL.
2394         */
2395        if (!input)
2396                return err;
2397
2398        /* initialize node and set software index */
2399        INIT_HLIST_NODE(&input->fdir_node);
2400
2401        /* add filter to the list */
2402        if (parent)
2403                hlist_add_after(&parent->fdir_node, &input->fdir_node);
2404        else
2405                hlist_add_head(&input->fdir_node,
2406                               &adapter->fdir_filter_list);
2407
2408        /* update counts */
2409        adapter->fdir_filter_count++;
2410
2411        return 0;
2412}
2413
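/* Note: unlike most kernel helpers this returns 1 on success and 0 for an
 * unrecognized flow type, so callers test it with '!'.
 */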
2414static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2415                                       u8 *flow_type)
2416{
2417        switch (fsp->flow_type & ~FLOW_EXT) {
2418        case TCP_V4_FLOW:
2419                *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2420                break;
2421        case UDP_V4_FLOW:
2422                *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2423                break;
2424        case SCTP_V4_FLOW:
2425                *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2426                break;
2427        case IP_USER_FLOW:
2428                switch (fsp->h_u.usr_ip4_spec.proto) {
2429                case IPPROTO_TCP:
2430                        *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2431                        break;
2432                case IPPROTO_UDP:
2433                        *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2434                        break;
2435                case IPPROTO_SCTP:
2436                        *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2437                        break;
2438                case 0:
2439                        if (!fsp->m_u.usr_ip4_spec.proto) {
2440                                *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2441                                break;
2442                        }
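                        /* fall through */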
2443                default:
2444                        return 0;
2445                }
2446                break;
2447        default:
2448                return 0;
2449        }
2450
2451        return 1;
2452}
2453
2454static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2455                                        struct ethtool_rxnfc *cmd)
2456{
2457        struct ethtool_rx_flow_spec *fsp =
2458                (struct ethtool_rx_flow_spec *)&cmd->fs;
2459        struct ixgbe_hw *hw = &adapter->hw;
2460        struct ixgbe_fdir_filter *input;
2461        union ixgbe_atr_input mask;
2462        int err;
2463
2464        if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2465                return -EOPNOTSUPP;
2466
2467        /*
2468         * Don't allow programming if the action is a queue greater than
2469         * the number of online Rx queues.
2470         */
2471        if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
2472            (fsp->ring_cookie >= adapter->num_rx_queues))
2473                return -EINVAL;
2474
2475        /* Don't allow indexes to exist outside of available space */
2476        if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2477                e_err(drv, "Location out of range\n");
2478                return -EINVAL;
2479        }
2480
2481        input = kzalloc(sizeof(*input), GFP_ATOMIC);
2482        if (!input)
2483                return -ENOMEM;
2484
2485        memset(&mask, 0, sizeof(union ixgbe_atr_input));
2486
2487        /* set SW index */
2488        input->sw_idx = fsp->location;
2489
2490        /* record flow type */
2491        if (!ixgbe_flowspec_to_flow_type(fsp,
2492                                         &input->filter.formatted.flow_type)) {
2493                e_err(drv, "Unrecognized flow type\n");
2494                goto err_out;
2495        }
2496
2497        mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2498                                   IXGBE_ATR_L4TYPE_MASK;
2499
2500        if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2501                mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2502
2503        /* Copy input into formatted structures */
2504        input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2505        mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2506        input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2507        mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2508        input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2509        mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2510        input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2511        mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2512
2513        if (fsp->flow_type & FLOW_EXT) {
2514                input->filter.formatted.vm_pool =
2515                                (unsigned char)ntohl(fsp->h_ext.data[1]);
2516                mask.formatted.vm_pool =
2517                                (unsigned char)ntohl(fsp->m_ext.data[1]);
2518                input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2519                mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2520                input->filter.formatted.flex_bytes =
2521                                                fsp->h_ext.vlan_etype;
2522                mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2523        }
2524
2525        /* determine if we need to drop or route the packet */
2526        if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2527                input->action = IXGBE_FDIR_DROP_QUEUE;
2528        else
2529                input->action = fsp->ring_cookie;
2530
2531        spin_lock(&adapter->fdir_perfect_lock);
2532
2533        if (hlist_empty(&adapter->fdir_filter_list)) {
2534                /* save mask and program input mask into HW */
2535                memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2536                err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2537                if (err) {
2538                        e_err(drv, "Error writing mask\n");
2539                        goto err_out_w_lock;
2540                }
2541        } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2542                e_err(drv, "Only one mask supported per port\n");
2543                goto err_out_w_lock;
2544        }
2545
2546        /* apply mask and compute/store hash */
2547        ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2548
2549        /* program filters to filter memory */
2550        err = ixgbe_fdir_write_perfect_filter_82599(hw,
2551                                &input->filter, input->sw_idx,
2552                                (input->action == IXGBE_FDIR_DROP_QUEUE) ?
2553                                IXGBE_FDIR_DROP_QUEUE :
2554                                adapter->rx_ring[input->action]->reg_idx);
2555        if (err)
2556                goto err_out_w_lock;
2557
2558        ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2559
2560        spin_unlock(&adapter->fdir_perfect_lock);
2561
2562        return err;
2563err_out_w_lock:
2564        spin_unlock(&adapter->fdir_perfect_lock);
2565err_out:
2566        kfree(input);
2567        return -EINVAL;
2568}
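
/* For illustration, perfect Flow Director rules are added and removed via
 * ethtool's n-tuple interface (device, addresses and indices are examples):
 *
 *     ethtool -N eth0 flow-type tcp4 dst-ip 192.168.0.1 dst-port 80 \
 *             action 6 loc 10    # steer matches to Rx queue 6 at sw_idx 10
 *     ethtool -N eth0 delete 10  # remove the rule at location 10
 *
 * An action of -1 (RX_CLS_FLOW_DISC) drops matching packets instead.
 */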
2569
2570static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2571                                        struct ethtool_rxnfc *cmd)
2572{
2573        struct ethtool_rx_flow_spec *fsp =
2574                (struct ethtool_rx_flow_spec *)&cmd->fs;
2575        int err;
2576
2577        spin_lock(&adapter->fdir_perfect_lock);
2578        err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2579        spin_unlock(&adapter->fdir_perfect_lock);
2580
2581        return err;
2582}
2583
2584#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2585                       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2586static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2587                                  struct ethtool_rxnfc *nfc)
2588{
2589        u32 flags2 = adapter->flags2;
2590
2591        /*
2592         * RSS does not support anything other than hashing
2593         * to queues on src and dst IPs and ports
2594         */
2595        if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2596                          RXH_L4_B_0_1 | RXH_L4_B_2_3))
2597                return -EINVAL;
2598
2599        switch (nfc->flow_type) {
2600        case TCP_V4_FLOW:
2601        case TCP_V6_FLOW:
2602                if (!(nfc->data & RXH_IP_SRC) ||
2603                    !(nfc->data & RXH_IP_DST) ||
2604                    !(nfc->data & RXH_L4_B_0_1) ||
2605                    !(nfc->data & RXH_L4_B_2_3))
2606                        return -EINVAL;
2607                break;
2608        case UDP_V4_FLOW:
2609                if (!(nfc->data & RXH_IP_SRC) ||
2610                    !(nfc->data & RXH_IP_DST))
2611                        return -EINVAL;
2612                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2613                case 0:
2614                        flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2615                        break;
2616                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2617                        flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2618                        break;
2619                default:
2620                        return -EINVAL;
2621                }
2622                break;
2623        case UDP_V6_FLOW:
2624                if (!(nfc->data & RXH_IP_SRC) ||
2625                    !(nfc->data & RXH_IP_DST))
2626                        return -EINVAL;
2627                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2628                case 0:
2629                        flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2630                        break;
2631                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2632                        flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2633                        break;
2634                default:
2635                        return -EINVAL;
2636                }
2637                break;
2638        case AH_ESP_V4_FLOW:
2639        case AH_V4_FLOW:
2640        case ESP_V4_FLOW:
2641        case SCTP_V4_FLOW:
2642        case AH_ESP_V6_FLOW:
2643        case AH_V6_FLOW:
2644        case ESP_V6_FLOW:
2645        case SCTP_V6_FLOW:
2646                if (!(nfc->data & RXH_IP_SRC) ||
2647                    !(nfc->data & RXH_IP_DST) ||
2648                    (nfc->data & RXH_L4_B_0_1) ||
2649                    (nfc->data & RXH_L4_B_2_3))
2650                        return -EINVAL;
2651                break;
2652        default:
2653                return -EINVAL;
2654        }
2655
2656        /* if we changed something we need to update flags */
2657        if (flags2 != adapter->flags2) {
2658                struct ixgbe_hw *hw = &adapter->hw;
2659                u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2660
2661                if ((flags2 & UDP_RSS_FLAGS) &&
2662                    !(adapter->flags2 & UDP_RSS_FLAGS))
2663                        e_warn(drv,
2664                               "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
2665
2666                adapter->flags2 = flags2;
2667
2668                /* Perform hash on these packet types */
2669                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2670                      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2671                      | IXGBE_MRQC_RSS_FIELD_IPV6
2672                      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2673
2674                mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2675                          IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
2676
2677                if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2678                        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2679
2680                if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2681                        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2682
2683                IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2684        }
2685
2686        return 0;
2687}
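/* For illustration only; a minimal userspace sketch (not part of this
 * driver, interface name assumed) of the ioctl path that reaches the
 * function above, enabling 4-tuple hashing for UDP over IPv4:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	int set_udp4_rss(const char *ifname)
 *	{
 *		struct ethtool_rxnfc nfc = {
 *			.cmd = ETHTOOL_SRXFH,
 *			.flow_type = UDP_V4_FLOW,
 *			.data = RXH_IP_SRC | RXH_IP_DST |
 *				RXH_L4_B_0_1 | RXH_L4_B_2_3,
 *		};
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&nfc;
 *		return ioctl(fd, SIOCETHTOOL, &ifr);
 *	}
 *
 * The equivalent command is "ethtool -N eth0 rx-flow-hash udp4 sdfn".
 */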
2688
2689static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2690{
2691        struct ixgbe_adapter *adapter = netdev_priv(dev);
2692        int ret = -EOPNOTSUPP;
2693
2694        switch (cmd->cmd) {
2695        case ETHTOOL_SRXCLSRLINS:
2696                ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
2697                break;
2698        case ETHTOOL_SRXCLSRLDEL:
2699                ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
2700                break;
2701        case ETHTOOL_SRXFH:
2702                ret = ixgbe_set_rss_hash_opt(adapter, cmd);
2703                break;
2704        default:
2705                break;
2706        }
2707
2708        return ret;
2709}
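/* Commands other than the three handled above fall through with
 * -EOPNOTSUPP, which the ethtool core reports to userspace as
 * "Operation not supported". */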
2710
2711static int ixgbe_get_ts_info(struct net_device *dev,
2712                             struct ethtool_ts_info *info)
2713{
2714        struct ixgbe_adapter *adapter = netdev_priv(dev);
2715
2716        switch (adapter->hw.mac.type) {
2717        case ixgbe_mac_X540:
2718        case ixgbe_mac_82599EB:
2719                info->so_timestamping =
2720                        SOF_TIMESTAMPING_TX_SOFTWARE |
2721                        SOF_TIMESTAMPING_RX_SOFTWARE |
2722                        SOF_TIMESTAMPING_SOFTWARE |
2723                        SOF_TIMESTAMPING_TX_HARDWARE |
2724                        SOF_TIMESTAMPING_RX_HARDWARE |
2725                        SOF_TIMESTAMPING_RAW_HARDWARE;
2726
2727                if (adapter->ptp_clock)
2728                        info->phc_index = ptp_clock_index(adapter->ptp_clock);
2729                else
2730                        info->phc_index = -1;
2731
2732                info->tx_types =
2733                        (1 << HWTSTAMP_TX_OFF) |
2734                        (1 << HWTSTAMP_TX_ON);
2735
2736                info->rx_filters =
2737                        (1 << HWTSTAMP_FILTER_NONE) |
2738                        (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2739                        (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2740                        (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2741                        (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
2742                        (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
2743                        (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
2744                        (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
2745                        (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
2746                        (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
2747                        (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
2748                        (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2749                break;
2750        default:
2751                return ethtool_op_get_ts_info(dev, info);
2753        }
2754        return 0;
2755}
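/* The capabilities advertised here are what "ethtool -T eth0" prints
 * (eth0 being a placeholder); the phc_index identifies the PTP clock
 * that tools such as linuxptp's ptp4l synchronize. */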
2756
2757static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
2758{
2759        unsigned int max_combined;
2760        u8 tcs = netdev_get_num_tc(adapter->netdev);
2761
2762        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2763                /* We only support one q_vector without MSI-X */
2764                max_combined = 1;
2765        } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2766                /* SR-IOV currently only allows one queue on the PF */
2767                max_combined = 1;
2768        } else if (tcs > 1) {
2769                /* For DCB report channels per traffic class */
2770                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2771                        /* 8 TC w/ 4 queues per TC */
2772                        max_combined = 4;
2773                } else if (tcs > 4) {
2774                        /* 8 TC w/ 8 queues per TC */
2775                        max_combined = 8;
2776                } else {
2777                        /* 4 TC w/ 16 queues per TC */
2778                        max_combined = 16;
2779                }
2780        } else if (adapter->atr_sample_rate) {
2781                /* support up to 64 queues with ATR */
2782                max_combined = IXGBE_MAX_FDIR_INDICES;
2783        } else {
2784                /* support up to 16 queues with RSS */
2785                max_combined = IXGBE_MAX_RSS_INDICES;
2786        }
2787
2788        return max_combined;
2789}
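/* Worked example: an 82599 with MSI-X enabled, SR-IOV off, a single
 * traffic class and ATR sampling active reports IXGBE_MAX_FDIR_INDICES
 * (64 in this driver) combined channels; reconfigured for 8 traffic
 * classes, the same port reports 8 channels per TC instead. */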
2790
2791static void ixgbe_get_channels(struct net_device *dev,
2792                               struct ethtool_channels *ch)
2793{
2794        struct ixgbe_adapter *adapter = netdev_priv(dev);
2795
2796        /* report maximum channels */
2797        ch->max_combined = ixgbe_max_channels(adapter);
2798
2799        /* report info for other vector */
2800        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2801                ch->max_other = NON_Q_VECTORS;
2802                ch->other_count = NON_Q_VECTORS;
2803        }
2804
2805        /* record RSS queues */
2806        ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
2807
2808        /* nothing else to report if RSS is disabled */
2809        if (ch->combined_count == 1)
2810                return;
2811
2812        /* we do not support ATR queueing if SR-IOV is enabled */
2813        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
2814                return;
2815
2816        /* same thing goes for being DCB enabled */
2817        if (netdev_get_num_tc(dev) > 1)
2818                return;
2819
2820        /* if ATR is disabled we can exit */
2821        if (!adapter->atr_sample_rate)
2822                return;
2823
2824        /* report flow director queues as maximum channels */
2825        ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
2826}
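/* "ethtool -l eth0" (placeholder name) displays the maximum and current
 * channel counts filled in by this callback. */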
2827
2828static int ixgbe_set_channels(struct net_device *dev,
2829                              struct ethtool_channels *ch)
2830{
2831        struct ixgbe_adapter *adapter = netdev_priv(dev);
2832        unsigned int count = ch->combined_count;
2833
2834        /* verify they are not requesting separate vectors */
2835        if (!count || ch->rx_count || ch->tx_count)
2836                return -EINVAL;
2837
2838        /* verify other_count has not changed */
2839        if (ch->other_count != NON_Q_VECTORS)
2840                return -EINVAL;
2841
2842        /* verify the number of channels does not exceed hardware limits */
2843        if (count > ixgbe_max_channels(adapter))
2844                return -EINVAL;
2845
2846        /* update feature limits from largest to smallest supported values */
2847        adapter->ring_feature[RING_F_FDIR].limit = count;
2848
2849        /* cap RSS limit at 16 */
2850        if (count > IXGBE_MAX_RSS_INDICES)
2851                count = IXGBE_MAX_RSS_INDICES;
2852        adapter->ring_feature[RING_F_RSS].limit = count;
2853
2854#ifdef IXGBE_FCOE
2855        /* cap FCoE limit at 8 */
2856        if (count > IXGBE_FCRETA_SIZE)
2857                count = IXGBE_FCRETA_SIZE;
2858        adapter->ring_feature[RING_F_FCOE].limit = count;
2859
2860#endif
2861        /* use setup TC to update any traffic class queue mapping */
2862        return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
2863}
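/* Example request (placeholder name): "ethtool -L eth0 combined 16".
 * Asking for separate vectors instead, e.g. "ethtool -L eth0 rx 4",
 * is rejected with -EINVAL by the first check above. */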
2864
2865static int ixgbe_get_module_info(struct net_device *dev,
2866                                       struct ethtool_modinfo *modinfo)
2867{
2868        struct ixgbe_adapter *adapter = netdev_priv(dev);
2869        struct ixgbe_hw *hw = &adapter->hw;
2870        u32 status;
2871        u8 sff8472_rev, addr_mode;
2872        int ret_val = 0;
2873        bool page_swap = false;
2874
2875        /* avoid concurrent i2c reads */
2876        while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
2877                msleep(100);
2878
2879        /* used by the service task */
2880        set_bit(__IXGBE_READ_I2C, &adapter->state);
2881
2882        /* Check whether we support SFF-8472 or not */
2883        status = hw->phy.ops.read_i2c_eeprom(hw,
2884                                             IXGBE_SFF_SFF_8472_COMP,
2885                                             &sff8472_rev);
2886        if (status != 0) {
2887                ret_val = -EIO;
2888                goto err_out;
2889        }
2890
2891        /* check whether the module requires an address-mode change (page swap) */
2892        status = hw->phy.ops.read_i2c_eeprom(hw,
2893                                             IXGBE_SFF_SFF_8472_SWAP,
2894                                             &addr_mode);
2895        if (status != 0) {
2896                ret_val = -EIO;
2897                goto err_out;
2898        }
2899
2900        if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
2901                e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
2902                page_swap = true;
2903        }
2904
2905        if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
2906                /* We have an SFP, but it does not support SFF-8472 */
2907                modinfo->type = ETH_MODULE_SFF_8079;
2908                modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
2909        } else {
2910                /* We have an SFP which supports a revision of SFF-8472. */
2911                modinfo->type = ETH_MODULE_SFF_8472;
2912                modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2913        }
2914
2915err_out:
2916        clear_bit(__IXGBE_READ_I2C, &adapter->state);
2917        return ret_val;
2918}
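/* This callback only sizes and types the dump for "ethtool -m eth0";
 * the bytes themselves are fetched by ixgbe_get_module_eeprom() below. */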
2919
2920static int ixgbe_get_module_eeprom(struct net_device *dev,
2921                                         struct ethtool_eeprom *ee,
2922                                         u8 *data)
2923{
2924        struct ixgbe_adapter *adapter = netdev_priv(dev);
2925        struct ixgbe_hw *hw = &adapter->hw;
2926        u32 status;
2927        u8 databyte = 0xFF;
2928        int i = 0;
2929        int ret_val = 0;
2930
2931        /* ixgbe_get_module_info is called before this function in all
2932         * cases, so we do not need to repeat the checks done there
2933         * and can trust ee->len to be a known value.
2934         */
2935
2936        while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
2937                msleep(100);
2938        set_bit(__IXGBE_READ_I2C, &adapter->state);
2939
2940        /* Read the first block, SFF-8079 */
2941        for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
2942                status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
2943                if (status != 0) {
2944                        /* Error occurred while reading module */
2945                        ret_val = -EIO;
2946                        goto err_out;
2947                }
2948                data[i] = databyte;
2949        }
2950
2951        /* If the second block is requested, check if SFF-8472 is supported. */
2952        if (ee->len == ETH_MODULE_SFF_8472_LEN) {
2953                if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP) {
2954                        /* must not return with __IXGBE_READ_I2C still set */
                            ret_val = -EOPNOTSUPP;
                            goto err_out;
                    }
2955
2956                /* Read the second block, SFF-8472 */
2957                for (i = ETH_MODULE_SFF_8079_LEN;
2958                     i < ETH_MODULE_SFF_8472_LEN; i++) {
2959                        status = hw->phy.ops.read_i2c_sff8472(hw,
2960                                i - ETH_MODULE_SFF_8079_LEN, &databyte);
2961                        if (status != 0) {
2962                                /* Error occurred while reading module */
2963                                ret_val = -EIO;
2964                                goto err_out;
2965                        }
2966                        data[i] = databyte;
2967                }
2968        }
2969
2970err_out:
2971        clear_bit(__IXGBE_READ_I2C, &adapter->state);
2972
2973        return ret_val;
2974}
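/* Userspace examples (placeholder name): "ethtool -m eth0" decodes the
 * SFF-8079 block read above, and "ethtool -m eth0 hex on offset 0x100
 * length 16" can peek at the start of the SFF-8472 diagnostic page. */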
2975
2976static const struct ethtool_ops ixgbe_ethtool_ops = {
2977        .get_settings           = ixgbe_get_settings,
2978        .set_settings           = ixgbe_set_settings,
2979        .get_drvinfo            = ixgbe_get_drvinfo,
2980        .get_regs_len           = ixgbe_get_regs_len,
2981        .get_regs               = ixgbe_get_regs,
2982        .get_wol                = ixgbe_get_wol,
2983        .set_wol                = ixgbe_set_wol,
2984        .nway_reset             = ixgbe_nway_reset,
2985        .get_link               = ethtool_op_get_link,
2986        .get_eeprom_len         = ixgbe_get_eeprom_len,
2987        .get_eeprom             = ixgbe_get_eeprom,
2988        .set_eeprom             = ixgbe_set_eeprom,
2989        .get_ringparam          = ixgbe_get_ringparam,
2990        .set_ringparam          = ixgbe_set_ringparam,
2991        .get_pauseparam         = ixgbe_get_pauseparam,
2992        .set_pauseparam         = ixgbe_set_pauseparam,
2993        .get_msglevel           = ixgbe_get_msglevel,
2994        .set_msglevel           = ixgbe_set_msglevel,
2995        .self_test              = ixgbe_diag_test,
2996        .get_strings            = ixgbe_get_strings,
2997        .set_phys_id            = ixgbe_set_phys_id,
2998        .get_sset_count         = ixgbe_get_sset_count,
2999        .get_ethtool_stats      = ixgbe_get_ethtool_stats,
3000        .get_coalesce           = ixgbe_get_coalesce,
3001        .set_coalesce           = ixgbe_set_coalesce,
3002        .get_rxnfc              = ixgbe_get_rxnfc,
3003        .set_rxnfc              = ixgbe_set_rxnfc,
3004        .get_channels           = ixgbe_get_channels,
3005        .set_channels           = ixgbe_set_channels,
3006        .get_ts_info            = ixgbe_get_ts_info,
3007        .get_module_info        = ixgbe_get_module_info,
3008        .get_module_eeprom      = ixgbe_get_module_eeprom,
3009};
3010
3011void ixgbe_set_ethtool_ops(struct net_device *netdev)
3012{
3013        SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
3014}
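/* In this kernel SET_ETHTOOL_OPS() is a simple assignment of
 * netdev->ethtool_ops; the ethtool core then dispatches the ETHTOOL_*
 * ioctls to the callbacks in the table above. */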
3015