/* linux/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c */
   1/*******************************************************************************
   2
   3  Intel 10 Gigabit PCI Express Linux driver
   4  Copyright(c) 1999 - 2016 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  Linux NICS <linux.nics@intel.com>
  24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  26
  27*******************************************************************************/
  28
  29/* ethtool support for ixgbe */
  30
  31#include <linux/interrupt.h>
  32#include <linux/types.h>
  33#include <linux/module.h>
  34#include <linux/slab.h>
  35#include <linux/pci.h>
  36#include <linux/netdevice.h>
  37#include <linux/ethtool.h>
  38#include <linux/vmalloc.h>
  39#include <linux/highmem.h>
  40#include <linux/uaccess.h>
  41
  42#include "ixgbe.h"
  43#include "ixgbe_phy.h"
  44
  45
  46#define IXGBE_ALL_RAR_ENTRIES 16
  47
/* Where an ethtool statistic is read from: the generic netdev
 * rtnl_link_stats64 counters or the driver-private ixgbe_adapter counters.
 */
enum {NETDEV_STATS, IXGBE_STATS};

/* Describes one entry of the ethtool statistics table. */
struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name reported to ethtool */
	int type;		/* NETDEV_STATS or IXGBE_STATS */
	int sizeof_stat;	/* size in bytes of the counter field */
	int stat_offset;	/* byte offset within the source struct */
};

/* Expand to the {type, size, offset} triple for a driver counter */
#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
/* Expand to the {type, size, offset} triple for a netdev counter */
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
  63
/* Table of statistics exported through ethtool -S; each entry names the
 * counter and records where ixgbe_get_ethtool_stats() should copy it from.
 */
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	/* generic netdev counters */
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	/* hardware (NIC) counters from adapter->stats */
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	/* receive-side coalescing */
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	/* Flow Director */
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	/* link (non-priority) flow control */
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	/* OS-to-BMC management traffic */
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	/* hardware timestamping */
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
#ifdef IXGBE_FCOE
	/* Fibre Channel over Ethernet offload */
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};
 128
 129/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 130 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 131 * used because we do not have a good way to get the max number of
 132 * rx queues with CONFIG_RPS disabled.
 133 */
 134#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
 135
 136#define IXGBE_QUEUE_STATS_LEN ( \
 137        (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
 138        (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
 139#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
 140#define IXGBE_PB_STATS_LEN ( \
 141                        (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
 142                         sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
 143                         sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
 144                         sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
 145                        / sizeof(u64))
 146#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
 147                         IXGBE_PB_STATS_LEN + \
 148                         IXGBE_QUEUE_STATS_LEN)
 149
 150static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 151        "Register test  (offline)", "Eeprom test    (offline)",
 152        "Interrupt test (offline)", "Loopback test  (offline)",
 153        "Link test   (on/offline)"
 154};
 155#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
 156
 157static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
 158#define IXGBE_PRIV_FLAGS_LEGACY_RX      BIT(0)
 159        "legacy-rx",
 160};
 161
 162#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
 163
 164/* currently supported speeds for 10G */
 165#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
 166                         SUPPORTED_10000baseKX4_Full | \
 167                         SUPPORTED_10000baseKR_Full)
 168
 169#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
 170
 171static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
 172{
 173        if (!ixgbe_isbackplane(hw->phy.media_type))
 174                return SUPPORTED_10000baseT_Full;
 175
 176        switch (hw->device_id) {
 177        case IXGBE_DEV_ID_82598:
 178        case IXGBE_DEV_ID_82599_KX4:
 179        case IXGBE_DEV_ID_82599_KX4_MEZZ:
 180        case IXGBE_DEV_ID_X550EM_X_KX4:
 181                return SUPPORTED_10000baseKX4_Full;
 182        case IXGBE_DEV_ID_82598_BX:
 183        case IXGBE_DEV_ID_82599_KR:
 184        case IXGBE_DEV_ID_X550EM_X_KR:
 185        case IXGBE_DEV_ID_X550EM_X_XFI:
 186                return SUPPORTED_10000baseKR_Full;
 187        default:
 188                return SUPPORTED_10000baseKX4_Full |
 189                       SUPPORTED_10000baseKR_Full;
 190        }
 191}
 192
/* ixgbe_get_link_ksettings - report link modes, speed and duplex to ethtool
 * @netdev: network interface device structure
 * @cmd: link settings structure to fill out
 *
 * Builds the legacy u32 supported/advertising masks from the MAC link
 * capabilities, the PHY/SFP type and the requested flow-control mode, then
 * converts them back into the ksettings link-mode bitmaps.  Always
 * returns 0.
 */
static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;
	u32 supported, advertising;

	/* seed the u32 mask from the caller-supplied link-mode bitmap */
	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		supported |= ixgbe_get_supported_10gtypes(hw);
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
				   SUPPORTED_1000baseKX_Full :
				   SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_10_FULL)
		supported |= SUPPORTED_10baseT_Full;

	/* default advertised speed if phy.autoneg_advertised isn't set */
	advertising = supported;
	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		advertising = 0;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			advertising |= ADVERTISED_10baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			advertising |= supported & ADVRTSD_MSK_10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (supported & SUPPORTED_1000baseKX_Full)
				advertising |= ADVERTISED_1000baseKX_Full;
			else
				advertising |= ADVERTISED_1000baseT_Full;
		}
	} else {
		/* multispeed fiber without autoneg advertises only the
		 * highest supported speed
		 */
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				advertising = ADVERTISED_10000baseT_Full;
		}
	}

	if (autoneg) {
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		/* copper PHYs report a twisted-pair port */
		supported |= SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			/* direct-attach copper cable */
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			/* optical modules */
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			/* cage is empty */
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			/* 1G copper SFP module */
			supported |= SUPPORTED_TP;
			advertising |= ADVERTISED_TP;
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	supported |= SUPPORTED_Pause;

	/* translate the requested flow-control mode into pause bits */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		advertising |= ADVERTISED_Pause;
		break;
	case ixgbe_fc_rx_pause:
		advertising |= ADVERTISED_Pause |
				     ADVERTISED_Asym_Pause;
		break;
	case ixgbe_fc_tx_pause:
		advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		advertising &= ~(ADVERTISED_Pause |
				       ADVERTISED_Asym_Pause);
	}

	/* report speed/duplex only while the link is up */
	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	/* convert the legacy u32 masks back into link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
 387
/* ixgbe_set_link_ksettings - apply link settings requested via ethtool
 * @netdev: network interface device structure
 * @cmd: requested link settings
 *
 * For copper and multispeed-fiber devices this limits the advertised
 * speeds (duplex forcing is not supported) and restarts link setup.
 * All other media only accept the fixed 10Gb/FULL autoneg-off setting.
 *
 * Returns 0 on success or a negative error code.
 */
static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (advertising & ~supported)
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (advertising ==
			    (ADVERTISED_10000baseT_Full |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		}

		/* translate ethtool advertising bits to IXGBE link speeds */
		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (advertising & ADVERTISED_10baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		/* nothing changed - avoid a needless link restart */
		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		/* busy-wait until we own the __IXGBE_IN_SFP_INIT state bit,
		 * serializing with other paths that reconfigure the link
		 */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			/* on failure, try to restore the old advertisement */
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
 458
 459static void ixgbe_get_pauseparam(struct net_device *netdev,
 460                                 struct ethtool_pauseparam *pause)
 461{
 462        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 463        struct ixgbe_hw *hw = &adapter->hw;
 464
 465        if (ixgbe_device_supports_autoneg_fc(hw) &&
 466            !hw->fc.disable_fc_autoneg)
 467                pause->autoneg = 1;
 468        else
 469                pause->autoneg = 0;
 470
 471        if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
 472                pause->rx_pause = 1;
 473        } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
 474                pause->tx_pause = 1;
 475        } else if (hw->fc.current_mode == ixgbe_fc_full) {
 476                pause->rx_pause = 1;
 477                pause->tx_pause = 1;
 478        }
 479}
 480
 481static int ixgbe_set_pauseparam(struct net_device *netdev,
 482                                struct ethtool_pauseparam *pause)
 483{
 484        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 485        struct ixgbe_hw *hw = &adapter->hw;
 486        struct ixgbe_fc_info fc = hw->fc;
 487
 488        /* 82598 does no support link flow control with DCB enabled */
 489        if ((hw->mac.type == ixgbe_mac_82598EB) &&
 490            (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
 491                return -EINVAL;
 492
 493        /* some devices do not support autoneg of link flow control */
 494        if ((pause->autoneg == AUTONEG_ENABLE) &&
 495            !ixgbe_device_supports_autoneg_fc(hw))
 496                return -EINVAL;
 497
 498        fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
 499
 500        if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
 501                fc.requested_mode = ixgbe_fc_full;
 502        else if (pause->rx_pause && !pause->tx_pause)
 503                fc.requested_mode = ixgbe_fc_rx_pause;
 504        else if (!pause->rx_pause && pause->tx_pause)
 505                fc.requested_mode = ixgbe_fc_tx_pause;
 506        else
 507                fc.requested_mode = ixgbe_fc_none;
 508
 509        /* if the thing changed then we'll update and use new autoneg */
 510        if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
 511                hw->fc = fc;
 512                if (netif_running(netdev))
 513                        ixgbe_reinit_locked(adapter);
 514                else
 515                        ixgbe_reset(adapter);
 516        }
 517
 518        return 0;
 519}
 520
 521static u32 ixgbe_get_msglevel(struct net_device *netdev)
 522{
 523        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 524        return adapter->msg_enable;
 525}
 526
 527static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
 528{
 529        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 530        adapter->msg_enable = data;
 531}
 532
/* ixgbe_get_regs_len - report the size in bytes of the register dump
 *
 * IXGBE_REGS_LEN is also consumed by ixgbe_get_regs() to size and clear
 * the snapshot buffer, so the two must stay in sync.
 */
static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1139
	return IXGBE_REGS_LEN * sizeof(u32);
}
 538
 539#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
 540
 541static void ixgbe_get_regs(struct net_device *netdev,
 542                           struct ethtool_regs *regs, void *p)
 543{
 544        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 545        struct ixgbe_hw *hw = &adapter->hw;
 546        u32 *regs_buff = p;
 547        u8 i;
 548
 549        memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
 550
 551        regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
 552                        hw->device_id;
 553
 554        /* General Registers */
 555        regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
 556        regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
 557        regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
 558        regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
 559        regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
 560        regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 561        regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
 562        regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
 563
 564        /* NVM Register */
 565        regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 566        regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
 567        regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
 568        regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
 569        regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
 570        regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
 571        regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
 572        regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
 573        regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
 574        regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
 575
 576        /* Interrupt */
 577        /* don't read EICR because it can clear interrupt causes, instead
 578         * read EICS which is a shadow but doesn't clear EICR */
 579        regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
 580        regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
 581        regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
 582        regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
 583        regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
 584        regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
 585        regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
 586        regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
 587        regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
 588        regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
 589        regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
 590        regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
 591
 592        /* Flow Control */
 593        regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
 594        for (i = 0; i < 4; i++)
 595                regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
 596        for (i = 0; i < 8; i++) {
 597                switch (hw->mac.type) {
 598                case ixgbe_mac_82598EB:
 599                        regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
 600                        regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
 601                        break;
 602                case ixgbe_mac_82599EB:
 603                case ixgbe_mac_X540:
 604                case ixgbe_mac_X550:
 605                case ixgbe_mac_X550EM_x:
 606                case ixgbe_mac_x550em_a:
 607                        regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
 608                        regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
 609                        break;
 610                default:
 611                        break;
 612                }
 613        }
 614        regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
 615        regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
 616
 617        /* Receive DMA */
 618        for (i = 0; i < 64; i++)
 619                regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
 620        for (i = 0; i < 64; i++)
 621                regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
 622        for (i = 0; i < 64; i++)
 623                regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
 624        for (i = 0; i < 64; i++)
 625                regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
 626        for (i = 0; i < 64; i++)
 627                regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
 628        for (i = 0; i < 64; i++)
 629                regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
 630        for (i = 0; i < 16; i++)
 631                regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
 632        for (i = 0; i < 16; i++)
 633                regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
 634        regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
 635        for (i = 0; i < 8; i++)
 636                regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 637        regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 638        regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
 639
 640        /* Receive */
 641        regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 642        regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
 643        for (i = 0; i < 16; i++)
 644                regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
 645        for (i = 0; i < 16; i++)
 646                regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
 647        regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
 648        regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 649        regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 650        regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
 651        regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
 652        regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
 653        for (i = 0; i < 8; i++)
 654                regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
 655        for (i = 0; i < 8; i++)
 656                regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
 657        regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
 658
 659        /* Transmit */
 660        for (i = 0; i < 32; i++)
 661                regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
 662        for (i = 0; i < 32; i++)
 663                regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
 664        for (i = 0; i < 32; i++)
 665                regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
 666        for (i = 0; i < 32; i++)
 667                regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
 668        for (i = 0; i < 32; i++)
 669                regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
 670        for (i = 0; i < 32; i++)
 671                regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
 672        for (i = 0; i < 32; i++)
 673                regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
 674        for (i = 0; i < 32; i++)
 675                regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
 676        regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
 677        for (i = 0; i < 16; i++)
 678                regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
 679        regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
 680        for (i = 0; i < 8; i++)
 681                regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
 682        regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
 683
 684        /* Wake Up */
 685        regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
 686        regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
 687        regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
 688        regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
 689        regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
 690        regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
 691        regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
 692        regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
 693        regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
 694
 695        /* DCB */
 696        regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
 697        regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
 698
 699        switch (hw->mac.type) {
 700        case ixgbe_mac_82598EB:
 701                regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
 702                regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
 703                for (i = 0; i < 8; i++)
 704                        regs_buff[833 + i] =
 705                                IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
 706                for (i = 0; i < 8; i++)
 707                        regs_buff[841 + i] =
 708                                IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
 709                for (i = 0; i < 8; i++)
 710                        regs_buff[849 + i] =
 711                                IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
 712                for (i = 0; i < 8; i++)
 713                        regs_buff[857 + i] =
 714                                IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
 715                break;
 716        case ixgbe_mac_82599EB:
 717        case ixgbe_mac_X540:
 718        case ixgbe_mac_X550:
 719        case ixgbe_mac_X550EM_x:
 720        case ixgbe_mac_x550em_a:
 721                regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
 722                regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
 723                for (i = 0; i < 8; i++)
 724                        regs_buff[833 + i] =
 725                                IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
 726                for (i = 0; i < 8; i++)
 727                        regs_buff[841 + i] =
 728                                IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
 729                for (i = 0; i < 8; i++)
 730                        regs_buff[849 + i] =
 731                                IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
 732                for (i = 0; i < 8; i++)
 733                        regs_buff[857 + i] =
 734                                IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
 735                break;
 736        default:
 737                break;
 738        }
 739
 740        for (i = 0; i < 8; i++)
 741                regs_buff[865 + i] =
 742                IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
 743        for (i = 0; i < 8; i++)
 744                regs_buff[873 + i] =
 745                IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
 746
 747        /* Statistics */
 748        regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
 749        regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
 750        regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
 751        regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
 752        for (i = 0; i < 8; i++)
 753                regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
 754        regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
 755        regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
 756        regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
 757        regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
 758        regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
 759        regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
 760        regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
 761        for (i = 0; i < 8; i++)
 762                regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
 763        for (i = 0; i < 8; i++)
 764                regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
 765        for (i = 0; i < 8; i++)
 766                regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
 767        for (i = 0; i < 8; i++)
 768                regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
 769        regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
 770        regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
 771        regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
 772        regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
 773        regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
 774        regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
 775        regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
 776        regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
 777        regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
 778        regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
 779        regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
 780        regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
 781        regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
 782        regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
 783        for (i = 0; i < 8; i++)
 784                regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
 785        regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
 786        regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
 787        regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
 788        regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
 789        regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
 790        regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
 791        regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
 792        regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
 793        regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
 794        regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
 795        regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
 796        regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
 797        regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
 798        regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
 799        regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
 800        regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
 801        regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
 802        regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
 803        regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
 804        regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
 805        for (i = 0; i < 16; i++)
 806                regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
 807        for (i = 0; i < 16; i++)
 808                regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
 809        for (i = 0; i < 16; i++)
 810                regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
 811        for (i = 0; i < 16; i++)
 812                regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
 813
 814        /* MAC */
 815        regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
 816        regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
 817        regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
 818        regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
 819        regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
 820        regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
 821        regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
 822        regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
 823        regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
 824        regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 825        regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
 826        regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
 827        regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
 828        regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
 829        regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
 830        regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
 831        regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
 832        regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
 833        regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
 834        regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
 835        regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
 836        regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
 837        regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
 838        regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
 839        regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
 840        regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
 841        regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 842        regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
 843        regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
 844        regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
 845        regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
 846        regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
 847        regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
 848
 849        /* Diagnostic */
 850        regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
 851        for (i = 0; i < 8; i++)
 852                regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
 853        regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
 854        for (i = 0; i < 4; i++)
 855                regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
 856        regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
 857        regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
 858        for (i = 0; i < 8; i++)
 859                regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
 860        regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
 861        for (i = 0; i < 4; i++)
 862                regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
 863        regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
 864        regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
 865        for (i = 0; i < 4; i++)
 866                regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
 867        regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
 868        for (i = 0; i < 4; i++)
 869                regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
 870        for (i = 0; i < 8; i++)
 871                regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
 872        regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
 873        regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
 874        regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
 875        regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
 876        regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
 877        regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
 878        regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
 879        regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
 880        regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
 881
 882        /* 82599 X540 specific registers  */
 883        regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
 884
 885        /* 82599 X540 specific DCB registers  */
 886        regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
 887        regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
 888        for (i = 0; i < 4; i++)
 889                regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
 890        regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
 891                                        /* same as RTTQCNRM */
 892        regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
 893                                        /* same as RTTQCNRR */
 894
 895        /* X540 specific DCB registers  */
 896        regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
 897        regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
 898}
 899
 900static int ixgbe_get_eeprom_len(struct net_device *netdev)
 901{
 902        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 903        return adapter->hw.eeprom.word_size * 2;
 904}
 905
 906static int ixgbe_get_eeprom(struct net_device *netdev,
 907                            struct ethtool_eeprom *eeprom, u8 *bytes)
 908{
 909        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 910        struct ixgbe_hw *hw = &adapter->hw;
 911        u16 *eeprom_buff;
 912        int first_word, last_word, eeprom_len;
 913        int ret_val = 0;
 914        u16 i;
 915
 916        if (eeprom->len == 0)
 917                return -EINVAL;
 918
 919        eeprom->magic = hw->vendor_id | (hw->device_id << 16);
 920
 921        first_word = eeprom->offset >> 1;
 922        last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 923        eeprom_len = last_word - first_word + 1;
 924
 925        eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
 926        if (!eeprom_buff)
 927                return -ENOMEM;
 928
 929        ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
 930                                             eeprom_buff);
 931
 932        /* Device's eeprom is always little-endian, word addressable */
 933        for (i = 0; i < eeprom_len; i++)
 934                le16_to_cpus(&eeprom_buff[i]);
 935
 936        memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
 937        kfree(eeprom_buff);
 938
 939        return ret_val;
 940}
 941
/**
 * ixgbe_set_eeprom - write bytes to the device EEPROM (ethtool -E)
 * @netdev: network interface device structure
 * @eeprom: requested offset/length and magic cookie
 * @bytes: data to write
 *
 * The EEPROM is word (16-bit) addressable, so a byte-granular request
 * needs a read-modify-write of any partially covered word at either
 * end of the range.  Returns 0 on success or a negative error code.
 */
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	/* magic must match the id published by ixgbe_get_eeprom() */
	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	/* inclusive word range covering the requested byte range */
	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		/* byte-wise advance past the preserved low byte */
		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					  &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	/* splice the caller's bytes over the (byte-addressed) buffer */
	memcpy(ptr, bytes, eeprom->len);

	/* back to device byte order before writing the whole word range */
	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
1011
1012static void ixgbe_get_drvinfo(struct net_device *netdev,
1013                              struct ethtool_drvinfo *drvinfo)
1014{
1015        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1016        u32 nvm_track_id;
1017
1018        strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
1019        strlcpy(drvinfo->version, ixgbe_driver_version,
1020                sizeof(drvinfo->version));
1021
1022        nvm_track_id = (adapter->eeprom_verh << 16) |
1023                        adapter->eeprom_verl;
1024        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
1025                 nvm_track_id);
1026
1027        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
1028                sizeof(drvinfo->bus_info));
1029
1030        drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
1031}
1032
1033static void ixgbe_get_ringparam(struct net_device *netdev,
1034                                struct ethtool_ringparam *ring)
1035{
1036        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1037        struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
1038        struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1039
1040        ring->rx_max_pending = IXGBE_MAX_RXD;
1041        ring->tx_max_pending = IXGBE_MAX_TXD;
1042        ring->rx_pending = rx_ring->count;
1043        ring->tx_pending = tx_ring->count;
1044}
1045
/**
 * ixgbe_set_ringparam - change Tx/Rx descriptor ring sizes (ethtool -G)
 * @netdev: network interface device structure
 * @ring: requested ring sizes
 *
 * Clamps and aligns the request to hardware constraints, then rebuilds
 * the rings.  If the interface is down only the counts are recorded;
 * if it is up, the new resources are allocated into a temporary array
 * first so the old rings survive an allocation failure.  XDP rings use
 * the same descriptor count as the regular Tx rings.
 *
 * Returns 0 on success or a negative error code.
 */
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	/* mini and jumbo rings are not supported */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	/* clamp to hardware limits and round up to the required multiple */
	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	/* serialize against any other reset in progress */
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		/* interface down: no resources allocated, just record the
		 * new counts for the next open
		 */
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				/* unwind everything set up so far */
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		/* XDP rings follow the regular Tx rings in temp_ring;
		 * i keeps counting while j indexes the XDP array
		 */
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				/* frees both XDP and regular Tx entries */
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		/* all allocations succeeded: swap new resources in */
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}

		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	/* bring the interface back up whether or not the resize succeeded */
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
1187
1188static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1189{
1190        switch (sset) {
1191        case ETH_SS_TEST:
1192                return IXGBE_TEST_LEN;
1193        case ETH_SS_STATS:
1194                return IXGBE_STATS_LEN;
1195        case ETH_SS_PRIV_FLAGS:
1196                return IXGBE_PRIV_FLAGS_STR_LEN;
1197        default:
1198                return -EOPNOTSUPP;
1199        }
1200}
1201
/**
 * ixgbe_get_ethtool_stats - fill the ethtool -S statistics array
 * @netdev: network interface device structure
 * @stats: ethtool stats request header (unused)
 * @data: output array; ordering must match ixgbe_get_strings(ETH_SS_STATS)
 */
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	/* global stats: each entry sourced from either the netdev stats
	 * or the adapter structure, at a recorded byte offset
	 */
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		/* stat fields are either u64 or u32 wide */
		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	/* per-Tx-queue packet/byte pairs; unused slots report zero */
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		/* retry loop gives a consistent packets/bytes snapshot */
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
	/* per-Rx-queue packet/byte pairs */
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* per-packet-buffer priority flow control counters */
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
1275
1276static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1277                              u8 *data)
1278{
1279        char *p = (char *)data;
1280        unsigned int i;
1281
1282        switch (stringset) {
1283        case ETH_SS_TEST:
1284                for (i = 0; i < IXGBE_TEST_LEN; i++) {
1285                        memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
1286                        data += ETH_GSTRING_LEN;
1287                }
1288                break;
1289        case ETH_SS_STATS:
1290                for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1291                        memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1292                               ETH_GSTRING_LEN);
1293                        p += ETH_GSTRING_LEN;
1294                }
1295                for (i = 0; i < netdev->num_tx_queues; i++) {
1296                        sprintf(p, "tx_queue_%u_packets", i);
1297                        p += ETH_GSTRING_LEN;
1298                        sprintf(p, "tx_queue_%u_bytes", i);
1299                        p += ETH_GSTRING_LEN;
1300                }
1301                for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1302                        sprintf(p, "rx_queue_%u_packets", i);
1303                        p += ETH_GSTRING_LEN;
1304                        sprintf(p, "rx_queue_%u_bytes", i);
1305                        p += ETH_GSTRING_LEN;
1306                }
1307                for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1308                        sprintf(p, "tx_pb_%u_pxon", i);
1309                        p += ETH_GSTRING_LEN;
1310                        sprintf(p, "tx_pb_%u_pxoff", i);
1311                        p += ETH_GSTRING_LEN;
1312                }
1313                for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1314                        sprintf(p, "rx_pb_%u_pxon", i);
1315                        p += ETH_GSTRING_LEN;
1316                        sprintf(p, "rx_pb_%u_pxoff", i);
1317                        p += ETH_GSTRING_LEN;
1318                }
1319                /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1320                break;
1321        case ETH_SS_PRIV_FLAGS:
1322                memcpy(data, ixgbe_priv_flags_strings,
1323                       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
1324        }
1325}
1326
1327static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1328{
1329        struct ixgbe_hw *hw = &adapter->hw;
1330        bool link_up;
1331        u32 link_speed = 0;
1332
1333        if (ixgbe_removed(hw->hw_addr)) {
1334                *data = 1;
1335                return 1;
1336        }
1337        *data = 0;
1338
1339        hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1340        if (link_up)
1341                return *data;
1342        else
1343                *data = 1;
1344        return *data;
1345}
1346
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;	/* register offset (or base of an array/table) */
	u8  array_len;	/* number of entries (1 for a single register) */
	u8  test_type;	/* PATTERN_TEST, SET_READ_TEST, etc. (defined below) */
	u32 mask;	/* bits expected to be read/write capable */
	u32 write;	/* value or pattern mask to write */
};
1355
1356/* In the hardware, registers are laid out either singly, in arrays
1357 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
1358 * most tests take place on arrays or single registers (handled
1359 * as a single-element array) and special-case the tables.
1360 * Table tests are always pattern tests.
1361 *
1362 * We also make provision for some required setup steps by specifying
1363 * registers to be written without any read-back testing.
1364 */
1365
1366#define PATTERN_TEST    1
1367#define SET_READ_TEST   2
1368#define WRITE_NO_TEST   3
1369#define TABLE32_TEST    4
1370#define TABLE64_TEST_LO 5
1371#define TABLE64_TEST_HI 6
1372
/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable the RX queues before testing the tail (RDT) registers */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	/* ... then disable them again */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	/* a zero register offset terminates the table */
	{ .reg = 0 }
};
1396
1397/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
        { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
        { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
        { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
        { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        /* Enable all four RX queues before testing. */
        { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
        /* RDH is read-only for 82598, only test RDT. */
        { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
        { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
        { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
        { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
        { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
        { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
        { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
        { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { .reg = 0 }    /* terminator */
};
1424
1425static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1426                             u32 mask, u32 write)
1427{
1428        u32 pat, val, before;
1429        static const u32 test_pattern[] = {
1430                0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1431
1432        if (ixgbe_removed(adapter->hw.hw_addr)) {
1433                *data = 1;
1434                return true;
1435        }
1436        for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1437                before = ixgbe_read_reg(&adapter->hw, reg);
1438                ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1439                val = ixgbe_read_reg(&adapter->hw, reg);
1440                if (val != (test_pattern[pat] & write & mask)) {
1441                        e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1442                              reg, val, (test_pattern[pat] & write & mask));
1443                        *data = reg;
1444                        ixgbe_write_reg(&adapter->hw, reg, before);
1445                        return true;
1446                }
1447                ixgbe_write_reg(&adapter->hw, reg, before);
1448        }
1449        return false;
1450}
1451
1452static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1453                              u32 mask, u32 write)
1454{
1455        u32 val, before;
1456
1457        if (ixgbe_removed(adapter->hw.hw_addr)) {
1458                *data = 1;
1459                return true;
1460        }
1461        before = ixgbe_read_reg(&adapter->hw, reg);
1462        ixgbe_write_reg(&adapter->hw, reg, write & mask);
1463        val = ixgbe_read_reg(&adapter->hw, reg);
1464        if ((write & mask) != (val & mask)) {
1465                e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1466                      reg, (val & mask), (write & mask));
1467                *data = reg;
1468                ixgbe_write_reg(&adapter->hw, reg, before);
1469                return true;
1470        }
1471        ixgbe_write_reg(&adapter->hw, reg, before);
1472        return false;
1473}
1474
/* Run the ethtool register self-test: first exercise the STATUS register's
 * toggleable bits, then walk the per-MAC test table until the { .reg = 0 }
 * terminator.  Returns 0 on success; 1 on failure with a failure code (or
 * the failing register offset, via the helpers) stored in *data.
 */
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
        const struct ixgbe_reg_test *test;
        u32 value, before, after;
        u32 i, toggle;

        if (ixgbe_removed(adapter->hw.hw_addr)) {
                e_err(drv, "Adapter removed - register test blocked\n");
                *data = 1;
                return 1;
        }
        /* pick the test table and STATUS toggle mask for this MAC type */
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82598EB:
                toggle = 0x7FFFF3FF;
                test = reg_test_82598;
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_x550em_a:
                toggle = 0x7FFFF30F;
                test = reg_test_82599;
                break;
        default:
                *data = 1;
                return 1;
        }

        /*
         * Because the status register is such a special case,
         * we handle it separately from the rest of the register
         * tests.  Some bits are read-only, some toggle, and some
         * are writeable on newer MACs.
         */
        before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
        value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
        ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
        after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
        if (value != after) {
                e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
                      after, value);
                *data = 1;
                return 1;
        }
        /* restore previous status */
        ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

        /*
         * Perform the remainder of the register test, looping through
         * the test table until we either fail or reach the null entry.
         */
        while (test->reg) {
                for (i = 0; i < test->array_len; i++) {
                        bool b = false;

                        switch (test->test_type) {
                        case PATTERN_TEST:
                                /* per-queue registers are spaced 0x40 apart */
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 0x40),
                                                     test->mask,
                                                     test->write);
                                break;
                        case SET_READ_TEST:
                                b = reg_set_and_check(adapter, data,
                                                      test->reg + (i * 0x40),
                                                      test->mask,
                                                      test->write);
                                break;
                        case WRITE_NO_TEST:
                                ixgbe_write_reg(&adapter->hw,
                                                test->reg + (i * 0x40),
                                                test->write);
                                break;
                        case TABLE32_TEST:
                                /* contiguous table of 32-bit entries */
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 4),
                                                     test->mask,
                                                     test->write);
                                break;
                        case TABLE64_TEST_LO:
                                /* low dword of 8-byte table entries */
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 8),
                                                     test->mask,
                                                     test->write);
                                break;
                        case TABLE64_TEST_HI:
                                /* high dword of 8-byte table entries */
                                b = reg_pattern_test(adapter, data,
                                                     (test->reg + 4) + (i * 8),
                                                     test->mask,
                                                     test->write);
                                break;
                        }
                        if (b)
                                return 1;
                }
                test++;
        }

        *data = 0;
        return 0;
}
1577
1578static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1579{
1580        struct ixgbe_hw *hw = &adapter->hw;
1581        if (hw->eeprom.ops.validate_checksum(hw, NULL))
1582                *data = 1;
1583        else
1584                *data = 0;
1585        return *data;
1586}
1587
1588static irqreturn_t ixgbe_test_intr(int irq, void *data)
1589{
1590        struct net_device *netdev = (struct net_device *) data;
1591        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1592
1593        adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1594
1595        return IRQ_HANDLED;
1596}
1597
/* Interrupt self-test: hook a temporary handler on the legacy/MSI vector,
 * then mask/force each of the low 15 cause bits and check that interrupts
 * are posted exactly when expected.  MSI-X is not tested.  Failure codes
 * stored in *data: 1 = request_irq failed, 3 = masked interrupt fired,
 * 4 = enabled interrupt did not fire, 5 = unrelated interrupt fired.
 */
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
        struct net_device *netdev = adapter->netdev;
        u32 mask, i = 0, shared_int = true;
        u32 irq = adapter->pdev->irq;

        *data = 0;

        /* Hook up test interrupt handler just for this test */
        if (adapter->msix_entries) {
                /* NOTE: we don't test MSI-X interrupts here, yet */
                return 0;
        } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
                shared_int = false;
                if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
                                netdev)) {
                        *data = 1;
                        return -1;
                }
        } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
                                netdev->name, netdev)) {
                /* exclusive grab succeeded - the line is not shared */
                shared_int = false;
        } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
                               netdev->name, netdev)) {
                *data = 1;
                return -1;
        }
        e_info(hw, "testing %s interrupt\n", shared_int ?
               "shared" : "unshared");

        /* Disable all the interrupts */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
        IXGBE_WRITE_FLUSH(&adapter->hw);
        usleep_range(10000, 20000);

        /* Test each interrupt */
        for (; i < 10; i++) {
                /* Interrupt to test */
                mask = BIT(i);

                if (!shared_int) {
                        /*
                         * Disable the interrupts to be reported in
                         * the cause register and then force the same
                         * interrupt and see if one gets posted.  If
                         * an interrupt was posted to the bus, the
                         * test failed.
                         */
                        adapter->test_icr = 0;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
                                        ~mask & 0x00007FFF);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
                                        ~mask & 0x00007FFF);
                        IXGBE_WRITE_FLUSH(&adapter->hw);
                        usleep_range(10000, 20000);

                        if (adapter->test_icr & mask) {
                                *data = 3;
                                break;
                        }
                }

                /*
                 * Enable the interrupt to be reported in the cause
                 * register and then force the same interrupt and see
                 * if one gets posted.  If an interrupt was not posted
                 * to the bus, the test failed.
                 */
                adapter->test_icr = 0;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
                IXGBE_WRITE_FLUSH(&adapter->hw);
                usleep_range(10000, 20000);

                if (!(adapter->test_icr & mask)) {
                        *data = 4;
                        break;
                }

                if (!shared_int) {
                        /*
                         * Disable the other interrupts to be reported in
                         * the cause register and then force the other
                         * interrupts and see if any get posted.  If
                         * an interrupt was posted to the bus, the
                         * test failed.
                         */
                        adapter->test_icr = 0;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
                                        ~mask & 0x00007FFF);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
                                        ~mask & 0x00007FFF);
                        IXGBE_WRITE_FLUSH(&adapter->hw);
                        usleep_range(10000, 20000);

                        if (adapter->test_icr) {
                                *data = 5;
                                break;
                        }
                }
        }

        /* Disable all the interrupts */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
        IXGBE_WRITE_FLUSH(&adapter->hw);
        usleep_range(10000, 20000);

        /* Unhook test interrupt handler */
        free_irq(irq, netdev);

        return *data;
}
1710
1711static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1712{
1713        struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1714        struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1715        struct ixgbe_hw *hw = &adapter->hw;
1716        u32 reg_ctl;
1717
1718        /* shut down the DMA engines now so they can be reinitialized later */
1719
1720        /* first Rx */
1721        hw->mac.ops.disable_rx(hw);
1722        ixgbe_disable_rx_queue(adapter, rx_ring);
1723
1724        /* now Tx */
1725        reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1726        reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1727        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1728
1729        switch (hw->mac.type) {
1730        case ixgbe_mac_82599EB:
1731        case ixgbe_mac_X540:
1732        case ixgbe_mac_X550:
1733        case ixgbe_mac_X550EM_x:
1734        case ixgbe_mac_x550em_a:
1735                reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1736                reg_ctl &= ~IXGBE_DMATXCTL_TE;
1737                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1738                break;
1739        default:
1740                break;
1741        }
1742
1743        ixgbe_reset(adapter);
1744
1745        ixgbe_free_tx_resources(&adapter->test_tx_ring);
1746        ixgbe_free_rx_resources(&adapter->test_rx_ring);
1747}
1748
/* Allocate and configure the dedicated Tx/Rx descriptor rings used by the
 * loopback self-test, mirroring queue 0's register indices.  Also sets
 * RXCTRL.DMBYPS so received frames bypass descriptor monitoring.
 * Returns 0 on success; 1 if Tx ring setup fails (nothing to free yet),
 * 4 if Rx ring setup fails (Tx ring freed via ixgbe_free_desc_rings).
 */
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
        struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
        struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rctl, reg_data;
        int ret_val;
        int err;

        /* Setup Tx descriptor ring and Tx buffers */
        tx_ring->count = IXGBE_DEFAULT_TXD;
        tx_ring->queue_index = 0;
        tx_ring->dev = &adapter->pdev->dev;
        tx_ring->netdev = adapter->netdev;
        tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

        err = ixgbe_setup_tx_resources(tx_ring);
        if (err)
                return 1;

        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_x550em_a:
                /* enable global Tx DMA before configuring the ring */
                reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
                reg_data |= IXGBE_DMATXCTL_TE;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
                break;
        default:
                break;
        }

        ixgbe_configure_tx_ring(adapter, tx_ring);

        /* Setup Rx Descriptor ring and Rx buffers */
        rx_ring->count = IXGBE_DEFAULT_RXD;
        rx_ring->queue_index = 0;
        rx_ring->dev = &adapter->pdev->dev;
        rx_ring->netdev = adapter->netdev;
        rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

        err = ixgbe_setup_rx_resources(adapter, rx_ring);
        if (err) {
                ret_val = 4;
                goto err_nomem;
        }

        /* Rx must be disabled while the ring is (re)configured */
        hw->mac.ops.disable_rx(hw);

        ixgbe_configure_rx_ring(adapter, rx_ring);

        rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
        rctl |= IXGBE_RXCTRL_DMBYPS;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

        hw->mac.ops.enable_rx(hw);

        return 0;

err_nomem:
        ixgbe_free_desc_rings(adapter);
        return ret_val;
}
1814
/* Put the MAC into loopback mode for the self-test: set HLREG0.LPBK,
 * open the Rx filters (broadcast/store-bad-packets/promiscuous), force
 * link up for the MAC type, and power down the Atlas Tx lanes on 82598.
 * Returns 0 on success, 10 if link cannot be forced (no saved AUTOC).
 */
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 reg_data;


        /* Setup MAC loopback */
        reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        reg_data |= IXGBE_HLREG0_LPBK;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

        /* accept everything so looped-back frames are not filtered out */
        reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

        /* X540 and X550 needs to set the MACC.FLU bit to force link up */
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_x550em_a:
                reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
                reg_data |= IXGBE_MACC_FLU;
                IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
                break;
        default:
                /* other MACs force link up through AUTOC.FLU */
                if (hw->mac.orig_autoc) {
                        reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
                } else {
                        return 10;
                }
        }
        IXGBE_WRITE_FLUSH(hw);
        usleep_range(10000, 20000);

        /* Disable Atlas Tx lanes; re-enabled in reset path */
        if (hw->mac.type == ixgbe_mac_82598EB) {
                u8 atlas;

                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
                atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
        }

        return 0;
}
1874
1875static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1876{
1877        u32 reg_data;
1878
1879        reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1880        reg_data &= ~IXGBE_HLREG0_LPBK;
1881        IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1882}
1883
1884static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1885                                      unsigned int frame_size)
1886{
1887        memset(skb->data, 0xFF, frame_size);
1888        frame_size >>= 1;
1889        memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1890        memset(&skb->data[frame_size + 10], 0xBE, 1);
1891        memset(&skb->data[frame_size + 12], 0xAF, 1);
1892}
1893
1894static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1895                                     unsigned int frame_size)
1896{
1897        unsigned char *data;
1898        bool match = true;
1899
1900        frame_size >>= 1;
1901
1902        data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1903
1904        if (data[3] != 0xFF ||
1905            data[frame_size + 10] != 0xBE ||
1906            data[frame_size + 12] != 0xAF)
1907                match = false;
1908
1909        kunmap(rx_buffer->page);
1910
1911        return match;
1912}
1913
/* Drain the loopback test rings: for every completed Rx descriptor, sync
 * the buffer for the CPU, count frames whose payload matches the test
 * pattern, re-sync for the device, and release the matching Tx buffer
 * (skb + DMA mapping).  Refills the Rx ring and stores the updated
 * next-to-clean indices.  Returns the number of matching frames.
 */
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
                                  struct ixgbe_ring *tx_ring,
                                  unsigned int size)
{
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *rx_buffer;
        struct ixgbe_tx_buffer *tx_buffer;
        u16 rx_ntc, tx_ntc, count = 0;

        /* initialize next to clean and descriptor values */
        rx_ntc = rx_ring->next_to_clean;
        tx_ntc = tx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

        /* a non-zero write-back length marks a completed descriptor */
        while (rx_desc->wb.upper.length) {
                /* check Rx buffer */
                rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

                /* sync Rx buffer for CPU read */
                dma_sync_single_for_cpu(rx_ring->dev,
                                        rx_buffer->dma,
                                        ixgbe_rx_bufsz(rx_ring),
                                        DMA_FROM_DEVICE);

                /* verify contents of skb */
                if (ixgbe_check_lbtest_frame(rx_buffer, size))
                        count++;

                /* sync Rx buffer for device write */
                dma_sync_single_for_device(rx_ring->dev,
                                           rx_buffer->dma,
                                           ixgbe_rx_bufsz(rx_ring),
                                           DMA_FROM_DEVICE);

                /* unmap buffer on Tx side */
                tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];

                /* Free all the Tx ring sk_buffs */
                dev_kfree_skb_any(tx_buffer->skb);

                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buffer, dma),
                                 dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);
                dma_unmap_len_set(tx_buffer, len, 0);

                /* increment Rx/Tx next to clean counters */
                rx_ntc++;
                if (rx_ntc == rx_ring->count)
                        rx_ntc = 0;
                tx_ntc++;
                if (tx_ntc == tx_ring->count)
                        tx_ntc = 0;

                /* fetch next descriptor */
                rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
        }

        /* reset BQL accounting for the test queue */
        netdev_tx_reset_queue(txring_txq(tx_ring));

        /* re-map buffers to ring, store next to clean values */
        ixgbe_alloc_rx_buffers(rx_ring, count);
        rx_ring->next_to_clean = rx_ntc;
        tx_ring->next_to_clean = tx_ntc;

        return count;
}
1982
/* Run the MAC loopback data test: repeatedly transmit 64 copies of a
 * 1024-byte pattern frame and verify all 64 come back intact on the Rx
 * test ring.  Returns 0 on success; 11 = skb allocation failed,
 * 12 = transmit failed, 13 = receive/verification failed.
 */
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
        struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
        struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
        int i, j, lc, good_cnt, ret_val = 0;
        unsigned int size = 1024;
        netdev_tx_t tx_ret_val;
        struct sk_buff *skb;
        u32 flags_orig = adapter->flags;

        /* DCB can modify the frames on Tx */
        adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

        /* allocate test skb */
        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb)
                return 11;

        /* place data into test skb */
        ixgbe_create_lbtest_frame(skb, size);
        skb_put(skb, size);

        /*
         * Calculate the loop count based on the largest descriptor ring
         * The idea is to wrap the largest ring a number of times using 64
         * send/receive pairs during each loop
         */

        if (rx_ring->count <= tx_ring->count)
                lc = ((tx_ring->count / 64) * 2) + 1;
        else
                lc = ((rx_ring->count / 64) * 2) + 1;

        for (j = 0; j <= lc; j++) {
                /* reset count of good packets */
                good_cnt = 0;

                /* place 64 packets on the transmit queue*/
                for (i = 0; i < 64; i++) {
                        /* take an extra reference; the Tx path consumes one */
                        skb_get(skb);
                        tx_ret_val = ixgbe_xmit_frame_ring(skb,
                                                           adapter,
                                                           tx_ring);
                        if (tx_ret_val == NETDEV_TX_OK)
                                good_cnt++;
                }

                if (good_cnt != 64) {
                        ret_val = 12;
                        break;
                }

                /* allow 200 milliseconds for packets to go from Tx to Rx */
                msleep(200);

                good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
                if (good_cnt != 64) {
                        ret_val = 13;
                        break;
                }
        }

        /* free the original skb */
        kfree_skb(skb);
        /* restore the DCB flag cleared above */
        adapter->flags = flags_orig;

        return ret_val;
}
2051
2052static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
2053{
2054        *data = ixgbe_setup_desc_rings(adapter);
2055        if (*data)
2056                goto out;
2057        *data = ixgbe_setup_loopback_test(adapter);
2058        if (*data)
2059                goto err_loopback;
2060        *data = ixgbe_run_loopback_test(adapter);
2061        ixgbe_loopback_cleanup(adapter);
2062
2063err_loopback:
2064        ixgbe_free_desc_rings(adapter);
2065out:
2066        return *data;
2067}
2068
/* ethtool .self_test entry point.  Result slots in data[]:
 * [0] register test, [1] eeprom test, [2] interrupt test,
 * [3] loopback test, [4] link test.  Offline tests close the interface
 * (if running) and reset the adapter between stages; online mode runs
 * only the link test and reports the rest as passing.
 */
static void ixgbe_diag_test(struct net_device *netdev,
                            struct ethtool_test *eth_test, u64 *data)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        bool if_running = netif_running(netdev);

        if (ixgbe_removed(adapter->hw.hw_addr)) {
                e_err(hw, "Adapter removed - test blocked\n");
                data[0] = 1;
                data[1] = 1;
                data[2] = 1;
                data[3] = 1;
                data[4] = 1;
                eth_test->flags |= ETH_TEST_FL_FAILED;
                return;
        }
        set_bit(__IXGBE_TESTING, &adapter->state);
        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
                struct ixgbe_hw *hw = &adapter->hw;

                /* offline tests reset the device, which would break
                 * communication with any active VF - refuse in that case
                 */
                if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
                        int i;
                        for (i = 0; i < adapter->num_vfs; i++) {
                                if (adapter->vfinfo[i].clear_to_send) {
                                        netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
                                        data[0] = 1;
                                        data[1] = 1;
                                        data[2] = 1;
                                        data[3] = 1;
                                        data[4] = 1;
                                        eth_test->flags |= ETH_TEST_FL_FAILED;
                                        clear_bit(__IXGBE_TESTING,
                                                  &adapter->state);
                                        goto skip_ol_tests;
                                }
                        }
                }

                /* Offline tests */
                e_info(hw, "offline testing starting\n");

                /* Link test performed before hardware reset so autoneg doesn't
                 * interfere with test result
                 */
                if (ixgbe_link_test(adapter, &data[4]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                if (if_running)
                        /* indicate we're in test mode */
                        ixgbe_close(netdev);
                else
                        ixgbe_reset(adapter);

                e_info(hw, "register testing starting\n");
                if (ixgbe_reg_test(adapter, &data[0]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                ixgbe_reset(adapter);
                e_info(hw, "eeprom testing starting\n");
                if (ixgbe_eeprom_test(adapter, &data[1]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                ixgbe_reset(adapter);
                e_info(hw, "interrupt testing starting\n");
                if (ixgbe_intr_test(adapter, &data[2]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                /* If SRIOV or VMDq is enabled then skip MAC
                 * loopback diagnostic. */
                if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
                                      IXGBE_FLAG_VMDQ_ENABLED)) {
                        e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
                        data[3] = 0;
                        goto skip_loopback;
                }

                ixgbe_reset(adapter);
                e_info(hw, "loopback testing starting\n");
                if (ixgbe_loopback_test(adapter, &data[3]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
                ixgbe_reset(adapter);

                /* clear testing bit and return adapter to previous state */
                clear_bit(__IXGBE_TESTING, &adapter->state);
                if (if_running)
                        ixgbe_open(netdev);
                else if (hw->mac.ops.disable_tx_laser)
                        hw->mac.ops.disable_tx_laser(hw);
        } else {
                e_info(hw, "online testing starting\n");

                /* Online tests */
                if (ixgbe_link_test(adapter, &data[4]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                /* Offline tests aren't run; pass by default */
                data[0] = 0;
                data[1] = 0;
                data[2] = 0;
                data[3] = 0;

                clear_bit(__IXGBE_TESTING, &adapter->state);
        }

skip_ol_tests:
        msleep_interruptible(4 * 1000);
}
2178
2179static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2180                               struct ethtool_wolinfo *wol)
2181{
2182        struct ixgbe_hw *hw = &adapter->hw;
2183        int retval = 0;
2184
2185        /* WOL not supported for all devices */
2186        if (!ixgbe_wol_supported(adapter, hw->device_id,
2187                                 hw->subsystem_device_id)) {
2188                retval = 1;
2189                wol->supported = 0;
2190        }
2191
2192        return retval;
2193}
2194
2195static void ixgbe_get_wol(struct net_device *netdev,
2196                          struct ethtool_wolinfo *wol)
2197{
2198        struct ixgbe_adapter *adapter = netdev_priv(netdev);
2199
2200        wol->supported = WAKE_UCAST | WAKE_MCAST |
2201                         WAKE_BCAST | WAKE_MAGIC;
2202        wol->wolopts = 0;
2203
2204        if (ixgbe_wol_exclusion(adapter, wol) ||
2205            !device_can_wakeup(&adapter->pdev->dev))
2206                return;
2207
2208        if (adapter->wol & IXGBE_WUFC_EX)
2209                wol->wolopts |= WAKE_UCAST;
2210        if (adapter->wol & IXGBE_WUFC_MC)
2211                wol->wolopts |= WAKE_MCAST;
2212        if (adapter->wol & IXGBE_WUFC_BC)
2213                wol->wolopts |= WAKE_BCAST;
2214        if (adapter->wol & IXGBE_WUFC_MAG)
2215                wol->wolopts |= WAKE_MAGIC;
2216}
2217
2218static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2219{
2220        struct ixgbe_adapter *adapter = netdev_priv(netdev);
2221
2222        if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
2223                return -EOPNOTSUPP;
2224
2225        if (ixgbe_wol_exclusion(adapter, wol))
2226                return wol->wolopts ? -EOPNOTSUPP : 0;
2227
2228        adapter->wol = 0;
2229
2230        if (wol->wolopts & WAKE_UCAST)
2231                adapter->wol |= IXGBE_WUFC_EX;
2232        if (wol->wolopts & WAKE_MCAST)
2233                adapter->wol |= IXGBE_WUFC_MC;
2234        if (wol->wolopts & WAKE_BCAST)
2235                adapter->wol |= IXGBE_WUFC_BC;
2236        if (wol->wolopts & WAKE_MAGIC)
2237                adapter->wol |= IXGBE_WUFC_MAG;
2238
2239        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2240
2241        return 0;
2242}
2243
/* ethtool nway_reset: restart the link by reinitializing the interface,
 * but only when it is administratively up.
 */
static int ixgbe_nway_reset(struct net_device *netdev)
{
	if (netif_running(netdev))
		ixgbe_reinit_locked(netdev_priv(netdev));

	return 0;
}
2253
2254static int ixgbe_set_phys_id(struct net_device *netdev,
2255                             enum ethtool_phys_id_state state)
2256{
2257        struct ixgbe_adapter *adapter = netdev_priv(netdev);
2258        struct ixgbe_hw *hw = &adapter->hw;
2259
2260        if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
2261                return -EOPNOTSUPP;
2262
2263        switch (state) {
2264        case ETHTOOL_ID_ACTIVE:
2265                adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2266                return 2;
2267
2268        case ETHTOOL_ID_ON:
2269                hw->mac.ops.led_on(hw, hw->mac.led_link_act);
2270                break;
2271
2272        case ETHTOOL_ID_OFF:
2273                hw->mac.ops.led_off(hw, hw->mac.led_link_act);
2274                break;
2275
2276        case ETHTOOL_ID_INACTIVE:
2277                /* Restore LED settings */
2278                IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2279                break;
2280        }
2281
2282        return 0;
2283}
2284
2285static int ixgbe_get_coalesce(struct net_device *netdev,
2286                              struct ethtool_coalesce *ec)
2287{
2288        struct ixgbe_adapter *adapter = netdev_priv(netdev);
2289
2290        /* only valid if in constant ITR mode */
2291        if (adapter->rx_itr_setting <= 1)
2292                ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2293        else
2294                ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2295
2296        /* if in mixed tx/rx queues per vector mode, report only rx settings */
2297        if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2298                return 0;
2299
2300        /* only valid if in constant ITR mode */
2301        if (adapter->tx_itr_setting <= 1)
2302                ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2303        else
2304                ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2305
2306        return 0;
2307}
2308
2309/*
2310 * this function must be called before setting the new value of
2311 * rx_itr_setting
2312 */
2313static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2314{
2315        struct net_device *netdev = adapter->netdev;
2316
2317        /* nothing to do if LRO or RSC are not enabled */
2318        if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2319            !(netdev->features & NETIF_F_LRO))
2320                return false;
2321
2322        /* check the feature flag value and enable RSC if necessary */
2323        if (adapter->rx_itr_setting == 1 ||
2324            adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2325                if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2326                        adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2327                        e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2328                        return true;
2329                }
2330        /* if interrupt rate is too high then disable RSC */
2331        } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2332                adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2333                e_info(probe, "rx-usecs set too low, disabling RSC\n");
2334                return true;
2335        }
2336        return false;
2337}
2338
/* ethtool set_coalesce: translate the requested rx/tx usec values into
 * the stored ITR encoding, program every queue vector's EITR register,
 * and reset the device when the change crosses a TXDCTL.WTHRESH or RSC
 * boundary.
 */
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		/* in mixed mode Tx follows Rx, so the "previous Tx" value
		 * to compare against is really the Rx setting
		 */
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	/* stored settings carry a 2-bit left shift, hence the >> 2 limit */
	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	/* 0 and 1 are special (off / dynamic) and stored as-is */
	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	/* dynamic mode starts from the 20K-interrupts/sec default */
	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	/* dynamic mode starts from the 12K-interrupts/sec default */
	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting != 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		/* crossed into the "write-back threshold" range */
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		/* crossed out of the "write-back threshold" range */
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}

	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	/* push the new ITR value to every interrupt vector */
	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
2421
/* Fill @cmd with the flow-director rule stored at fsp->location,
 * translating the driver's internal atr representation back into an
 * ethtool flow spec.  Returns -EINVAL when that slot holds no rule or
 * the stored flow type cannot be expressed.
 */
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	/* the filter list is kept sorted by sw_idx; stop at the first
	 * entry at or past the requested location
	 */
	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		/* raw IPv4 rule: report as user flow with wildcard proto */
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	/* copy match values and the (per-port) mask into the spec */
	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
2490
2491static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2492                                      struct ethtool_rxnfc *cmd,
2493                                      u32 *rule_locs)
2494{
2495        struct hlist_node *node2;
2496        struct ixgbe_fdir_filter *rule;
2497        int cnt = 0;
2498
2499        /* report total rule count */
2500        cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2501
2502        hlist_for_each_entry_safe(rule, node2,
2503                                  &adapter->fdir_filter_list, fdir_node) {
2504                if (cnt == cmd->rule_cnt)
2505                        return -EMSGSIZE;
2506                rule_locs[cnt] = rule->sw_idx;
2507                cnt++;
2508        }
2509
2510        cmd->rule_cnt = cnt;
2511
2512        return 0;
2513}
2514
/* ethtool ETHTOOL_GRXFH helper: report which packet fields feed the RSS
 * hash for the given flow type.  The case labels deliberately fall
 * through so the L4 flags accumulate on top of the IP src/dst flags.
 */
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
		/* UDP port hashing is optional, gated on a feature flag */
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V6_FLOW:
		/* UDP port hashing is optional, gated on a feature flag */
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
2556
2557static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2558                           u32 *rule_locs)
2559{
2560        struct ixgbe_adapter *adapter = netdev_priv(dev);
2561        int ret = -EOPNOTSUPP;
2562
2563        switch (cmd->cmd) {
2564        case ETHTOOL_GRXRINGS:
2565                cmd->data = adapter->num_rx_queues;
2566                ret = 0;
2567                break;
2568        case ETHTOOL_GRXCLSRLCNT:
2569                cmd->rule_cnt = adapter->fdir_filter_count;
2570                ret = 0;
2571                break;
2572        case ETHTOOL_GRXCLSRULE:
2573                ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2574                break;
2575        case ETHTOOL_GRXCLSRLALL:
2576                ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2577                break;
2578        case ETHTOOL_GRXFH:
2579                ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2580                break;
2581        default:
2582                break;
2583        }
2584
2585        return ret;
2586}
2587
/* Insert, replace, or (when @input is NULL) delete the software shadow
 * of a flow-director rule at @sw_idx, keeping the list sorted by sw_idx.
 *
 * For a delete, returns 0 only if a rule was found and erased from
 * hardware, else -EINVAL.  For an insert, always returns 0.
 * Callers in this file hold fdir_perfect_lock around this function.
 */
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule, *parent;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	/* walk the sorted list; parent ends up as the last entry with a
	 * smaller sw_idx (insertion point), rule as the first candidate
	 */
	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		/* only touch hardware when deleting, or when the new rule
		 * hashes differently from the one being replaced
		 */
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
2644
/* Map an ethtool flow spec onto the driver's IXGBE_ATR_FLOW_TYPE_*
 * encoding.  Returns 1 and sets *flow_type on success, 0 when the spec
 * describes a flow the flow director cannot express.
 */
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		/* user-defined IPv4 flow: classify by the proto field */
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			/* proto 0 is a raw IPv4 match only when the proto
			 * mask is also zero (fully wildcarded); otherwise
			 * it is unsupported and falls through to failure
			 */
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			/* fall through */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
2685
/* ethtool ETHTOOL_SRXCLSRLINS handler: validate an incoming flow spec,
 * convert it into the hardware's atr format, program the per-port input
 * mask and the perfect filter, then record the rule in the software
 * list via ixgbe_update_ethtool_fdir_entry().
 */
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	u8 queue;
	int err;

	/* perfect filters require the flow director in perfect mode */
	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/* ring_cookie is a masked into a set of queues and ixgbe pools or
	 * we use the drop index.
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = IXGBE_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		/* validate the ring against PF or per-VF queue limits */
		if (!vf && (ring >= adapter->num_rx_queues))
			return -EINVAL;
		else if (vf &&
			 ((vf > adapter->num_vfs) ||
			   ring >= adapter->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = adapter->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) *
				adapter->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	/* raw IPv4 rules must not match on the L4 type bits */
	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	/* extended fields: VM pool, VLAN, and flex bytes */
	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		/* hardware has a single global mask; all rules must share it */
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	/* on success, ownership of input passes to the filter list */
	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
2815
2816static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2817                                        struct ethtool_rxnfc *cmd)
2818{
2819        struct ethtool_rx_flow_spec *fsp =
2820                (struct ethtool_rx_flow_spec *)&cmd->fs;
2821        int err;
2822
2823        spin_lock(&adapter->fdir_perfect_lock);
2824        err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2825        spin_unlock(&adapter->fdir_perfect_lock);
2826
2827        return err;
2828}
2829
2830#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2831                       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
/* ethtool ETHTOOL_SRXFH handler: validate the requested RSS hash fields
 * for a flow type and, when the optional UDP port hashing changes,
 * rewrite the (PF)MRQC register to match.  Only the UDP L4 fields are
 * actually configurable; everything else must match the fixed defaults.
 */
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* TCP always hashes on IPs and ports; nothing to change */
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		/* UDP port hashing may be toggled, but only all-or-nothing */
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		/* UDP port hashing may be toggled, but only all-or-nothing */
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		/* these types hash on IPs only; L4 fields must be off */
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc;
		unsigned int pf_pool = adapter->num_vfs;

		/* X550+ with SR-IOV uses the per-pool MRQC register */
		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
		else
			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		/* clear both UDP bits, then re-set per the new flags */
		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
		else
			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
2944
2945static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2946{
2947        struct ixgbe_adapter *adapter = netdev_priv(dev);
2948        int ret = -EOPNOTSUPP;
2949
2950        switch (cmd->cmd) {
2951        case ETHTOOL_SRXCLSRLINS:
2952                ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
2953                break;
2954        case ETHTOOL_SRXCLSRLDEL:
2955                ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
2956                break;
2957        case ETHTOOL_SRXFH:
2958                ret = ixgbe_set_rss_hash_opt(adapter, cmd);
2959                break;
2960        default:
2961                break;
2962        }
2963
2964        return ret;
2965}
2966
2967static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
2968{
2969        if (adapter->hw.mac.type < ixgbe_mac_X550)
2970                return 16;
2971        else
2972                return 64;
2973}
2974
2975static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
2976{
2977        return IXGBE_RSS_KEY_SIZE;
2978}
2979
2980static u32 ixgbe_rss_indir_size(struct net_device *netdev)
2981{
2982        struct ixgbe_adapter *adapter = netdev_priv(netdev);
2983
2984        return ixgbe_rss_indir_tbl_entries(adapter);
2985}
2986
2987static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
2988{
2989        int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
2990        u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
2991
2992        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
2993                rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
2994
2995        for (i = 0; i < reta_size; i++)
2996                indir[i] = adapter->rss_indir_tbl[i] & rss_m;
2997}
2998
2999static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
3000                          u8 *hfunc)
3001{
3002        struct ixgbe_adapter *adapter = netdev_priv(netdev);
3003
3004        if (hfunc)
3005                *hfunc = ETH_RSS_HASH_TOP;
3006
3007        if (indir)
3008                ixgbe_get_reta(adapter, indir);
3009
3010        if (key)
3011                memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
3012
3013        return 0;
3014}
3015
3016static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
3017                          const u8 *key, const u8 hfunc)
3018{
3019        struct ixgbe_adapter *adapter = netdev_priv(netdev);
3020        int i;
3021        u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3022
3023        if (hfunc)
3024                return -EINVAL;
3025
3026        /* Fill out the redirection table */
3027        if (indir) {
3028                int max_queues = min_t(int, adapter->num_rx_queues,
3029                                       ixgbe_rss_indir_tbl_max(adapter));
3030
3031                /*Allow at least 2 queues w/ SR-IOV.*/
3032                if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3033                    (max_queues < 2))
3034                        max_queues = 2;
3035
3036                /* Verify user input. */
3037                for (i = 0; i < reta_entries; i++)
3038                        if (indir[i] >= max_queues)
3039                                return -EINVAL;
3040
3041                for (i = 0; i < reta_entries; i++)
3042                        adapter->rss_indir_tbl[i] = indir[i];
3043        }
3044
3045        /* Fill out the rss hash key */
3046        if (key) {
3047                memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
3048                ixgbe_store_key(adapter);
3049        }
3050
3051        ixgbe_store_reta(adapter);
3052
3053        return 0;
3054}
3055
/**
 * ixgbe_get_ts_info - report timestamping capabilities to ethtool
 * @dev: network interface device structure
 * @info: capability structure to fill in
 *
 * Hardware timestamping is advertised for 82599, X540 and the X550
 * family; X550-class parts additionally support timestamping all Rx
 * packets.  Older MACs fall back to the generic software-only report.
 */
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* we always support timestamping disabled */
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		/* X550 hardware can timestamp every received packet */
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		/* fallthrough */
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE |
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		/* report the PTP clock index only if one was registered */
		if (adapter->ptp_clock)
			info->phc_index = ptp_clock_index(adapter->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types =
			BIT(HWTSTAMP_TX_OFF) |
			BIT(HWTSTAMP_TX_ON);

		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		/* no hardware support; report software capabilities only */
		return ethtool_op_get_ts_info(dev, info);
	}
	return 0;
}
3099
3100static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3101{
3102        unsigned int max_combined;
3103        u8 tcs = netdev_get_num_tc(adapter->netdev);
3104
3105        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3106                /* We only support one q_vector without MSI-X */
3107                max_combined = 1;
3108        } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3109                /* Limit value based on the queue mask */
3110                max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
3111        } else if (tcs > 1) {
3112                /* For DCB report channels per traffic class */
3113                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3114                        /* 8 TC w/ 4 queues per TC */
3115                        max_combined = 4;
3116                } else if (tcs > 4) {
3117                        /* 8 TC w/ 8 queues per TC */
3118                        max_combined = 8;
3119                } else {
3120                        /* 4 TC w/ 16 queues per TC */
3121                        max_combined = 16;
3122                }
3123        } else if (adapter->atr_sample_rate) {
3124                /* support up to 64 queues with ATR */
3125                max_combined = IXGBE_MAX_FDIR_INDICES;
3126        } else {
3127                /* support up to 16 queues with RSS */
3128                max_combined = ixgbe_max_rss_indices(adapter);
3129        }
3130
3131        return max_combined;
3132}
3133
3134static void ixgbe_get_channels(struct net_device *dev,
3135                               struct ethtool_channels *ch)
3136{
3137        struct ixgbe_adapter *adapter = netdev_priv(dev);
3138
3139        /* report maximum channels */
3140        ch->max_combined = ixgbe_max_channels(adapter);
3141
3142        /* report info for other vector */
3143        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3144                ch->max_other = NON_Q_VECTORS;
3145                ch->other_count = NON_Q_VECTORS;
3146        }
3147
3148        /* record RSS queues */
3149        ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3150
3151        /* nothing else to report if RSS is disabled */
3152        if (ch->combined_count == 1)
3153                return;
3154
3155        /* we do not support ATR queueing if SR-IOV is enabled */
3156        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3157                return;
3158
3159        /* same thing goes for being DCB enabled */
3160        if (netdev_get_num_tc(dev) > 1)
3161                return;
3162
3163        /* if ATR is disabled we can exit */
3164        if (!adapter->atr_sample_rate)
3165                return;
3166
3167        /* report flow director queues as maximum channels */
3168        ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
3169}
3170
3171static int ixgbe_set_channels(struct net_device *dev,
3172                              struct ethtool_channels *ch)
3173{
3174        struct ixgbe_adapter *adapter = netdev_priv(dev);
3175        unsigned int count = ch->combined_count;
3176        u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
3177
3178        /* verify they are not requesting separate vectors */
3179        if (!count || ch->rx_count || ch->tx_count)
3180                return -EINVAL;
3181
3182        /* verify other_count has not changed */
3183        if (ch->other_count != NON_Q_VECTORS)
3184                return -EINVAL;
3185
3186        /* verify the number of channels does not exceed hardware limits */
3187        if (count > ixgbe_max_channels(adapter))
3188                return -EINVAL;
3189
3190        /* update feature limits from largest to smallest supported values */
3191        adapter->ring_feature[RING_F_FDIR].limit = count;
3192
3193        /* cap RSS limit */
3194        if (count > max_rss_indices)
3195                count = max_rss_indices;
3196        adapter->ring_feature[RING_F_RSS].limit = count;
3197
3198#ifdef IXGBE_FCOE
3199        /* cap FCoE limit at 8 */
3200        if (count > IXGBE_FCRETA_SIZE)
3201                count = IXGBE_FCRETA_SIZE;
3202        adapter->ring_feature[RING_F_FCOE].limit = count;
3203
3204#endif
3205        /* use setup TC to update any traffic class queue mapping */
3206        return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
3207}
3208
3209static int ixgbe_get_module_info(struct net_device *dev,
3210                                       struct ethtool_modinfo *modinfo)
3211{
3212        struct ixgbe_adapter *adapter = netdev_priv(dev);
3213        struct ixgbe_hw *hw = &adapter->hw;
3214        s32 status;
3215        u8 sff8472_rev, addr_mode;
3216        bool page_swap = false;
3217
3218        if (hw->phy.type == ixgbe_phy_fw)
3219                return -ENXIO;
3220
3221        /* Check whether we support SFF-8472 or not */
3222        status = hw->phy.ops.read_i2c_eeprom(hw,
3223                                             IXGBE_SFF_SFF_8472_COMP,
3224                                             &sff8472_rev);
3225        if (status)
3226                return -EIO;
3227
3228        /* addressing mode is not supported */
3229        status = hw->phy.ops.read_i2c_eeprom(hw,
3230                                             IXGBE_SFF_SFF_8472_SWAP,
3231                                             &addr_mode);
3232        if (status)
3233                return -EIO;
3234
3235        if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3236                e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3237                page_swap = true;
3238        }
3239
3240        if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
3241                /* We have a SFP, but it does not support SFF-8472 */
3242                modinfo->type = ETH_MODULE_SFF_8079;
3243                modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3244        } else {
3245                /* We have a SFP which supports a revision of SFF-8472. */
3246                modinfo->type = ETH_MODULE_SFF_8472;
3247                modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3248        }
3249
3250        return 0;
3251}
3252
3253static int ixgbe_get_module_eeprom(struct net_device *dev,
3254                                         struct ethtool_eeprom *ee,
3255                                         u8 *data)
3256{
3257        struct ixgbe_adapter *adapter = netdev_priv(dev);
3258        struct ixgbe_hw *hw = &adapter->hw;
3259        s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
3260        u8 databyte = 0xFF;
3261        int i = 0;
3262
3263        if (ee->len == 0)
3264                return -EINVAL;
3265
3266        if (hw->phy.type == ixgbe_phy_fw)
3267                return -ENXIO;
3268
3269        for (i = ee->offset; i < ee->offset + ee->len; i++) {
3270                /* I2C reads can take long time */
3271                if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
3272                        return -EBUSY;
3273
3274                if (i < ETH_MODULE_SFF_8079_LEN)
3275                        status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
3276                else
3277                        status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
3278
3279                if (status)
3280                        return -EIO;
3281
3282                data[i - ee->offset] = databyte;
3283        }
3284
3285        return 0;
3286}
3287
/* Map of MAC link-speed bits to the corresponding ethtool SUPPORTED_*
 * bits, used when reporting EEE capabilities.
 */
static const struct {
	ixgbe_link_speed mac_speed;
	u32 supported;
} ixgbe_ls_map[] = {
	{ IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
	{ IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
	{ IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
	{ IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
	{ IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
};
3298
/* Map of firmware link-partner EEE advertisement bits to ethtool
 * SUPPORTED_* bits, used to decode the FW_PHY_ACT_UD_2 response.
 */
static const struct {
	u32 lp_advertised;
	u32 mac_speed;
} ixgbe_lp_map[] = {
	{ FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
	{ FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
	{ FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
};
3310
3311static int
3312ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
3313{
3314        u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
3315        struct ixgbe_hw *hw = &adapter->hw;
3316        s32 rc;
3317        u16 i;
3318
3319        rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
3320        if (rc)
3321                return rc;
3322
3323        edata->lp_advertised = 0;
3324        for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
3325                if (info[0] & ixgbe_lp_map[i].lp_advertised)
3326                        edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
3327        }
3328
3329        edata->supported = 0;
3330        for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3331                if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
3332                        edata->supported |= ixgbe_ls_map[i].supported;
3333        }
3334
3335        edata->advertised = 0;
3336        for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3337                if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
3338                        edata->advertised |= ixgbe_ls_map[i].supported;
3339        }
3340
3341        edata->eee_enabled = !!edata->advertised;
3342        edata->tx_lpi_enabled = edata->eee_enabled;
3343        if (edata->advertised & edata->lp_advertised)
3344                edata->eee_active = true;
3345
3346        return 0;
3347}
3348
3349static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
3350{
3351        struct ixgbe_adapter *adapter = netdev_priv(netdev);
3352        struct ixgbe_hw *hw = &adapter->hw;
3353
3354        if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3355                return -EOPNOTSUPP;
3356
3357        if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
3358                return ixgbe_get_eee_fw(adapter, edata);
3359
3360        return -EOPNOTSUPP;
3361}
3362
3363static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
3364{
3365        struct ixgbe_adapter *adapter = netdev_priv(netdev);
3366        struct ixgbe_hw *hw = &adapter->hw;
3367        struct ethtool_eee eee_data;
3368        s32 ret_val;
3369
3370        if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3371                return -EOPNOTSUPP;
3372
3373        memset(&eee_data, 0, sizeof(struct ethtool_eee));
3374
3375        ret_val = ixgbe_get_eee(netdev, &eee_data);
3376        if (ret_val)
3377                return ret_val;
3378
3379        if (eee_data.eee_enabled && !edata->eee_enabled) {
3380                if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
3381                        e_err(drv, "Setting EEE tx-lpi is not supported\n");
3382                        return -EINVAL;
3383                }
3384
3385                if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
3386                        e_err(drv,
3387                              "Setting EEE Tx LPI timer is not supported\n");
3388                        return -EINVAL;
3389                }
3390
3391                if (eee_data.advertised != edata->advertised) {
3392                        e_err(drv,
3393                              "Setting EEE advertised speeds is not supported\n");
3394                        return -EINVAL;
3395                }
3396        }
3397
3398        if (eee_data.eee_enabled != edata->eee_enabled) {
3399                if (edata->eee_enabled) {
3400                        adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
3401                        hw->phy.eee_speeds_advertised =
3402                                                   hw->phy.eee_speeds_supported;
3403                } else {
3404                        adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
3405                        hw->phy.eee_speeds_advertised = 0;
3406                }
3407
3408                /* reset link */
3409                if (netif_running(netdev))
3410                        ixgbe_reinit_locked(adapter);
3411                else
3412                        ixgbe_reset(adapter);
3413        }
3414
3415        return 0;
3416}
3417
3418static u32 ixgbe_get_priv_flags(struct net_device *netdev)
3419{
3420        struct ixgbe_adapter *adapter = netdev_priv(netdev);
3421        u32 priv_flags = 0;
3422
3423        if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
3424                priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
3425
3426        return priv_flags;
3427}
3428
3429static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
3430{
3431        struct ixgbe_adapter *adapter = netdev_priv(netdev);
3432        unsigned int flags2 = adapter->flags2;
3433
3434        flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
3435        if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
3436                flags2 |= IXGBE_FLAG2_RX_LEGACY;
3437
3438        if (flags2 != adapter->flags2) {
3439                adapter->flags2 = flags2;
3440
3441                /* reset interface to repopulate queues */
3442                if (netif_running(netdev))
3443                        ixgbe_reinit_locked(adapter);
3444        }
3445
3446        return 0;
3447}
3448
/* ethtool operations supported by the ixgbe PF driver */
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.set_eeprom             = ixgbe_set_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_rxnfc              = ixgbe_get_rxnfc,
	.set_rxnfc              = ixgbe_set_rxnfc,
	.get_rxfh_indir_size    = ixgbe_rss_indir_size,
	.get_rxfh_key_size      = ixgbe_get_rxfh_key_size,
	.get_rxfh               = ixgbe_get_rxfh,
	.set_rxfh               = ixgbe_set_rxfh,
	.get_eee                = ixgbe_get_eee,
	.set_eee                = ixgbe_set_eee,
	.get_channels           = ixgbe_get_channels,
	.set_channels           = ixgbe_set_channels,
	.get_priv_flags         = ixgbe_get_priv_flags,
	.set_priv_flags         = ixgbe_set_priv_flags,
	.get_ts_info            = ixgbe_get_ts_info,
	.get_module_info        = ixgbe_get_module_info,
	.get_module_eeprom      = ixgbe_get_module_eeprom,
	.get_link_ksettings     = ixgbe_get_link_ksettings,
	.set_link_ksettings     = ixgbe_set_link_ksettings,
};
3491
3492void ixgbe_set_ethtool_ops(struct net_device *netdev)
3493{
3494        netdev->ethtool_ops = &ixgbe_ethtool_ops;
3495}
3496