linux/drivers/net/ethernet/intel/e1000e/ethtool.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
   3
   4/* ethtool support for e1000 */
   5
   6#include <linux/netdevice.h>
   7#include <linux/interrupt.h>
   8#include <linux/ethtool.h>
   9#include <linux/pci.h>
  10#include <linux/slab.h>
  11#include <linux/delay.h>
  12#include <linux/vmalloc.h>
  13#include <linux/pm_runtime.h>
  14
  15#include "e1000.h"
  16
  17enum { NETDEV_STATS, E1000_STATS };
  18
  19struct e1000_stats {
  20        char stat_string[ETH_GSTRING_LEN];
  21        int type;
  22        int sizeof_stat;
  23        int stat_offset;
  24};
  25
  26static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
  27#define E1000E_PRIV_FLAGS_S0IX_ENABLED  BIT(0)
  28        "s0ix-enabled",
  29};
  30
  31#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
  32
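/* "s0ix-enabled" is exposed through ethtool's private-flags interface and
 * can be queried or toggled from userspace, for example (the interface
 * name is illustrative):
 *
 *	ethtool --show-priv-flags eth0
 *	ethtool --set-priv-flags eth0 s0ix-enabled on
 */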
  33#define E1000_STAT(str, m) { \
  34                .stat_string = str, \
  35                .type = E1000_STATS, \
  36                .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
  37                .stat_offset = offsetof(struct e1000_adapter, m) }
  38#define E1000_NETDEV_STAT(str, m) { \
  39                .stat_string = str, \
  40                .type = NETDEV_STATS, \
  41                .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
  42                .stat_offset = offsetof(struct rtnl_link_stats64, m) }
  43
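/* Each entry below records whether a counter lives in struct e1000_adapter
 * or in the netdev's rtnl_link_stats64, together with its size and byte
 * offset, so the ethtool statistics callback can locate and copy every
 * counter generically.  Userspace reads the whole table with, for example
 * (the interface name is illustrative):
 *
 *	ethtool -S eth0
 */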
  44static const struct e1000_stats e1000_gstrings_stats[] = {
  45        E1000_STAT("rx_packets", stats.gprc),
  46        E1000_STAT("tx_packets", stats.gptc),
  47        E1000_STAT("rx_bytes", stats.gorc),
  48        E1000_STAT("tx_bytes", stats.gotc),
  49        E1000_STAT("rx_broadcast", stats.bprc),
  50        E1000_STAT("tx_broadcast", stats.bptc),
  51        E1000_STAT("rx_multicast", stats.mprc),
  52        E1000_STAT("tx_multicast", stats.mptc),
  53        E1000_NETDEV_STAT("rx_errors", rx_errors),
  54        E1000_NETDEV_STAT("tx_errors", tx_errors),
  55        E1000_NETDEV_STAT("tx_dropped", tx_dropped),
  56        E1000_STAT("multicast", stats.mprc),
  57        E1000_STAT("collisions", stats.colc),
  58        E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
  59        E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
  60        E1000_STAT("rx_crc_errors", stats.crcerrs),
  61        E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
  62        E1000_STAT("rx_no_buffer_count", stats.rnbc),
  63        E1000_STAT("rx_missed_errors", stats.mpc),
  64        E1000_STAT("tx_aborted_errors", stats.ecol),
  65        E1000_STAT("tx_carrier_errors", stats.tncrs),
  66        E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
  67        E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
  68        E1000_STAT("tx_window_errors", stats.latecol),
  69        E1000_STAT("tx_abort_late_coll", stats.latecol),
  70        E1000_STAT("tx_deferred_ok", stats.dc),
  71        E1000_STAT("tx_single_coll_ok", stats.scc),
  72        E1000_STAT("tx_multi_coll_ok", stats.mcc),
  73        E1000_STAT("tx_timeout_count", tx_timeout_count),
  74        E1000_STAT("tx_restart_queue", restart_queue),
  75        E1000_STAT("rx_long_length_errors", stats.roc),
  76        E1000_STAT("rx_short_length_errors", stats.ruc),
  77        E1000_STAT("rx_align_errors", stats.algnerrc),
  78        E1000_STAT("tx_tcp_seg_good", stats.tsctc),
  79        E1000_STAT("tx_tcp_seg_failed", stats.tsctfc),
  80        E1000_STAT("rx_flow_control_xon", stats.xonrxc),
  81        E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
  82        E1000_STAT("tx_flow_control_xon", stats.xontxc),
  83        E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
  84        E1000_STAT("rx_csum_offload_good", hw_csum_good),
  85        E1000_STAT("rx_csum_offload_errors", hw_csum_err),
  86        E1000_STAT("rx_header_split", rx_hdr_split),
  87        E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
  88        E1000_STAT("tx_smbus", stats.mgptc),
  89        E1000_STAT("rx_smbus", stats.mgprc),
  90        E1000_STAT("dropped_smbus", stats.mgpdc),
  91        E1000_STAT("rx_dma_failed", rx_dma_failed),
  92        E1000_STAT("tx_dma_failed", tx_dma_failed),
  93        E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
  94        E1000_STAT("uncorr_ecc_errors", uncorr_errors),
  95        E1000_STAT("corr_ecc_errors", corr_errors),
  96        E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
  97        E1000_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
  98};
  99
 100#define E1000_GLOBAL_STATS_LEN  ARRAY_SIZE(e1000_gstrings_stats)
 101#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN)
 102static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
 103        "Register test  (offline)", "Eeprom test    (offline)",
 104        "Interrupt test (offline)", "Loopback test  (offline)",
 105        "Link test   (on/offline)"
 106};
 107
 108#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
 109
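/* Report the current link settings (speed, duplex, autoneg and MDI-X
 * state) to the ethtool core; this backs the plain settings query, for
 * example (the interface name is illustrative):
 *
 *	ethtool eth0
 */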
 110static int e1000_get_link_ksettings(struct net_device *netdev,
 111                                    struct ethtool_link_ksettings *cmd)
 112{
 113        struct e1000_adapter *adapter = netdev_priv(netdev);
 114        struct e1000_hw *hw = &adapter->hw;
 115        u32 speed, supported, advertising;
 116
 117        if (hw->phy.media_type == e1000_media_type_copper) {
 118                supported = (SUPPORTED_10baseT_Half |
 119                             SUPPORTED_10baseT_Full |
 120                             SUPPORTED_100baseT_Half |
 121                             SUPPORTED_100baseT_Full |
 122                             SUPPORTED_1000baseT_Full |
 123                             SUPPORTED_Autoneg |
 124                             SUPPORTED_TP);
 125                if (hw->phy.type == e1000_phy_ife)
 126                        supported &= ~SUPPORTED_1000baseT_Full;
 127                advertising = ADVERTISED_TP;
 128
 129                if (hw->mac.autoneg == 1) {
 130                        advertising |= ADVERTISED_Autoneg;
 131                        /* the e1000 autoneg seems to match ethtool nicely */
 132                        advertising |= hw->phy.autoneg_advertised;
 133                }
 134
 135                cmd->base.port = PORT_TP;
 136                cmd->base.phy_address = hw->phy.addr;
 137        } else {
 138                supported   = (SUPPORTED_1000baseT_Full |
 139                               SUPPORTED_FIBRE |
 140                               SUPPORTED_Autoneg);
 141
 142                advertising = (ADVERTISED_1000baseT_Full |
 143                               ADVERTISED_FIBRE |
 144                               ADVERTISED_Autoneg);
 145
 146                cmd->base.port = PORT_FIBRE;
 147        }
 148
 149        speed = SPEED_UNKNOWN;
 150        cmd->base.duplex = DUPLEX_UNKNOWN;
 151
 152        if (netif_running(netdev)) {
 153                if (netif_carrier_ok(netdev)) {
 154                        speed = adapter->link_speed;
 155                        cmd->base.duplex = adapter->link_duplex - 1;
 156                }
 157        } else if (!pm_runtime_suspended(netdev->dev.parent)) {
 158                u32 status = er32(STATUS);
 159
 160                if (status & E1000_STATUS_LU) {
 161                        if (status & E1000_STATUS_SPEED_1000)
 162                                speed = SPEED_1000;
 163                        else if (status & E1000_STATUS_SPEED_100)
 164                                speed = SPEED_100;
 165                        else
 166                                speed = SPEED_10;
 167
 168                        if (status & E1000_STATUS_FD)
 169                                cmd->base.duplex = DUPLEX_FULL;
 170                        else
 171                                cmd->base.duplex = DUPLEX_HALF;
 172                }
 173        }
 174
 175        cmd->base.speed = speed;
 176        cmd->base.autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
 177                         hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
 178
  179        /* MDI-X => 2; MDI => 1; Invalid => 0 */
 180        if ((hw->phy.media_type == e1000_media_type_copper) &&
 181            netif_carrier_ok(netdev))
 182                cmd->base.eth_tp_mdix = hw->phy.is_mdix ?
 183                        ETH_TP_MDI_X : ETH_TP_MDI;
 184        else
 185                cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
 186
 187        if (hw->phy.mdix == AUTO_ALL_MODES)
 188                cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
 189        else
 190                cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
 191
 192        if (hw->phy.media_type != e1000_media_type_copper)
 193                cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
 194
 195        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
 196                                                supported);
 197        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
 198                                                advertising);
 199
 200        return 0;
 201}
 202
 203static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
 204{
 205        struct e1000_mac_info *mac = &adapter->hw.mac;
 206
 207        mac->autoneg = 0;
 208
 209        /* Make sure dplx is at most 1 bit and lsb of speed is not set
 210         * for the switch() below to work
 211         */
 212        if ((spd & 1) || (dplx & ~1))
 213                goto err_inval;
 214
  215        /* Fiber NICs only allow 1000 Mbps Full duplex */
 216        if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
 217            (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) {
 218                goto err_inval;
 219        }
 220
 221        switch (spd + dplx) {
 222        case SPEED_10 + DUPLEX_HALF:
 223                mac->forced_speed_duplex = ADVERTISE_10_HALF;
 224                break;
 225        case SPEED_10 + DUPLEX_FULL:
 226                mac->forced_speed_duplex = ADVERTISE_10_FULL;
 227                break;
 228        case SPEED_100 + DUPLEX_HALF:
 229                mac->forced_speed_duplex = ADVERTISE_100_HALF;
 230                break;
 231        case SPEED_100 + DUPLEX_FULL:
 232                mac->forced_speed_duplex = ADVERTISE_100_FULL;
 233                break;
 234        case SPEED_1000 + DUPLEX_FULL:
 235                if (adapter->hw.phy.media_type == e1000_media_type_copper) {
 236                        mac->autoneg = 1;
 237                        adapter->hw.phy.autoneg_advertised =
 238                                ADVERTISE_1000_FULL;
 239                } else {
 240                        mac->forced_speed_duplex = ADVERTISE_1000_FULL;
 241                }
 242                break;
 243        case SPEED_1000 + DUPLEX_HALF:  /* not supported */
 244        default:
 245                goto err_inval;
 246        }
 247
 248        /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
 249        adapter->hw.phy.mdix = AUTO_ALL_MODES;
 250
 251        return 0;
 252
 253err_inval:
 254        e_err("Unsupported Speed/Duplex configuration\n");
 255        return -EINVAL;
 256}
 257
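/* Apply link settings requested from userspace, for example (the interface
 * name is illustrative; forcing speed/duplex requires autoneg off):
 *
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 *	ethtool -s eth0 autoneg on
 */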
 258static int e1000_set_link_ksettings(struct net_device *netdev,
 259                                    const struct ethtool_link_ksettings *cmd)
 260{
 261        struct e1000_adapter *adapter = netdev_priv(netdev);
 262        struct e1000_hw *hw = &adapter->hw;
 263        int ret_val = 0;
 264        u32 advertising;
 265
 266        ethtool_convert_link_mode_to_legacy_u32(&advertising,
 267                                                cmd->link_modes.advertising);
 268
 269        pm_runtime_get_sync(netdev->dev.parent);
 270
 271        /* When SoL/IDER sessions are active, autoneg/speed/duplex
 272         * cannot be changed
 273         */
 274        if (hw->phy.ops.check_reset_block &&
 275            hw->phy.ops.check_reset_block(hw)) {
 276                e_err("Cannot change link characteristics when SoL/IDER is active.\n");
 277                ret_val = -EINVAL;
 278                goto out;
 279        }
 280
 281        /* MDI setting is only allowed when autoneg enabled because
 282         * some hardware doesn't allow MDI setting when speed or
 283         * duplex is forced.
 284         */
 285        if (cmd->base.eth_tp_mdix_ctrl) {
 286                if (hw->phy.media_type != e1000_media_type_copper) {
 287                        ret_val = -EOPNOTSUPP;
 288                        goto out;
 289                }
 290
 291                if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
 292                    (cmd->base.autoneg != AUTONEG_ENABLE)) {
 293                        e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
 294                        ret_val = -EINVAL;
 295                        goto out;
 296                }
 297        }
 298
 299        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
 300                usleep_range(1000, 2000);
 301
 302        if (cmd->base.autoneg == AUTONEG_ENABLE) {
 303                hw->mac.autoneg = 1;
 304                if (hw->phy.media_type == e1000_media_type_fiber)
 305                        hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
 306                            ADVERTISED_FIBRE | ADVERTISED_Autoneg;
 307                else
 308                        hw->phy.autoneg_advertised = advertising |
 309                            ADVERTISED_TP | ADVERTISED_Autoneg;
 310                advertising = hw->phy.autoneg_advertised;
 311                if (adapter->fc_autoneg)
 312                        hw->fc.requested_mode = e1000_fc_default;
 313        } else {
 314                u32 speed = cmd->base.speed;
 315                /* calling this overrides forced MDI setting */
 316                if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
 317                        ret_val = -EINVAL;
 318                        goto out;
 319                }
 320        }
 321
 322        /* MDI-X => 2; MDI => 1; Auto => 3 */
 323        if (cmd->base.eth_tp_mdix_ctrl) {
 324                /* fix up the value for auto (3 => 0) as zero is mapped
 325                 * internally to auto
 326                 */
 327                if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
 328                        hw->phy.mdix = AUTO_ALL_MODES;
 329                else
 330                        hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
 331        }
 332
 333        /* reset the link */
 334        if (netif_running(adapter->netdev)) {
 335                e1000e_down(adapter, true);
 336                e1000e_up(adapter);
 337        } else {
 338                e1000e_reset(adapter);
 339        }
 340
 341out:
 342        pm_runtime_put_sync(netdev->dev.parent);
 343        clear_bit(__E1000_RESETTING, &adapter->state);
 344        return ret_val;
 345}
 346
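/* Flow control (pause frame) configuration is queried and changed with the
 * -a/-A ethtool options, for example (the interface name is illustrative):
 *
 *	ethtool -a eth0
 *	ethtool -A eth0 autoneg off rx on tx on
 */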
 347static void e1000_get_pauseparam(struct net_device *netdev,
 348                                 struct ethtool_pauseparam *pause)
 349{
 350        struct e1000_adapter *adapter = netdev_priv(netdev);
 351        struct e1000_hw *hw = &adapter->hw;
 352
 353        pause->autoneg =
 354            (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
 355
 356        if (hw->fc.current_mode == e1000_fc_rx_pause) {
 357                pause->rx_pause = 1;
 358        } else if (hw->fc.current_mode == e1000_fc_tx_pause) {
 359                pause->tx_pause = 1;
 360        } else if (hw->fc.current_mode == e1000_fc_full) {
 361                pause->rx_pause = 1;
 362                pause->tx_pause = 1;
 363        }
 364}
 365
 366static int e1000_set_pauseparam(struct net_device *netdev,
 367                                struct ethtool_pauseparam *pause)
 368{
 369        struct e1000_adapter *adapter = netdev_priv(netdev);
 370        struct e1000_hw *hw = &adapter->hw;
 371        int retval = 0;
 372
 373        adapter->fc_autoneg = pause->autoneg;
 374
 375        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
 376                usleep_range(1000, 2000);
 377
 378        pm_runtime_get_sync(netdev->dev.parent);
 379
 380        if (adapter->fc_autoneg == AUTONEG_ENABLE) {
 381                hw->fc.requested_mode = e1000_fc_default;
 382                if (netif_running(adapter->netdev)) {
 383                        e1000e_down(adapter, true);
 384                        e1000e_up(adapter);
 385                } else {
 386                        e1000e_reset(adapter);
 387                }
 388        } else {
 389                if (pause->rx_pause && pause->tx_pause)
 390                        hw->fc.requested_mode = e1000_fc_full;
 391                else if (pause->rx_pause && !pause->tx_pause)
 392                        hw->fc.requested_mode = e1000_fc_rx_pause;
 393                else if (!pause->rx_pause && pause->tx_pause)
 394                        hw->fc.requested_mode = e1000_fc_tx_pause;
 395                else if (!pause->rx_pause && !pause->tx_pause)
 396                        hw->fc.requested_mode = e1000_fc_none;
 397
 398                hw->fc.current_mode = hw->fc.requested_mode;
 399
 400                if (hw->phy.media_type == e1000_media_type_fiber) {
 401                        retval = hw->mac.ops.setup_link(hw);
 402                        /* implicit goto out */
 403                } else {
 404                        retval = e1000e_force_mac_fc(hw);
 405                        if (retval)
 406                                goto out;
 407                        e1000e_set_fc_watermarks(hw);
 408                }
 409        }
 410
 411out:
 412        pm_runtime_put_sync(netdev->dev.parent);
 413        clear_bit(__E1000_RESETTING, &adapter->state);
 414        return retval;
 415}
 416
 417static u32 e1000_get_msglevel(struct net_device *netdev)
 418{
 419        struct e1000_adapter *adapter = netdev_priv(netdev);
 420        return adapter->msg_enable;
 421}
 422
 423static void e1000_set_msglevel(struct net_device *netdev, u32 data)
 424{
 425        struct e1000_adapter *adapter = netdev_priv(netdev);
 426        adapter->msg_enable = data;
 427}
 428
 429static int e1000_get_regs_len(struct net_device __always_unused *netdev)
 430{
 431#define E1000_REGS_LEN 32       /* overestimate */
 432        return E1000_REGS_LEN * sizeof(u32);
 433}
 434
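/* Dump a small fixed set of MAC and PHY registers for "ethtool -d".  The
 * version word encodes a format revision, the PCI revision ID and the
 * device ID so userspace can interpret the buffer.  Example (the interface
 * name is illustrative):
 *
 *	ethtool -d eth0
 */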
 435static void e1000_get_regs(struct net_device *netdev,
 436                           struct ethtool_regs *regs, void *p)
 437{
 438        struct e1000_adapter *adapter = netdev_priv(netdev);
 439        struct e1000_hw *hw = &adapter->hw;
 440        u32 *regs_buff = p;
 441        u16 phy_data;
 442
 443        pm_runtime_get_sync(netdev->dev.parent);
 444
 445        memset(p, 0, E1000_REGS_LEN * sizeof(u32));
 446
 447        regs->version = (1u << 24) |
 448                        (adapter->pdev->revision << 16) |
 449                        adapter->pdev->device;
 450
 451        regs_buff[0] = er32(CTRL);
 452        regs_buff[1] = er32(STATUS);
 453
 454        regs_buff[2] = er32(RCTL);
 455        regs_buff[3] = er32(RDLEN(0));
 456        regs_buff[4] = er32(RDH(0));
 457        regs_buff[5] = er32(RDT(0));
 458        regs_buff[6] = er32(RDTR);
 459
 460        regs_buff[7] = er32(TCTL);
 461        regs_buff[8] = er32(TDLEN(0));
 462        regs_buff[9] = er32(TDH(0));
 463        regs_buff[10] = er32(TDT(0));
 464        regs_buff[11] = er32(TIDV);
 465
 466        regs_buff[12] = adapter->hw.phy.type;   /* PHY type (IGP=1, M88=0) */
 467
 468        /* ethtool doesn't use anything past this point, so all this
 469         * code is likely legacy junk for apps that may or may not exist
 470         */
 471        if (hw->phy.type == e1000_phy_m88) {
 472                e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
 473                regs_buff[13] = (u32)phy_data; /* cable length */
 474                regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 475                regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 476                regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 477                e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
 478                regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
 479                regs_buff[18] = regs_buff[13]; /* cable polarity */
 480                regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 481                regs_buff[20] = regs_buff[17]; /* polarity correction */
 482                /* phy receive errors */
 483                regs_buff[22] = adapter->phy_stats.receive_errors;
 484                regs_buff[23] = regs_buff[13]; /* mdix mode */
 485        }
 486        regs_buff[21] = 0;      /* was idle_errors */
 487        e1e_rphy(hw, MII_STAT1000, &phy_data);
 488        regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
 489        regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
 490
 491        pm_runtime_put_sync(netdev->dev.parent);
 492}
 493
 494static int e1000_get_eeprom_len(struct net_device *netdev)
 495{
 496        struct e1000_adapter *adapter = netdev_priv(netdev);
 497        return adapter->hw.nvm.word_size * 2;
 498}
 499
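/* Read a byte range of the NVM/EEPROM for "ethtool -e".  The offset and
 * length are byte based while the NVM is word addressable, hence the
 * first/last word arithmetic below.  Example (the interface name is
 * illustrative):
 *
 *	ethtool -e eth0 offset 0 length 32
 */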
 500static int e1000_get_eeprom(struct net_device *netdev,
 501                            struct ethtool_eeprom *eeprom, u8 *bytes)
 502{
 503        struct e1000_adapter *adapter = netdev_priv(netdev);
 504        struct e1000_hw *hw = &adapter->hw;
 505        u16 *eeprom_buff;
 506        int first_word;
 507        int last_word;
 508        int ret_val = 0;
 509        u16 i;
 510
 511        if (eeprom->len == 0)
 512                return -EINVAL;
 513
 514        eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16);
 515
 516        first_word = eeprom->offset >> 1;
 517        last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 518
 519        eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
 520                                    GFP_KERNEL);
 521        if (!eeprom_buff)
 522                return -ENOMEM;
 523
 524        pm_runtime_get_sync(netdev->dev.parent);
 525
 526        if (hw->nvm.type == e1000_nvm_eeprom_spi) {
 527                ret_val = e1000_read_nvm(hw, first_word,
 528                                         last_word - first_word + 1,
 529                                         eeprom_buff);
 530        } else {
 531                for (i = 0; i < last_word - first_word + 1; i++) {
 532                        ret_val = e1000_read_nvm(hw, first_word + i, 1,
 533                                                 &eeprom_buff[i]);
 534                        if (ret_val)
 535                                break;
 536                }
 537        }
 538
 539        pm_runtime_put_sync(netdev->dev.parent);
 540
 541        if (ret_val) {
 542                /* a read error occurred, throw away the result */
 543                memset(eeprom_buff, 0xff, sizeof(u16) *
 544                       (last_word - first_word + 1));
 545        } else {
 546                /* Device's eeprom is always little-endian, word addressable */
 547                for (i = 0; i < last_word - first_word + 1; i++)
 548                        le16_to_cpus(&eeprom_buff[i]);
 549        }
 550
 551        memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
 552        kfree(eeprom_buff);
 553
 554        return ret_val;
 555}
 556
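/* Write a byte range of the NVM/EEPROM for "ethtool -E".  Userspace must
 * supply the magic value checked below (PCI vendor ID in the low word,
 * device ID in the high word) to guard against accidental writes.
 * Example for an 8086:10d3 device (all values are illustrative):
 *
 *	ethtool -E eth0 magic 0x10d38086 offset 0x10 value 0xff
 */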
 557static int e1000_set_eeprom(struct net_device *netdev,
 558                            struct ethtool_eeprom *eeprom, u8 *bytes)
 559{
 560        struct e1000_adapter *adapter = netdev_priv(netdev);
 561        struct e1000_hw *hw = &adapter->hw;
 562        u16 *eeprom_buff;
 563        void *ptr;
 564        int max_len;
 565        int first_word;
 566        int last_word;
 567        int ret_val = 0;
 568        u16 i;
 569
 570        if (eeprom->len == 0)
 571                return -EOPNOTSUPP;
 572
 573        if (eeprom->magic !=
 574            (adapter->pdev->vendor | (adapter->pdev->device << 16)))
 575                return -EFAULT;
 576
 577        if (adapter->flags & FLAG_READ_ONLY_NVM)
 578                return -EINVAL;
 579
 580        max_len = hw->nvm.word_size * 2;
 581
 582        first_word = eeprom->offset >> 1;
 583        last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 584        eeprom_buff = kmalloc(max_len, GFP_KERNEL);
 585        if (!eeprom_buff)
 586                return -ENOMEM;
 587
 588        ptr = (void *)eeprom_buff;
 589
 590        pm_runtime_get_sync(netdev->dev.parent);
 591
 592        if (eeprom->offset & 1) {
 593                /* need read/modify/write of first changed EEPROM word */
 594                /* only the second byte of the word is being modified */
 595                ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]);
 596                ptr++;
 597        }
 598        if (((eeprom->offset + eeprom->len) & 1) && (!ret_val))
 599                /* need read/modify/write of last changed EEPROM word */
 600                /* only the first byte of the word is being modified */
 601                ret_val = e1000_read_nvm(hw, last_word, 1,
 602                                         &eeprom_buff[last_word - first_word]);
 603
 604        if (ret_val)
 605                goto out;
 606
 607        /* Device's eeprom is always little-endian, word addressable */
 608        for (i = 0; i < last_word - first_word + 1; i++)
 609                le16_to_cpus(&eeprom_buff[i]);
 610
 611        memcpy(ptr, bytes, eeprom->len);
 612
 613        for (i = 0; i < last_word - first_word + 1; i++)
 614                cpu_to_le16s(&eeprom_buff[i]);
 615
 616        ret_val = e1000_write_nvm(hw, first_word,
 617                                  last_word - first_word + 1, eeprom_buff);
 618
 619        if (ret_val)
 620                goto out;
 621
 622        /* Update the checksum over the first part of the EEPROM if needed
 623         * and flush shadow RAM for applicable controllers
 624         */
 625        if ((first_word <= NVM_CHECKSUM_REG) ||
 626            (hw->mac.type == e1000_82583) ||
 627            (hw->mac.type == e1000_82574) ||
 628            (hw->mac.type == e1000_82573))
 629                ret_val = e1000e_update_nvm_checksum(hw);
 630
 631out:
 632        pm_runtime_put_sync(netdev->dev.parent);
 633        kfree(eeprom_buff);
 634        return ret_val;
 635}
 636
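/* Report the driver name, firmware (EEPROM image) version and PCI bus
 * address for "ethtool -i", for example (the interface name is
 * illustrative):
 *
 *	ethtool -i eth0
 */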
 637static void e1000_get_drvinfo(struct net_device *netdev,
 638                              struct ethtool_drvinfo *drvinfo)
 639{
 640        struct e1000_adapter *adapter = netdev_priv(netdev);
 641
 642        strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
 643
 644        /* EEPROM image version # is reported as firmware version # for
 645         * PCI-E controllers
 646         */
 647        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
 648                 "%d.%d-%d",
 649                 (adapter->eeprom_vers & 0xF000) >> 12,
 650                 (adapter->eeprom_vers & 0x0FF0) >> 4,
 651                 (adapter->eeprom_vers & 0x000F));
 652
 653        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 654                sizeof(drvinfo->bus_info));
 655}
 656
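/* Descriptor ring sizes are reported and resized via "ethtool -g"/"-G".
 * Requested counts are clamped to the hardware limits and rounded to the
 * required descriptor multiple in the set path below.  Example (interface
 * name and counts are illustrative):
 *
 *	ethtool -g eth0
 *	ethtool -G eth0 rx 1024 tx 1024
 */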
 657static void e1000_get_ringparam(struct net_device *netdev,
 658                                struct ethtool_ringparam *ring)
 659{
 660        struct e1000_adapter *adapter = netdev_priv(netdev);
 661
 662        ring->rx_max_pending = E1000_MAX_RXD;
 663        ring->tx_max_pending = E1000_MAX_TXD;
 664        ring->rx_pending = adapter->rx_ring_count;
 665        ring->tx_pending = adapter->tx_ring_count;
 666}
 667
 668static int e1000_set_ringparam(struct net_device *netdev,
 669                               struct ethtool_ringparam *ring)
 670{
 671        struct e1000_adapter *adapter = netdev_priv(netdev);
 672        struct e1000_ring *temp_tx = NULL, *temp_rx = NULL;
 673        int err = 0, size = sizeof(struct e1000_ring);
 674        bool set_tx = false, set_rx = false;
 675        u16 new_rx_count, new_tx_count;
 676
 677        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 678                return -EINVAL;
 679
 680        new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD,
 681                               E1000_MAX_RXD);
 682        new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
 683
 684        new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD,
 685                               E1000_MAX_TXD);
 686        new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
 687
 688        if ((new_tx_count == adapter->tx_ring_count) &&
 689            (new_rx_count == adapter->rx_ring_count))
 690                /* nothing to do */
 691                return 0;
 692
 693        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
 694                usleep_range(1000, 2000);
 695
 696        if (!netif_running(adapter->netdev)) {
 697                /* Set counts now and allocate resources during open() */
 698                adapter->tx_ring->count = new_tx_count;
 699                adapter->rx_ring->count = new_rx_count;
 700                adapter->tx_ring_count = new_tx_count;
 701                adapter->rx_ring_count = new_rx_count;
 702                goto clear_reset;
 703        }
 704
 705        set_tx = (new_tx_count != adapter->tx_ring_count);
 706        set_rx = (new_rx_count != adapter->rx_ring_count);
 707
 708        /* Allocate temporary storage for ring updates */
 709        if (set_tx) {
 710                temp_tx = vmalloc(size);
 711                if (!temp_tx) {
 712                        err = -ENOMEM;
 713                        goto free_temp;
 714                }
 715        }
 716        if (set_rx) {
 717                temp_rx = vmalloc(size);
 718                if (!temp_rx) {
 719                        err = -ENOMEM;
 720                        goto free_temp;
 721                }
 722        }
 723
 724        pm_runtime_get_sync(netdev->dev.parent);
 725
 726        e1000e_down(adapter, true);
 727
 728        /* We can't just free everything and then setup again, because the
 729         * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
 730         * structs.  First, attempt to allocate new resources...
 731         */
 732        if (set_tx) {
 733                memcpy(temp_tx, adapter->tx_ring, size);
 734                temp_tx->count = new_tx_count;
 735                err = e1000e_setup_tx_resources(temp_tx);
 736                if (err)
 737                        goto err_setup;
 738        }
 739        if (set_rx) {
 740                memcpy(temp_rx, adapter->rx_ring, size);
 741                temp_rx->count = new_rx_count;
 742                err = e1000e_setup_rx_resources(temp_rx);
 743                if (err)
 744                        goto err_setup_rx;
 745        }
 746
 747        /* ...then free the old resources and copy back any new ring data */
 748        if (set_tx) {
 749                e1000e_free_tx_resources(adapter->tx_ring);
 750                memcpy(adapter->tx_ring, temp_tx, size);
 751                adapter->tx_ring_count = new_tx_count;
 752        }
 753        if (set_rx) {
 754                e1000e_free_rx_resources(adapter->rx_ring);
 755                memcpy(adapter->rx_ring, temp_rx, size);
 756                adapter->rx_ring_count = new_rx_count;
 757        }
 758
 759err_setup_rx:
 760        if (err && set_tx)
 761                e1000e_free_tx_resources(temp_tx);
 762err_setup:
 763        e1000e_up(adapter);
 764        pm_runtime_put_sync(netdev->dev.parent);
 765free_temp:
 766        vfree(temp_tx);
 767        vfree(temp_rx);
 768clear_reset:
 769        clear_bit(__E1000_RESETTING, &adapter->state);
 770        return err;
 771}
 772
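/* Register self-test helpers: write known bit patterns (or a set/check
 * value) through a mask, read the register back, and on a mismatch report
 * the failure and store the offending register offset in *data.
 */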
 773static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
 774                             int reg, int offset, u32 mask, u32 write)
 775{
 776        u32 pat, val;
 777        static const u32 test[] = {
 778                0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
 779        };
 780        for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
 781                E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
 782                                      (test[pat] & write));
 783                val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
 784                if (val != (test[pat] & write & mask)) {
 785                        e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
 786                              reg + (offset << 2), val,
 787                              (test[pat] & write & mask));
 788                        *data = reg;
 789                        return true;
 790                }
 791        }
 792        return false;
 793}
 794
 795static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
 796                              int reg, u32 mask, u32 write)
 797{
 798        u32 val;
 799
 800        __ew32(&adapter->hw, reg, write & mask);
 801        val = __er32(&adapter->hw, reg);
 802        if ((write & mask) != (val & mask)) {
 803                e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
 804                      reg, (val & mask), (write & mask));
 805                *data = reg;
 806                return true;
 807        }
 808        return false;
 809}
 810
 811#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write)                       \
 812        do {                                                                   \
 813                if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
 814                        return 1;                                              \
 815        } while (0)
 816#define REG_PATTERN_TEST(reg, mask, write)                                     \
 817        REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
 818
 819#define REG_SET_AND_CHECK(reg, mask, write)                                    \
 820        do {                                                                   \
 821                if (reg_set_and_check(adapter, data, reg, mask, write))        \
 822                        return 1;                                              \
 823        } while (0)
 824
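/* The register, EEPROM, interrupt and loopback tests below make up the
 * offline portion of the ethtool self-test, typically run with (the
 * interface name is illustrative):
 *
 *	ethtool -t eth0 offline
 */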
 825static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 826{
 827        struct e1000_hw *hw = &adapter->hw;
 828        struct e1000_mac_info *mac = &adapter->hw.mac;
 829        u32 value;
 830        u32 before;
 831        u32 after;
 832        u32 i;
 833        u32 toggle;
 834        u32 mask;
 835        u32 wlock_mac = 0;
 836
 837        /* The status register is Read Only, so a write should fail.
 838         * Some bits that get toggled are ignored.  There are several bits
 839         * on newer hardware that are r/w.
 840         */
 841        switch (mac->type) {
 842        case e1000_82571:
 843        case e1000_82572:
 844        case e1000_80003es2lan:
 845                toggle = 0x7FFFF3FF;
 846                break;
 847        default:
 848                toggle = 0x7FFFF033;
 849                break;
 850        }
 851
 852        before = er32(STATUS);
 853        value = (er32(STATUS) & toggle);
 854        ew32(STATUS, toggle);
 855        after = er32(STATUS) & toggle;
 856        if (value != after) {
 857                e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n",
 858                      after, value);
 859                *data = 1;
 860                return 1;
 861        }
 862        /* restore previous status */
 863        ew32(STATUS, before);
 864
 865        if (!(adapter->flags & FLAG_IS_ICH)) {
 866                REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
 867                REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
 868                REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
 869                REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
 870        }
 871
 872        REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
 873        REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
 874        REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
 875        REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
 876        REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
 877        REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
 878        REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
 879        REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
 880        REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
 881        REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);
 882
 883        REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
 884
 885        before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
 886        REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
 887        REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
 888
 889        REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
 890        REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
 891        if (!(adapter->flags & FLAG_IS_ICH))
 892                REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
 893        REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
 894        REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
 895        mask = 0x8003FFFF;
 896        switch (mac->type) {
 897        case e1000_ich10lan:
 898        case e1000_pchlan:
 899        case e1000_pch2lan:
 900        case e1000_pch_lpt:
 901        case e1000_pch_spt:
 902        case e1000_pch_cnp:
 903        case e1000_pch_tgp:
 904        case e1000_pch_adp:
 905        case e1000_pch_mtp:
 906        case e1000_pch_lnp:
 907                mask |= BIT(18);
 908                break;
 909        default:
 910                break;
 911        }
 912
 913        if (mac->type >= e1000_pch_lpt)
 914                wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
 915                    E1000_FWSM_WLOCK_MAC_SHIFT;
 916
 917        for (i = 0; i < mac->rar_entry_count; i++) {
 918                if (mac->type >= e1000_pch_lpt) {
 919                        /* Cannot test write-protected SHRAL[n] registers */
 920                        if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
 921                                continue;
 922
 923                        /* SHRAH[9] different than the others */
 924                        if (i == 10)
 925                                mask |= BIT(30);
 926                        else
 927                                mask &= ~BIT(30);
 928                }
 929                if (mac->type == e1000_pch2lan) {
 930                        /* SHRAH[0,1,2] different than previous */
 931                        if (i == 1)
 932                                mask &= 0xFFF4FFFF;
 933                        /* SHRAH[3] different than SHRAH[0,1,2] */
 934                        if (i == 4)
 935                                mask |= BIT(30);
 936                        /* RAR[1-6] owned by management engine - skipping */
 937                        if (i > 0)
 938                                i += 6;
 939                }
 940
 941                REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
 942                                       0xFFFFFFFF);
 943                /* reset index to actual value */
 944                if ((mac->type == e1000_pch2lan) && (i > 6))
 945                        i -= 6;
 946        }
 947
 948        for (i = 0; i < mac->mta_reg_count; i++)
 949                REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
 950
 951        *data = 0;
 952
 953        return 0;
 954}
 955
 956static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
 957{
 958        u16 temp;
 959        u16 checksum = 0;
 960        u16 i;
 961
 962        *data = 0;
 963        /* Read and add up the contents of the EEPROM */
 964        for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
 965                if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
 966                        *data = 1;
 967                        return *data;
 968                }
 969                checksum += temp;
 970        }
 971
 972        /* If Checksum is not Correct return error else test passed */
 973        if ((checksum != (u16)NVM_SUM) && !(*data))
 974                *data = 2;
 975
 976        return *data;
 977}
 978
 979static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
 980{
 981        struct net_device *netdev = (struct net_device *)data;
 982        struct e1000_adapter *adapter = netdev_priv(netdev);
 983        struct e1000_hw *hw = &adapter->hw;
 984
 985        adapter->test_icr |= er32(ICR);
 986
 987        return IRQ_HANDLED;
 988}
 989
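/* Interrupt self-test: temporarily fall back to a legacy interrupt, hook
 * the minimal handler above (which latches ICR into adapter->test_icr),
 * then mask/unmask each cause bit and fire it through ICS to verify that
 * interrupts are, or are not, delivered as expected.
 */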
 990static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 991{
 992        struct net_device *netdev = adapter->netdev;
 993        struct e1000_hw *hw = &adapter->hw;
 994        u32 mask;
 995        u32 shared_int = 1;
 996        u32 irq = adapter->pdev->irq;
 997        int i;
 998        int ret_val = 0;
 999        int int_mode = E1000E_INT_MODE_LEGACY;
1000
1001        *data = 0;
1002
1003        /* NOTE: we don't test MSI/MSI-X interrupts here, yet */
1004        if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
1005                int_mode = adapter->int_mode;
1006                e1000e_reset_interrupt_capability(adapter);
1007                adapter->int_mode = E1000E_INT_MODE_LEGACY;
1008                e1000e_set_interrupt_capability(adapter);
1009        }
1010        /* Hook up test interrupt handler just for this test */
1011        if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
1012                         netdev)) {
1013                shared_int = 0;
1014        } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name,
1015                               netdev)) {
1016                *data = 1;
1017                ret_val = -1;
1018                goto out;
1019        }
1020        e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
1021
1022        /* Disable all the interrupts */
1023        ew32(IMC, 0xFFFFFFFF);
1024        e1e_flush();
1025        usleep_range(10000, 11000);
1026
1027        /* Test each interrupt */
1028        for (i = 0; i < 10; i++) {
1029                /* Interrupt to test */
1030                mask = BIT(i);
1031
1032                if (adapter->flags & FLAG_IS_ICH) {
1033                        switch (mask) {
1034                        case E1000_ICR_RXSEQ:
1035                                continue;
1036                        case 0x00000100:
1037                                if (adapter->hw.mac.type == e1000_ich8lan ||
1038                                    adapter->hw.mac.type == e1000_ich9lan)
1039                                        continue;
1040                                break;
1041                        default:
1042                                break;
1043                        }
1044                }
1045
1046                if (!shared_int) {
1047                        /* Disable the interrupt to be reported in
1048                         * the cause register and then force the same
1049                         * interrupt and see if one gets posted.  If
1050                         * an interrupt was posted to the bus, the
1051                         * test failed.
1052                         */
1053                        adapter->test_icr = 0;
1054                        ew32(IMC, mask);
1055                        ew32(ICS, mask);
1056                        e1e_flush();
1057                        usleep_range(10000, 11000);
1058
1059                        if (adapter->test_icr & mask) {
1060                                *data = 3;
1061                                break;
1062                        }
1063                }
1064
1065                /* Enable the interrupt to be reported in
1066                 * the cause register and then force the same
1067                 * interrupt and see if one gets posted.  If
1068                 * an interrupt was not posted to the bus, the
1069                 * test failed.
1070                 */
1071                adapter->test_icr = 0;
1072                ew32(IMS, mask);
1073                ew32(ICS, mask);
1074                e1e_flush();
1075                usleep_range(10000, 11000);
1076
1077                if (!(adapter->test_icr & mask)) {
1078                        *data = 4;
1079                        break;
1080                }
1081
1082                if (!shared_int) {
1083                        /* Disable the other interrupts to be reported in
1084                         * the cause register and then force the other
1085                         * interrupts and see if any get posted.  If
1086                         * an interrupt was posted to the bus, the
1087                         * test failed.
1088                         */
1089                        adapter->test_icr = 0;
1090                        ew32(IMC, ~mask & 0x00007FFF);
1091                        ew32(ICS, ~mask & 0x00007FFF);
1092                        e1e_flush();
1093                        usleep_range(10000, 11000);
1094
1095                        if (adapter->test_icr) {
1096                                *data = 5;
1097                                break;
1098                        }
1099                }
1100        }
1101
1102        /* Disable all the interrupts */
1103        ew32(IMC, 0xFFFFFFFF);
1104        e1e_flush();
1105        usleep_range(10000, 11000);
1106
1107        /* Unhook test interrupt handler */
1108        free_irq(irq, netdev);
1109
1110out:
1111        if (int_mode == E1000E_INT_MODE_MSIX) {
1112                e1000e_reset_interrupt_capability(adapter);
1113                adapter->int_mode = int_mode;
1114                e1000e_set_interrupt_capability(adapter);
1115        }
1116
1117        return ret_val;
1118}
1119
1120static void e1000_free_desc_rings(struct e1000_adapter *adapter)
1121{
1122        struct e1000_ring *tx_ring = &adapter->test_tx_ring;
1123        struct e1000_ring *rx_ring = &adapter->test_rx_ring;
1124        struct pci_dev *pdev = adapter->pdev;
1125        struct e1000_buffer *buffer_info;
1126        int i;
1127
1128        if (tx_ring->desc && tx_ring->buffer_info) {
1129                for (i = 0; i < tx_ring->count; i++) {
1130                        buffer_info = &tx_ring->buffer_info[i];
1131
1132                        if (buffer_info->dma)
1133                                dma_unmap_single(&pdev->dev,
1134                                                 buffer_info->dma,
1135                                                 buffer_info->length,
1136                                                 DMA_TO_DEVICE);
1137                        dev_kfree_skb(buffer_info->skb);
1138                }
1139        }
1140
1141        if (rx_ring->desc && rx_ring->buffer_info) {
1142                for (i = 0; i < rx_ring->count; i++) {
1143                        buffer_info = &rx_ring->buffer_info[i];
1144
1145                        if (buffer_info->dma)
1146                                dma_unmap_single(&pdev->dev,
1147                                                 buffer_info->dma,
1148                                                 2048, DMA_FROM_DEVICE);
1149                        dev_kfree_skb(buffer_info->skb);
1150                }
1151        }
1152
1153        if (tx_ring->desc) {
1154                dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1155                                  tx_ring->dma);
1156                tx_ring->desc = NULL;
1157        }
1158        if (rx_ring->desc) {
1159                dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1160                                  rx_ring->dma);
1161                rx_ring->desc = NULL;
1162        }
1163
1164        kfree(tx_ring->buffer_info);
1165        tx_ring->buffer_info = NULL;
1166        kfree(rx_ring->buffer_info);
1167        rx_ring->buffer_info = NULL;
1168}
1169
1170static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1171{
1172        struct e1000_ring *tx_ring = &adapter->test_tx_ring;
1173        struct e1000_ring *rx_ring = &adapter->test_rx_ring;
1174        struct pci_dev *pdev = adapter->pdev;
1175        struct e1000_hw *hw = &adapter->hw;
1176        u32 rctl;
1177        int i;
1178        int ret_val;
1179
1180        /* Setup Tx descriptor ring and Tx buffers */
1181
1182        if (!tx_ring->count)
1183                tx_ring->count = E1000_DEFAULT_TXD;
1184
1185        tx_ring->buffer_info = kcalloc(tx_ring->count,
1186                                       sizeof(struct e1000_buffer), GFP_KERNEL);
1187        if (!tx_ring->buffer_info) {
1188                ret_val = 1;
1189                goto err_nomem;
1190        }
1191
1192        tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
1193        tx_ring->size = ALIGN(tx_ring->size, 4096);
1194        tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1195                                           &tx_ring->dma, GFP_KERNEL);
1196        if (!tx_ring->desc) {
1197                ret_val = 2;
1198                goto err_nomem;
1199        }
1200        tx_ring->next_to_use = 0;
1201        tx_ring->next_to_clean = 0;
1202
1203        ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF));
1204        ew32(TDBAH(0), ((u64)tx_ring->dma >> 32));
1205        ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
1206        ew32(TDH(0), 0);
1207        ew32(TDT(0), 0);
1208        ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
1209             E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1210             E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1211
1212        for (i = 0; i < tx_ring->count; i++) {
1213                struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
1214                struct sk_buff *skb;
1215                unsigned int skb_size = 1024;
1216
1217                skb = alloc_skb(skb_size, GFP_KERNEL);
1218                if (!skb) {
1219                        ret_val = 3;
1220                        goto err_nomem;
1221                }
1222                skb_put(skb, skb_size);
1223                tx_ring->buffer_info[i].skb = skb;
1224                tx_ring->buffer_info[i].length = skb->len;
1225                tx_ring->buffer_info[i].dma =
1226                    dma_map_single(&pdev->dev, skb->data, skb->len,
1227                                   DMA_TO_DEVICE);
1228                if (dma_mapping_error(&pdev->dev,
1229                                      tx_ring->buffer_info[i].dma)) {
1230                        ret_val = 4;
1231                        goto err_nomem;
1232                }
1233                tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
1234                tx_desc->lower.data = cpu_to_le32(skb->len);
1235                tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
1236                                                   E1000_TXD_CMD_IFCS |
1237                                                   E1000_TXD_CMD_RS);
1238                tx_desc->upper.data = 0;
1239        }
1240
1241        /* Setup Rx descriptor ring and Rx buffers */
1242
1243        if (!rx_ring->count)
1244                rx_ring->count = E1000_DEFAULT_RXD;
1245
1246        rx_ring->buffer_info = kcalloc(rx_ring->count,
1247                                       sizeof(struct e1000_buffer), GFP_KERNEL);
1248        if (!rx_ring->buffer_info) {
1249                ret_val = 5;
1250                goto err_nomem;
1251        }
1252
1253        rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended);
1254        rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1255                                           &rx_ring->dma, GFP_KERNEL);
1256        if (!rx_ring->desc) {
1257                ret_val = 6;
1258                goto err_nomem;
1259        }
1260        rx_ring->next_to_use = 0;
1261        rx_ring->next_to_clean = 0;
1262
1263        rctl = er32(RCTL);
1264        if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
1265                ew32(RCTL, rctl & ~E1000_RCTL_EN);
1266        ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF));
1267        ew32(RDBAH(0), ((u64)rx_ring->dma >> 32));
1268        ew32(RDLEN(0), rx_ring->size);
1269        ew32(RDH(0), 0);
1270        ew32(RDT(0), 0);
1271        rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1272            E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
1273            E1000_RCTL_SBP | E1000_RCTL_SECRC |
1274            E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1275            (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1276        ew32(RCTL, rctl);
1277
1278        for (i = 0; i < rx_ring->count; i++) {
1279                union e1000_rx_desc_extended *rx_desc;
1280                struct sk_buff *skb;
1281
1282                skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
1283                if (!skb) {
1284                        ret_val = 7;
1285                        goto err_nomem;
1286                }
1287                skb_reserve(skb, NET_IP_ALIGN);
1288                rx_ring->buffer_info[i].skb = skb;
1289                rx_ring->buffer_info[i].dma =
1290                    dma_map_single(&pdev->dev, skb->data, 2048,
1291                                   DMA_FROM_DEVICE);
1292                if (dma_mapping_error(&pdev->dev,
1293                                      rx_ring->buffer_info[i].dma)) {
1294                        ret_val = 8;
1295                        goto err_nomem;
1296                }
1297                rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
1298                rx_desc->read.buffer_addr =
1299                    cpu_to_le64(rx_ring->buffer_info[i].dma);
1300                memset(skb->data, 0x00, skb->len);
1301        }
1302
1303        return 0;
1304
1305err_nomem:
1306        e1000_free_desc_rings(adapter);
1307        return ret_val;
1308}
1309
1310static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
1311{
1312        /* Write out to PHY registers 29 and 30 to disable the Receiver. */
1313        e1e_wphy(&adapter->hw, 29, 0x001F);
1314        e1e_wphy(&adapter->hw, 30, 0x8FFC);
1315        e1e_wphy(&adapter->hw, 29, 0x001A);
1316        e1e_wphy(&adapter->hw, 30, 0x8FF0);
1317}
1318
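/* Put the internal PHY into loopback for the loopback self-test: force
 * speed/duplex at the PHY via BMCR, apply PHY-specific quirks, then force
 * the MAC to the matching speed/duplex so transmitted test frames are
 * looped straight back to the receiver.
 */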
1319static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1320{
1321        struct e1000_hw *hw = &adapter->hw;
1322        u32 ctrl_reg = 0;
1323        u16 phy_reg = 0;
1324        s32 ret_val = 0;
1325
1326        hw->mac.autoneg = 0;
1327
1328        if (hw->phy.type == e1000_phy_ife) {
1329                /* force 100, set loopback */
1330                e1e_wphy(hw, MII_BMCR, 0x6100);
1331
1332                /* Now set up the MAC to the same speed/duplex as the PHY. */
1333                ctrl_reg = er32(CTRL);
1334                ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1335                ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1336                             E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1337                             E1000_CTRL_SPD_100 |/* Force Speed to 100 */
1338                             E1000_CTRL_FD);     /* Force Duplex to FULL */
1339
1340                ew32(CTRL, ctrl_reg);
1341                e1e_flush();
1342                usleep_range(500, 1000);
1343
1344                return 0;
1345        }
1346
1347        /* Specific PHY configuration for loopback */
1348        switch (hw->phy.type) {
1349        case e1000_phy_m88:
1350                /* Auto-MDI/MDIX Off */
1351                e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1352                /* reset to update Auto-MDI/MDIX */
1353                e1e_wphy(hw, MII_BMCR, 0x9140);
1354                /* autoneg off */
1355                e1e_wphy(hw, MII_BMCR, 0x8140);
1356                break;
1357        case e1000_phy_gg82563:
1358                e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
1359                break;
1360        case e1000_phy_bm:
 1361                /* Set Default MAC Interface speed to 1 Gbps */
1362                e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
1363                phy_reg &= ~0x0007;
1364                phy_reg |= 0x006;
1365                e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
1366                /* Assert SW reset for above settings to take effect */
1367                hw->phy.ops.commit(hw);
1368                usleep_range(1000, 2000);
1369                /* Force Full Duplex */
1370                e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
1371                e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
1372                /* Set Link Up (in force link) */
1373                e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
1374                e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
1375                /* Force Link */
1376                e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
1377                e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
1378                /* Set Early Link Enable */
1379                e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
1380                e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
1381                break;
1382        case e1000_phy_82577:
1383        case e1000_phy_82578:
1384                /* Workaround: K1 must be disabled for stable 1Gbps operation */
1385                ret_val = hw->phy.ops.acquire(hw);
1386                if (ret_val) {
1387                        e_err("Cannot setup 1Gbps loopback.\n");
1388                        return ret_val;
1389                }
1390                e1000_configure_k1_ich8lan(hw, false);
1391                hw->phy.ops.release(hw);
1392                break;
1393        case e1000_phy_82579:
1394                /* Disable PHY energy detect power down */
1395                e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
1396                e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3));
1397                /* Disable full chip energy detect */
1398                e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
1399                e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
1400                /* Enable loopback on the PHY */
1401                e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
1402                break;
1403        default:
1404                break;
1405        }
1406
1407        /* force 1000, set loopback */
1408        e1e_wphy(hw, MII_BMCR, 0x4140);
1409        msleep(250);
1410
1411        /* Now set up the MAC to the same speed/duplex as the PHY. */
1412        ctrl_reg = er32(CTRL);
1413        ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1414        ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1415                     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1416                     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1417                     E1000_CTRL_FD);     /* Force Duplex to FULL */
1418
1419        if (adapter->flags & FLAG_IS_ICH)
1420                ctrl_reg |= E1000_CTRL_SLU;     /* Set Link Up */
1421
1422        if (hw->phy.media_type == e1000_media_type_copper &&
1423            hw->phy.type == e1000_phy_m88) {
1424                ctrl_reg |= E1000_CTRL_ILOS;    /* Invert Loss of Signal */
1425        } else {
1426                /* Set the ILOS bit on the fiber NIC if half duplex link is
1427                 * detected.
1428                 */
1429                if ((er32(STATUS) & E1000_STATUS_FD) == 0)
1430                        ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1431        }
1432
1433        ew32(CTRL, ctrl_reg);
1434
1435        /* Disable the receiver on the PHY so that the PHY does not begin to
1436         * autonegotiate when a cable is reconnected to the NIC.
1437         */
1438        if (hw->phy.type == e1000_phy_m88)
1439                e1000_phy_disable_receiver(adapter);
1440
1441        usleep_range(500, 1000);
1442
1443        return 0;
1444}
1445
1446static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1447{
1448        struct e1000_hw *hw = &adapter->hw;
1449        u32 ctrl = er32(CTRL);
1450        int link;
1451
1452        /* special requirements for 82571/82572 fiber adapters */
1453
1454        /* jump through hoops to make sure link is up because serdes
1455         * link is hardwired up
1456         */
1457        ctrl |= E1000_CTRL_SLU;
1458        ew32(CTRL, ctrl);
1459
1460        /* disable autoneg */
1461        ctrl = er32(TXCW);
1462        ctrl &= ~BIT(31);
1463        ew32(TXCW, ctrl);
1464
1465        link = (er32(STATUS) & E1000_STATUS_LU);
1466
1467        if (!link) {
1468                /* set invert loss of signal */
1469                ctrl = er32(CTRL);
1470                ctrl |= E1000_CTRL_ILOS;
1471                ew32(CTRL, ctrl);
1472        }
1473
1474        /* special write to serdes control register to enable SerDes analog
1475         * loopback
1476         */
1477        ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK);
1478        e1e_flush();
1479        usleep_range(10000, 11000);
1480
1481        return 0;
1482}
1483
1484/* only call this for fiber/serdes connections to es2lan */
1485static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
1486{
1487        struct e1000_hw *hw = &adapter->hw;
1488        u32 ctrlext = er32(CTRL_EXT);
1489        u32 ctrl = er32(CTRL);
1490
1491        /* save CTRL_EXT to restore later; reuse tx_fifo_head, which is unused
1492         * on mac_type 80003es2lan, as scratch storage
1493         */
1494        adapter->tx_fifo_head = ctrlext;
1495
1496        /* clear the serdes mode bits, putting the device into mac loopback */
1497        ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
1498        ew32(CTRL_EXT, ctrlext);
1499
1500        /* force speed to 1000/FD, link up */
1501        ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1502        ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
1503                 E1000_CTRL_SPD_1000 | E1000_CTRL_FD);
1504        ew32(CTRL, ctrl);
1505
1506        /* set mac loopback */
1507        ctrl = er32(RCTL);
1508        ctrl |= E1000_RCTL_LBM_MAC;
1509        ew32(RCTL, ctrl);
1510
1511        /* set testing mode parameters (no need to reset later) */
1512#define KMRNCTRLSTA_OPMODE (0x1F << 16)
1513#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
1514        ew32(KMRNCTRLSTA,
1515             (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
1516
1517        return 0;
1518}
1519
1520static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
1521{
1522        struct e1000_hw *hw = &adapter->hw;
1523        u32 rctl, fext_nvm11, tarc0;
1524
1525        if (hw->mac.type >= e1000_pch_spt) {
1526                fext_nvm11 = er32(FEXTNVM11);
1527                fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
1528                ew32(FEXTNVM11, fext_nvm11);
1529                tarc0 = er32(TARC(0));
1530                /* clear bits 28 & 29 (control of MULR concurrent requests) */
1531                tarc0 &= 0xcfffffff;
1532                /* set bit 29 (value of MULR requests is now 2) */
1533                tarc0 |= 0x20000000;
1534                ew32(TARC(0), tarc0);
1535        }
1536        if (hw->phy.media_type == e1000_media_type_fiber ||
1537            hw->phy.media_type == e1000_media_type_internal_serdes) {
1538                switch (hw->mac.type) {
1539                case e1000_80003es2lan:
1540                        return e1000_set_es2lan_mac_loopback(adapter);
1541                case e1000_82571:
1542                case e1000_82572:
1543                        return e1000_set_82571_fiber_loopback(adapter);
1544                default:
1545                        rctl = er32(RCTL);
1546                        rctl |= E1000_RCTL_LBM_TCVR;
1547                        ew32(RCTL, rctl);
1548                        return 0;
1549                }
1550        } else if (hw->phy.media_type == e1000_media_type_copper) {
1551                return e1000_integrated_phy_loopback(adapter);
1552        }
1553
1554        return 7;
1555}
1556
1557static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
1558{
1559        struct e1000_hw *hw = &adapter->hw;
1560        u32 rctl, fext_nvm11, tarc0;
1561        u16 phy_reg;
1562
1563        rctl = er32(RCTL);
1564        rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1565        ew32(RCTL, rctl);
1566
1567        switch (hw->mac.type) {
1568        case e1000_pch_spt:
1569        case e1000_pch_cnp:
1570        case e1000_pch_tgp:
1571        case e1000_pch_adp:
1572        case e1000_pch_mtp:
1573        case e1000_pch_lnp:
1574                fext_nvm11 = er32(FEXTNVM11);
1575                fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
1576                ew32(FEXTNVM11, fext_nvm11);
1577                tarc0 = er32(TARC(0));
1578                /* clear bits 28 & 29 (control of MULR concurrent requests) */
1579                /* with both bits clear, the MULR request value is back to 0 */
1580                tarc0 &= 0xcfffffff;
1581                ew32(TARC(0), tarc0);
1582                fallthrough;
1583        case e1000_80003es2lan:
1584                if (hw->phy.media_type == e1000_media_type_fiber ||
1585                    hw->phy.media_type == e1000_media_type_internal_serdes) {
1586                        /* restore CTRL_EXT, stealing space from tx_fifo_head */
1587                        ew32(CTRL_EXT, adapter->tx_fifo_head);
1588                        adapter->tx_fifo_head = 0;
1589                }
1590                fallthrough;
1591        case e1000_82571:
1592        case e1000_82572:
1593                if (hw->phy.media_type == e1000_media_type_fiber ||
1594                    hw->phy.media_type == e1000_media_type_internal_serdes) {
1595                        ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1596                        e1e_flush();
1597                        usleep_range(10000, 11000);
1598                        break;
1599                }
1600                fallthrough;
1601        default:
1602                hw->mac.autoneg = 1;
1603                if (hw->phy.type == e1000_phy_gg82563)
1604                        e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
1605                e1e_rphy(hw, MII_BMCR, &phy_reg);
1606                if (phy_reg & BMCR_LOOPBACK) {
1607                        phy_reg &= ~BMCR_LOOPBACK;
1608                        e1e_wphy(hw, MII_BMCR, phy_reg);
1609                        if (hw->phy.ops.commit)
1610                                hw->phy.ops.commit(hw);
1611                }
1612                break;
1613        }
1614}
1615
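    /* The loopback test frame built below is 0xFF in its first half and 0xAA
     * in its second half, with 0xBE/0xAF marker bytes at offsets
     * frame_size / 2 + 10 and + 12; e1000_check_lbtest_frame() verifies one
     * byte from the first half plus both markers and returns 13 on mismatch.
     */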
1616static void e1000_create_lbtest_frame(struct sk_buff *skb,
1617                                      unsigned int frame_size)
1618{
1619        memset(skb->data, 0xFF, frame_size);
1620        frame_size &= ~1;
1621        memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1622        skb->data[frame_size / 2 + 10] = 0xBE;
1623        skb->data[frame_size / 2 + 12] = 0xAF;
1624}
1625
1626static int e1000_check_lbtest_frame(struct sk_buff *skb,
1627                                    unsigned int frame_size)
1628{
1629        frame_size &= ~1;
1630        if (*(skb->data + 3) == 0xFF)
1631                if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1632                    (*(skb->data + frame_size / 2 + 12) == 0xAF))
1633                        return 0;
1634        return 13;
1635}
1636
1637static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1638{
1639        struct e1000_ring *tx_ring = &adapter->test_tx_ring;
1640        struct e1000_ring *rx_ring = &adapter->test_rx_ring;
1641        struct pci_dev *pdev = adapter->pdev;
1642        struct e1000_hw *hw = &adapter->hw;
1643        struct e1000_buffer *buffer_info;
1644        int i, j, k, l;
1645        int lc;
1646        int good_cnt;
1647        int ret_val = 0;
1648        unsigned long time;
1649
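            /* advance the RX tail so the receive buffers set up by
             * e1000_setup_desc_rings() are available to the hardware
             */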
1650        ew32(RDT(0), rx_ring->count - 1);
1651
1652        /* Calculate the loop count based on the largest descriptor ring
1653         * The idea is to wrap the largest ring a number of times using 64
1654         * send/receive pairs during each loop
1655         */
1656
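            /* e.g. 256 descriptors in the larger ring gives
             * lc = (256 / 64) * 2 + 1 = 9 passes of 64 frames each
             */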
1657        if (rx_ring->count <= tx_ring->count)
1658                lc = ((tx_ring->count / 64) * 2) + 1;
1659        else
1660                lc = ((rx_ring->count / 64) * 2) + 1;
1661
1662        k = 0;
1663        l = 0;
1664        /* loop count loop */
1665        for (j = 0; j <= lc; j++) {
1666                /* send the packets */
1667                for (i = 0; i < 64; i++) {
1668                        buffer_info = &tx_ring->buffer_info[k];
1669
1670                        e1000_create_lbtest_frame(buffer_info->skb, 1024);
1671                        dma_sync_single_for_device(&pdev->dev,
1672                                                   buffer_info->dma,
1673                                                   buffer_info->length,
1674                                                   DMA_TO_DEVICE);
1675                        k++;
1676                        if (k == tx_ring->count)
1677                                k = 0;
1678                }
1679                ew32(TDT(0), k);
1680                e1e_flush();
1681                msleep(200);
1682                time = jiffies; /* set the start time for the receive */
1683                good_cnt = 0;
1684                /* receive the sent packets */
1685                do {
1686                        buffer_info = &rx_ring->buffer_info[l];
1687
1688                        dma_sync_single_for_cpu(&pdev->dev,
1689                                                buffer_info->dma, 2048,
1690                                                DMA_FROM_DEVICE);
1691
1692                        ret_val = e1000_check_lbtest_frame(buffer_info->skb,
1693                                                           1024);
1694                        if (!ret_val)
1695                                good_cnt++;
1696                        l++;
1697                        if (l == rx_ring->count)
1698                                l = 0;
1699                        /* time + 20 jiffies (20 ms at HZ=1000, 200 ms at the old
1700                         * HZ=100 default) is more than enough time to complete the
1701                         * receives; if it is exceeded, break out and flag an error
1702                         */
1703                } while ((good_cnt < 64) && !time_after(jiffies, time + 20));
1704                if (good_cnt != 64) {
1705                        ret_val = 13;   /* same error code as a frame mis-compare */
1706                        break;
1707                }
1708                if (time_after(jiffies, time + 20)) {
1709                        ret_val = 14;   /* error code for time out error */
1710                        break;
1711                }
1712        }
1713        return ret_val;
1714}
1715
1716static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1717{
1718        struct e1000_hw *hw = &adapter->hw;
1719
1720        /* PHY loopback cannot be performed if SoL/IDER sessions are active */
1721        if (hw->phy.ops.check_reset_block &&
1722            hw->phy.ops.check_reset_block(hw)) {
1723                e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
1724                *data = 0;
1725                goto out;
1726        }
1727
1728        *data = e1000_setup_desc_rings(adapter);
1729        if (*data)
1730                goto out;
1731
1732        *data = e1000_setup_loopback_test(adapter);
1733        if (*data)
1734                goto err_loopback;
1735
1736        *data = e1000_run_loopback_test(adapter);
1737        e1000_loopback_cleanup(adapter);
1738
1739err_loopback:
1740        e1000_free_desc_rings(adapter);
1741out:
1742        return *data;
1743}
1744
1745static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1746{
1747        struct e1000_hw *hw = &adapter->hw;
1748
1749        *data = 0;
1750        if (hw->phy.media_type == e1000_media_type_internal_serdes) {
1751                int i = 0;
1752
1753                hw->mac.serdes_has_link = false;
1754
1755                /* On some blade server designs, link establishment
1756                 * could take as long as 2-3 minutes
1757                 */
1758                do {
1759                        hw->mac.ops.check_for_link(hw);
1760                        if (hw->mac.serdes_has_link)
1761                                return *data;
1762                        msleep(20);
1763                } while (i++ < 3750);
1764
1765                *data = 1;
1766        } else {
1767                hw->mac.ops.check_for_link(hw);
1768                if (hw->mac.autoneg)
1769                        /* On some PHY/switch combinations, link establishment
1770                         * can take a few seconds more than expected.
1771                         */
1772                        msleep_interruptible(5000);
1773
1774                if (!(er32(STATUS) & E1000_STATUS_LU))
1775                        *data = 1;
1776        }
1777        return *data;
1778}
1779
1780static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
1781                                 int sset)
1782{
1783        switch (sset) {
1784        case ETH_SS_TEST:
1785                return E1000_TEST_LEN;
1786        case ETH_SS_STATS:
1787                return E1000_STATS_LEN;
1788        case ETH_SS_PRIV_FLAGS:
1789                return E1000E_PRIV_FLAGS_STR_LEN;
1790        default:
1791                return -EOPNOTSUPP;
1792        }
1793}
1794
1795static void e1000_diag_test(struct net_device *netdev,
1796                            struct ethtool_test *eth_test, u64 *data)
1797{
1798        struct e1000_adapter *adapter = netdev_priv(netdev);
1799        u16 autoneg_advertised;
1800        u8 forced_speed_duplex;
1801        u8 autoneg;
1802        bool if_running = netif_running(netdev);
1803
1804        pm_runtime_get_sync(netdev->dev.parent);
1805
1806        set_bit(__E1000_TESTING, &adapter->state);
1807
1808        if (!if_running) {
1809                /* Get control of and reset hardware */
1810                if (adapter->flags & FLAG_HAS_AMT)
1811                        e1000e_get_hw_control(adapter);
1812
1813                e1000e_power_up_phy(adapter);
1814
1815                adapter->hw.phy.autoneg_wait_to_complete = 1;
1816                e1000e_reset(adapter);
1817                adapter->hw.phy.autoneg_wait_to_complete = 0;
1818        }
1819
1820        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1821                /* Offline tests */
1822
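                    /* offline results: data[0] register test, data[1] eeprom,
                     * data[2] interrupt, data[3] loopback, data[4] link
                     */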
1823                /* save speed, duplex, autoneg settings */
1824                autoneg_advertised = adapter->hw.phy.autoneg_advertised;
1825                forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
1826                autoneg = adapter->hw.mac.autoneg;
1827
1828                e_info("offline testing starting\n");
1829
1830                if (if_running)
1831                        /* indicate we're in test mode */
1832                        e1000e_close(netdev);
1833
1834                if (e1000_reg_test(adapter, &data[0]))
1835                        eth_test->flags |= ETH_TEST_FL_FAILED;
1836
1837                e1000e_reset(adapter);
1838                if (e1000_eeprom_test(adapter, &data[1]))
1839                        eth_test->flags |= ETH_TEST_FL_FAILED;
1840
1841                e1000e_reset(adapter);
1842                if (e1000_intr_test(adapter, &data[2]))
1843                        eth_test->flags |= ETH_TEST_FL_FAILED;
1844
1845                e1000e_reset(adapter);
1846                if (e1000_loopback_test(adapter, &data[3]))
1847                        eth_test->flags |= ETH_TEST_FL_FAILED;
1848
1849                /* force this routine to wait until autoneg complete/timeout */
1850                adapter->hw.phy.autoneg_wait_to_complete = 1;
1851                e1000e_reset(adapter);
1852                adapter->hw.phy.autoneg_wait_to_complete = 0;
1853
1854                if (e1000_link_test(adapter, &data[4]))
1855                        eth_test->flags |= ETH_TEST_FL_FAILED;
1856
1857                /* restore speed, duplex, autoneg settings */
1858                adapter->hw.phy.autoneg_advertised = autoneg_advertised;
1859                adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
1860                adapter->hw.mac.autoneg = autoneg;
1861                e1000e_reset(adapter);
1862
1863                clear_bit(__E1000_TESTING, &adapter->state);
1864                if (if_running)
1865                        e1000e_open(netdev);
1866        } else {
1867                /* Online tests */
1868
1869                e_info("online testing starting\n");
1870
1871                /* register, eeprom, intr and loopback tests not run online */
1872                data[0] = 0;
1873                data[1] = 0;
1874                data[2] = 0;
1875                data[3] = 0;
1876
1877                if (e1000_link_test(adapter, &data[4]))
1878                        eth_test->flags |= ETH_TEST_FL_FAILED;
1879
1880                clear_bit(__E1000_TESTING, &adapter->state);
1881        }
1882
1883        if (!if_running) {
1884                e1000e_reset(adapter);
1885
1886                if (adapter->flags & FLAG_HAS_AMT)
1887                        e1000e_release_hw_control(adapter);
1888        }
1889
1890        msleep_interruptible(4 * 1000);
1891
1892        pm_runtime_put_sync(netdev->dev.parent);
1893}
1894
1895static void e1000_get_wol(struct net_device *netdev,
1896                          struct ethtool_wolinfo *wol)
1897{
1898        struct e1000_adapter *adapter = netdev_priv(netdev);
1899
1900        wol->supported = 0;
1901        wol->wolopts = 0;
1902
1903        if (!(adapter->flags & FLAG_HAS_WOL) ||
1904            !device_can_wakeup(&adapter->pdev->dev))
1905                return;
1906
1907        wol->supported = WAKE_UCAST | WAKE_MCAST |
1908            WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
1909
1910        /* apply any specific unsupported masks here */
1911        if (adapter->flags & FLAG_NO_WAKE_UCAST) {
1912                wol->supported &= ~WAKE_UCAST;
1913
1914                if (adapter->wol & E1000_WUFC_EX)
1915                        e_err("Interface does not support directed (unicast) frame wake-up packets\n");
1916        }
1917
1918        if (adapter->wol & E1000_WUFC_EX)
1919                wol->wolopts |= WAKE_UCAST;
1920        if (adapter->wol & E1000_WUFC_MC)
1921                wol->wolopts |= WAKE_MCAST;
1922        if (adapter->wol & E1000_WUFC_BC)
1923                wol->wolopts |= WAKE_BCAST;
1924        if (adapter->wol & E1000_WUFC_MAG)
1925                wol->wolopts |= WAKE_MAGIC;
1926        if (adapter->wol & E1000_WUFC_LNKC)
1927                wol->wolopts |= WAKE_PHY;
1928}
1929
1930static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1931{
1932        struct e1000_adapter *adapter = netdev_priv(netdev);
1933
1934        if (!(adapter->flags & FLAG_HAS_WOL) ||
1935            !device_can_wakeup(&adapter->pdev->dev) ||
1936            (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1937                              WAKE_MAGIC | WAKE_PHY)))
1938                return -EOPNOTSUPP;
1939
1940        /* these settings will always override what we currently have */
1941        adapter->wol = 0;
1942
1943        if (wol->wolopts & WAKE_UCAST)
1944                adapter->wol |= E1000_WUFC_EX;
1945        if (wol->wolopts & WAKE_MCAST)
1946                adapter->wol |= E1000_WUFC_MC;
1947        if (wol->wolopts & WAKE_BCAST)
1948                adapter->wol |= E1000_WUFC_BC;
1949        if (wol->wolopts & WAKE_MAGIC)
1950                adapter->wol |= E1000_WUFC_MAG;
1951        if (wol->wolopts & WAKE_PHY)
1952                adapter->wol |= E1000_WUFC_LNKC;
1953
1954        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1955
1956        return 0;
1957}
1958
1959static int e1000_set_phys_id(struct net_device *netdev,
1960                             enum ethtool_phys_id_state state)
1961{
1962        struct e1000_adapter *adapter = netdev_priv(netdev);
1963        struct e1000_hw *hw = &adapter->hw;
1964
1965        switch (state) {
1966        case ETHTOOL_ID_ACTIVE:
1967                pm_runtime_get_sync(netdev->dev.parent);
1968
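                    /* with no hardware blink routine, return the blink
                     * frequency so the ethtool core toggles the LED itself via
                     * ETHTOOL_ID_ON/ETHTOOL_ID_OFF
                     */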
1969                if (!hw->mac.ops.blink_led)
1970                        return 2;       /* cycle on/off twice per second */
1971
1972                hw->mac.ops.blink_led(hw);
1973                break;
1974
1975        case ETHTOOL_ID_INACTIVE:
1976                if (hw->phy.type == e1000_phy_ife)
1977                        e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
1978                hw->mac.ops.led_off(hw);
1979                hw->mac.ops.cleanup_led(hw);
1980                pm_runtime_put_sync(netdev->dev.parent);
1981                break;
1982
1983        case ETHTOOL_ID_ON:
1984                hw->mac.ops.led_on(hw);
1985                break;
1986
1987        case ETHTOOL_ID_OFF:
1988                hw->mac.ops.led_off(hw);
1989                break;
1990        }
1991
1992        return 0;
1993}
1994
1995static int e1000_get_coalesce(struct net_device *netdev,
1996                              struct ethtool_coalesce *ec,
1997                              struct kernel_ethtool_coalesce *kernel_coal,
1998                              struct netlink_ext_ack *extack)
1999{
2000        struct e1000_adapter *adapter = netdev_priv(netdev);
2001
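            /* itr_setting values of 4 or less are ITR modes and are reported
             * as-is; larger values store an interrupt rate in interrupts/sec,
             * so convert back to microseconds
             */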
2002        if (adapter->itr_setting <= 4)
2003                ec->rx_coalesce_usecs = adapter->itr_setting;
2004        else
2005                ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
2006
2007        return 0;
2008}
2009
2010static int e1000_set_coalesce(struct net_device *netdev,
2011                              struct ethtool_coalesce *ec,
2012                              struct kernel_ethtool_coalesce *kernel_coal,
2013                              struct netlink_ext_ack *extack)
2014{
2015        struct e1000_adapter *adapter = netdev_priv(netdev);
2016
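            /* rx_coalesce_usecs of 0, 1, 3 or 4 selects a predefined ITR mode
             * (2 is reserved); any other value must fall between
             * E1000_MIN_ITR_USECS and E1000_MAX_ITR_USECS and is converted to
             * an interrupt rate below
             */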
2017        if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
2018            ((ec->rx_coalesce_usecs > 4) &&
2019             (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
2020            (ec->rx_coalesce_usecs == 2))
2021                return -EINVAL;
2022
2023        if (ec->rx_coalesce_usecs == 4) {
2024                adapter->itr_setting = 4;
2025                adapter->itr = adapter->itr_setting;
2026        } else if (ec->rx_coalesce_usecs <= 3) {
2027                adapter->itr = 20000;
2028                adapter->itr_setting = ec->rx_coalesce_usecs;
2029        } else {
2030                adapter->itr = (1000000 / ec->rx_coalesce_usecs);
2031                adapter->itr_setting = adapter->itr & ~3;
2032        }
2033
2034        pm_runtime_get_sync(netdev->dev.parent);
2035
2036        if (adapter->itr_setting != 0)
2037                e1000e_write_itr(adapter, adapter->itr);
2038        else
2039                e1000e_write_itr(adapter, 0);
2040
2041        pm_runtime_put_sync(netdev->dev.parent);
2042
2043        return 0;
2044}
2045
2046static int e1000_nway_reset(struct net_device *netdev)
2047{
2048        struct e1000_adapter *adapter = netdev_priv(netdev);
2049
2050        if (!netif_running(netdev))
2051                return -EAGAIN;
2052
2053        if (!adapter->hw.mac.autoneg)
2054                return -EINVAL;
2055
2056        pm_runtime_get_sync(netdev->dev.parent);
2057        e1000e_reinit_locked(adapter);
2058        pm_runtime_put_sync(netdev->dev.parent);
2059
2060        return 0;
2061}
2062
2063static void e1000_get_ethtool_stats(struct net_device *netdev,
2064                                    struct ethtool_stats __always_unused *stats,
2065                                    u64 *data)
2066{
2067        struct e1000_adapter *adapter = netdev_priv(netdev);
2068        struct rtnl_link_stats64 net_stats;
2069        int i;
2070        char *p = NULL;
2071
2072        pm_runtime_get_sync(netdev->dev.parent);
2073
2074        dev_get_stats(netdev, &net_stats);
2075
2076        pm_runtime_put_sync(netdev->dev.parent);
2077
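            /* copy each stat from either the rtnl_link_stats64 snapshot or the
             * adapter structure, using the offset and size recorded in
             * e1000_gstrings_stats[]
             */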
2078        for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
2079                switch (e1000_gstrings_stats[i].type) {
2080                case NETDEV_STATS:
2081                        p = (char *)&net_stats +
2082                            e1000_gstrings_stats[i].stat_offset;
2083                        break;
2084                case E1000_STATS:
2085                        p = (char *)adapter +
2086                            e1000_gstrings_stats[i].stat_offset;
2087                        break;
2088                default:
2089                        data[i] = 0;
2090                        continue;
2091                }
2092
2093                data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
2094                           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2095        }
2096}
2097
2098static void e1000_get_strings(struct net_device __always_unused *netdev,
2099                              u32 stringset, u8 *data)
2100{
2101        u8 *p = data;
2102        int i;
2103
2104        switch (stringset) {
2105        case ETH_SS_TEST:
2106                memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
2107                break;
2108        case ETH_SS_STATS:
2109                for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
2110                        memcpy(p, e1000_gstrings_stats[i].stat_string,
2111                               ETH_GSTRING_LEN);
2112                        p += ETH_GSTRING_LEN;
2113                }
2114                break;
2115        case ETH_SS_PRIV_FLAGS:
2116                memcpy(data, e1000e_priv_flags_strings,
2117                       E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
2118                break;
2119        }
2120}
2121
2122static int e1000_get_rxnfc(struct net_device *netdev,
2123                           struct ethtool_rxnfc *info,
2124                           u32 __always_unused *rule_locs)
2125{
2126        info->data = 0;
2127
2128        switch (info->cmd) {
2129        case ETHTOOL_GRXFH: {
2130                struct e1000_adapter *adapter = netdev_priv(netdev);
2131                struct e1000_hw *hw = &adapter->hw;
2132                u32 mrqc;
2133
2134                pm_runtime_get_sync(netdev->dev.parent);
2135                mrqc = er32(MRQC);
2136                pm_runtime_put_sync(netdev->dev.parent);
2137
2138                if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
2139                        return 0;
2140
2141                switch (info->flow_type) {
2142                case TCP_V4_FLOW:
2143                        if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
2144                                info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2145                        fallthrough;
2146                case UDP_V4_FLOW:
2147                case SCTP_V4_FLOW:
2148                case AH_ESP_V4_FLOW:
2149                case IPV4_FLOW:
2150                        if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
2151                                info->data |= RXH_IP_SRC | RXH_IP_DST;
2152                        break;
2153                case TCP_V6_FLOW:
2154                        if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
2155                                info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2156                        fallthrough;
2157                case UDP_V6_FLOW:
2158                case SCTP_V6_FLOW:
2159                case AH_ESP_V6_FLOW:
2160                case IPV6_FLOW:
2161                        if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
2162                                info->data |= RXH_IP_SRC | RXH_IP_DST;
2163                        break;
2164                default:
2165                        break;
2166                }
2167                return 0;
2168        }
2169        default:
2170                return -EOPNOTSUPP;
2171        }
2172}
2173
2174static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2175{
2176        struct e1000_adapter *adapter = netdev_priv(netdev);
2177        struct e1000_hw *hw = &adapter->hw;
2178        u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data;
2179        u32 ret_val;
2180
2181        if (!(adapter->flags2 & FLAG2_HAS_EEE))
2182                return -EOPNOTSUPP;
2183
2184        switch (hw->phy.type) {
2185        case e1000_phy_82579:
2186                cap_addr = I82579_EEE_CAPABILITY;
2187                lpa_addr = I82579_EEE_LP_ABILITY;
2188                pcs_stat_addr = I82579_EEE_PCS_STATUS;
2189                break;
2190        case e1000_phy_i217:
2191                cap_addr = I217_EEE_CAPABILITY;
2192                lpa_addr = I217_EEE_LP_ABILITY;
2193                pcs_stat_addr = I217_EEE_PCS_STATUS;
2194                break;
2195        default:
2196                return -EOPNOTSUPP;
2197        }
2198
2199        pm_runtime_get_sync(netdev->dev.parent);
2200
2201        ret_val = hw->phy.ops.acquire(hw);
2202        if (ret_val) {
2203                pm_runtime_put_sync(netdev->dev.parent);
2204                return -EBUSY;
2205        }
2206
2207        /* EEE Capability */
2208        ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
2209        if (ret_val)
2210                goto release;
2211        edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
2212
2213        /* EEE Advertised */
2214        edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
2215
2216        /* EEE Link Partner Advertised */
2217        ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
2218        if (ret_val)
2219                goto release;
2220        edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
2221
2222        /* EEE PCS Status */
2223        ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
2224        if (ret_val)
2225                goto release;
2226        if (hw->phy.type == e1000_phy_82579)
2227                phy_data <<= 8;
2228
2229        /* Result of the EEE auto-negotiation: there is no register that
2230         * reports the status of the EEE negotiation, so make a best guess
2231         * based on whether Tx or Rx LPI indications have been received.
2232         */
2233        if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD))
2234                edata->eee_active = true;
2235
2236        edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable;
2237        edata->tx_lpi_enabled = true;
2238        edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
2239
2240release:
2241        hw->phy.ops.release(hw);
2242        if (ret_val)
2243                ret_val = -ENODATA;
2244
2245        pm_runtime_put_sync(netdev->dev.parent);
2246
2247        return ret_val;
2248}
2249
2250static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
2251{
2252        struct e1000_adapter *adapter = netdev_priv(netdev);
2253        struct e1000_hw *hw = &adapter->hw;
2254        struct ethtool_eee eee_curr;
2255        s32 ret_val;
2256
2257        ret_val = e1000e_get_eee(netdev, &eee_curr);
2258        if (ret_val)
2259                return ret_val;
2260
2261        if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
2262                e_err("Setting EEE tx-lpi is not supported\n");
2263                return -EINVAL;
2264        }
2265
2266        if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) {
2267                e_err("Setting EEE Tx LPI timer is not supported\n");
2268                return -EINVAL;
2269        }
2270
2271        if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
2272                e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
2273                return -EINVAL;
2274        }
2275
2276        adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
2277
2278        hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
2279
2280        pm_runtime_get_sync(netdev->dev.parent);
2281
2282        /* reset the link */
2283        if (netif_running(netdev))
2284                e1000e_reinit_locked(adapter);
2285        else
2286                e1000e_reset(adapter);
2287
2288        pm_runtime_put_sync(netdev->dev.parent);
2289
2290        return 0;
2291}
2292
2293static int e1000e_get_ts_info(struct net_device *netdev,
2294                              struct ethtool_ts_info *info)
2295{
2296        struct e1000_adapter *adapter = netdev_priv(netdev);
2297
2298        ethtool_op_get_ts_info(netdev, info);
2299
2300        if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
2301                return 0;
2302
2303        info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
2304                                  SOF_TIMESTAMPING_RX_HARDWARE |
2305                                  SOF_TIMESTAMPING_RAW_HARDWARE);
2306
2307        info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
2308
2309        info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) |
2310                            BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2311                            BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2312                            BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
2313                            BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
2314                            BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
2315                            BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
2316                            BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
2317                            BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
2318                            BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
2319                            BIT(HWTSTAMP_FILTER_ALL));
2320
2321        if (adapter->ptp_clock)
2322                info->phc_index = ptp_clock_index(adapter->ptp_clock);
2323
2324        return 0;
2325}
2326
2327static u32 e1000e_get_priv_flags(struct net_device *netdev)
2328{
2329        struct e1000_adapter *adapter = netdev_priv(netdev);
2330        u32 priv_flags = 0;
2331
2332        if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
2333                priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
2334
2335        return priv_flags;
2336}
2337
2338static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
2339{
2340        struct e1000_adapter *adapter = netdev_priv(netdev);
2341        unsigned int flags2 = adapter->flags2;
2342
2343        flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
2344        if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
2345                struct e1000_hw *hw = &adapter->hw;
2346
2347                if (hw->mac.type < e1000_pch_cnp)
2348                        return -EINVAL;
2349                flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
2350        }
2351
2352        if (flags2 != adapter->flags2)
2353                adapter->flags2 = flags2;
2354
2355        return 0;
2356}
2357
2358static const struct ethtool_ops e1000_ethtool_ops = {
2359        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
2360        .get_drvinfo            = e1000_get_drvinfo,
2361        .get_regs_len           = e1000_get_regs_len,
2362        .get_regs               = e1000_get_regs,
2363        .get_wol                = e1000_get_wol,
2364        .set_wol                = e1000_set_wol,
2365        .get_msglevel           = e1000_get_msglevel,
2366        .set_msglevel           = e1000_set_msglevel,
2367        .nway_reset             = e1000_nway_reset,
2368        .get_link               = ethtool_op_get_link,
2369        .get_eeprom_len         = e1000_get_eeprom_len,
2370        .get_eeprom             = e1000_get_eeprom,
2371        .set_eeprom             = e1000_set_eeprom,
2372        .get_ringparam          = e1000_get_ringparam,
2373        .set_ringparam          = e1000_set_ringparam,
2374        .get_pauseparam         = e1000_get_pauseparam,
2375        .set_pauseparam         = e1000_set_pauseparam,
2376        .self_test              = e1000_diag_test,
2377        .get_strings            = e1000_get_strings,
2378        .set_phys_id            = e1000_set_phys_id,
2379        .get_ethtool_stats      = e1000_get_ethtool_stats,
2380        .get_sset_count         = e1000e_get_sset_count,
2381        .get_coalesce           = e1000_get_coalesce,
2382        .set_coalesce           = e1000_set_coalesce,
2383        .get_rxnfc              = e1000_get_rxnfc,
2384        .get_ts_info            = e1000e_get_ts_info,
2385        .get_eee                = e1000e_get_eee,
2386        .set_eee                = e1000e_set_eee,
2387        .get_link_ksettings     = e1000_get_link_ksettings,
2388        .set_link_ksettings     = e1000_set_link_ksettings,
2389        .get_priv_flags         = e1000e_get_priv_flags,
2390        .set_priv_flags         = e1000e_set_priv_flags,
2391};
2392
2393void e1000e_set_ethtool_ops(struct net_device *netdev)
2394{
2395        netdev->ethtool_ops = &e1000_ethtool_ops;
2396}
2397