linux/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
   1/*******************************************************************************
   2 *
   3 * Intel Ethernet Controller XL710 Family Linux Driver
   4 * Copyright(c) 2013 - 2016 Intel Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms and conditions of the GNU General Public License,
   8 * version 2, as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope it will be useful, but WITHOUT
  11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13 * more details.
  14 *
  15 * You should have received a copy of the GNU General Public License along
  16 * with this program.  If not, see <http://www.gnu.org/licenses/>.
  17 *
  18 * The full GNU General Public License is included in this distribution in
  19 * the file called "COPYING".
  20 *
  21 * Contact Information:
  22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  24 *
  25 ******************************************************************************/
  26
  27/* ethtool support for i40e */
  28
  29#include "i40e.h"
  30#include "i40e_diag.h"
  31
  32struct i40e_stats {
  33        char stat_string[ETH_GSTRING_LEN];
  34        int sizeof_stat;
  35        int stat_offset;
  36};
  37
  38#define I40E_STAT(_type, _name, _stat) { \
  39        .stat_string = _name, \
  40        .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
  41        .stat_offset = offsetof(_type, _stat) \
  42}
  43
  44#define I40E_NETDEV_STAT(_net_stat) \
  45                I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
  46#define I40E_PF_STAT(_name, _stat) \
  47                I40E_STAT(struct i40e_pf, _name, _stat)
  48#define I40E_VSI_STAT(_name, _stat) \
  49                I40E_STAT(struct i40e_vsi, _name, _stat)
  50#define I40E_VEB_STAT(_name, _stat) \
  51                I40E_STAT(struct i40e_veb, _name, _stat)
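/* For illustration (not part of the original source): with the helpers above,
 * an entry such as I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes) expands to
 * roughly
 *
 *	{ .stat_string = "rx_bytes",
 *	  .sizeof_stat = FIELD_SIZEOF(struct i40e_pf, stats.eth.rx_bytes),
 *	  .stat_offset = offsetof(struct i40e_pf, stats.eth.rx_bytes) }
 *
 * so each table below only records a display name plus the size and offset of
 * the counter inside its owning structure.
 */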
  52
  53static const struct i40e_stats i40e_gstrings_net_stats[] = {
  54        I40E_NETDEV_STAT(rx_packets),
  55        I40E_NETDEV_STAT(tx_packets),
  56        I40E_NETDEV_STAT(rx_bytes),
  57        I40E_NETDEV_STAT(tx_bytes),
  58        I40E_NETDEV_STAT(rx_errors),
  59        I40E_NETDEV_STAT(tx_errors),
  60        I40E_NETDEV_STAT(rx_dropped),
  61        I40E_NETDEV_STAT(tx_dropped),
  62        I40E_NETDEV_STAT(collisions),
  63        I40E_NETDEV_STAT(rx_length_errors),
  64        I40E_NETDEV_STAT(rx_crc_errors),
  65};
  66
  67static const struct i40e_stats i40e_gstrings_veb_stats[] = {
  68        I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
  69        I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
  70        I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
  71        I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
  72        I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
  73        I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
  74        I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
  75        I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
  76        I40E_VEB_STAT("rx_discards", stats.rx_discards),
  77        I40E_VEB_STAT("tx_discards", stats.tx_discards),
  78        I40E_VEB_STAT("tx_errors", stats.tx_errors),
  79        I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
  80};
  81
  82static const struct i40e_stats i40e_gstrings_misc_stats[] = {
  83        I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
  84        I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
  85        I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
  86        I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
  87        I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
  88        I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
  89        I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
  90        I40E_VSI_STAT("tx_linearize", tx_linearize),
  91        I40E_VSI_STAT("tx_force_wb", tx_force_wb),
  92        I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt),
  93        I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
  94        I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
  95};
  96
  97/* These PF_STATs might look like duplicates of some NETDEV_STATs,
  98 * but they are separate.  This device supports Virtualization, and
  99 * as such might have several netdevs supporting VMDq and FCoE going
 100 * through a single port.  The NETDEV_STATs are for individual netdevs
 101 * seen at the top of the stack, and the PF_STATs are for the physical
 102 * function at the bottom of the stack hosting those netdevs.
 103 *
 104 * The PF_STATs are appended to the netdev stats only when ethtool -S
 105 * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
 106 */
 107static const struct i40e_stats i40e_gstrings_stats[] = {
 108        I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
 109        I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
 110        I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
 111        I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
 112        I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
 113        I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
 114        I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
 115        I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
 116        I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
 117        I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
 118        I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
 119        I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
 120        I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
 121        I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
 122        I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
 123        I40E_PF_STAT("tx_timeout", tx_timeout_count),
 124        I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
 125        I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
 126        I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
 127        I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
 128        I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
 129        I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
 130        I40E_PF_STAT("rx_size_64", stats.rx_size_64),
 131        I40E_PF_STAT("rx_size_127", stats.rx_size_127),
 132        I40E_PF_STAT("rx_size_255", stats.rx_size_255),
 133        I40E_PF_STAT("rx_size_511", stats.rx_size_511),
 134        I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
 135        I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
 136        I40E_PF_STAT("rx_size_big", stats.rx_size_big),
 137        I40E_PF_STAT("tx_size_64", stats.tx_size_64),
 138        I40E_PF_STAT("tx_size_127", stats.tx_size_127),
 139        I40E_PF_STAT("tx_size_255", stats.tx_size_255),
 140        I40E_PF_STAT("tx_size_511", stats.tx_size_511),
 141        I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
 142        I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
 143        I40E_PF_STAT("tx_size_big", stats.tx_size_big),
 144        I40E_PF_STAT("rx_undersize", stats.rx_undersize),
 145        I40E_PF_STAT("rx_fragments", stats.rx_fragments),
 146        I40E_PF_STAT("rx_oversize", stats.rx_oversize),
 147        I40E_PF_STAT("rx_jabber", stats.rx_jabber),
 148        I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
 149        I40E_PF_STAT("arq_overflows", arq_overflows),
 150        I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
 151        I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
 152        I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
 153        I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
 154        I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
 155        I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
 156        I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
 157
 158        /* LPI stats */
 159        I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
 160        I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
 161        I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
 162        I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
 163};
 164
 165#ifdef I40E_FCOE
 166static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
 167        I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
 168        I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
 169        I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
 170        I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
 171        I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
 172        I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
 173        I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
 174        I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
 175};
 176
 177#endif /* I40E_FCOE */
 178#define I40E_QUEUE_STATS_LEN(n) \
 179        (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
 180            * 2 /* Tx and Rx together */                                     \
 181            * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
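/* Worked example (illustrative, assuming struct i40e_queue_stats holds two
 * u64 counters per queue, packets and bytes): a VSI with 8 queue pairs would
 * contribute 8 * 2 (Tx and Rx) * 2 = 32 per-queue counters to "ethtool -S".
 */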
 182#define I40E_GLOBAL_STATS_LEN   ARRAY_SIZE(i40e_gstrings_stats)
 183#define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
 184#define I40E_MISC_STATS_LEN     ARRAY_SIZE(i40e_gstrings_misc_stats)
 185#ifdef I40E_FCOE
 186#define I40E_FCOE_STATS_LEN     ARRAY_SIZE(i40e_gstrings_fcoe_stats)
 187#define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
 188                                 I40E_FCOE_STATS_LEN + \
 189                                 I40E_MISC_STATS_LEN + \
 190                                 I40E_QUEUE_STATS_LEN((n)))
 191#else
 192#define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
 193                                 I40E_MISC_STATS_LEN + \
 194                                 I40E_QUEUE_STATS_LEN((n)))
 195#endif /* I40E_FCOE */
 196#define I40E_PFC_STATS_LEN ( \
 197                (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
 198                 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
 199                 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
 200                 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
 201                 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
 202                 / sizeof(u64))
 203#define I40E_VEB_TC_STATS_LEN ( \
 204                (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
 205                 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
 206                 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
 207                 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
 208                 / sizeof(u64))
 209#define I40E_VEB_STATS_LEN      ARRAY_SIZE(i40e_gstrings_veb_stats)
 210#define I40E_VEB_STATS_TOTAL    (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
 211#define I40E_PF_STATS_LEN(n)    (I40E_GLOBAL_STATS_LEN + \
 212                                 I40E_PFC_STATS_LEN + \
 213                                 I40E_VSI_STATS_LEN((n)))
 214
 215enum i40e_ethtool_test_id {
 216        I40E_ETH_TEST_REG = 0,
 217        I40E_ETH_TEST_EEPROM,
 218        I40E_ETH_TEST_INTR,
 219        I40E_ETH_TEST_LINK,
 220};
 221
 222static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
 223        "Register test  (offline)",
 224        "Eeprom test    (offline)",
 225        "Interrupt test (offline)",
 226        "Link test   (on/offline)"
 227};
 228
 229#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
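/* Usage sketch (illustrative; "eth0" is just an example interface name):
 * these strings label the self-test results reported by
 *
 *	# ethtool -t eth0 offline
 *
 * while an online test ("ethtool -t eth0 online") is expected to run only the
 * link test, as the string above suggests.
 */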
 230
 231static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
 232        "MFP",
 233        "LinkPolling",
 234        "flow-director-atr",
 235        "veb-stats",
 236        "hw-atr-eviction",
 237};
 238
 239#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
 240
 241/* Private flags with a global effect, restricted to PF 0 */
 242static const char i40e_gl_priv_flags_strings[][ETH_GSTRING_LEN] = {
 243        "vf-true-promisc-support",
 244};
 245
 246#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_priv_flags_strings)
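/* Usage sketch (illustrative; "eth0" is just an example interface name): the
 * flag names above are listed and toggled via the standard ethtool private
 * flag interface, e.g.
 *
 *	# ethtool --show-priv-flags eth0
 *	# ethtool --set-priv-flags eth0 veb-stats on
 *
 * The global flags are only exposed on PF 0 (see i40e_get_drvinfo below).
 */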
 247
 248/**
 249 * i40e_partition_setting_complaint - generic complaint for MFP restriction
 250 * @pf: the PF struct
 251 **/
 252static void i40e_partition_setting_complaint(struct i40e_pf *pf)
 253{
 254        dev_info(&pf->pdev->dev,
 255                 "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
 256}
 257
 258/**
 259 * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
  261 * @pf: the PF struct whose PHY types are to be converted
 261 * @supported: pointer to the ethtool supported variable to fill in
 262 * @advertising: pointer to the ethtool advertising variable to fill in
 263 *
 264 **/
 265static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
 266                                     u32 *advertising)
 267{
 268        struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
 269        u64 phy_types = pf->hw.phy.phy_types;
 270
 271        *supported = 0x0;
 272        *advertising = 0x0;
 273
 274        if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
 275                *supported |= SUPPORTED_Autoneg |
 276                              SUPPORTED_1000baseT_Full;
 277                *advertising |= ADVERTISED_Autoneg;
 278                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
 279                        *advertising |= ADVERTISED_1000baseT_Full;
 280                if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
 281                        *supported |= SUPPORTED_100baseT_Full;
 282                        *advertising |= ADVERTISED_100baseT_Full;
 283                }
 284        }
 285        if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
 286            phy_types & I40E_CAP_PHY_TYPE_XFI ||
 287            phy_types & I40E_CAP_PHY_TYPE_SFI ||
 288            phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
 289            phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
 290                *supported |= SUPPORTED_10000baseT_Full;
 291        if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
 292            phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
 293            phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
 294            phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
 295            phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
 296                *supported |= SUPPORTED_Autoneg |
 297                              SUPPORTED_10000baseT_Full;
 298                *advertising |= ADVERTISED_Autoneg;
 299                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
 300                        *advertising |= ADVERTISED_10000baseT_Full;
 301        }
 302        if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
 303            phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
 304            phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
 305                *supported |= SUPPORTED_40000baseCR4_Full;
 306        if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
 307            phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
 308                *supported |= SUPPORTED_Autoneg |
 309                              SUPPORTED_40000baseCR4_Full;
 310                *advertising |= ADVERTISED_Autoneg;
 311                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB)
 312                        *advertising |= ADVERTISED_40000baseCR4_Full;
 313        }
 314        if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
 315                *supported |= SUPPORTED_Autoneg |
 316                              SUPPORTED_100baseT_Full;
 317                *advertising |= ADVERTISED_Autoneg;
 318                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
 319                        *advertising |= ADVERTISED_100baseT_Full;
 320        }
 321        if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
 322            phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
 323            phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
 324            phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
 325                *supported |= SUPPORTED_Autoneg |
 326                              SUPPORTED_1000baseT_Full;
 327                *advertising |= ADVERTISED_Autoneg;
 328                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
 329                        *advertising |= ADVERTISED_1000baseT_Full;
 330        }
 331        if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
 332                *supported |= SUPPORTED_40000baseSR4_Full;
 333        if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
 334                *supported |= SUPPORTED_40000baseLR4_Full;
 335        if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
 336                *supported |= SUPPORTED_40000baseKR4_Full |
 337                              SUPPORTED_Autoneg;
 338                *advertising |= ADVERTISED_40000baseKR4_Full |
 339                                ADVERTISED_Autoneg;
 340        }
 341        if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
 342                *supported |= SUPPORTED_20000baseKR2_Full |
 343                              SUPPORTED_Autoneg;
 344                *advertising |= ADVERTISED_Autoneg;
 345                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB)
 346                        *advertising |= ADVERTISED_20000baseKR2_Full;
 347        }
 348        if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
 349                if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
 350                        *supported |= SUPPORTED_10000baseKR_Full |
 351                                      SUPPORTED_Autoneg;
 352                *advertising |= ADVERTISED_Autoneg;
 353                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
 354                        if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
 355                                *advertising |= ADVERTISED_10000baseKR_Full;
 356        }
 357        if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
 358                *supported |= SUPPORTED_10000baseKX4_Full |
 359                              SUPPORTED_Autoneg;
 360                *advertising |= ADVERTISED_Autoneg;
 361                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
 362                        *advertising |= ADVERTISED_10000baseKX4_Full;
 363        }
 364        if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
 365                if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
 366                        *supported |= SUPPORTED_1000baseKX_Full |
 367                                      SUPPORTED_Autoneg;
 368                *advertising |= ADVERTISED_Autoneg;
 369                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
 370                        if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
 371                                *advertising |= ADVERTISED_1000baseKX_Full;
 372        }
 373        if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
 374            phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
 375            phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
 376            phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
 377                *supported |= SUPPORTED_Autoneg;
 378                *advertising |= ADVERTISED_Autoneg;
 379        }
 380}
 381
 382/**
 383 * i40e_get_settings_link_up - Get the Link settings for when link is up
 384 * @hw: hw structure
 385 * @ecmd: ethtool command to fill in
 386 * @netdev: network interface device structure
  387 * @pf: pointer to the PF struct
 388 **/
 389static void i40e_get_settings_link_up(struct i40e_hw *hw,
 390                                      struct ethtool_cmd *ecmd,
 391                                      struct net_device *netdev,
 392                                      struct i40e_pf *pf)
 393{
 394        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
 395        u32 link_speed = hw_link_info->link_speed;
 396        u32 e_advertising = 0x0;
 397        u32 e_supported = 0x0;
 398
 399        /* Initialize supported and advertised settings based on phy settings */
 400        switch (hw_link_info->phy_type) {
 401        case I40E_PHY_TYPE_40GBASE_CR4:
 402        case I40E_PHY_TYPE_40GBASE_CR4_CU:
 403                ecmd->supported = SUPPORTED_Autoneg |
 404                                  SUPPORTED_40000baseCR4_Full;
 405                ecmd->advertising = ADVERTISED_Autoneg |
 406                                    ADVERTISED_40000baseCR4_Full;
 407                break;
 408        case I40E_PHY_TYPE_XLAUI:
 409        case I40E_PHY_TYPE_XLPPI:
 410        case I40E_PHY_TYPE_40GBASE_AOC:
 411                ecmd->supported = SUPPORTED_40000baseCR4_Full;
 412                break;
 413        case I40E_PHY_TYPE_40GBASE_SR4:
 414                ecmd->supported = SUPPORTED_40000baseSR4_Full;
 415                break;
 416        case I40E_PHY_TYPE_40GBASE_LR4:
 417                ecmd->supported = SUPPORTED_40000baseLR4_Full;
 418                break;
 419        case I40E_PHY_TYPE_10GBASE_SR:
 420        case I40E_PHY_TYPE_10GBASE_LR:
 421        case I40E_PHY_TYPE_1000BASE_SX:
 422        case I40E_PHY_TYPE_1000BASE_LX:
 423                ecmd->supported = SUPPORTED_10000baseT_Full;
 424                if (hw_link_info->module_type[2] &
 425                    I40E_MODULE_TYPE_1000BASE_SX ||
 426                    hw_link_info->module_type[2] &
 427                    I40E_MODULE_TYPE_1000BASE_LX) {
 428                        ecmd->supported |= SUPPORTED_1000baseT_Full;
 429                        if (hw_link_info->requested_speeds &
 430                            I40E_LINK_SPEED_1GB)
 431                                ecmd->advertising |= ADVERTISED_1000baseT_Full;
 432                }
 433                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
 434                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
 435                break;
 436        case I40E_PHY_TYPE_10GBASE_T:
 437        case I40E_PHY_TYPE_1000BASE_T:
 438        case I40E_PHY_TYPE_100BASE_TX:
 439                ecmd->supported = SUPPORTED_Autoneg |
 440                                  SUPPORTED_10000baseT_Full |
 441                                  SUPPORTED_1000baseT_Full |
 442                                  SUPPORTED_100baseT_Full;
 443                ecmd->advertising = ADVERTISED_Autoneg;
 444                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
 445                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
 446                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
 447                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
 448                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
 449                        ecmd->advertising |= ADVERTISED_100baseT_Full;
 450                break;
 451        case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
 452                ecmd->supported = SUPPORTED_Autoneg |
 453                                  SUPPORTED_1000baseT_Full;
 454                ecmd->advertising = ADVERTISED_Autoneg |
 455                                    ADVERTISED_1000baseT_Full;
 456                break;
 457        case I40E_PHY_TYPE_10GBASE_CR1_CU:
 458        case I40E_PHY_TYPE_10GBASE_CR1:
 459                ecmd->supported = SUPPORTED_Autoneg |
 460                                  SUPPORTED_10000baseT_Full;
 461                ecmd->advertising = ADVERTISED_Autoneg |
 462                                    ADVERTISED_10000baseT_Full;
 463                break;
 464        case I40E_PHY_TYPE_XAUI:
 465        case I40E_PHY_TYPE_XFI:
 466        case I40E_PHY_TYPE_SFI:
 467        case I40E_PHY_TYPE_10GBASE_SFPP_CU:
 468        case I40E_PHY_TYPE_10GBASE_AOC:
 469                ecmd->supported = SUPPORTED_10000baseT_Full;
  470                ecmd->advertising = ADVERTISED_10000baseT_Full;
 471                break;
 472        case I40E_PHY_TYPE_SGMII:
 473                ecmd->supported = SUPPORTED_Autoneg |
 474                                  SUPPORTED_1000baseT_Full;
 475                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
 476                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
 477                if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
 478                        ecmd->supported |= SUPPORTED_100baseT_Full;
 479                        if (hw_link_info->requested_speeds &
 480                            I40E_LINK_SPEED_100MB)
 481                                ecmd->advertising |= ADVERTISED_100baseT_Full;
 482                }
 483                break;
 484        case I40E_PHY_TYPE_40GBASE_KR4:
 485        case I40E_PHY_TYPE_20GBASE_KR2:
 486        case I40E_PHY_TYPE_10GBASE_KR:
 487        case I40E_PHY_TYPE_10GBASE_KX4:
 488        case I40E_PHY_TYPE_1000BASE_KX:
 489                ecmd->supported |= SUPPORTED_40000baseKR4_Full |
 490                                   SUPPORTED_20000baseKR2_Full |
 491                                   SUPPORTED_10000baseKR_Full |
 492                                   SUPPORTED_10000baseKX4_Full |
 493                                   SUPPORTED_1000baseKX_Full |
 494                                   SUPPORTED_Autoneg;
 495                ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
 496                                     ADVERTISED_20000baseKR2_Full |
 497                                     ADVERTISED_10000baseKR_Full |
 498                                     ADVERTISED_10000baseKX4_Full |
 499                                     ADVERTISED_1000baseKX_Full |
 500                                     ADVERTISED_Autoneg;
 501                break;
 502        case I40E_PHY_TYPE_25GBASE_KR:
 503        case I40E_PHY_TYPE_25GBASE_CR:
 504        case I40E_PHY_TYPE_25GBASE_SR:
 505        case I40E_PHY_TYPE_25GBASE_LR:
 506                ecmd->supported = SUPPORTED_Autoneg;
 507                ecmd->advertising = ADVERTISED_Autoneg;
  508                /* TODO: add speeds when ethtool is ready to support */
 509                break;
 510        default:
 511                /* if we got here and link is up something bad is afoot */
 512                netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
 513                            hw_link_info->phy_type);
 514        }
 515
 516        /* Now that we've worked out everything that could be supported by the
  517         * current PHY type, get what is supported by the NVM and intersect
  518         * the two to get what is truly supported
 519         */
 520        i40e_phy_type_to_ethtool(pf, &e_supported,
 521                                 &e_advertising);
 522
 523        ecmd->supported = ecmd->supported & e_supported;
 524        ecmd->advertising = ecmd->advertising & e_advertising;
 525
 526        /* Set speed and duplex */
 527        switch (link_speed) {
 528        case I40E_LINK_SPEED_40GB:
 529                ethtool_cmd_speed_set(ecmd, SPEED_40000);
 530                break;
 531        case I40E_LINK_SPEED_25GB:
 532#ifdef SPEED_25000
 533                ethtool_cmd_speed_set(ecmd, SPEED_25000);
 534#else
 535                netdev_info(netdev,
 536                            "Speed is 25G, display not supported by this version of ethtool.\n");
 537#endif
 538                break;
 539        case I40E_LINK_SPEED_20GB:
 540                ethtool_cmd_speed_set(ecmd, SPEED_20000);
 541                break;
 542        case I40E_LINK_SPEED_10GB:
 543                ethtool_cmd_speed_set(ecmd, SPEED_10000);
 544                break;
 545        case I40E_LINK_SPEED_1GB:
 546                ethtool_cmd_speed_set(ecmd, SPEED_1000);
 547                break;
 548        case I40E_LINK_SPEED_100MB:
 549                ethtool_cmd_speed_set(ecmd, SPEED_100);
 550                break;
 551        default:
 552                break;
 553        }
 554        ecmd->duplex = DUPLEX_FULL;
 555}
 556
 557/**
 558 * i40e_get_settings_link_down - Get the Link settings for when link is down
 559 * @hw: hw structure
 560 * @ecmd: ethtool command to fill in
 561 *
 562 * Reports link settings that can be determined when link is down
 563 **/
 564static void i40e_get_settings_link_down(struct i40e_hw *hw,
 565                                        struct ethtool_cmd *ecmd,
 566                                        struct i40e_pf *pf)
 567{
 568        /* link is down and the driver needs to fall back on
 569         * supported phy types to figure out what info to display
 570         */
 571        i40e_phy_type_to_ethtool(pf, &ecmd->supported,
 572                                 &ecmd->advertising);
 573
  574        /* With no link, speed and duplex are unknown */
 575        ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
 576        ecmd->duplex = DUPLEX_UNKNOWN;
 577}
 578
 579/**
 580 * i40e_get_settings - Get Link Speed and Duplex settings
 581 * @netdev: network interface device structure
 582 * @ecmd: ethtool command
 583 *
 584 * Reports speed/duplex settings based on media_type
 585 **/
 586static int i40e_get_settings(struct net_device *netdev,
 587                             struct ethtool_cmd *ecmd)
 588{
 589        struct i40e_netdev_priv *np = netdev_priv(netdev);
 590        struct i40e_pf *pf = np->vsi->back;
 591        struct i40e_hw *hw = &pf->hw;
 592        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
 593        bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
 594
 595        if (link_up)
 596                i40e_get_settings_link_up(hw, ecmd, netdev, pf);
 597        else
 598                i40e_get_settings_link_down(hw, ecmd, pf);
 599
 600        /* Now set the settings that don't rely on link being up/down */
 601        /* Set autoneg settings */
 602        ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
 603                          AUTONEG_ENABLE : AUTONEG_DISABLE);
 604
 605        switch (hw->phy.media_type) {
 606        case I40E_MEDIA_TYPE_BACKPLANE:
 607                ecmd->supported |= SUPPORTED_Autoneg |
 608                                   SUPPORTED_Backplane;
 609                ecmd->advertising |= ADVERTISED_Autoneg |
 610                                     ADVERTISED_Backplane;
 611                ecmd->port = PORT_NONE;
 612                break;
 613        case I40E_MEDIA_TYPE_BASET:
 614                ecmd->supported |= SUPPORTED_TP;
 615                ecmd->advertising |= ADVERTISED_TP;
 616                ecmd->port = PORT_TP;
 617                break;
 618        case I40E_MEDIA_TYPE_DA:
 619        case I40E_MEDIA_TYPE_CX4:
 620                ecmd->supported |= SUPPORTED_FIBRE;
 621                ecmd->advertising |= ADVERTISED_FIBRE;
 622                ecmd->port = PORT_DA;
 623                break;
 624        case I40E_MEDIA_TYPE_FIBER:
 625                ecmd->supported |= SUPPORTED_FIBRE;
 626                ecmd->port = PORT_FIBRE;
 627                break;
 628        case I40E_MEDIA_TYPE_UNKNOWN:
 629        default:
 630                ecmd->port = PORT_OTHER;
 631                break;
 632        }
 633
 634        /* Set transceiver */
 635        ecmd->transceiver = XCVR_EXTERNAL;
 636
 637        /* Set flow control settings */
 638        ecmd->supported |= SUPPORTED_Pause;
 639
 640        switch (hw->fc.requested_mode) {
 641        case I40E_FC_FULL:
 642                ecmd->advertising |= ADVERTISED_Pause;
 643                break;
 644        case I40E_FC_TX_PAUSE:
 645                ecmd->advertising |= ADVERTISED_Asym_Pause;
 646                break;
 647        case I40E_FC_RX_PAUSE:
 648                ecmd->advertising |= (ADVERTISED_Pause |
 649                                      ADVERTISED_Asym_Pause);
 650                break;
 651        default:
 652                ecmd->advertising &= ~(ADVERTISED_Pause |
 653                                       ADVERTISED_Asym_Pause);
 654                break;
 655        }
 656
 657        return 0;
 658}
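/* Note (illustrative): the fields filled in above are what a plain
 * "ethtool <dev>" query reports as Supported/Advertised link modes, Speed,
 * Duplex, Port and Auto-negotiation.
 */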
 659
 660/**
 661 * i40e_set_settings - Set Speed and Duplex
 662 * @netdev: network interface device structure
 663 * @ecmd: ethtool command
 664 *
 665 * Set speed/duplex per media_types advertised/forced
 666 **/
 667static int i40e_set_settings(struct net_device *netdev,
 668                             struct ethtool_cmd *ecmd)
 669{
 670        struct i40e_netdev_priv *np = netdev_priv(netdev);
 671        struct i40e_aq_get_phy_abilities_resp abilities;
 672        struct i40e_aq_set_phy_config config;
 673        struct i40e_pf *pf = np->vsi->back;
 674        struct i40e_vsi *vsi = np->vsi;
 675        struct i40e_hw *hw = &pf->hw;
 676        struct ethtool_cmd safe_ecmd;
 677        i40e_status status = 0;
 678        bool change = false;
 679        int err = 0;
 680        u8 autoneg;
 681        u32 advertise;
 682
 683        /* Changing port settings is not supported if this isn't the
 684         * port's controlling PF
 685         */
 686        if (hw->partition_id != 1) {
 687                i40e_partition_setting_complaint(pf);
 688                return -EOPNOTSUPP;
 689        }
 690
 691        if (vsi != pf->vsi[pf->lan_vsi])
 692                return -EOPNOTSUPP;
 693
 694        if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
 695            hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
 696            hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
 697            hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
 698            hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
 699                return -EOPNOTSUPP;
 700
 701        if (hw->device_id == I40E_DEV_ID_KX_B ||
 702            hw->device_id == I40E_DEV_ID_KX_C ||
 703            hw->device_id == I40E_DEV_ID_20G_KR2 ||
 704            hw->device_id == I40E_DEV_ID_20G_KR2_A) {
 705                netdev_info(netdev, "Changing settings is not supported on backplane.\n");
 706                return -EOPNOTSUPP;
 707        }
 708
 709        /* get our own copy of the bits to check against */
 710        memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
 711        i40e_get_settings(netdev, &safe_ecmd);
 712
  713        /* save autoneg and advertised modes out of ecmd */
 714        autoneg = ecmd->autoneg;
 715        advertise = ecmd->advertising;
 716
  717        /* set autoneg and advertising back to what they currently are */
 718        ecmd->autoneg = safe_ecmd.autoneg;
 719        ecmd->advertising = safe_ecmd.advertising;
 720
 721        ecmd->cmd = safe_ecmd.cmd;
 722        /* If ecmd and safe_ecmd are not the same now, then they are
 723         * trying to set something that we do not support
 724         */
 725        if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
 726                return -EOPNOTSUPP;
 727
 728        while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
 729                usleep_range(1000, 2000);
 730
 731        /* Get the current phy config */
 732        status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
 733                                              NULL);
 734        if (status)
 735                return -EAGAIN;
 736
 737        /* Copy abilities to config in case autoneg is not
 738         * set below
 739         */
 740        memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
 741        config.abilities = abilities.abilities;
 742
 743        /* Check autoneg */
 744        if (autoneg == AUTONEG_ENABLE) {
 745                /* If autoneg was not already enabled */
 746                if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
 747                        /* If autoneg is not supported, return error */
 748                        if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
 749                                netdev_info(netdev, "Autoneg not supported on this phy\n");
 750                                return -EINVAL;
 751                        }
 752                        /* Autoneg is allowed to change */
 753                        config.abilities = abilities.abilities |
 754                                           I40E_AQ_PHY_ENABLE_AN;
 755                        change = true;
 756                }
 757        } else {
 758                /* If autoneg is currently enabled */
 759                if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
  760                        /* If autoneg is supported, 10GBASE_T is the only PHY
 761                         * that can disable it, so otherwise return error
 762                         */
 763                        if (safe_ecmd.supported & SUPPORTED_Autoneg &&
 764                            hw->phy.link_info.phy_type !=
 765                            I40E_PHY_TYPE_10GBASE_T) {
 766                                netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
 767                                return -EINVAL;
 768                        }
 769                        /* Autoneg is allowed to change */
 770                        config.abilities = abilities.abilities &
 771                                           ~I40E_AQ_PHY_ENABLE_AN;
 772                        change = true;
 773                }
 774        }
 775
 776        if (advertise & ~safe_ecmd.supported)
 777                return -EINVAL;
 778
 779        if (advertise & ADVERTISED_100baseT_Full)
 780                config.link_speed |= I40E_LINK_SPEED_100MB;
 781        if (advertise & ADVERTISED_1000baseT_Full ||
 782            advertise & ADVERTISED_1000baseKX_Full)
 783                config.link_speed |= I40E_LINK_SPEED_1GB;
 784        if (advertise & ADVERTISED_10000baseT_Full ||
 785            advertise & ADVERTISED_10000baseKX4_Full ||
 786            advertise & ADVERTISED_10000baseKR_Full)
 787                config.link_speed |= I40E_LINK_SPEED_10GB;
 788        if (advertise & ADVERTISED_20000baseKR2_Full)
 789                config.link_speed |= I40E_LINK_SPEED_20GB;
 790        if (advertise & ADVERTISED_40000baseKR4_Full ||
 791            advertise & ADVERTISED_40000baseCR4_Full ||
 792            advertise & ADVERTISED_40000baseSR4_Full ||
 793            advertise & ADVERTISED_40000baseLR4_Full)
 794                config.link_speed |= I40E_LINK_SPEED_40GB;
 795
 796        /* If speed didn't get set, set it to what it currently is.
 797         * This is needed because if advertise is 0 (as it is when autoneg
 798         * is disabled) then speed won't get set.
 799         */
 800        if (!config.link_speed)
 801                config.link_speed = abilities.link_speed;
 802
 803        if (change || (abilities.link_speed != config.link_speed)) {
 804                /* copy over the rest of the abilities */
 805                config.phy_type = abilities.phy_type;
 806                config.phy_type_ext = abilities.phy_type_ext;
 807                config.eee_capability = abilities.eee_capability;
 808                config.eeer = abilities.eeer_val;
 809                config.low_power_ctrl = abilities.d3_lpan;
 810                config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
 811                                    I40E_AQ_PHY_FEC_CONFIG_MASK;
 812
 813                /* save the requested speeds */
 814                hw->phy.link_info.requested_speeds = config.link_speed;
 815                /* set link and auto negotiation so changes take effect */
 816                config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
  817                /* If link is up, put link down */
 818                if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
 819                        /* Tell the OS link is going down, the link will go
 820                         * back up when fw says it is ready asynchronously
 821                         */
 822                        i40e_print_link_message(vsi, false);
 823                        netif_carrier_off(netdev);
 824                        netif_tx_stop_all_queues(netdev);
 825                }
 826
 827                /* make the aq call */
 828                status = i40e_aq_set_phy_config(hw, &config, NULL);
 829                if (status) {
 830                        netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
 831                                    i40e_stat_str(hw, status),
 832                                    i40e_aq_str(hw, hw->aq.asq_last_status));
 833                        return -EAGAIN;
 834                }
 835
 836                status = i40e_update_link_info(hw);
 837                if (status)
 838                        netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
 839                                   i40e_stat_str(hw, status),
 840                                   i40e_aq_str(hw, hw->aq.asq_last_status));
 841
 842        } else {
 843                netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
 844        }
 845
 846        return err;
 847}
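/* Usage sketch (illustrative; interface name and mask are examples only):
 * this handler backs "ethtool -s", e.g.
 *
 *	# ethtool -s eth0 autoneg on advertise 0x1000
 *
 * where 0x1000 corresponds to ADVERTISED_10000baseT_Full; the requested mask
 * is checked against safe_ecmd.supported before the PHY config is touched.
 */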
 848
 849static int i40e_nway_reset(struct net_device *netdev)
 850{
 851        /* restart autonegotiation */
 852        struct i40e_netdev_priv *np = netdev_priv(netdev);
 853        struct i40e_pf *pf = np->vsi->back;
 854        struct i40e_hw *hw = &pf->hw;
 855        bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
 856        i40e_status ret = 0;
 857
 858        ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
 859        if (ret) {
 860                netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
 861                            i40e_stat_str(hw, ret),
 862                            i40e_aq_str(hw, hw->aq.asq_last_status));
 863                return -EIO;
 864        }
 865
 866        return 0;
 867}
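/* Note (illustrative): this is the handler behind "ethtool -r <dev>", which
 * restarts autonegotiation on the link.
 */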
 868
 869/**
  870 * i40e_get_pauseparam - Get Flow Control status
  871 * Reports the current Tx/Rx pause (link flow control) status
 872 **/
 873static void i40e_get_pauseparam(struct net_device *netdev,
 874                                struct ethtool_pauseparam *pause)
 875{
 876        struct i40e_netdev_priv *np = netdev_priv(netdev);
 877        struct i40e_pf *pf = np->vsi->back;
 878        struct i40e_hw *hw = &pf->hw;
 879        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
 880        struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
 881
 882        pause->autoneg =
 883                ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
 884                  AUTONEG_ENABLE : AUTONEG_DISABLE);
 885
 886        /* PFC enabled so report LFC as off */
 887        if (dcbx_cfg->pfc.pfcenable) {
 888                pause->rx_pause = 0;
 889                pause->tx_pause = 0;
 890                return;
 891        }
 892
 893        if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
 894                pause->rx_pause = 1;
 895        } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
 896                pause->tx_pause = 1;
 897        } else if (hw->fc.current_mode == I40E_FC_FULL) {
 898                pause->rx_pause = 1;
 899                pause->tx_pause = 1;
 900        }
 901}
 902
 903/**
 904 * i40e_set_pauseparam - Set Flow Control parameter
 905 * @netdev: network interface device structure
  906 * @pause: requested Tx/Rx flow control parameters
 907 **/
 908static int i40e_set_pauseparam(struct net_device *netdev,
 909                               struct ethtool_pauseparam *pause)
 910{
 911        struct i40e_netdev_priv *np = netdev_priv(netdev);
 912        struct i40e_pf *pf = np->vsi->back;
 913        struct i40e_vsi *vsi = np->vsi;
 914        struct i40e_hw *hw = &pf->hw;
 915        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
 916        struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
 917        bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
 918        i40e_status status;
 919        u8 aq_failures;
 920        int err = 0;
 921
 922        /* Changing the port's flow control is not supported if this isn't the
 923         * port's controlling PF
 924         */
 925        if (hw->partition_id != 1) {
 926                i40e_partition_setting_complaint(pf);
 927                return -EOPNOTSUPP;
 928        }
 929
 930        if (vsi != pf->vsi[pf->lan_vsi])
 931                return -EOPNOTSUPP;
 932
 933        if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
 934            AUTONEG_ENABLE : AUTONEG_DISABLE)) {
 935                netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
 936                return -EOPNOTSUPP;
 937        }
 938
 939        /* If we have link and don't have autoneg */
 940        if (!test_bit(__I40E_DOWN, &pf->state) &&
 941            !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
  942                /* Send message that it might not necessarily work */
 943                netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
 944        }
 945
 946        if (dcbx_cfg->pfc.pfcenable) {
 947                netdev_info(netdev,
 948                            "Priority flow control enabled. Cannot set link flow control.\n");
 949                return -EOPNOTSUPP;
 950        }
 951
 952        if (pause->rx_pause && pause->tx_pause)
 953                hw->fc.requested_mode = I40E_FC_FULL;
 954        else if (pause->rx_pause && !pause->tx_pause)
 955                hw->fc.requested_mode = I40E_FC_RX_PAUSE;
 956        else if (!pause->rx_pause && pause->tx_pause)
 957                hw->fc.requested_mode = I40E_FC_TX_PAUSE;
 958        else if (!pause->rx_pause && !pause->tx_pause)
 959                hw->fc.requested_mode = I40E_FC_NONE;
 960        else
  961                return -EINVAL;
 962
 963        /* Tell the OS link is going down, the link will go back up when fw
 964         * says it is ready asynchronously
 965         */
 966        i40e_print_link_message(vsi, false);
 967        netif_carrier_off(netdev);
 968        netif_tx_stop_all_queues(netdev);
 969
  970        /* Set the fc mode and only restart AN if link is up */
 971        status = i40e_set_fc(hw, &aq_failures, link_up);
 972
 973        if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
 974                netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
 975                            i40e_stat_str(hw, status),
 976                            i40e_aq_str(hw, hw->aq.asq_last_status));
 977                err = -EAGAIN;
 978        }
 979        if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
 980                netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
 981                            i40e_stat_str(hw, status),
 982                            i40e_aq_str(hw, hw->aq.asq_last_status));
 983                err = -EAGAIN;
 984        }
 985        if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
 986                netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
 987                            i40e_stat_str(hw, status),
 988                            i40e_aq_str(hw, hw->aq.asq_last_status));
 989                err = -EAGAIN;
 990        }
 991
 992        if (!test_bit(__I40E_DOWN, &pf->state)) {
 993                /* Give it a little more time to try to come back */
 994                msleep(75);
 995                if (!test_bit(__I40E_DOWN, &pf->state))
 996                        return i40e_nway_reset(netdev);
 997        }
 998
 999        return err;
1000}
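/* Usage sketch (illustrative; "eth0" is just an example interface name):
 *
 *	# ethtool -a eth0
 *	# ethtool -A eth0 rx on tx on
 *
 * As the checks above enforce, pause autoneg must instead be changed with
 * "ethtool -s eth0 autoneg <on|off>", and link flow control is refused while
 * priority flow control (PFC) is enabled.
 */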
1001
1002static u32 i40e_get_msglevel(struct net_device *netdev)
1003{
1004        struct i40e_netdev_priv *np = netdev_priv(netdev);
1005        struct i40e_pf *pf = np->vsi->back;
1006        u32 debug_mask = pf->hw.debug_mask;
1007
1008        if (debug_mask)
1009                netdev_info(netdev, "i40e debug_mask: 0x%08X\n", debug_mask);
1010
1011        return pf->msg_enable;
1012}
1013
1014static void i40e_set_msglevel(struct net_device *netdev, u32 data)
1015{
1016        struct i40e_netdev_priv *np = netdev_priv(netdev);
1017        struct i40e_pf *pf = np->vsi->back;
1018
1019        if (I40E_DEBUG_USER & data)
1020                pf->hw.debug_mask = data;
1021        else
1022                pf->msg_enable = data;
1023}
1024
1025static int i40e_get_regs_len(struct net_device *netdev)
1026{
1027        int reg_count = 0;
1028        int i;
1029
1030        for (i = 0; i40e_reg_list[i].offset != 0; i++)
1031                reg_count += i40e_reg_list[i].elements;
1032
1033        return reg_count * sizeof(u32);
1034}
1035
1036static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1037                          void *p)
1038{
1039        struct i40e_netdev_priv *np = netdev_priv(netdev);
1040        struct i40e_pf *pf = np->vsi->back;
1041        struct i40e_hw *hw = &pf->hw;
1042        u32 *reg_buf = p;
1043        int i, j, ri;
1044        u32 reg;
1045
1046        /* Tell ethtool which driver-version-specific regs output we have.
1047         *
1048         * At some point, if we have ethtool doing special formatting of
1049         * this data, it will rely on this version number to know how to
1050         * interpret things.  Hence, this needs to be updated if/when the
1051         * diags register table is changed.
1052         */
1053        regs->version = 1;
1054
1055        /* loop through the diags reg table for what to print */
1056        ri = 0;
1057        for (i = 0; i40e_reg_list[i].offset != 0; i++) {
1058                for (j = 0; j < i40e_reg_list[i].elements; j++) {
1059                        reg = i40e_reg_list[i].offset
1060                                + (j * i40e_reg_list[i].stride);
1061                        reg_buf[ri++] = rd32(hw, reg);
1062                }
1063        }
1064
1065}
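/* Note (illustrative): this register snapshot is what "ethtool -d <dev>"
 * retrieves; regs->version above tells a matching ethtool pretty-printer
 * which layout of i40e_reg_list it is looking at.
 */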
1066
1067static int i40e_get_eeprom(struct net_device *netdev,
1068                           struct ethtool_eeprom *eeprom, u8 *bytes)
1069{
1070        struct i40e_netdev_priv *np = netdev_priv(netdev);
1071        struct i40e_hw *hw = &np->vsi->back->hw;
1072        struct i40e_pf *pf = np->vsi->back;
1073        int ret_val = 0, len, offset;
1074        u8 *eeprom_buff;
1075        u16 i, sectors;
1076        bool last;
1077        u32 magic;
1078
1079#define I40E_NVM_SECTOR_SIZE  4096
1080        if (eeprom->len == 0)
1081                return -EINVAL;
1082
1083        /* check for NVMUpdate access method */
1084        magic = hw->vendor_id | (hw->device_id << 16);
1085        if (eeprom->magic && eeprom->magic != magic) {
1086                struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
1087                int errno = 0;
1088
1089                /* make sure it is the right magic for NVMUpdate */
1090                if ((eeprom->magic >> 16) != hw->device_id)
1091                        errno = -EINVAL;
1092                else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
1093                         test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
1094                        errno = -EBUSY;
1095                else
1096                        ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
1097
1098                if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
1099                        dev_info(&pf->pdev->dev,
1100                                 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
1101                                 ret_val, hw->aq.asq_last_status, errno,
1102                                 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
1103                                 cmd->offset, cmd->data_size);
1104
1105                return errno;
1106        }
1107
1108        /* normal ethtool get_eeprom support */
1109        eeprom->magic = hw->vendor_id | (hw->device_id << 16);
1110
1111        eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
1112        if (!eeprom_buff)
1113                return -ENOMEM;
1114
1115        ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
1116        if (ret_val) {
1117                dev_info(&pf->pdev->dev,
1118                         "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
1119                         ret_val, hw->aq.asq_last_status);
1120                goto free_buff;
1121        }
1122
1123        sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
1124        sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
1125        len = I40E_NVM_SECTOR_SIZE;
1126        last = false;
1127        for (i = 0; i < sectors; i++) {
1128                if (i == (sectors - 1)) {
1129                        len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
1130                        last = true;
1131                }
 1132                offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i);
1133                ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
1134                                (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
1135                                last, NULL);
1136                if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
1137                        dev_info(&pf->pdev->dev,
1138                                 "read NVM failed, invalid offset 0x%x\n",
1139                                 offset);
1140                        break;
1141                } else if (ret_val &&
1142                           hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
1143                        dev_info(&pf->pdev->dev,
1144                                 "read NVM failed, access, offset 0x%x\n",
1145                                 offset);
1146                        break;
1147                } else if (ret_val) {
1148                        dev_info(&pf->pdev->dev,
1149                                 "read NVM failed offset %d err=%d status=0x%x\n",
1150                                 offset, ret_val, hw->aq.asq_last_status);
1151                        break;
1152                }
1153        }
1154
1155        i40e_release_nvm(hw);
1156        memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
1157free_buff:
1158        kfree(eeprom_buff);
1159        return ret_val;
1160}
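/* Usage sketch (illustrative; "eth0" is just an example interface name): a
 * plain NVM dump goes through the normal ethtool path above, e.g.
 *
 *	# ethtool -e eth0 offset 0 length 128
 *
 * whereas the NVMUpdate tool supplies its own magic and is routed to
 * i40e_nvmupd_command() instead.
 */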
1161
1162static int i40e_get_eeprom_len(struct net_device *netdev)
1163{
1164        struct i40e_netdev_priv *np = netdev_priv(netdev);
1165        struct i40e_hw *hw = &np->vsi->back->hw;
1166        u32 val;
1167
1168        val = (rd32(hw, I40E_GLPCI_LBARCTRL)
1169                & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
1170                >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
1171        /* register returns value in power of 2, 64Kbyte chunks. */
1172        val = (64 * 1024) * BIT(val);
1173        return val;
1174}
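/* Worked example (illustrative): the FL_SIZE field encodes the flash size as
 * a power-of-two count of 64 KB chunks, so a field value of 3 yields
 * 64 * 1024 * BIT(3) = 512 KB reported as the EEPROM length.
 */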
1175
1176static int i40e_set_eeprom(struct net_device *netdev,
1177                           struct ethtool_eeprom *eeprom, u8 *bytes)
1178{
1179        struct i40e_netdev_priv *np = netdev_priv(netdev);
1180        struct i40e_hw *hw = &np->vsi->back->hw;
1181        struct i40e_pf *pf = np->vsi->back;
1182        struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
1183        int ret_val = 0;
1184        int errno = 0;
1185        u32 magic;
1186
1187        /* normal ethtool set_eeprom is not supported */
1188        magic = hw->vendor_id | (hw->device_id << 16);
1189        if (eeprom->magic == magic)
1190                errno = -EOPNOTSUPP;
1191        /* check for NVMUpdate access method */
1192        else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
1193                errno = -EINVAL;
1194        else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
1195                 test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
1196                errno = -EBUSY;
1197        else
1198                ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
1199
1200        if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
1201                dev_info(&pf->pdev->dev,
1202                         "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
1203                         ret_val, hw->aq.asq_last_status, errno,
1204                         (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
1205                         cmd->offset, cmd->data_size);
1206
1207        return errno;
1208}
1209
1210static void i40e_get_drvinfo(struct net_device *netdev,
1211                             struct ethtool_drvinfo *drvinfo)
1212{
1213        struct i40e_netdev_priv *np = netdev_priv(netdev);
1214        struct i40e_vsi *vsi = np->vsi;
1215        struct i40e_pf *pf = vsi->back;
1216
1217        strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
1218        strlcpy(drvinfo->version, i40e_driver_version_str,
1219                sizeof(drvinfo->version));
1220        strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
1221                sizeof(drvinfo->fw_version));
1222        strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
1223                sizeof(drvinfo->bus_info));
1224        drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
1225        if (pf->hw.pf_id == 0)
1226                drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
1227}
1228
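/**
 * i40e_get_ringparam - report the current and maximum descriptor counts
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure to fill
 *
 * All queues share the same descriptor count, so ring 0 is reported.
 **/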
1229static void i40e_get_ringparam(struct net_device *netdev,
1230                               struct ethtool_ringparam *ring)
1231{
1232        struct i40e_netdev_priv *np = netdev_priv(netdev);
1233        struct i40e_pf *pf = np->vsi->back;
1234        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
1235
1236        ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
1237        ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
1238        ring->rx_mini_max_pending = 0;
1239        ring->rx_jumbo_max_pending = 0;
1240        ring->rx_pending = vsi->rx_rings[0]->count;
1241        ring->tx_pending = vsi->tx_rings[0]->count;
1242        ring->rx_mini_pending = 0;
1243        ring->rx_jumbo_pending = 0;
1244}
1245
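/**
 * i40e_set_ringparam - change the Tx/Rx descriptor counts
 * @netdev: network interface device structure
 * @ring: requested descriptor counts from ethtool
 *
 * Allocates replacement rings with the new counts, then briefly brings the
 * interface down to swap them in. If the interface is not running, the new
 * counts simply take effect on the next open. Returns 0 on success or a
 * negative error code on allocation failure.
 **/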
1246static int i40e_set_ringparam(struct net_device *netdev,
1247                              struct ethtool_ringparam *ring)
1248{
1249        struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
1250        struct i40e_netdev_priv *np = netdev_priv(netdev);
1251        struct i40e_hw *hw = &np->vsi->back->hw;
1252        struct i40e_vsi *vsi = np->vsi;
1253        struct i40e_pf *pf = vsi->back;
1254        u32 new_rx_count, new_tx_count;
1255        int i, err = 0;
1256
1257        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1258                return -EINVAL;
1259
1260        if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
1261            ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
1262            ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
1263            ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
1264                netdev_info(netdev,
1265                            "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
1266                            ring->tx_pending, ring->rx_pending,
1267                            I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
1268                return -EINVAL;
1269        }
1270
1271        new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
1272        new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
1273
1274        /* if nothing to do return success */
1275        if ((new_tx_count == vsi->tx_rings[0]->count) &&
1276            (new_rx_count == vsi->rx_rings[0]->count))
1277                return 0;
1278
1279        while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
1280                usleep_range(1000, 2000);
1281
1282        if (!netif_running(vsi->netdev)) {
1283                /* simple case - set for the next time the netdev is started */
1284                for (i = 0; i < vsi->num_queue_pairs; i++) {
1285                        vsi->tx_rings[i]->count = new_tx_count;
1286                        vsi->rx_rings[i]->count = new_rx_count;
1287                }
1288                goto done;
1289        }
1290
1291        /* We can't just free everything and then setup again,
1292         * because the ISRs in MSI-X mode get passed pointers
1293         * to the Tx and Rx ring structs.
1294         */
1295
1296        /* alloc updated Tx resources */
1297        if (new_tx_count != vsi->tx_rings[0]->count) {
1298                netdev_info(netdev,
1299                            "Changing Tx descriptor count from %d to %d.\n",
1300                            vsi->tx_rings[0]->count, new_tx_count);
1301                tx_rings = kcalloc(vsi->alloc_queue_pairs,
1302                                   sizeof(struct i40e_ring), GFP_KERNEL);
1303                if (!tx_rings) {
1304                        err = -ENOMEM;
1305                        goto done;
1306                }
1307
1308                for (i = 0; i < vsi->num_queue_pairs; i++) {
1309                        /* clone ring and setup updated count */
1310                        tx_rings[i] = *vsi->tx_rings[i];
1311                        tx_rings[i].count = new_tx_count;
1312                        /* the desc and bi pointers will be reallocated in the
1313                         * setup call
1314                         */
1315                        tx_rings[i].desc = NULL;
1316                        tx_rings[i].rx_bi = NULL;
1317                        err = i40e_setup_tx_descriptors(&tx_rings[i]);
1318                        if (err) {
1319                                while (i) {
1320                                        i--;
1321                                        i40e_free_tx_resources(&tx_rings[i]);
1322                                }
1323                                kfree(tx_rings);
1324                                tx_rings = NULL;
1325
1326                                goto done;
1327                        }
1328                }
1329        }
1330
1331        /* alloc updated Rx resources */
1332        if (new_rx_count != vsi->rx_rings[0]->count) {
1333                netdev_info(netdev,
1334                            "Changing Rx descriptor count from %d to %d\n",
1335                            vsi->rx_rings[0]->count, new_rx_count);
1336                rx_rings = kcalloc(vsi->alloc_queue_pairs,
1337                                   sizeof(struct i40e_ring), GFP_KERNEL);
1338                if (!rx_rings) {
1339                        err = -ENOMEM;
1340                        goto free_tx;
1341                }
1342
1343                for (i = 0; i < vsi->num_queue_pairs; i++) {
1344                        struct i40e_ring *ring;
1345                        u16 unused;
1346
1347                        /* clone ring and setup updated count */
1348                        rx_rings[i] = *vsi->rx_rings[i];
1349                        rx_rings[i].count = new_rx_count;
1350                        /* the desc and bi pointers will be reallocated in the
1351                         * setup call
1352                         */
1353                        rx_rings[i].desc = NULL;
1354                        rx_rings[i].rx_bi = NULL;
1355                        /* this is to allow wr32 to have something to write to
1356                         * during early allocation of Rx buffers
1357                         */
1358                        rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS;
1359                        err = i40e_setup_rx_descriptors(&rx_rings[i]);
1360                        if (err)
1361                                goto rx_unwind;
1362
1363                        /* now allocate the Rx buffers to make sure the OS
1364                         * has enough memory, any failure here means abort
1365                         */
1366                        ring = &rx_rings[i];
1367                        unused = I40E_DESC_UNUSED(ring);
1368                        err = i40e_alloc_rx_buffers(ring, unused);
1369rx_unwind:
1370                        if (err) {
1371                                do {
1372                                        i40e_free_rx_resources(&rx_rings[i]);
1373                                } while (i--);
1374                                kfree(rx_rings);
1375                                rx_rings = NULL;
1376
1377                                goto free_tx;
1378                        }
1379                }
1380        }
1381
1382        /* Bring interface down, copy in the new ring info,
1383         * then restore the interface
1384         */
1385        i40e_down(vsi);
1386
1387        if (tx_rings) {
1388                for (i = 0; i < vsi->num_queue_pairs; i++) {
1389                        i40e_free_tx_resources(vsi->tx_rings[i]);
1390                        *vsi->tx_rings[i] = tx_rings[i];
1391                }
1392                kfree(tx_rings);
1393                tx_rings = NULL;
1394        }
1395
1396        if (rx_rings) {
1397                for (i = 0; i < vsi->num_queue_pairs; i++) {
1398                        i40e_free_rx_resources(vsi->rx_rings[i]);
1399                        /* get the real tail offset */
1400                        rx_rings[i].tail = vsi->rx_rings[i]->tail;
1401                        /* this is to fake out the allocation routine
1402                         * into thinking it has to realloc everything
1403                         * but the recycling logic will let us re-use
1404                         * the buffers allocated above
1405                         */
1406                        rx_rings[i].next_to_use = 0;
1407                        rx_rings[i].next_to_clean = 0;
1408                        rx_rings[i].next_to_alloc = 0;
1409                        /* do a struct copy */
1410                        *vsi->rx_rings[i] = rx_rings[i];
1411                }
1412                kfree(rx_rings);
1413                rx_rings = NULL;
1414        }
1415
1416        i40e_up(vsi);
1417
1418free_tx:
1419        /* error cleanup if the Rx allocations failed after getting Tx */
1420        if (tx_rings) {
1421                for (i = 0; i < vsi->num_queue_pairs; i++)
1422                        i40e_free_tx_resources(&tx_rings[i]);
1423                kfree(tx_rings);
1424                tx_rings = NULL;
1425        }
1426
1427done:
1428        clear_bit(__I40E_CONFIG_BUSY, &pf->state);
1429
1430        return err;
1431}
1432
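/**
 * i40e_get_sset_count - return the number of entries in a string set
 * @netdev: network interface device structure
 * @sset: the string set to count (ETH_SS_TEST, ETH_SS_STATS, ...)
 *
 * Returns the set length, or -EOPNOTSUPP for an unsupported set.
 **/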
1433static int i40e_get_sset_count(struct net_device *netdev, int sset)
1434{
1435        struct i40e_netdev_priv *np = netdev_priv(netdev);
1436        struct i40e_vsi *vsi = np->vsi;
1437        struct i40e_pf *pf = vsi->back;
1438
1439        switch (sset) {
1440        case ETH_SS_TEST:
1441                return I40E_TEST_LEN;
1442        case ETH_SS_STATS:
1443                if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
1444                        int len = I40E_PF_STATS_LEN(netdev);
1445
1446                        if ((pf->lan_veb != I40E_NO_VEB) &&
1447                            (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
1448                                len += I40E_VEB_STATS_TOTAL;
1449                        return len;
1450                } else {
1451                        return I40E_VSI_STATS_LEN(netdev);
1452                }
1453        case ETH_SS_PRIV_FLAGS:
1454                return I40E_PRIV_FLAGS_STR_LEN +
1455                        (pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0);
1456        default:
1457                return -EOPNOTSUPP;
1458        }
1459}
1460
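/**
 * i40e_get_ethtool_stats - copy driver statistics into the ethtool buffer
 * @netdev: network interface device structure
 * @stats: ethtool stats command structure
 * @data: output buffer, laid out to match i40e_get_strings
 *
 * VEB and port statistics are only appended for the main LAN VSI of the
 * controlling partition.
 **/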
1461static void i40e_get_ethtool_stats(struct net_device *netdev,
1462                                   struct ethtool_stats *stats, u64 *data)
1463{
1464        struct i40e_netdev_priv *np = netdev_priv(netdev);
1465        struct i40e_ring *tx_ring, *rx_ring;
1466        struct i40e_vsi *vsi = np->vsi;
1467        struct i40e_pf *pf = vsi->back;
1468        int i = 0;
1469        char *p;
1470        int j;
1471        struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
1472        unsigned int start;
1473
1474        i40e_update_stats(vsi);
1475
1476        for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
1477                p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
1478                data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
1479                        sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1480        }
1481        for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
1482                p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
1483                data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
1484                            sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1485        }
1486#ifdef I40E_FCOE
1487        for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
1488                p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
1489                data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
1490                        sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1491        }
1492#endif
1493        rcu_read_lock();
1494        for (j = 0; j < vsi->num_queue_pairs; j++) {
1495                tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
1496
1497                if (!tx_ring)
1498                        continue;
1499
1500                /* process Tx ring statistics */
1501                do {
1502                        start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
1503                        data[i] = tx_ring->stats.packets;
1504                        data[i + 1] = tx_ring->stats.bytes;
1505                } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
1506                i += 2;
1507
1508                /* Rx ring is the 2nd half of the queue pair */
1509                rx_ring = &tx_ring[1];
1510                do {
1511                        start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
1512                        data[i] = rx_ring->stats.packets;
1513                        data[i + 1] = rx_ring->stats.bytes;
1514                } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
1515                i += 2;
1516        }
1517        rcu_read_unlock();
1518        if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
1519                return;
1520
1521        if ((pf->lan_veb != I40E_NO_VEB) &&
1522            (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
1523                struct i40e_veb *veb = pf->veb[pf->lan_veb];
1524
1525                for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
1526                        p = (char *)veb;
1527                        p += i40e_gstrings_veb_stats[j].stat_offset;
1528                        data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
1529                                     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1530                }
1531                for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
1532                        data[i++] = veb->tc_stats.tc_tx_packets[j];
1533                        data[i++] = veb->tc_stats.tc_tx_bytes[j];
1534                        data[i++] = veb->tc_stats.tc_rx_packets[j];
1535                        data[i++] = veb->tc_stats.tc_rx_bytes[j];
1536                }
1537        }
1538        for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
1539                p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
1540                data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
1541                             sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1542        }
1543        for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
1544                data[i++] = pf->stats.priority_xon_tx[j];
1545                data[i++] = pf->stats.priority_xoff_tx[j];
1546        }
1547        for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
1548                data[i++] = pf->stats.priority_xon_rx[j];
1549                data[i++] = pf->stats.priority_xoff_rx[j];
1550        }
1551        for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
1552                data[i++] = pf->stats.priority_xon_2_xoff[j];
1553}
1554
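/**
 * i40e_get_strings - copy the requested string set into the ethtool buffer
 * @netdev: network interface device structure
 * @stringset: set to copy (ETH_SS_TEST, ETH_SS_STATS, ETH_SS_PRIV_FLAGS)
 * @data: output buffer
 *
 * The stats strings must stay in the same order as the values written by
 * i40e_get_ethtool_stats.
 **/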
1555static void i40e_get_strings(struct net_device *netdev, u32 stringset,
1556                             u8 *data)
1557{
1558        struct i40e_netdev_priv *np = netdev_priv(netdev);
1559        struct i40e_vsi *vsi = np->vsi;
1560        struct i40e_pf *pf = vsi->back;
1561        char *p = (char *)data;
1562        int i;
1563
1564        switch (stringset) {
1565        case ETH_SS_TEST:
1566                memcpy(data, i40e_gstrings_test,
1567                       I40E_TEST_LEN * ETH_GSTRING_LEN);
1568                break;
1569        case ETH_SS_STATS:
1570                for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
1571                        snprintf(p, ETH_GSTRING_LEN, "%s",
1572                                 i40e_gstrings_net_stats[i].stat_string);
1573                        p += ETH_GSTRING_LEN;
1574                }
1575                for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
1576                        snprintf(p, ETH_GSTRING_LEN, "%s",
1577                                 i40e_gstrings_misc_stats[i].stat_string);
1578                        p += ETH_GSTRING_LEN;
1579                }
1580#ifdef I40E_FCOE
1581                for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
1582                        snprintf(p, ETH_GSTRING_LEN, "%s",
1583                                 i40e_gstrings_fcoe_stats[i].stat_string);
1584                        p += ETH_GSTRING_LEN;
1585                }
1586#endif
1587                for (i = 0; i < vsi->num_queue_pairs; i++) {
1588                        snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
1589                        p += ETH_GSTRING_LEN;
1590                        snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i);
1591                        p += ETH_GSTRING_LEN;
1592                        snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i);
1593                        p += ETH_GSTRING_LEN;
1594                        snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i);
1595                        p += ETH_GSTRING_LEN;
1596                }
1597                if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
1598                        return;
1599
1600                if ((pf->lan_veb != I40E_NO_VEB) &&
1601                    (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
1602                        for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
1603                                snprintf(p, ETH_GSTRING_LEN, "veb.%s",
1604                                        i40e_gstrings_veb_stats[i].stat_string);
1605                                p += ETH_GSTRING_LEN;
1606                        }
1607                        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1608                                snprintf(p, ETH_GSTRING_LEN,
1609                                         "veb.tc_%d_tx_packets", i);
1610                                p += ETH_GSTRING_LEN;
1611                                snprintf(p, ETH_GSTRING_LEN,
1612                                         "veb.tc_%d_tx_bytes", i);
1613                                p += ETH_GSTRING_LEN;
1614                                snprintf(p, ETH_GSTRING_LEN,
1615                                         "veb.tc_%d_rx_packets", i);
1616                                p += ETH_GSTRING_LEN;
1617                                snprintf(p, ETH_GSTRING_LEN,
1618                                         "veb.tc_%d_rx_bytes", i);
1619                                p += ETH_GSTRING_LEN;
1620                        }
1621                }
1622                for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
1623                        snprintf(p, ETH_GSTRING_LEN, "port.%s",
1624                                 i40e_gstrings_stats[i].stat_string);
1625                        p += ETH_GSTRING_LEN;
1626                }
1627                for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1628                        snprintf(p, ETH_GSTRING_LEN,
1629                                 "port.tx_priority_%d_xon", i);
1630                        p += ETH_GSTRING_LEN;
1631                        snprintf(p, ETH_GSTRING_LEN,
1632                                 "port.tx_priority_%d_xoff", i);
1633                        p += ETH_GSTRING_LEN;
1634                }
1635                for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1636                        snprintf(p, ETH_GSTRING_LEN,
1637                                 "port.rx_priority_%d_xon", i);
1638                        p += ETH_GSTRING_LEN;
1639                        snprintf(p, ETH_GSTRING_LEN,
1640                                 "port.rx_priority_%d_xoff", i);
1641                        p += ETH_GSTRING_LEN;
1642                }
1643                for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1644                        snprintf(p, ETH_GSTRING_LEN,
1645                                 "port.rx_priority_%d_xon_2_xoff", i);
1646                        p += ETH_GSTRING_LEN;
1647                }
1648                /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
1649                break;
1650        case ETH_SS_PRIV_FLAGS:
1651                memcpy(data, i40e_priv_flags_strings,
1652                       I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
1653                data += I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN;
1654                if (pf->hw.pf_id == 0)
1655                        memcpy(data, i40e_gl_priv_flags_strings,
1656                               I40E_GL_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
1657                break;
1658        default:
1659                break;
1660        }
1661}
1662
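/**
 * i40e_get_ts_info - report the device's timestamping capabilities
 * @dev: network interface device structure
 * @info: ethtool timestamping info structure to fill
 *
 * Hardware timestamping is only reported when PTP is enabled on the PF.
 **/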
1663static int i40e_get_ts_info(struct net_device *dev,
1664                            struct ethtool_ts_info *info)
1665{
1666        struct i40e_pf *pf = i40e_netdev_to_pf(dev);
1667
1668        /* only report HW timestamping if PTP is enabled */
1669        if (!(pf->flags & I40E_FLAG_PTP))
1670                return ethtool_op_get_ts_info(dev, info);
1671
1672        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1673                                SOF_TIMESTAMPING_RX_SOFTWARE |
1674                                SOF_TIMESTAMPING_SOFTWARE |
1675                                SOF_TIMESTAMPING_TX_HARDWARE |
1676                                SOF_TIMESTAMPING_RX_HARDWARE |
1677                                SOF_TIMESTAMPING_RAW_HARDWARE;
1678
1679        if (pf->ptp_clock)
1680                info->phc_index = ptp_clock_index(pf->ptp_clock);
1681        else
1682                info->phc_index = -1;
1683
1684        info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
1685
1686        info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1687                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1688                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1689                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
1690
1691        if (pf->flags & I40E_FLAG_PTP_L4_CAPABLE)
1692                info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1693                                    BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1694                                    BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
1695                                    BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1696                                    BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
1697                                    BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1698                                    BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
1699                                    BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
1700
1701        return 0;
1702}
1703
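/**
 * i40e_link_test - ethtool link self-test
 * @netdev: network interface device structure
 * @data: test result; set to 0 if link is up, 1 otherwise
 *
 * Returns the value written to *data.
 **/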
1704static int i40e_link_test(struct net_device *netdev, u64 *data)
1705{
1706        struct i40e_netdev_priv *np = netdev_priv(netdev);
1707        struct i40e_pf *pf = np->vsi->back;
1708        i40e_status status;
1709        bool link_up = false;
1710
1711        netif_info(pf, hw, netdev, "link test\n");
1712        status = i40e_get_link_status(&pf->hw, &link_up);
1713        if (status) {
1714                netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
1715                *data = 1;
1716                return *data;
1717        }
1718
1719        if (link_up)
1720                *data = 0;
1721        else
1722                *data = 1;
1723
1724        return *data;
1725}
1726
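/**
 * i40e_reg_test - ethtool register self-test
 * @netdev: network interface device structure
 * @data: result of the diag register test; 0 on success
 **/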
1727static int i40e_reg_test(struct net_device *netdev, u64 *data)
1728{
1729        struct i40e_netdev_priv *np = netdev_priv(netdev);
1730        struct i40e_pf *pf = np->vsi->back;
1731
1732        netif_info(pf, hw, netdev, "register test\n");
1733        *data = i40e_diag_reg_test(&pf->hw);
1734
1735        return *data;
1736}
1737
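/**
 * i40e_eeprom_test - ethtool EEPROM self-test
 * @netdev: network interface device structure
 * @data: result of the diag eeprom test; 0 on success
 **/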
1738static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
1739{
1740        struct i40e_netdev_priv *np = netdev_priv(netdev);
1741        struct i40e_pf *pf = np->vsi->back;
1742
1743        netif_info(pf, hw, netdev, "eeprom test\n");
1744        *data = i40e_diag_eeprom_test(&pf->hw);
1745
1746        /* forcibly clear the NVM Update state machine */
1747        pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT;
1748
1749        return *data;
1750}
1751
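/**
 * i40e_intr_test - ethtool interrupt self-test
 * @netdev: network interface device structure
 * @data: test result; 0 if a software interrupt was seen, 1 otherwise
 *
 * Triggers a software interrupt on the miscellaneous vector and checks
 * that the software interrupt counter advances.
 **/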
1752static int i40e_intr_test(struct net_device *netdev, u64 *data)
1753{
1754        struct i40e_netdev_priv *np = netdev_priv(netdev);
1755        struct i40e_pf *pf = np->vsi->back;
1756        u16 swc_old = pf->sw_int_count;
1757
1758        netif_info(pf, hw, netdev, "interrupt test\n");
1759        wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
1760             (I40E_PFINT_DYN_CTL0_INTENA_MASK |
1761              I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1762              I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
1763              I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
1764              I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
1765        usleep_range(1000, 2000);
1766        *data = (swc_old == pf->sw_int_count);
1767
1768        return *data;
1769}
1770
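/**
 * i40e_active_vfs - check whether any VF is currently active
 * @pf: board private structure
 **/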
1771static inline bool i40e_active_vfs(struct i40e_pf *pf)
1772{
1773        struct i40e_vf *vfs = pf->vf;
1774        int i;
1775
1776        for (i = 0; i < pf->num_alloc_vfs; i++)
1777                if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
1778                        return true;
1779        return false;
1780}
1781
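/**
 * i40e_active_vmdqs - check whether any VMDq VSI is in use
 * @pf: board private structure
 **/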
1782static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
1783{
1784        return !!i40e_find_vsi_by_type(pf, I40E_VSI_VMDQ2);
1785}
1786
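/**
 * i40e_diag_test - run the requested ethtool self-tests
 * @netdev: network interface device structure
 * @eth_test: ethtool test command structure
 * @data: per-test results
 *
 * Offline tests take the interface down and reset the PF afterwards; they
 * are refused while VFs or VMDq VSIs are active. Online testing only
 * checks link state.
 **/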
1787static void i40e_diag_test(struct net_device *netdev,
1788                           struct ethtool_test *eth_test, u64 *data)
1789{
1790        struct i40e_netdev_priv *np = netdev_priv(netdev);
1791        bool if_running = netif_running(netdev);
1792        struct i40e_pf *pf = np->vsi->back;
1793
1794        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1795                /* Offline tests */
1796                netif_info(pf, drv, netdev, "offline testing starting\n");
1797
1798                set_bit(__I40E_TESTING, &pf->state);
1799
1800                if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
1801                        dev_warn(&pf->pdev->dev,
1802                                 "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
1803                        data[I40E_ETH_TEST_REG]         = 1;
1804                        data[I40E_ETH_TEST_EEPROM]      = 1;
1805                        data[I40E_ETH_TEST_INTR]        = 1;
1806                        data[I40E_ETH_TEST_LINK]        = 1;
1807                        eth_test->flags |= ETH_TEST_FL_FAILED;
1808                        clear_bit(__I40E_TESTING, &pf->state);
1809                        goto skip_ol_tests;
1810                }
1811
1812                /* If the device is online then take it offline */
1813                if (if_running)
1814                        /* indicate we're in test mode */
1815                        i40e_close(netdev);
1816                else
1817                        /* This reset does not affect link - if it is
1818                         * changed to a type of reset that does affect
1819                         * link then the following link test would have
1820                         * to be moved to before the reset
1821                         */
1822                        i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
1823
1824                if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
1825                        eth_test->flags |= ETH_TEST_FL_FAILED;
1826
1827                if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
1828                        eth_test->flags |= ETH_TEST_FL_FAILED;
1829
1830                if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
1831                        eth_test->flags |= ETH_TEST_FL_FAILED;
1832
1833                /* run reg test last, a reset is required after it */
1834                if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
1835                        eth_test->flags |= ETH_TEST_FL_FAILED;
1836
1837                clear_bit(__I40E_TESTING, &pf->state);
1838                i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
1839
1840                if (if_running)
1841                        i40e_open(netdev);
1842        } else {
1843                /* Online tests */
1844                netif_info(pf, drv, netdev, "online testing starting\n");
1845
1846                if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
1847                        eth_test->flags |= ETH_TEST_FL_FAILED;
1848
1849                /* Offline-only tests are not run online; pass by default */
1850                data[I40E_ETH_TEST_REG] = 0;
1851                data[I40E_ETH_TEST_EEPROM] = 0;
1852                data[I40E_ETH_TEST_INTR] = 0;
1853        }
1854
1855skip_ol_tests:
1856
1857        netif_info(pf, drv, netdev, "testing finished\n");
1858}
1859
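/**
 * i40e_get_wol - get the WakeOnLAN configuration
 * @netdev: the netdev in question
 * @wol: the ethtool WoL setting data
 **/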
1860static void i40e_get_wol(struct net_device *netdev,
1861                         struct ethtool_wolinfo *wol)
1862{
1863        struct i40e_netdev_priv *np = netdev_priv(netdev);
1864        struct i40e_pf *pf = np->vsi->back;
1865        struct i40e_hw *hw = &pf->hw;
1866        u16 wol_nvm_bits;
1867
1868        /* NVM bit on means WoL disabled for the port */
1869        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1870        if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
1871                wol->supported = 0;
1872                wol->wolopts = 0;
1873        } else {
1874                wol->supported = WAKE_MAGIC;
1875                wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
1876        }
1877}
1878
1879/**
1880 * i40e_set_wol - set the WakeOnLAN configuration
1881 * @netdev: the netdev in question
1882 * @wol: the ethtool WoL setting data
1883 **/
1884static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1885{
1886        struct i40e_netdev_priv *np = netdev_priv(netdev);
1887        struct i40e_pf *pf = np->vsi->back;
1888        struct i40e_vsi *vsi = np->vsi;
1889        struct i40e_hw *hw = &pf->hw;
1890        u16 wol_nvm_bits;
1891
1892        /* WoL not supported if this isn't the controlling PF on the port */
1893        if (hw->partition_id != 1) {
1894                i40e_partition_setting_complaint(pf);
1895                return -EOPNOTSUPP;
1896        }
1897
1898        if (vsi != pf->vsi[pf->lan_vsi])
1899                return -EOPNOTSUPP;
1900
1901        /* NVM bit on means WoL disabled for the port */
1902        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1903        if (BIT(hw->port) & wol_nvm_bits)
1904                return -EOPNOTSUPP;
1905
1906        /* only magic packet is supported */
1907        if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
1908                return -EOPNOTSUPP;
1909
1910        /* is this a new value? */
1911        if (pf->wol_en != !!wol->wolopts) {
1912                pf->wol_en = !!wol->wolopts;
1913                device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
1914        }
1915
1916        return 0;
1917}
1918
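/**
 * i40e_set_phys_id - blink the port LEDs for identification
 * @netdev: the netdev in question
 * @state: LED state requested by ethtool
 *
 * Uses either the MAC LED registers or the PHY LED control, depending on
 * whether the PHY controls the LEDs on this hardware.
 **/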
1919static int i40e_set_phys_id(struct net_device *netdev,
1920                            enum ethtool_phys_id_state state)
1921{
1922        struct i40e_netdev_priv *np = netdev_priv(netdev);
1923        i40e_status ret = 0;
1924        struct i40e_pf *pf = np->vsi->back;
1925        struct i40e_hw *hw = &pf->hw;
1926        int blink_freq = 2;
1927        u16 temp_status;
1928
1929        switch (state) {
1930        case ETHTOOL_ID_ACTIVE:
1931                if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) {
1932                        pf->led_status = i40e_led_get(hw);
1933                } else {
1934                        i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
1935                        ret = i40e_led_get_phy(hw, &temp_status,
1936                                               &pf->phy_led_val);
1937                        pf->led_status = temp_status;
1938                }
1939                return blink_freq;
1940        case ETHTOOL_ID_ON:
1941                if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS))
1942                        i40e_led_set(hw, 0xf, false);
1943                else
1944                        ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
1945                break;
1946        case ETHTOOL_ID_OFF:
1947                if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS))
1948                        i40e_led_set(hw, 0x0, false);
1949                else
1950                        ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
1951                break;
1952        case ETHTOOL_ID_INACTIVE:
1953                if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) {
1954                        i40e_led_set(hw, pf->led_status, false);
1955                } else {
1956                        ret = i40e_led_set_phy(hw, false, pf->led_status,
1957                                               (pf->phy_led_val |
1958                                               I40E_PHY_LED_MODE_ORIG));
1959                        i40e_aq_set_phy_debug(hw, 0, NULL);
1960                }
1961                break;
1962        default:
1963                break;
1964        }
1965        if (ret)
1966                return -ENOENT;
1967        else
1968                return 0;
1969}
1970
1971/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
1972 * Throttle Rate (ITR), i.e. ITR(1) = 2us and ITR(10) = 20us; likewise
1973 * 125us (8000 interrupts per second) == ITR(62)
1974 */
1975
1976/**
1977 * __i40e_get_coalesce - get per-queue coalesce settings
1978 * @netdev: the netdev to check
1979 * @ec: ethtool coalesce data structure
1980 * @queue: which queue to pick
1981 *
1982 * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
1983 * are per queue. If queue is <0 then we default to queue 0 as the
1984 * representative value.
1985 **/
1986static int __i40e_get_coalesce(struct net_device *netdev,
1987                               struct ethtool_coalesce *ec,
1988                               int queue)
1989{
1990        struct i40e_netdev_priv *np = netdev_priv(netdev);
1991        struct i40e_ring *rx_ring, *tx_ring;
1992        struct i40e_vsi *vsi = np->vsi;
1993
1994        ec->tx_max_coalesced_frames_irq = vsi->work_limit;
1995        ec->rx_max_coalesced_frames_irq = vsi->work_limit;
1996
1997        /* Rx and Tx usecs are per-queue values. If the user doesn't
1998         * specify a queue, return queue 0's value as the representative.
1999         */
2000        if (queue < 0) {
2001                queue = 0;
2002        } else if (queue >= vsi->num_queue_pairs) {
2003                return -EINVAL;
2004        }
2005
2006        rx_ring = vsi->rx_rings[queue];
2007        tx_ring = vsi->tx_rings[queue];
2008
2009        if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
2010                ec->use_adaptive_rx_coalesce = 1;
2011
2012        if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
2013                ec->use_adaptive_tx_coalesce = 1;
2014
2015        ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
2016        ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
2017
2018
2019        /* We use the _usecs_high fields to store/set the interrupt rate
2020         * limit that the hardware supports. This almost, but not quite,
2021         * fits the original intent of the ethtool variable:
2022         * rx_coalesce_usecs_high limits the total interrupts per second
2023         * from both Tx and Rx sources.
2024         */
2025        ec->rx_coalesce_usecs_high = vsi->int_rate_limit;
2026        ec->tx_coalesce_usecs_high = vsi->int_rate_limit;
2027
2028        return 0;
2029}
2030
2031/**
2032 * i40e_get_coalesce - get a netdev's coalesce settings
2033 * @netdev: the netdev to check
2034 * @ec: ethtool coalesce data structure
2035 *
2036 * Gets the coalesce settings for a particular netdev. Note that if user has
2037 * modified per-queue settings, this only guarantees to represent queue 0. See
2038 * __i40e_get_coalesce for more details.
2039 **/
2040static int i40e_get_coalesce(struct net_device *netdev,
2041                             struct ethtool_coalesce *ec)
2042{
2043        return __i40e_get_coalesce(netdev, ec, -1);
2044}
2045
2046/**
2047 * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
2048 * @netdev: netdev structure
2049 * @ec: ethtool's coalesce settings
2050 * @queue: the particular queue to read
2051 *
2052 * Will read a specific queue's coalesce settings
2053 **/
2054static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
2055                                       struct ethtool_coalesce *ec)
2056{
2057        return __i40e_get_coalesce(netdev, ec, queue);
2058}
2059
2060/**
2061 * i40e_set_itr_per_queue - set ITR values for specific queue
2062 * @vsi: the VSI to set values for
2063 * @ec: coalesce settings from ethtool
2064 * @queue: the queue to modify
2065 *
2066 * Change the ITR settings for a specific queue.
2067 **/
2069static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
2070                                   struct ethtool_coalesce *ec,
2071                                   int queue)
2072{
2073        struct i40e_pf *pf = vsi->back;
2074        struct i40e_hw *hw = &pf->hw;
2075        struct i40e_q_vector *q_vector;
2076        u16 vector, intrl;
2077
2078        intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
2079
2080        vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
2081        vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
2082
2083        if (ec->use_adaptive_rx_coalesce)
2084                vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC;
2085        else
2086                vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
2087
2088        if (ec->use_adaptive_tx_coalesce)
2089                vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC;
2090        else
2091                vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
2092
2093        q_vector = vsi->rx_rings[queue]->q_vector;
2094        q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting);
2095        vector = vsi->base_vector + q_vector->v_idx;
2096        wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
2097
2098        q_vector = vsi->tx_rings[queue]->q_vector;
2099        q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting);
2100        vector = vsi->base_vector + q_vector->v_idx;
2101        wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
2102
2103        wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
2104        i40e_flush(hw);
2105}
2106
2107/**
2108 * __i40e_set_coalesce - set coalesce settings for particular queue
2109 * @netdev: the netdev to change
2110 * @ec: ethtool coalesce settings
2111 * @queue: the queue to change
2112 *
2113 * Sets the coalesce settings for a particular queue.
2114 **/
2115static int __i40e_set_coalesce(struct net_device *netdev,
2116                               struct ethtool_coalesce *ec,
2117                               int queue)
2118{
2119        struct i40e_netdev_priv *np = netdev_priv(netdev);
2120        struct i40e_vsi *vsi = np->vsi;
2121        struct i40e_pf *pf = vsi->back;
2122        u16 intrl_reg;
2123        int i;
2124
2125        if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
2126                vsi->work_limit = ec->tx_max_coalesced_frames_irq;
2127
2128        /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */
2129        if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) {
2130                netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n");
2131                return -EINVAL;
2132        }
2133
2134        if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
2135                netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n",
2136                           INTRL_REG_TO_USEC(I40E_MAX_INTRL));
2137                return -EINVAL;
2138        }
2139
2140        if (ec->rx_coalesce_usecs == 0) {
2141                if (ec->use_adaptive_rx_coalesce)
2142                        netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
2143        } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
2144                   (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
2145                netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
2146                return -EINVAL;
2147        }
2148
2149        intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high);
2150        vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg);
2151        if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) {
2152                netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n",
2153                           vsi->int_rate_limit);
2154        }
2155
2156        if (ec->tx_coalesce_usecs == 0) {
2157                if (ec->use_adaptive_tx_coalesce)
2158                        netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
2159        } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
2160                   (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
2161                netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
2162                return -EINVAL;
2163        }
2164
2165        /* Rx and Tx usecs are per-queue values. If the user doesn't
2166         * specify a queue, apply the settings to all queues.
2167         */
2168        if (queue < 0) {
2169                for (i = 0; i < vsi->num_queue_pairs; i++)
2170                        i40e_set_itr_per_queue(vsi, ec, i);
2171        } else if (queue < vsi->num_queue_pairs) {
2172                i40e_set_itr_per_queue(vsi, ec, queue);
2173        } else {
2174                netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
2175                           vsi->num_queue_pairs - 1);
2176                return -EINVAL;
2177        }
2178
2179        return 0;
2180}
2181
2182/**
2183 * i40e_set_coalesce - set coalesce settings for every queue on the netdev
2184 * @netdev: the netdev to change
2185 * @ec: ethtool coalesce settings
2186 *
2187 * This will set each queue to the same coalesce settings.
2188 **/
2189static int i40e_set_coalesce(struct net_device *netdev,
2190                             struct ethtool_coalesce *ec)
2191{
2192        return __i40e_set_coalesce(netdev, ec, -1);
2193}
2194
2195/**
2196 * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
2197 * @netdev: the netdev to change
2198 * @ec: ethtool's coalesce settings
2199 * @queue: the queue to change
2200 *
2201 * Sets the specified queue's coalesce settings.
2202 **/
2203static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
2204                                       struct ethtool_coalesce *ec)
2205{
2206        return __i40e_set_coalesce(netdev, ec, queue);
2207}
2208
2209/**
2210 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
2211 * @pf: pointer to the physical function struct
2212 * @cmd: ethtool rxnfc command
2213 *
2214 * Returns Success if the flow is supported, else Invalid Input.
2215 **/
2216static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
2217{
2218        struct i40e_hw *hw = &pf->hw;
2219        u8 flow_pctype = 0;
2220        u64 i_set = 0;
2221
2222        cmd->data = 0;
2223
2224        switch (cmd->flow_type) {
2225        case TCP_V4_FLOW:
2226                flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2227                break;
2228        case UDP_V4_FLOW:
2229                flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2230                break;
2231        case TCP_V6_FLOW:
2232                flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2233                break;
2234        case UDP_V6_FLOW:
2235                flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2236                break;
2237        case SCTP_V4_FLOW:
2238        case AH_ESP_V4_FLOW:
2239        case AH_V4_FLOW:
2240        case ESP_V4_FLOW:
2241        case IPV4_FLOW:
2242        case SCTP_V6_FLOW:
2243        case AH_ESP_V6_FLOW:
2244        case AH_V6_FLOW:
2245        case ESP_V6_FLOW:
2246        case IPV6_FLOW:
2247                /* Default is src/dest for IP, no matter the L4 hashing */
2248                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2249                break;
2250        default:
2251                return -EINVAL;
2252        }
2253
2254        /* Read flow based hash input set register */
2255        if (flow_pctype) {
2256                i_set = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
2257                                              flow_pctype)) |
2258                        ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
2259                                               flow_pctype)) << 32);
2260        }
2261
2262        /* Process bits of hash input set */
2263        if (i_set) {
2264                if (i_set & I40E_L4_SRC_MASK)
2265                        cmd->data |= RXH_L4_B_0_1;
2266                if (i_set & I40E_L4_DST_MASK)
2267                        cmd->data |= RXH_L4_B_2_3;
2268
2269                if (cmd->flow_type == TCP_V4_FLOW ||
2270                    cmd->flow_type == UDP_V4_FLOW) {
2271                        if (i_set & I40E_L3_SRC_MASK)
2272                                cmd->data |= RXH_IP_SRC;
2273                        if (i_set & I40E_L3_DST_MASK)
2274                                cmd->data |= RXH_IP_DST;
2275                } else if (cmd->flow_type == TCP_V6_FLOW ||
2276                          cmd->flow_type == UDP_V6_FLOW) {
2277                        if (i_set & I40E_L3_V6_SRC_MASK)
2278                                cmd->data |= RXH_IP_SRC;
2279                        if (i_set & I40E_L3_V6_DST_MASK)
2280                                cmd->data |= RXH_IP_DST;
2281                }
2282        }
2283
2284        return 0;
2285}
2286
2287/**
2288 * i40e_get_ethtool_fdir_all - Populates the rule count of a command
2289 * @pf: Pointer to the physical function struct
2290 * @cmd: The command to get or set Rx flow classification rules
2291 * @rule_locs: Array of used rule locations
2292 *
2293 * This function populates both the total and actual rule count of
2294 * the ethtool flow classification command
2295 *
2296 * Returns 0 on success or -EMSGSIZE if entry not found
2297 **/
2298static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
2299                                     struct ethtool_rxnfc *cmd,
2300                                     u32 *rule_locs)
2301{
2302        struct i40e_fdir_filter *rule;
2303        struct hlist_node *node2;
2304        int cnt = 0;
2305
2306        /* report total rule count */
2307        cmd->data = i40e_get_fd_cnt_all(pf);
2308
2309        hlist_for_each_entry_safe(rule, node2,
2310                                  &pf->fdir_filter_list, fdir_node) {
2311                if (cnt == cmd->rule_cnt)
2312                        return -EMSGSIZE;
2313
2314                rule_locs[cnt] = rule->fd_id;
2315                cnt++;
2316        }
2317
2318        cmd->rule_cnt = cnt;
2319
2320        return 0;
2321}
2322
2323/**
2324 * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
2325 * @pf: Pointer to the physical function struct
2326 * @cmd: The command to get or set Rx flow classification rules
2327 *
2328 * This function looks up a filter based on the Rx flow classification
2329 * command and fills the flow spec info for it if found
2330 *
2331 * Returns 0 on success or -EINVAL if filter not found
2332 **/
2333static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
2334                                       struct ethtool_rxnfc *cmd)
2335{
2336        struct ethtool_rx_flow_spec *fsp =
2337                        (struct ethtool_rx_flow_spec *)&cmd->fs;
2338        struct i40e_fdir_filter *rule = NULL;
2339        struct hlist_node *node2;
2340
2341        hlist_for_each_entry_safe(rule, node2,
2342                                  &pf->fdir_filter_list, fdir_node) {
2343                if (fsp->location <= rule->fd_id)
2344                        break;
2345        }
2346
2347        if (!rule || fsp->location != rule->fd_id)
2348                return -EINVAL;
2349
2350        fsp->flow_type = rule->flow_type;
2351        if (fsp->flow_type == IP_USER_FLOW) {
2352                fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2353                fsp->h_u.usr_ip4_spec.proto = 0;
2354                fsp->m_u.usr_ip4_spec.proto = 0;
2355        }
2356
2357        /* Reverse the src and dest notion, since the HW views them from
2358         * the Tx perspective, whereas the user expects the Rx filter view.
2359         */
2360        fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
2361        fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
2362        fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
2363        fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
2364
2365        if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
2366                fsp->ring_cookie = RX_CLS_FLOW_DISC;
2367        else
2368                fsp->ring_cookie = rule->q_index;
2369
2370        if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
2371                struct i40e_vsi *vsi;
2372
2373                vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
2374                if (vsi && vsi->type == I40E_VSI_SRIOV) {
2375                        fsp->h_ext.data[1] = htonl(vsi->vf_id);
2376                        fsp->m_ext.data[1] = htonl(0x1);
2377                }
2378        }
2379
2380        return 0;
2381}
2382
2383/**
2384 * i40e_get_rxnfc - command to get RX flow classification rules
2385 * @netdev: network interface device structure
2386 * @cmd: ethtool rxnfc command
2387 *
2388 * Returns Success if the command is supported.
2389 **/
2390static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
2391                          u32 *rule_locs)
2392{
2393        struct i40e_netdev_priv *np = netdev_priv(netdev);
2394        struct i40e_vsi *vsi = np->vsi;
2395        struct i40e_pf *pf = vsi->back;
2396        int ret = -EOPNOTSUPP;
2397
2398        switch (cmd->cmd) {
2399        case ETHTOOL_GRXRINGS:
2400                cmd->data = vsi->num_queue_pairs;
2401                ret = 0;
2402                break;
2403        case ETHTOOL_GRXFH:
2404                ret = i40e_get_rss_hash_opts(pf, cmd);
2405                break;
2406        case ETHTOOL_GRXCLSRLCNT:
2407                cmd->rule_cnt = pf->fdir_pf_active_filters;
2408                /* report total rule count */
2409                cmd->data = i40e_get_fd_cnt_all(pf);
2410                ret = 0;
2411                break;
2412        case ETHTOOL_GRXCLSRULE:
2413                ret = i40e_get_ethtool_fdir_entry(pf, cmd);
2414                break;
2415        case ETHTOOL_GRXCLSRLALL:
2416                ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
2417                break;
2418        default:
2419                break;
2420        }
2421
2422        return ret;
2423}
2424
2425/**
2426 * i40e_get_rss_hash_bits - Read RSS Hash bits from register
2427 * @nfc: pointer to user request
2428 * @i_setc: bits currently set
2429 *
2430 * Returns value of bits to be set per user request
2431 **/
2432static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
2433{
2434        u64 i_set = i_setc;
2435        u64 src_l3 = 0, dst_l3 = 0;
2436
2437        if (nfc->data & RXH_L4_B_0_1)
2438                i_set |= I40E_L4_SRC_MASK;
2439        else
2440                i_set &= ~I40E_L4_SRC_MASK;
2441        if (nfc->data & RXH_L4_B_2_3)
2442                i_set |= I40E_L4_DST_MASK;
2443        else
2444                i_set &= ~I40E_L4_DST_MASK;
2445
2446        if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) {
2447                src_l3 = I40E_L3_V6_SRC_MASK;
2448                dst_l3 = I40E_L3_V6_DST_MASK;
2449        } else if (nfc->flow_type == TCP_V4_FLOW ||
2450                  nfc->flow_type == UDP_V4_FLOW) {
2451                src_l3 = I40E_L3_SRC_MASK;
2452                dst_l3 = I40E_L3_DST_MASK;
2453        } else {
2454                /* Any other flow types are not supported here */
2455                return i_set;
2456        }
2457
2458        if (nfc->data & RXH_IP_SRC)
2459                i_set |= src_l3;
2460        else
2461                i_set &= ~src_l3;
2462        if (nfc->data & RXH_IP_DST)
2463                i_set |= dst_l3;
2464        else
2465                i_set &= ~dst_l3;
2466
2467        return i_set;
2468}
2469
2470/**
2471 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
2472 * @pf: pointer to the physical function struct
2473 * @nfc: ethtool rxnfc command
2474 *
2475 * Returns Success if the flow input set is supported.
2476 **/
2477static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
2478{
2479        struct i40e_hw *hw = &pf->hw;
2480        u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
2481                   ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
2482        u8 flow_pctype = 0;
2483        u64 i_set, i_setc;
2484
2485        /* RSS does not support anything other than hashing
2486         * to queues on src and dst IPs and ports
2487         */
2488        if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2489                          RXH_L4_B_0_1 | RXH_L4_B_2_3))
2490                return -EINVAL;
2491
2492        switch (nfc->flow_type) {
2493        case TCP_V4_FLOW:
2494                flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2495                if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
2496                        hena |=
2497                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
2498                break;
2499        case TCP_V6_FLOW:
2500                flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2501                if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
2502                        hena |=
2506                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
2507                break;
2508        case UDP_V4_FLOW:
2509                flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2510                if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
2511                        hena |=
2512                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
2513                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
2514
2515                hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
2516                break;
2517        case UDP_V6_FLOW:
2518                flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2519                if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
2520                        hena |=
2521                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
2522                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
2523
2524                hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
2525                break;
2526        case AH_ESP_V4_FLOW:
2527        case AH_V4_FLOW:
2528        case ESP_V4_FLOW:
2529        case SCTP_V4_FLOW:
2530                if ((nfc->data & RXH_L4_B_0_1) ||
2531                    (nfc->data & RXH_L4_B_2_3))
2532                        return -EINVAL;
2533                hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2534                break;
2535        case AH_ESP_V6_FLOW:
2536        case AH_V6_FLOW:
2537        case ESP_V6_FLOW:
2538        case SCTP_V6_FLOW:
2539                if ((nfc->data & RXH_L4_B_0_1) ||
2540                    (nfc->data & RXH_L4_B_2_3))
2541                        return -EINVAL;
2542                hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2543                break;
2544        case IPV4_FLOW:
2545                hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2546                        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
2547                break;
2548        case IPV6_FLOW:
2549                hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2550                        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
2551                break;
2552        default:
2553                return -EINVAL;
2554        }
2555
2556        if (flow_pctype) {
2557                i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
2558                                               flow_pctype)) |
2559                        ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
2560                                               flow_pctype)) << 32);
2561                i_set = i40e_get_rss_hash_bits(nfc, i_setc);
2562                i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
2563                                  (u32)i_set);
2564                i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
2565                                  (u32)(i_set >> 32));
2566                hena |= BIT_ULL(flow_pctype);
2567        }
2568
2569        i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
2570        i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
2571        i40e_flush(hw);
2572
2573        return 0;
2574}
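
    /* Example (illustrative only): this handler services ETHTOOL_SRXFH
     * requests, e.g. generated by
     *
     *      ethtool -N <ethX> rx-flow-hash tcp4 sdfn
     *
     * which asks for TCP/IPv4 hashing on src/dst IP and src/dst port
     * (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3).
     */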
2575
2576/**
2577 * i40e_match_fdir_input_set - Match a new filter against an existing one
2578 * @rule: The filter already added
2579 * @input: The new filter to compare against
2580 *
2581 * Returns true if the two input sets match
2582 **/
2583static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
2584                                      struct i40e_fdir_filter *input)
2585{
2586        if ((rule->dst_ip[0] != input->dst_ip[0]) ||
2587            (rule->src_ip[0] != input->src_ip[0]) ||
2588            (rule->dst_port != input->dst_port) ||
2589            (rule->src_port != input->src_port))
2590                return false;
2591        return true;
2592}
2593
2594/**
2595 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
2596 * @vsi: Pointer to the targeted VSI
2597 * @input: The filter to update or NULL to indicate deletion
2598 * @sw_idx: Software index to the filter
2599 * @cmd: The command to get or set Rx flow classification rules
2600 *
2601 * This function updates (or deletes) a Flow Director entry from
2602 * the hlist of the corresponding PF
2603 *
2604 * Returns 0 on success
2605 **/
2606static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
2607                                          struct i40e_fdir_filter *input,
2608                                          u16 sw_idx,
2609                                          struct ethtool_rxnfc *cmd)
2610{
2611        struct i40e_fdir_filter *rule, *parent;
2612        struct i40e_pf *pf = vsi->back;
2613        struct hlist_node *node2;
2614        int err = -EINVAL;
2615
2616        parent = NULL;
2617        rule = NULL;
2618
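            /* The filter list is kept sorted by fd_id in ascending order:
             * walk it until an entry at or beyond sw_idx is found, and
             * remember the last entry before that point as the insertion
             * parent.
             */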
2619        hlist_for_each_entry_safe(rule, node2,
2620                                  &pf->fdir_filter_list, fdir_node) {
2621                /* hash found, or no matching entry */
2622                if (rule->fd_id >= sw_idx)
2623                        break;
2624                parent = rule;
2625        }
2626
2627        /* if there is an old rule occupying our place remove it */
2628        if (rule && (rule->fd_id == sw_idx)) {
2629                if (input && !i40e_match_fdir_input_set(rule, input))
2630                        err = i40e_add_del_fdir(vsi, rule, false);
2631                else if (!input)
2632                        err = i40e_add_del_fdir(vsi, rule, false);
2633                hlist_del(&rule->fdir_node);
2634                kfree(rule);
2635                pf->fdir_pf_active_filters--;
2636        }
2637
2638        /* If no input was given this was a delete; err is 0 if a rule was
2639         * found and removed from the list, otherwise -EINVAL.
2640         */
2641        if (!input)
2642                return err;
2643
2644        /* initialize node and set software index */
2645        INIT_HLIST_NODE(&input->fdir_node);
2646
2647        /* add filter to the list */
2648        if (parent)
2649                hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2650        else
2651                hlist_add_head(&input->fdir_node,
2652                               &pf->fdir_filter_list);
2653
2654        /* update counts */
2655        pf->fdir_pf_active_filters++;
2656
2657        return 0;
2658}
2659
2660/**
2661 * i40e_del_fdir_entry - Deletes a Flow Director filter entry
2662 * @vsi: Pointer to the targeted VSI
2663 * @cmd: The command to get or set Rx flow classification rules
2664 *
2665 * The function removes a Flow Director filter entry from the
2666 * hlist of the corresponding PF
2667 *
2668 * Returns 0 on success
2669 **/
2670static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
2671                               struct ethtool_rxnfc *cmd)
2672{
2673        struct ethtool_rx_flow_spec *fsp =
2674                (struct ethtool_rx_flow_spec *)&cmd->fs;
2675        struct i40e_pf *pf = vsi->back;
2676        int ret = 0;
2677
2678        if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
2679            test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
2680                return -EBUSY;
2681
2682        if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
2683                return -EBUSY;
2684
2685        ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
2686
2687        i40e_fdir_check_and_reenable(pf);
2688        return ret;
2689}
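
    /* Example (illustrative only): ETHTOOL_SRXCLSRLDEL is typically issued as
     *
     *      ethtool -N <ethX> delete <loc>
     *
     * where <loc> is the location (fd_id) the filter was added at.
     */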
2690
2691/**
2692 * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
2693 * @vsi: pointer to the targeted VSI
2694 * @cmd: command to get or set RX flow classification rules
2695 *
2696 * Add Flow Director filters for a specific flow spec based on their
2697 * protocol.  Returns 0 if the filters were successfully added.
2698 **/
2699static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
2700                                 struct ethtool_rxnfc *cmd)
2701{
2702        struct ethtool_rx_flow_spec *fsp;
2703        struct i40e_fdir_filter *input;
2704        struct i40e_pf *pf;
2705        int ret = -EINVAL;
2706        u16 vf_id;
2707
2708        if (!vsi)
2709                return -EINVAL;
2710        pf = vsi->back;
2711
2712        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2713                return -EOPNOTSUPP;
2714
2715        if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
2716                return -ENOSPC;
2717
2718        if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
2719            test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
2720                return -EBUSY;
2721
2722        if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
2723                return -EBUSY;
2724
2725        fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
2726
2727        if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
2728                              pf->hw.func_caps.fd_filters_guaranteed)) {
2729                return -EINVAL;
2730        }
2731
2732        if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
2733            (fsp->ring_cookie >= vsi->num_queue_pairs))
2734                return -EINVAL;
2735
2736        input = kzalloc(sizeof(*input), GFP_KERNEL);
2737
2738        if (!input)
2739                return -ENOMEM;
2740
2741        input->fd_id = fsp->location;
2742
2743        if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2744                input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
2745        else
2746                input->dest_ctl =
2747                             I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
2748
2749        input->q_index = fsp->ring_cookie;
2750        input->flex_off = 0;
2751        input->pctype = 0;
2752        input->dest_vsi = vsi->id;
2753        input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
2754        input->cnt_index  = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
2755        input->flow_type = fsp->flow_type;
2756        input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
2757
2758        /* Reverse the src and dest notion, since the HW expects them to be from
2759         * the Tx perspective whereas the user input is from the Rx filter view.
2760         */
2761        input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
2762        input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
2763        input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2764        input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2765
2766        if (ntohl(fsp->m_ext.data[1])) {
2767                vf_id = ntohl(fsp->h_ext.data[1]);
2768                if (vf_id >= pf->num_alloc_vfs) {
2769                        netif_info(pf, drv, vsi->netdev,
2770                                   "Invalid VF id %d\n", vf_id);
2771                        goto free_input;
2772                }
2773                /* Find vsi id from vf id and override dest vsi */
2774                input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
2775                if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
2776                        netif_info(pf, drv, vsi->netdev,
2777                                   "Invalid queue id %d for VF %d\n",
2778                                   input->q_index, vf_id);
2779                        goto free_input;
2780                }
2781        }
2782
2783        ret = i40e_add_del_fdir(vsi, input, true);
2784free_input:
2785        if (ret)
2786                kfree(input);
2787        else
2788                i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
2789
2790        return ret;
2791}
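
    /* Example (illustrative only): ETHTOOL_SRXCLSRLINS corresponds to
     * ethtool's ntuple/Flow Director interface, e.g.
     *
     *      ethtool -N <ethX> flow-type tcp4 src-ip 192.168.1.1 \
     *              dst-port 80 action 2 loc 7
     *
     * "action -1" drops matching packets (RX_CLS_FLOW_DISC above); any other
     * value selects the receive queue.  The second 32-bit word of the
     * user-def field, when specified, redirects the flow to a VF as handled
     * above.
     */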
2792
2793/**
2794 * i40e_set_rxnfc - command to set RX flow classification rules
2795 * @netdev: network interface device structure
2796 * @cmd: ethtool rxnfc command
2797 *
2798 * Returns 0 on success, -EOPNOTSUPP if the command is not supported.
2799 **/
2800static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
2801{
2802        struct i40e_netdev_priv *np = netdev_priv(netdev);
2803        struct i40e_vsi *vsi = np->vsi;
2804        struct i40e_pf *pf = vsi->back;
2805        int ret = -EOPNOTSUPP;
2806
2807        switch (cmd->cmd) {
2808        case ETHTOOL_SRXFH:
2809                ret = i40e_set_rss_hash_opt(pf, cmd);
2810                break;
2811        case ETHTOOL_SRXCLSRLINS:
2812                ret = i40e_add_fdir_ethtool(vsi, cmd);
2813                break;
2814        case ETHTOOL_SRXCLSRLDEL:
2815                ret = i40e_del_fdir_entry(vsi, cmd);
2816                break;
2817        default:
2818                break;
2819        }
2820
2821        return ret;
2822}
2823
2824/**
2825 * i40e_max_channels - get the maximum number of combined channels supported
2826 * @vsi: vsi pointer
2827 **/
2828static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
2829{
2830        /* TODO: This code assumes DCB and FD is disabled for now. */
2831        return vsi->alloc_queue_pairs;
2832}
2833
2834/**
2835 * i40e_get_channels - Get the current and maximum supported channels
2836 * @dev: network interface device structure
2837 * @ch: ethtool channels structure
2838 *
2839 * We don't support separate tx and rx queues as channels. The other count
2840 * represents how many queues are being used for control. max_combined counts
2841 * how many queue pairs we can support. They may not be mapped 1 to 1 with
2842 * q_vectors since we support a lot more queue pairs than q_vectors.
2843 **/
2844static void i40e_get_channels(struct net_device *dev,
2845                               struct ethtool_channels *ch)
2846{
2847        struct i40e_netdev_priv *np = netdev_priv(dev);
2848        struct i40e_vsi *vsi = np->vsi;
2849        struct i40e_pf *pf = vsi->back;
2850
2851        /* report maximum channels */
2852        ch->max_combined = i40e_max_channels(vsi);
2853
2854        /* report info for other vector */
2855        ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
2856        ch->max_other = ch->other_count;
2857
2858        /* Note: This code assumes DCB is disabled for now. */
2859        ch->combined_count = vsi->num_queue_pairs;
2860}
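
    /* Example (illustrative only): the counts filled in above are what
     * "ethtool -l <ethX>" reports as the combined and other channels.
     */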
2861
2862/**
2863 * i40e_set_channels - Set the new channels count.
2864 * @dev: network interface device structure
2865 * @ch: ethtool channels structure
2866 *
2867 * The new channels count may not be the same as requested by the user,
2868 * since it gets rounded down to a power of two.
2869 **/
2870static int i40e_set_channels(struct net_device *dev,
2871                              struct ethtool_channels *ch)
2872{
2873        const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
2874        struct i40e_netdev_priv *np = netdev_priv(dev);
2875        unsigned int count = ch->combined_count;
2876        struct i40e_vsi *vsi = np->vsi;
2877        struct i40e_pf *pf = vsi->back;
2878        struct i40e_fdir_filter *rule;
2879        struct hlist_node *node2;
2880        int new_count;
2881        int err = 0;
2882
2883        /* We do not support setting channels for any other VSI at present */
2884        if (vsi->type != I40E_VSI_MAIN)
2885                return -EINVAL;
2886
2887        /* verify they are not requesting separate vectors */
2888        if (!count || ch->rx_count || ch->tx_count)
2889                return -EINVAL;
2890
2891        /* verify other_count has not changed */
2892        if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
2893                return -EINVAL;
2894
2895        /* verify the number of channels does not exceed hardware limits */
2896        if (count > i40e_max_channels(vsi))
2897                return -EINVAL;
2898
2899        /* verify that the number of channels does not invalidate any current
2900         * flow director rules
2901         */
2902        hlist_for_each_entry_safe(rule, node2,
2903                                  &pf->fdir_filter_list, fdir_node) {
2904                if (rule->dest_ctl != drop && count <= rule->q_index) {
2905                        dev_warn(&pf->pdev->dev,
2906                                 "Existing user defined filter %d assigns flow to queue %d\n",
2907                                 rule->fd_id, rule->q_index);
2908                        err = -EINVAL;
2909                }
2910        }
2911
2912        if (err) {
2913                dev_err(&pf->pdev->dev,
2914                        "Existing filter rules must be deleted to reduce combined channel count to %d\n",
2915                        count);
2916                return err;
2917        }
2918
2919        /* update feature limits from largest to smallest supported values */
2920        /* TODO: Flow director limit, DCB etc */
2921
2922        /* use rss_reconfig to rebuild with new queue count and update traffic
2923         * class queue mapping
2924         */
2925        new_count = i40e_reconfig_rss_queues(pf, count);
2926        if (new_count > 0)
2927                return 0;
2928        else
2929                return -EINVAL;
2930}
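
    /* Example (illustrative only): the combined channel count is changed with
     *
     *      ethtool -L <ethX> combined 8
     *
     * Requests for separate rx/tx channel counts are rejected above.
     */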
2931
2932/**
2933 * i40e_get_rxfh_key_size - get the RSS hash key size
2934 * @netdev: network interface device structure
2935 *
2936 * Returns the hash key size.
2937 **/
2938static u32 i40e_get_rxfh_key_size(struct net_device *netdev)
2939{
2940        return I40E_HKEY_ARRAY_SIZE;
2941}
2942
2943/**
2944 * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
2945 * @netdev: network interface device structure
2946 *
2947 * Returns the table size.
2948 **/
2949static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
2950{
2951        return I40E_HLUT_ARRAY_SIZE;
2952}
2953
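    /**
     * i40e_get_rxfh - get the rx flow hash indirection table and hash key
     * @netdev: network interface device structure
     * @indir: indirection table
     * @key: hash key
     * @hfunc: hash function
     *
     * Reports the Toeplitz hash function and, when @indir is provided, copies
     * the current lookup table (and the hash key, if @key is non-NULL) back
     * to the caller.
     *
     * Returns 0 on success, negative on failure.
     **/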
2954static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2955                         u8 *hfunc)
2956{
2957        struct i40e_netdev_priv *np = netdev_priv(netdev);
2958        struct i40e_vsi *vsi = np->vsi;
2959        u8 *lut, *seed = NULL;
2960        int ret;
2961        u16 i;
2962
2963        if (hfunc)
2964                *hfunc = ETH_RSS_HASH_TOP;
2965
2966        if (!indir)
2967                return 0;
2968
2969        seed = key;
2970        lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
2971        if (!lut)
2972                return -ENOMEM;
2973        ret = i40e_get_rss(vsi, seed, lut, I40E_HLUT_ARRAY_SIZE);
2974        if (ret)
2975                goto out;
2976        for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
2977                indir[i] = (u32)(lut[i]);
2978
2979out:
2980        kfree(lut);
2981
2982        return ret;
2983}
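
    /* Example (illustrative only): "ethtool -x <ethX>" displays the
     * indirection table and hash key returned by this handler.
     */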
2984
2985/**
2986 * i40e_set_rxfh - set the rx flow hash indirection table
2987 * @netdev: network interface device structure
2988 * @indir: indirection table
2989 * @key: hash key
     * @hfunc: hash function; only ETH_RSS_HASH_TOP (or no change) is accepted
2990 *
2991 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
2992 * returns 0 after programming the table.
2993 **/
2994static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
2995                         const u8 *key, const u8 hfunc)
2996{
2997        struct i40e_netdev_priv *np = netdev_priv(netdev);
2998        struct i40e_vsi *vsi = np->vsi;
2999        struct i40e_pf *pf = vsi->back;
3000        u8 *seed = NULL;
3001        u16 i;
3002
3003        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
3004                return -EOPNOTSUPP;
3005
3006        if (key) {
3007                if (!vsi->rss_hkey_user) {
3008                        vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
3009                                                     GFP_KERNEL);
3010                        if (!vsi->rss_hkey_user)
3011                                return -ENOMEM;
3012                }
3013                memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE);
3014                seed = vsi->rss_hkey_user;
3015        }
3016        if (!vsi->rss_lut_user) {
3017                vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
3018                if (!vsi->rss_lut_user)
3019                        return -ENOMEM;
3020        }
3021
3022        /* Each 32-bit entry in 'indir' is stored as an 8-bit LUT entry */
3023        if (indir)
3024                for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
3025                        vsi->rss_lut_user[i] = (u8)(indir[i]);
3026        else
3027                i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
3028                                  vsi->rss_size);
3029
3030        return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
3031                               I40E_HLUT_ARRAY_SIZE);
3032}
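
    /* Example (illustrative only): this handler backs commands such as
     *
     *      ethtool -X <ethX> equal 8
     *      ethtool -X <ethX> hkey <colon-separated hex key bytes>
     *
     * When no indirection table is supplied, a default LUT covering
     * vsi->rss_size queues is generated via i40e_fill_rss_lut().
     */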
3033
3034/**
3035 * i40e_get_priv_flags - report device private flags
3036 * @dev: network interface device structure
3037 *
3038 * The string set count and the strings returned for the private flags must
3039 * stay in sync with the flags reported here.  Add new strings for each flag
3040 * to the i40e_priv_flags_strings array.
3041 *
3042 * Returns a u32 bitmap of flags.
3043 **/
3044static u32 i40e_get_priv_flags(struct net_device *dev)
3045{
3046        struct i40e_netdev_priv *np = netdev_priv(dev);
3047        struct i40e_vsi *vsi = np->vsi;
3048        struct i40e_pf *pf = vsi->back;
3049        u32 ret_flags = 0;
3050
3051        ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
3052                I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
3053        ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
3054                I40E_PRIV_FLAGS_FD_ATR : 0;
3055        ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
3056                I40E_PRIV_FLAGS_VEB_STATS : 0;
3057        ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
3058                0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
3059        if (pf->hw.pf_id == 0) {
3060                ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
3061                        I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0;
3062        }
3063
3064        return ret_flags;
3065}
3066
3067/**
3068 * i40e_set_priv_flags - set private flags
3069 * @dev: network interface device structure
3070 * @flags: bit flags to be set
3071 **/
3072static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
3073{
3074        struct i40e_netdev_priv *np = netdev_priv(dev);
3075        struct i40e_vsi *vsi = np->vsi;
3076        struct i40e_pf *pf = vsi->back;
3077        u16 sw_flags = 0, valid_flags = 0;
3078        bool reset_required = false;
3079        bool promisc_change = false;
3080        int ret;
3081
3082        /* NOTE: MFP is not settable */
3083
3084        if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
3085                pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
3086        else
3087                pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
3088
3089        /* allow the user to control the state of the Flow
3090         * Director ATR (Application Targeted Routing) feature
3091         * of the driver
3092         */
3093        if (flags & I40E_PRIV_FLAGS_FD_ATR) {
3094                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
3095        } else {
3096                pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
3097                pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
3098
3099                /* flush current ATR settings */
3100                set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
3101        }
3102
3103        if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
3104            !(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
3105                pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
3106                reset_required = true;
3107        } else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) &&
3108                   (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
3109                pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
3110                reset_required = true;
3111        }
3112
3113        if (pf->hw.pf_id == 0) {
3114                if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
3115                    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
3116                        pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
3117                        promisc_change = true;
3118                } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
3119                           (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
3120                        pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT;
3121                        promisc_change = true;
3122                }
3123        }
3124        if (promisc_change) {
3125                if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
3126                        sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
3127                valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
3128                ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
3129                                                NULL);
3130                if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
3131                        dev_info(&pf->pdev->dev,
3132                                 "couldn't set switch config bits, err %s aq_err %s\n",
3133                                 i40e_stat_str(&pf->hw, ret),
3134                                 i40e_aq_str(&pf->hw,
3135                                             pf->hw.aq.asq_last_status));
3136                        /* not a fatal problem, just keep going */
3137                }
3138        }
3139
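            /* HW ATR eviction is enabled by clearing its auto-disable bit,
             * and only when the hardware reports the capability; otherwise
             * the auto-disable bit is (re)set.
             */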
3140        if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
3141            (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
3142                pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
3143        else
3144                pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
3145
3146        /* if needed, issue reset to cause things to take effect */
3147        if (reset_required)
3148                i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
3149
3150        return 0;
3151}
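
    /* Example (illustrative only): private flags are inspected and toggled
     * with
     *
     *      ethtool --show-priv-flags <ethX>
     *      ethtool --set-priv-flags <ethX> <flag-name> on|off
     *
     * where <flag-name> is one of the strings in i40e_priv_flags_strings
     * (defined earlier in this file).
     */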
3152
3153static const struct ethtool_ops i40e_ethtool_ops = {
3154        .get_settings           = i40e_get_settings,
3155        .set_settings           = i40e_set_settings,
3156        .get_drvinfo            = i40e_get_drvinfo,
3157        .get_regs_len           = i40e_get_regs_len,
3158        .get_regs               = i40e_get_regs,
3159        .nway_reset             = i40e_nway_reset,
3160        .get_link               = ethtool_op_get_link,
3161        .get_wol                = i40e_get_wol,
3162        .set_wol                = i40e_set_wol,
3163        .set_eeprom             = i40e_set_eeprom,
3164        .get_eeprom_len         = i40e_get_eeprom_len,
3165        .get_eeprom             = i40e_get_eeprom,
3166        .get_ringparam          = i40e_get_ringparam,
3167        .set_ringparam          = i40e_set_ringparam,
3168        .get_pauseparam         = i40e_get_pauseparam,
3169        .set_pauseparam         = i40e_set_pauseparam,
3170        .get_msglevel           = i40e_get_msglevel,
3171        .set_msglevel           = i40e_set_msglevel,
3172        .get_rxnfc              = i40e_get_rxnfc,
3173        .set_rxnfc              = i40e_set_rxnfc,
3174        .self_test              = i40e_diag_test,
3175        .get_strings            = i40e_get_strings,
3176        .set_phys_id            = i40e_set_phys_id,
3177        .get_sset_count         = i40e_get_sset_count,
3178        .get_ethtool_stats      = i40e_get_ethtool_stats,
3179        .get_coalesce           = i40e_get_coalesce,
3180        .set_coalesce           = i40e_set_coalesce,
3181        .get_rxfh_key_size      = i40e_get_rxfh_key_size,
3182        .get_rxfh_indir_size    = i40e_get_rxfh_indir_size,
3183        .get_rxfh               = i40e_get_rxfh,
3184        .set_rxfh               = i40e_set_rxfh,
3185        .get_channels           = i40e_get_channels,
3186        .set_channels           = i40e_set_channels,
3187        .get_ts_info            = i40e_get_ts_info,
3188        .get_priv_flags         = i40e_get_priv_flags,
3189        .set_priv_flags         = i40e_set_priv_flags,
3190        .get_per_queue_coalesce = i40e_get_per_queue_coalesce,
3191        .set_per_queue_coalesce = i40e_set_per_queue_coalesce,
3192};
3193
3194void i40e_set_ethtool_ops(struct net_device *netdev)
3195{
3196        netdev->ethtool_ops = &i40e_ethtool_ops;
3197}
3198