linux/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "cna.h"

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>

#include "bna.h"

#include "bnad.h"

#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
#define BNAD_NUM_RXQ_COUNTERS 7
#define BNAD_NUM_TXQ_COUNTERS 5

static const char *bnad_net_stats_strings[] = {
        "rx_packets",
        "tx_packets",
        "rx_bytes",
        "tx_bytes",
        "rx_errors",
        "tx_errors",
        "rx_dropped",
        "tx_dropped",
        "multicast",
        "collisions",
        "rx_length_errors",
        "rx_crc_errors",
        "rx_frame_errors",
        "tx_fifo_errors",

        "netif_queue_stop",
        "netif_queue_wakeup",
        "netif_queue_stopped",
        "tso4",
        "tso6",
        "tso_err",
        "tcpcsum_offload",
        "udpcsum_offload",
        "csum_help",
        "tx_skb_too_short",
        "tx_skb_stopping",
        "tx_skb_max_vectors",
        "tx_skb_mss_too_long",
        "tx_skb_tso_too_short",
        "tx_skb_tso_prepare",
        "tx_skb_non_tso_too_long",
        "tx_skb_tcp_hdr",
        "tx_skb_udp_hdr",
        "tx_skb_csum_err",
        "tx_skb_headlen_too_long",
        "tx_skb_headlen_zero",
        "tx_skb_frag_zero",
        "tx_skb_len_mismatch",
        "tx_skb_map_failed",
        "hw_stats_updates",
        "netif_rx_dropped",

        "link_toggle",
        "cee_toggle",

        "rxp_info_alloc_failed",
        "mbox_intr_disabled",
        "mbox_intr_enabled",
        "tx_unmap_q_alloc_failed",
        "rx_unmap_q_alloc_failed",
        "rxbuf_alloc_failed",
        "rxbuf_map_failed",

        "mac_stats_clr_cnt",
        "mac_frame_64",
        "mac_frame_65_127",
        "mac_frame_128_255",
        "mac_frame_256_511",
        "mac_frame_512_1023",
        "mac_frame_1024_1518",
        "mac_frame_1518_1522",
        "mac_rx_bytes",
        "mac_rx_packets",
        "mac_rx_fcs_error",
        "mac_rx_multicast",
        "mac_rx_broadcast",
        "mac_rx_control_frames",
        "mac_rx_pause",
        "mac_rx_unknown_opcode",
        "mac_rx_alignment_error",
        "mac_rx_frame_length_error",
        "mac_rx_code_error",
        "mac_rx_carrier_sense_error",
        "mac_rx_undersize",
        "mac_rx_oversize",
        "mac_rx_fragments",
        "mac_rx_jabber",
        "mac_rx_drop",

        "mac_tx_bytes",
        "mac_tx_packets",
        "mac_tx_multicast",
        "mac_tx_broadcast",
        "mac_tx_pause",
        "mac_tx_deferral",
        "mac_tx_excessive_deferral",
        "mac_tx_single_collision",
        "mac_tx_muliple_collision",
        "mac_tx_late_collision",
        "mac_tx_excessive_collision",
        "mac_tx_total_collision",
        "mac_tx_pause_honored",
        "mac_tx_drop",
        "mac_tx_jabber",
        "mac_tx_fcs_error",
        "mac_tx_control_frame",
        "mac_tx_oversize",
        "mac_tx_undersize",
        "mac_tx_fragments",

        "bpc_tx_pause_0",
        "bpc_tx_pause_1",
        "bpc_tx_pause_2",
        "bpc_tx_pause_3",
        "bpc_tx_pause_4",
        "bpc_tx_pause_5",
        "bpc_tx_pause_6",
        "bpc_tx_pause_7",
        "bpc_tx_zero_pause_0",
        "bpc_tx_zero_pause_1",
        "bpc_tx_zero_pause_2",
        "bpc_tx_zero_pause_3",
        "bpc_tx_zero_pause_4",
        "bpc_tx_zero_pause_5",
        "bpc_tx_zero_pause_6",
        "bpc_tx_zero_pause_7",
        "bpc_tx_first_pause_0",
        "bpc_tx_first_pause_1",
        "bpc_tx_first_pause_2",
        "bpc_tx_first_pause_3",
        "bpc_tx_first_pause_4",
        "bpc_tx_first_pause_5",
        "bpc_tx_first_pause_6",
        "bpc_tx_first_pause_7",

        "bpc_rx_pause_0",
        "bpc_rx_pause_1",
        "bpc_rx_pause_2",
        "bpc_rx_pause_3",
        "bpc_rx_pause_4",
        "bpc_rx_pause_5",
        "bpc_rx_pause_6",
        "bpc_rx_pause_7",
        "bpc_rx_zero_pause_0",
        "bpc_rx_zero_pause_1",
        "bpc_rx_zero_pause_2",
        "bpc_rx_zero_pause_3",
        "bpc_rx_zero_pause_4",
        "bpc_rx_zero_pause_5",
        "bpc_rx_zero_pause_6",
        "bpc_rx_zero_pause_7",
        "bpc_rx_first_pause_0",
        "bpc_rx_first_pause_1",
        "bpc_rx_first_pause_2",
        "bpc_rx_first_pause_3",
        "bpc_rx_first_pause_4",
        "bpc_rx_first_pause_5",
        "bpc_rx_first_pause_6",
        "bpc_rx_first_pause_7",

        "rad_rx_frames",
        "rad_rx_octets",
        "rad_rx_vlan_frames",
        "rad_rx_ucast",
        "rad_rx_ucast_octets",
        "rad_rx_ucast_vlan",
        "rad_rx_mcast",
        "rad_rx_mcast_octets",
        "rad_rx_mcast_vlan",
        "rad_rx_bcast",
        "rad_rx_bcast_octets",
        "rad_rx_bcast_vlan",
        "rad_rx_drops",

        "rlb_rad_rx_frames",
        "rlb_rad_rx_octets",
        "rlb_rad_rx_vlan_frames",
        "rlb_rad_rx_ucast",
        "rlb_rad_rx_ucast_octets",
        "rlb_rad_rx_ucast_vlan",
        "rlb_rad_rx_mcast",
        "rlb_rad_rx_mcast_octets",
        "rlb_rad_rx_mcast_vlan",
        "rlb_rad_rx_bcast",
        "rlb_rad_rx_bcast_octets",
        "rlb_rad_rx_bcast_vlan",
        "rlb_rad_rx_drops",

        "fc_rx_ucast_octets",
        "fc_rx_ucast",
        "fc_rx_ucast_vlan",
        "fc_rx_mcast_octets",
        "fc_rx_mcast",
        "fc_rx_mcast_vlan",
        "fc_rx_bcast_octets",
        "fc_rx_bcast",
        "fc_rx_bcast_vlan",

        "fc_tx_ucast_octets",
        "fc_tx_ucast",
        "fc_tx_ucast_vlan",
        "fc_tx_mcast_octets",
        "fc_tx_mcast",
        "fc_tx_mcast_vlan",
        "fc_tx_bcast_octets",
        "fc_tx_bcast",
        "fc_tx_bcast_vlan",
        "fc_tx_parity_errors",
        "fc_tx_timeout",
        "fc_tx_fid_parity_errors",
};

#define BNAD_ETHTOOL_STATS_NUM  ARRAY_SIZE(bnad_net_stats_strings)

static int
bnad_get_link_ksettings(struct net_device *netdev,
                        struct ethtool_link_ksettings *cmd)
{
        u32 supported, advertising;

        supported = SUPPORTED_10000baseT_Full;
        advertising = ADVERTISED_10000baseT_Full;
        cmd->base.autoneg = AUTONEG_DISABLE;
        supported |= SUPPORTED_FIBRE;
        advertising |= ADVERTISED_FIBRE;
        cmd->base.port = PORT_FIBRE;
        cmd->base.phy_address = 0;

        if (netif_carrier_ok(netdev)) {
                cmd->base.speed = SPEED_10000;
                cmd->base.duplex = DUPLEX_FULL;
        } else {
                cmd->base.speed = SPEED_UNKNOWN;
                cmd->base.duplex = DUPLEX_UNKNOWN;
        }

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                advertising);

        return 0;
}

static int
bnad_set_link_ksettings(struct net_device *netdev,
                        const struct ethtool_link_ksettings *cmd)
{
        /* 10G full duplex setting supported only */
        if (cmd->base.autoneg == AUTONEG_ENABLE)
                return -EOPNOTSUPP;

        if ((cmd->base.speed == SPEED_10000) &&
            (cmd->base.duplex == DUPLEX_FULL))
                return 0;

        return -EOPNOTSUPP;
}

static void
bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
        struct bnad *bnad = netdev_priv(netdev);
        struct bfa_ioc_attr *ioc_attr;
        unsigned long flags;

        strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));

        ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
        if (ioc_attr) {
                spin_lock_irqsave(&bnad->bna_lock, flags);
                bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
                spin_unlock_irqrestore(&bnad->bna_lock, flags);

                strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
                        sizeof(drvinfo->fw_version));
                kfree(ioc_attr);
        }

        strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
                sizeof(drvinfo->bus_info));
}

static void
bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
{
        wolinfo->supported = 0;
        wolinfo->wolopts = 0;
}

static int bnad_get_coalesce(struct net_device *netdev,
                             struct ethtool_coalesce *coalesce,
                             struct kernel_ethtool_coalesce *kernel_coal,
                             struct netlink_ext_ack *extack)
{
        struct bnad *bnad = netdev_priv(netdev);
        unsigned long flags;

        /* Lock rqd. to access bnad->bna_lock */
        spin_lock_irqsave(&bnad->bna_lock, flags);
        coalesce->use_adaptive_rx_coalesce =
                (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
                                        BFI_COALESCING_TIMER_UNIT;
        coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
                                        BFI_COALESCING_TIMER_UNIT;
        coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;

        return 0;
}

static int bnad_set_coalesce(struct net_device *netdev,
                             struct ethtool_coalesce *coalesce,
                             struct kernel_ethtool_coalesce *kernel_coal,
                             struct netlink_ext_ack *extack)
{
        struct bnad *bnad = netdev_priv(netdev);
        unsigned long flags;
        int to_del = 0;

        if (coalesce->rx_coalesce_usecs == 0 ||
            coalesce->rx_coalesce_usecs >
            BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
                return -EINVAL;

        if (coalesce->tx_coalesce_usecs == 0 ||
            coalesce->tx_coalesce_usecs >
            BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
                return -EINVAL;

        mutex_lock(&bnad->conf_mutex);
        /*
         * Do not need to store rx_coalesce_usecs here
         * Every time DIM is disabled, we can get it from the
         * stack.
         */
        spin_lock_irqsave(&bnad->bna_lock, flags);
        if (coalesce->use_adaptive_rx_coalesce) {
                if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
                        bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
                        bnad_dim_timer_start(bnad);
                }
        } else {
                if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
                        bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
                        if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
                            test_bit(BNAD_RF_DIM_TIMER_RUNNING,
                            &bnad->run_flags)) {
                                clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
                                                        &bnad->run_flags);
                                to_del = 1;
                        }
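                        /*
                         * Release bna_lock across del_timer_sync(); the DIM
                         * timer callback may itself take this lock.
                         */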
                        spin_unlock_irqrestore(&bnad->bna_lock, flags);
                        if (to_del)
                                del_timer_sync(&bnad->dim_timer);
                        spin_lock_irqsave(&bnad->bna_lock, flags);
                        bnad_rx_coalescing_timeo_set(bnad);
                }
        }
        if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
                                        BFI_COALESCING_TIMER_UNIT) {
                bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
                                                BFI_COALESCING_TIMER_UNIT;
                bnad_tx_coalescing_timeo_set(bnad);
        }

        if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
                                        BFI_COALESCING_TIMER_UNIT) {
                bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
                                                BFI_COALESCING_TIMER_UNIT;

                if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
                        bnad_rx_coalescing_timeo_set(bnad);

        }

        /* Add Tx Inter-pkt DMA count?  */

        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        mutex_unlock(&bnad->conf_mutex);
        return 0;
}

static void
bnad_get_ringparam(struct net_device *netdev,
                   struct ethtool_ringparam *ringparam)
{
        struct bnad *bnad = netdev_priv(netdev);

        ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
        ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;

        ringparam->rx_pending = bnad->rxq_depth;
        ringparam->tx_pending = bnad->txq_depth;
}

static int
bnad_set_ringparam(struct net_device *netdev,
                   struct ethtool_ringparam *ringparam)
{
        int i, current_err, err = 0;
        struct bnad *bnad = netdev_priv(netdev);
        unsigned long flags;

        mutex_lock(&bnad->conf_mutex);
        if (ringparam->rx_pending == bnad->rxq_depth &&
            ringparam->tx_pending == bnad->txq_depth) {
                mutex_unlock(&bnad->conf_mutex);
                return 0;
        }

        if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
            ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
            !is_power_of_2(ringparam->rx_pending)) {
                mutex_unlock(&bnad->conf_mutex);
                return -EINVAL;
        }
        if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
            ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
            !is_power_of_2(ringparam->tx_pending)) {
                mutex_unlock(&bnad->conf_mutex);
                return -EINVAL;
        }

        if (ringparam->rx_pending != bnad->rxq_depth) {
                bnad->rxq_depth = ringparam->rx_pending;
                if (!netif_running(netdev)) {
                        mutex_unlock(&bnad->conf_mutex);
                        return 0;
                }

                for (i = 0; i < bnad->num_rx; i++) {
                        if (!bnad->rx_info[i].rx)
                                continue;
                        bnad_destroy_rx(bnad, i);
                        current_err = bnad_setup_rx(bnad, i);
                        if (current_err && !err)
                                err = current_err;
                }

                if (!err && bnad->rx_info[0].rx) {
                        /* restore rx configuration */
                        bnad_restore_vlans(bnad, 0);
                        bnad_enable_default_bcast(bnad);
                        spin_lock_irqsave(&bnad->bna_lock, flags);
                        bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
                        spin_unlock_irqrestore(&bnad->bna_lock, flags);
                        bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
                                             BNAD_CF_PROMISC);
                        bnad_set_rx_mode(netdev);
                }
        }
        if (ringparam->tx_pending != bnad->txq_depth) {
                bnad->txq_depth = ringparam->tx_pending;
                if (!netif_running(netdev)) {
                        mutex_unlock(&bnad->conf_mutex);
                        return 0;
                }

                for (i = 0; i < bnad->num_tx; i++) {
                        if (!bnad->tx_info[i].tx)
                                continue;
                        bnad_destroy_tx(bnad, i);
                        current_err = bnad_setup_tx(bnad, i);
                        if (current_err && !err)
                                err = current_err;
                }
        }

        mutex_unlock(&bnad->conf_mutex);
        return err;
}

static void
bnad_get_pauseparam(struct net_device *netdev,
                    struct ethtool_pauseparam *pauseparam)
{
        struct bnad *bnad = netdev_priv(netdev);

        pauseparam->autoneg = 0;
        pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
        pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
}

static int
bnad_set_pauseparam(struct net_device *netdev,
                    struct ethtool_pauseparam *pauseparam)
{
        struct bnad *bnad = netdev_priv(netdev);
        struct bna_pause_config pause_config;
        unsigned long flags;

        if (pauseparam->autoneg == AUTONEG_ENABLE)
                return -EINVAL;

        mutex_lock(&bnad->conf_mutex);
        if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
            pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
                pause_config.rx_pause = pauseparam->rx_pause;
                pause_config.tx_pause = pauseparam->tx_pause;
                spin_lock_irqsave(&bnad->bna_lock, flags);
                bna_enet_pause_config(&bnad->bna.enet, &pause_config);
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
        }
        mutex_unlock(&bnad->conf_mutex);
        return 0;
}

static void bnad_get_txf_strings(u8 **string, int f_num)
{
        ethtool_sprintf(string, "txf%d_ucast_octets", f_num);
        ethtool_sprintf(string, "txf%d_ucast", f_num);
        ethtool_sprintf(string, "txf%d_ucast_vlan", f_num);
        ethtool_sprintf(string, "txf%d_mcast_octets", f_num);
        ethtool_sprintf(string, "txf%d_mcast", f_num);
        ethtool_sprintf(string, "txf%d_mcast_vlan", f_num);
        ethtool_sprintf(string, "txf%d_bcast_octets", f_num);
        ethtool_sprintf(string, "txf%d_bcast", f_num);
        ethtool_sprintf(string, "txf%d_bcast_vlan", f_num);
        ethtool_sprintf(string, "txf%d_errors", f_num);
        ethtool_sprintf(string, "txf%d_filter_vlan", f_num);
        ethtool_sprintf(string, "txf%d_filter_mac_sa", f_num);
}

static void bnad_get_rxf_strings(u8 **string, int f_num)
{
        ethtool_sprintf(string, "rxf%d_ucast_octets", f_num);
        ethtool_sprintf(string, "rxf%d_ucast", f_num);
        ethtool_sprintf(string, "rxf%d_ucast_vlan", f_num);
        ethtool_sprintf(string, "rxf%d_mcast_octets", f_num);
        ethtool_sprintf(string, "rxf%d_mcast", f_num);
        ethtool_sprintf(string, "rxf%d_mcast_vlan", f_num);
        ethtool_sprintf(string, "rxf%d_bcast_octets", f_num);
        ethtool_sprintf(string, "rxf%d_bcast", f_num);
        ethtool_sprintf(string, "rxf%d_bcast_vlan", f_num);
        ethtool_sprintf(string, "rxf%d_frame_drops", f_num);
}

static void bnad_get_cq_strings(u8 **string, int q_num)
{
        ethtool_sprintf(string, "cq%d_producer_index", q_num);
        ethtool_sprintf(string, "cq%d_consumer_index", q_num);
        ethtool_sprintf(string, "cq%d_hw_producer_index", q_num);
        ethtool_sprintf(string, "cq%d_intr", q_num);
        ethtool_sprintf(string, "cq%d_poll", q_num);
        ethtool_sprintf(string, "cq%d_schedule", q_num);
        ethtool_sprintf(string, "cq%d_keep_poll", q_num);
        ethtool_sprintf(string, "cq%d_complete", q_num);
}

static void bnad_get_rxq_strings(u8 **string, int q_num)
{
        ethtool_sprintf(string, "rxq%d_packets", q_num);
        ethtool_sprintf(string, "rxq%d_bytes", q_num);
        ethtool_sprintf(string, "rxq%d_packets_with_error", q_num);
        ethtool_sprintf(string, "rxq%d_allocbuf_failed", q_num);
        ethtool_sprintf(string, "rxq%d_mapbuf_failed", q_num);
        ethtool_sprintf(string, "rxq%d_producer_index", q_num);
        ethtool_sprintf(string, "rxq%d_consumer_index", q_num);
}

static void bnad_get_txq_strings(u8 **string, int q_num)
{
        ethtool_sprintf(string, "txq%d_packets", q_num);
        ethtool_sprintf(string, "txq%d_bytes", q_num);
        ethtool_sprintf(string, "txq%d_producer_index", q_num);
        ethtool_sprintf(string, "txq%d_consumer_index", q_num);
        ethtool_sprintf(string, "txq%d_hw_consumer_index", q_num);
}

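/*
 * Emit the ethtool statistics name strings. The names and their order here
 * must match the values filled in by bnad_get_ethtool_stats() and the count
 * returned by bnad_get_stats_count_locked().
 */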
static void
bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
{
        struct bnad *bnad = netdev_priv(netdev);
        int i, j, q_num;
        u32 bmap;

        if (stringset != ETH_SS_STATS)
                return;

        mutex_lock(&bnad->conf_mutex);

        for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
                BUG_ON(!(strlen(bnad_net_stats_strings[i]) < ETH_GSTRING_LEN));
                ethtool_sprintf(&string, bnad_net_stats_strings[i]);
        }

        bmap = bna_tx_rid_mask(&bnad->bna);
        for (i = 0; bmap; i++) {
                if (bmap & 1)
                        bnad_get_txf_strings(&string, i);
                bmap >>= 1;
        }

        bmap = bna_rx_rid_mask(&bnad->bna);
        for (i = 0; bmap; i++) {
                if (bmap & 1)
                        bnad_get_rxf_strings(&string, i);
                bmap >>= 1;
        }

        q_num = 0;
        for (i = 0; i < bnad->num_rx; i++) {
                if (!bnad->rx_info[i].rx)
                        continue;
                for (j = 0; j < bnad->num_rxp_per_rx; j++)
                        bnad_get_cq_strings(&string, q_num++);
        }

        q_num = 0;
        for (i = 0; i < bnad->num_rx; i++) {
                if (!bnad->rx_info[i].rx)
                        continue;
                for (j = 0; j < bnad->num_rxp_per_rx; j++) {
                        bnad_get_rxq_strings(&string, q_num++);
                        if (bnad->rx_info[i].rx_ctrl[j].ccb &&
                            bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
                            bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
                                bnad_get_rxq_strings(&string, q_num++);
                }
        }

        q_num = 0;
        for (i = 0; i < bnad->num_tx; i++) {
                if (!bnad->tx_info[i].tx)
                        continue;
                for (j = 0; j < bnad->num_txq_per_tx; j++)
                        bnad_get_txq_strings(&string, q_num++);
        }

        mutex_unlock(&bnad->conf_mutex);
}

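/*
 * Count the statistics exported via ethtool: the fixed driver and MAC
 * counters, plus per-TxF/RxF counters for each active function and per-CQ,
 * per-RXQ and per-TXQ counters for each configured queue.
 */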
static int
bnad_get_stats_count_locked(struct net_device *netdev)
{
        struct bnad *bnad = netdev_priv(netdev);
        int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
        u32 bmap;

        bmap = bna_tx_rid_mask(&bnad->bna);
        for (i = 0; bmap; i++) {
                if (bmap & 1)
                        txf_active_num++;
                bmap >>= 1;
        }
        bmap = bna_rx_rid_mask(&bnad->bna);
        for (i = 0; bmap; i++) {
                if (bmap & 1)
                        rxf_active_num++;
                bmap >>= 1;
        }
        count = BNAD_ETHTOOL_STATS_NUM +
                txf_active_num * BNAD_NUM_TXF_COUNTERS +
                rxf_active_num * BNAD_NUM_RXF_COUNTERS;

        for (i = 0; i < bnad->num_rx; i++) {
                if (!bnad->rx_info[i].rx)
                        continue;
                count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
                count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
                for (j = 0; j < bnad->num_rxp_per_rx; j++)
                        if (bnad->rx_info[i].rx_ctrl[j].ccb &&
                                bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
                                bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
                                count +=  BNAD_NUM_RXQ_COUNTERS;
        }

        for (i = 0; i < bnad->num_tx; i++) {
                if (!bnad->tx_info[i].tx)
                        continue;
                count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
        }
        return count;
}

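/*
 * Append the per-CQ, per-RXQ and per-TXQ counters to the ethtool stats
 * buffer, starting at index bi, and return the updated index. The order
 * mirrors the per-queue strings emitted by bnad_get_strings().
 */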
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
        int i, j;
        struct bna_rcb *rcb = NULL;
        struct bna_tcb *tcb = NULL;

        for (i = 0; i < bnad->num_rx; i++) {
                if (!bnad->rx_info[i].rx)
                        continue;
                for (j = 0; j < bnad->num_rxp_per_rx; j++)
                        if (bnad->rx_info[i].rx_ctrl[j].ccb &&
                                bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
                                bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
                                buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
                                                ccb->producer_index;
                                buf[bi++] = 0; /* ccb->consumer_index */
                                buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
                                                ccb->hw_producer_index);

                                buf[bi++] = bnad->rx_info[i].
                                                rx_ctrl[j].rx_intr_ctr;
                                buf[bi++] = bnad->rx_info[i].
                                                rx_ctrl[j].rx_poll_ctr;
                                buf[bi++] = bnad->rx_info[i].
                                                rx_ctrl[j].rx_schedule;
                                buf[bi++] = bnad->rx_info[i].
                                                rx_ctrl[j].rx_keep_poll;
                                buf[bi++] = bnad->rx_info[i].
                                                rx_ctrl[j].rx_complete;
                        }
        }
        for (i = 0; i < bnad->num_rx; i++) {
                if (!bnad->rx_info[i].rx)
                        continue;
                for (j = 0; j < bnad->num_rxp_per_rx; j++)
                        if (bnad->rx_info[i].rx_ctrl[j].ccb) {
                                if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
                                        bnad->rx_info[i].rx_ctrl[j].ccb->
                                        rcb[0]->rxq) {
                                        rcb = bnad->rx_info[i].rx_ctrl[j].
                                                        ccb->rcb[0];
                                        buf[bi++] = rcb->rxq->rx_packets;
                                        buf[bi++] = rcb->rxq->rx_bytes;
                                        buf[bi++] = rcb->rxq->
                                                        rx_packets_with_error;
                                        buf[bi++] = rcb->rxq->
                                                        rxbuf_alloc_failed;
                                        buf[bi++] = rcb->rxq->rxbuf_map_failed;
                                        buf[bi++] = rcb->producer_index;
                                        buf[bi++] = rcb->consumer_index;
                                }
                                if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
                                        bnad->rx_info[i].rx_ctrl[j].ccb->
                                        rcb[1]->rxq) {
                                        rcb = bnad->rx_info[i].rx_ctrl[j].
                                                                ccb->rcb[1];
                                        buf[bi++] = rcb->rxq->rx_packets;
                                        buf[bi++] = rcb->rxq->rx_bytes;
                                        buf[bi++] = rcb->rxq->
                                                        rx_packets_with_error;
                                        buf[bi++] = rcb->rxq->
                                                        rxbuf_alloc_failed;
                                        buf[bi++] = rcb->rxq->rxbuf_map_failed;
                                        buf[bi++] = rcb->producer_index;
                                        buf[bi++] = rcb->consumer_index;
                                }
                        }
        }

        for (i = 0; i < bnad->num_tx; i++) {
                if (!bnad->tx_info[i].tx)
                        continue;
                for (j = 0; j < bnad->num_txq_per_tx; j++)
                        if (bnad->tx_info[i].tcb[j] &&
                                bnad->tx_info[i].tcb[j]->txq) {
                                tcb = bnad->tx_info[i].tcb[j];
                                buf[bi++] = tcb->txq->tx_packets;
                                buf[bi++] = tcb->txq->tx_bytes;
                                buf[bi++] = tcb->producer_index;
                                buf[bi++] = tcb->consumer_index;
                                buf[bi++] = *(tcb->hw_consumer_index);
                        }
        }

        return bi;
}

static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
                       u64 *buf)
{
        struct bnad *bnad = netdev_priv(netdev);
        int i, j, bi = 0;
        unsigned long flags;
        struct rtnl_link_stats64 net_stats64;
        u64 *stats64;
        u32 bmap;

        mutex_lock(&bnad->conf_mutex);
        if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
                mutex_unlock(&bnad->conf_mutex);
                return;
        }

        /*
         * Used bna_lock to sync reads from bna_stats, which is written
         * under the same lock
         */
        spin_lock_irqsave(&bnad->bna_lock, flags);

        memset(&net_stats64, 0, sizeof(net_stats64));
        bnad_netdev_qstats_fill(bnad, &net_stats64);
        bnad_netdev_hwstats_fill(bnad, &net_stats64);

        buf[bi++] = net_stats64.rx_packets;
        buf[bi++] = net_stats64.tx_packets;
        buf[bi++] = net_stats64.rx_bytes;
        buf[bi++] = net_stats64.tx_bytes;
        buf[bi++] = net_stats64.rx_errors;
        buf[bi++] = net_stats64.tx_errors;
        buf[bi++] = net_stats64.rx_dropped;
        buf[bi++] = net_stats64.tx_dropped;
        buf[bi++] = net_stats64.multicast;
        buf[bi++] = net_stats64.collisions;
        buf[bi++] = net_stats64.rx_length_errors;
        buf[bi++] = net_stats64.rx_crc_errors;
        buf[bi++] = net_stats64.rx_frame_errors;
        buf[bi++] = net_stats64.tx_fifo_errors;

        /* Get netif_queue_stopped from stack */
        bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

        /* Fill driver stats into ethtool buffers */
        stats64 = (u64 *)&bnad->stats.drv_stats;
        for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
                buf[bi++] = stats64[i];

        /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
        stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats;
        for (i = 0;
             i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
                sizeof(u64);
             i++)
                buf[bi++] = stats64[i];

        /* Fill txf stats into ethtool buffers */
        bmap = bna_tx_rid_mask(&bnad->bna);
        for (i = 0; bmap; i++) {
                if (bmap & 1) {
                        stats64 = (u64 *)&bnad->stats.bna_stats->
                                                hw_stats.txf_stats[i];
                        for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
                                        sizeof(u64); j++)
                                buf[bi++] = stats64[j];
                }
                bmap >>= 1;
        }

        /*  Fill rxf stats into ethtool buffers */
        bmap = bna_rx_rid_mask(&bnad->bna);
        for (i = 0; bmap; i++) {
                if (bmap & 1) {
                        stats64 = (u64 *)&bnad->stats.bna_stats->
                                                hw_stats.rxf_stats[i];
                        for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
                                        sizeof(u64); j++)
                                buf[bi++] = stats64[j];
                }
                bmap >>= 1;
        }

        /* Fill per Q stats into ethtool buffers */
        bi = bnad_per_q_stats_fill(bnad, buf, bi);

        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        mutex_unlock(&bnad->conf_mutex);
}

static int
bnad_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return bnad_get_stats_count_locked(netdev);
        default:
                return -EOPNOTSUPP;
        }
}

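/*
 * Map a byte offset within the adapter flash to the partition containing
 * it. Returns the partition type (0 if no partition matches) and stores
 * the partition's base offset in *base_offset.
 */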
static u32
bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
                                u32 *base_offset)
{
        struct bfa_flash_attr *flash_attr;
        struct bnad_iocmd_comp fcomp;
        u32 i, flash_part = 0, ret;
        unsigned long flags = 0;

        flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
        if (!flash_attr)
                return 0;

        fcomp.bnad = bnad;
        fcomp.comp_status = 0;

        init_completion(&fcomp.comp);
        spin_lock_irqsave(&bnad->bna_lock, flags);
        ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
                                bnad_cb_completion, &fcomp);
        if (ret != BFA_STATUS_OK) {
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
                kfree(flash_attr);
                return 0;
        }
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        wait_for_completion(&fcomp.comp);
        ret = fcomp.comp_status;

        /* Check for the flash type & base offset value */
        if (ret == BFA_STATUS_OK) {
                for (i = 0; i < flash_attr->npart; i++) {
                        if (offset >= flash_attr->part[i].part_off &&
                            offset < (flash_attr->part[i].part_off +
                                      flash_attr->part[i].part_size)) {
                                flash_part = flash_attr->part[i].part_type;
                                *base_offset = flash_attr->part[i].part_off;
                                break;
                        }
                }
        }
        kfree(flash_attr);
        return flash_part;
}

static int
bnad_get_eeprom_len(struct net_device *netdev)
{
        return BFA_TOTAL_FLASH_SIZE;
}

static int
bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
                u8 *bytes)
{
        struct bnad *bnad = netdev_priv(netdev);
        struct bnad_iocmd_comp fcomp;
        u32 flash_part = 0, base_offset = 0;
        unsigned long flags = 0;
        int ret = 0;

        /* Fill the magic value */
        eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16);

        /* Query the flash partition based on the offset */
        flash_part = bnad_get_flash_partition_by_offset(bnad,
                                eeprom->offset, &base_offset);
        if (flash_part == 0)
                return -EFAULT;

        fcomp.bnad = bnad;
        fcomp.comp_status = 0;

        init_completion(&fcomp.comp);
        spin_lock_irqsave(&bnad->bna_lock, flags);
        ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
                                bnad->id, bytes, eeprom->len,
                                eeprom->offset - base_offset,
                                bnad_cb_completion, &fcomp);
        if (ret != BFA_STATUS_OK) {
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
                goto done;
        }

        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        wait_for_completion(&fcomp.comp);
        ret = fcomp.comp_status;
done:
        return ret;
}

static int
bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
                u8 *bytes)
{
        struct bnad *bnad = netdev_priv(netdev);
        struct bnad_iocmd_comp fcomp;
        u32 flash_part = 0, base_offset = 0;
        unsigned long flags = 0;
        int ret = 0;

        /* Check if the flash update request is valid */
        if (eeprom->magic != (bnad->pcidev->vendor |
                             (bnad->pcidev->device << 16)))
                return -EINVAL;

        /* Query the flash partition based on the offset */
        flash_part = bnad_get_flash_partition_by_offset(bnad,
                                eeprom->offset, &base_offset);
        if (flash_part == 0)
                return -EFAULT;

        fcomp.bnad = bnad;
        fcomp.comp_status = 0;

        init_completion(&fcomp.comp);
        spin_lock_irqsave(&bnad->bna_lock, flags);
        ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
                                bnad->id, bytes, eeprom->len,
                                eeprom->offset - base_offset,
                                bnad_cb_completion, &fcomp);
        if (ret != BFA_STATUS_OK) {
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
                goto done;
        }

        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        wait_for_completion(&fcomp.comp);
        ret = fcomp.comp_status;
done:
        return ret;
}

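/* Write a firmware image (ethtool -f) to the FWIMG flash partition. */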
static int
bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
{
        struct bnad *bnad = netdev_priv(netdev);
        struct bnad_iocmd_comp fcomp;
        const struct firmware *fw;
        int ret = 0;

        ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
        if (ret) {
                netdev_err(netdev, "can't load firmware %s\n", eflash->data);
                goto out;
        }

        fcomp.bnad = bnad;
        fcomp.comp_status = 0;

        init_completion(&fcomp.comp);
        spin_lock_irq(&bnad->bna_lock);
        ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG,
                                bnad->id, (u8 *)fw->data, fw->size, 0,
                                bnad_cb_completion, &fcomp);
        if (ret != BFA_STATUS_OK) {
                netdev_warn(netdev, "flash update failed with err=%d\n", ret);
                ret = -EIO;
                spin_unlock_irq(&bnad->bna_lock);
                goto out;
        }

        spin_unlock_irq(&bnad->bna_lock);
        wait_for_completion(&fcomp.comp);
        if (fcomp.comp_status != BFA_STATUS_OK) {
                ret = -EIO;
                netdev_warn(netdev,
                            "firmware image update failed with err=%d\n",
                            fcomp.comp_status);
        }
out:
        release_firmware(fw);
        return ret;
}

static const struct ethtool_ops bnad_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_TX_MAX_FRAMES |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
        .get_drvinfo = bnad_get_drvinfo,
        .get_wol = bnad_get_wol,
        .get_link = ethtool_op_get_link,
        .get_coalesce = bnad_get_coalesce,
        .set_coalesce = bnad_set_coalesce,
        .get_ringparam = bnad_get_ringparam,
        .set_ringparam = bnad_set_ringparam,
        .get_pauseparam = bnad_get_pauseparam,
        .set_pauseparam = bnad_set_pauseparam,
        .get_strings = bnad_get_strings,
        .get_ethtool_stats = bnad_get_ethtool_stats,
        .get_sset_count = bnad_get_sset_count,
        .get_eeprom_len = bnad_get_eeprom_len,
        .get_eeprom = bnad_get_eeprom,
        .set_eeprom = bnad_set_eeprom,
        .flash_device = bnad_flash_device,
        .get_ts_info = ethtool_op_get_ts_info,
        .get_link_ksettings = bnad_get_link_ksettings,
        .set_link_ksettings = bnad_set_link_ksettings,
};

void
bnad_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &bnad_ethtool_ops;
}
