linux/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
   1/**********************************************************************
   2 * Author: Cavium, Inc.
   3 *
   4 * Contact: support@cavium.com
   5 *          Please include "LiquidIO" in the subject.
   6 *
   7 * Copyright (c) 2003-2016 Cavium, Inc.
   8 *
   9 * This file is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License, Version 2, as
  11 * published by the Free Software Foundation.
  12 *
  13 * This file is distributed in the hope that it will be useful, but
  14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16 * NONINFRINGEMENT.  See the GNU General Public License for more details.
  17 ***********************************************************************/
  18#include <linux/netdevice.h>
  19#include <linux/net_tstamp.h>
  20#include <linux/pci.h>
  21#include "liquidio_common.h"
  22#include "octeon_droq.h"
  23#include "octeon_iq.h"
  24#include "response_manager.h"
  25#include "octeon_device.h"
  26#include "octeon_nic.h"
  27#include "octeon_main.h"
  28#include "octeon_network.h"
  29#include "cn66xx_regs.h"
  30#include "cn66xx_device.h"
  31#include "cn23xx_pf_device.h"
  32#include "cn23xx_vf_device.h"
  33
  34static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
  35
  36struct oct_intrmod_resp {
  37        u64     rh;
  38        struct oct_intrmod_cfg intrmod;
  39        u64     status;
  40};
  41
  42struct oct_mdio_cmd_resp {
  43        u64 rh;
  44        struct oct_mdio_cmd resp;
  45        u64 status;
  46};
  47
  48#define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
  49
  50/* Octeon's interface mode of operation */
  51enum {
  52        INTERFACE_MODE_DISABLED,
  53        INTERFACE_MODE_RGMII,
  54        INTERFACE_MODE_GMII,
  55        INTERFACE_MODE_SPI,
  56        INTERFACE_MODE_PCIE,
  57        INTERFACE_MODE_XAUI,
  58        INTERFACE_MODE_SGMII,
  59        INTERFACE_MODE_PICMG,
  60        INTERFACE_MODE_NPI,
  61        INTERFACE_MODE_LOOP,
  62        INTERFACE_MODE_SRIO,
  63        INTERFACE_MODE_ILK,
  64        INTERFACE_MODE_RXAUI,
  65        INTERFACE_MODE_QSGMII,
  66        INTERFACE_MODE_AGL,
  67        INTERFACE_MODE_XLAUI,
  68        INTERFACE_MODE_XFI,
  69        INTERFACE_MODE_10G_KR,
  70        INTERFACE_MODE_40G_KR4,
  71        INTERFACE_MODE_MIXED,
  72};
  73
  74#define OCT_ETHTOOL_REGDUMP_LEN  4096
  75#define OCT_ETHTOOL_REGDUMP_LEN_23XX  (4096 * 11)
  76#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF  (4096 * 2)
  77#define OCT_ETHTOOL_REGSVER  1
  78
  79/* statistics of PF */
  80static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
  81        "rx_packets",
  82        "tx_packets",
  83        "rx_bytes",
  84        "tx_bytes",
  85        "rx_errors",
  86        "tx_errors",
  87        "rx_dropped",
  88        "tx_dropped",
  89
  90        "tx_total_sent",
  91        "tx_total_fwd",
  92        "tx_err_pko",
  93        "tx_err_pki",
  94        "tx_err_link",
  95        "tx_err_drop",
  96
  97        "tx_tso",
  98        "tx_tso_packets",
  99        "tx_tso_err",
 100        "tx_vxlan",
 101
 102        "tx_mcast",
 103        "tx_bcast",
 104
 105        "mac_tx_total_pkts",
 106        "mac_tx_total_bytes",
 107        "mac_tx_mcast_pkts",
 108        "mac_tx_bcast_pkts",
 109        "mac_tx_ctl_packets",
 110        "mac_tx_total_collisions",
 111        "mac_tx_one_collision",
 112        "mac_tx_multi_collision",
 113        "mac_tx_max_collision_fail",
 114        "mac_tx_max_deferral_fail",
 115        "mac_tx_fifo_err",
 116        "mac_tx_runts",
 117
 118        "rx_total_rcvd",
 119        "rx_total_fwd",
 120        "rx_mcast",
 121        "rx_bcast",
 122        "rx_jabber_err",
 123        "rx_l2_err",
 124        "rx_frame_err",
 125        "rx_err_pko",
 126        "rx_err_link",
 127        "rx_err_drop",
 128
 129        "rx_vxlan",
 130        "rx_vxlan_err",
 131
 132        "rx_lro_pkts",
 133        "rx_lro_bytes",
 134        "rx_total_lro",
 135
 136        "rx_lro_aborts",
 137        "rx_lro_aborts_port",
 138        "rx_lro_aborts_seq",
 139        "rx_lro_aborts_tsval",
 140        "rx_lro_aborts_timer",
 141        "rx_fwd_rate",
 142
 143        "mac_rx_total_rcvd",
 144        "mac_rx_bytes",
 145        "mac_rx_total_bcst",
 146        "mac_rx_total_mcst",
 147        "mac_rx_runts",
 148        "mac_rx_ctl_packets",
 149        "mac_rx_fifo_err",
 150        "mac_rx_dma_drop",
 151        "mac_rx_fcs_err",
 152
 153        "link_state_changes",
 154};
 155
 156/* statistics of VF */
 157static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
 158        "rx_packets",
 159        "tx_packets",
 160        "rx_bytes",
 161        "tx_bytes",
 162        "rx_errors",
 163        "tx_errors",
 164        "rx_dropped",
 165        "tx_dropped",
 166        "rx_mcast",
 167        "tx_mcast",
 168        "rx_bcast",
 169        "tx_bcast",
 170        "link_state_changes",
 171};
 172
 173/* statistics of host tx queue */
 174static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
 175        "packets",
 176        "bytes",
 177        "dropped",
 178        "iq_busy",
 179        "sgentry_sent",
 180
 181        "fw_instr_posted",
 182        "fw_instr_processed",
 183        "fw_instr_dropped",
 184        "fw_bytes_sent",
 185
 186        "tso",
 187        "vxlan",
 188        "txq_restart",
 189};
 190
 191/* statistics of host rx queue */
 192static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
 193        "packets",
 194        "bytes",
 195        "dropped",
 196        "dropped_nomem",
 197        "dropped_toomany",
 198        "fw_dropped",
 199        "fw_pkts_received",
 200        "fw_bytes_received",
 201        "fw_dropped_nodispatch",
 202
 203        "vxlan",
 204        "buffer_alloc_failure",
 205};
 206
 207/* LiquidIO driver private flags */
 208static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
 209};
 210
 211#define OCTNIC_NCMD_AUTONEG_ON  0x1
 212#define OCTNIC_NCMD_PHY_ON      0x2
 213
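/* ethtool get_link_ksettings handler: report the port type, the supported
 * and advertised link modes, and the current speed/duplex based on the
 * link info last reported by the firmware.
 */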
 214static int lio_get_link_ksettings(struct net_device *netdev,
 215                                  struct ethtool_link_ksettings *ecmd)
 216{
 217        struct lio *lio = GET_LIO(netdev);
 218        struct octeon_device *oct = lio->oct_dev;
 219        struct oct_link_info *linfo;
 220
 221        linfo = &lio->linfo;
 222
 223        ethtool_link_ksettings_zero_link_mode(ecmd, supported);
 224        ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
 225
 226        switch (linfo->link.s.phy_type) {
 227        case LIO_PHY_PORT_TP:
 228                ecmd->base.port = PORT_TP;
 229                ecmd->base.autoneg = AUTONEG_DISABLE;
 230                ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
 231                ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
 232                ethtool_link_ksettings_add_link_mode(ecmd, supported,
 233                                                     10000baseT_Full);
 234
 235                ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
 236                ethtool_link_ksettings_add_link_mode(ecmd, advertising,
 237                                                     10000baseT_Full);
 238
 239                break;
 240
 241        case LIO_PHY_PORT_FIBRE:
 242                if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
 243                    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
 244                    linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
 245                    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
 246                        dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
 247                        ecmd->base.transceiver = XCVR_EXTERNAL;
 248                } else {
 249                        dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
 250                                linfo->link.s.if_mode);
 251                }
 252
 253                ecmd->base.port = PORT_FIBRE;
 254                ecmd->base.autoneg = AUTONEG_DISABLE;
 255                ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
 256
 257                ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
 258                ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
 259                if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
 260                    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
 261                        if (OCTEON_CN23XX_PF(oct)) {
 262                                ethtool_link_ksettings_add_link_mode
 263                                        (ecmd, supported, 25000baseSR_Full);
 264                                ethtool_link_ksettings_add_link_mode
 265                                        (ecmd, supported, 25000baseKR_Full);
 266                                ethtool_link_ksettings_add_link_mode
 267                                        (ecmd, supported, 25000baseCR_Full);
 268
 269                                if (oct->no_speed_setting == 0)  {
 270                                        ethtool_link_ksettings_add_link_mode
 271                                                (ecmd, supported,
 272                                                 10000baseSR_Full);
 273                                        ethtool_link_ksettings_add_link_mode
 274                                                (ecmd, supported,
 275                                                 10000baseKR_Full);
 276                                        ethtool_link_ksettings_add_link_mode
 277                                                (ecmd, supported,
 278                                                 10000baseCR_Full);
 279                                }
 280
 281                                if (oct->no_speed_setting == 0) {
 282                                        liquidio_get_speed(lio);
 283                                        liquidio_get_fec(lio);
 284                                } else {
 285                                        oct->speed_setting = 25;
 286                                }
 287
 288                                if (oct->speed_setting == 10) {
 289                                        ethtool_link_ksettings_add_link_mode
 290                                                (ecmd, advertising,
 291                                                 10000baseSR_Full);
 292                                        ethtool_link_ksettings_add_link_mode
 293                                                (ecmd, advertising,
 294                                                 10000baseKR_Full);
 295                                        ethtool_link_ksettings_add_link_mode
 296                                                (ecmd, advertising,
 297                                                 10000baseCR_Full);
 298                                }
 299                                if (oct->speed_setting == 25) {
 300                                        ethtool_link_ksettings_add_link_mode
 301                                                (ecmd, advertising,
 302                                                 25000baseSR_Full);
 303                                        ethtool_link_ksettings_add_link_mode
 304                                                (ecmd, advertising,
 305                                                 25000baseKR_Full);
 306                                        ethtool_link_ksettings_add_link_mode
 307                                                (ecmd, advertising,
 308                                                 25000baseCR_Full);
 309                                }
 310
 311                                if (oct->no_speed_setting)
 312                                        break;
 313
 314                                ethtool_link_ksettings_add_link_mode
 315                                        (ecmd, supported, FEC_RS);
 316                                ethtool_link_ksettings_add_link_mode
 317                                        (ecmd, supported, FEC_NONE);
 318                                        /*FEC_OFF*/
 319                                if (oct->props[lio->ifidx].fec == 1) {
 320                                        /* ETHTOOL_FEC_RS */
 321                                        ethtool_link_ksettings_add_link_mode
 322                                                (ecmd, advertising, FEC_RS);
 323                                } else {
 324                                        /* ETHTOOL_FEC_OFF */
 325                                        ethtool_link_ksettings_add_link_mode
 326                                                (ecmd, advertising, FEC_NONE);
 327                                }
 328                        } else { /* VF */
 329                                if (linfo->link.s.speed == 10000) {
 330                                        ethtool_link_ksettings_add_link_mode
 331                                                (ecmd, supported,
 332                                                 10000baseSR_Full);
 333                                        ethtool_link_ksettings_add_link_mode
 334                                                (ecmd, supported,
 335                                                 10000baseKR_Full);
 336                                        ethtool_link_ksettings_add_link_mode
 337                                                (ecmd, supported,
 338                                                 10000baseCR_Full);
 339
 340                                        ethtool_link_ksettings_add_link_mode
 341                                                (ecmd, advertising,
 342                                                 10000baseSR_Full);
 343                                        ethtool_link_ksettings_add_link_mode
 344                                                (ecmd, advertising,
 345                                                 10000baseKR_Full);
 346                                        ethtool_link_ksettings_add_link_mode
 347                                                (ecmd, advertising,
 348                                                 10000baseCR_Full);
 349                                }
 350
 351                                if (linfo->link.s.speed == 25000) {
 352                                        ethtool_link_ksettings_add_link_mode
 353                                                (ecmd, supported,
 354                                                 25000baseSR_Full);
 355                                        ethtool_link_ksettings_add_link_mode
 356                                                (ecmd, supported,
 357                                                 25000baseKR_Full);
 358                                        ethtool_link_ksettings_add_link_mode
 359                                                (ecmd, supported,
 360                                                 25000baseCR_Full);
 361
 362                                        ethtool_link_ksettings_add_link_mode
 363                                                (ecmd, advertising,
 364                                                 25000baseSR_Full);
 365                                        ethtool_link_ksettings_add_link_mode
 366                                                (ecmd, advertising,
 367                                                 25000baseKR_Full);
 368                                        ethtool_link_ksettings_add_link_mode
 369                                                (ecmd, advertising,
 370                                                 25000baseCR_Full);
 371                                }
 372                        }
 373                } else {
 374                        ethtool_link_ksettings_add_link_mode(ecmd, supported,
 375                                                             10000baseT_Full);
 376                        ethtool_link_ksettings_add_link_mode(ecmd, advertising,
 377                                                             10000baseT_Full);
 378                }
 379                break;
 380        }
 381
 382        if (linfo->link.s.link_up) {
 383                ecmd->base.speed = linfo->link.s.speed;
 384                ecmd->base.duplex = linfo->link.s.duplex;
 385        } else {
 386                ecmd->base.speed = SPEED_UNKNOWN;
 387                ecmd->base.duplex = DUPLEX_UNKNOWN;
 388        }
 389
 390        return 0;
 391}
 392
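/* ethtool set_link_ksettings handler: only the 25G CN23XX NICs (CN2350/
 * CN2360 25GB subsystems) may switch between 10G and 25G; autoneg and
 * duplex changes are rejected.
 */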
 393static int lio_set_link_ksettings(struct net_device *netdev,
 394                                  const struct ethtool_link_ksettings *ecmd)
 395{
 396        const int speed = ecmd->base.speed;
 397        struct lio *lio = GET_LIO(netdev);
 398        struct oct_link_info *linfo;
 399        struct octeon_device *oct;
 400
 401        oct = lio->oct_dev;
 402
 403        linfo = &lio->linfo;
 404
 405        if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
 406              oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
 407                return -EOPNOTSUPP;
 408
 409        if (oct->no_speed_setting) {
 410                dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
 411                        __func__);
 412                return -EOPNOTSUPP;
 413        }
 414
 415        if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
 416             ecmd->base.duplex != linfo->link.s.duplex) ||
 417             ecmd->base.autoneg != AUTONEG_DISABLE ||
 418            (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
 419             ecmd->base.speed != SPEED_UNKNOWN))
 420                return -EOPNOTSUPP;
 421
 422        if ((oct->speed_boot == speed / 1000) &&
 423            oct->speed_boot == oct->speed_setting)
 424                return 0;
 425
 426        liquidio_set_speed(lio, speed / 1000);
 427
 428        dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
 429                oct->speed_setting);
 430
 431        return 0;
 432}
 433
 434static void
 435lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 436{
 437        struct lio *lio;
 438        struct octeon_device *oct;
 439
 440        lio = GET_LIO(netdev);
 441        oct = lio->oct_dev;
 442
 443        memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
 444        strcpy(drvinfo->driver, "liquidio");
 445        strcpy(drvinfo->version, LIQUIDIO_VERSION);
 446        strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
 447                ETHTOOL_FWVERS_LEN);
 448        strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
 449}
 450
 451static void
 452lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 453{
 454        struct octeon_device *oct;
 455        struct lio *lio;
 456
 457        lio = GET_LIO(netdev);
 458        oct = lio->oct_dev;
 459
 460        memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
 461        strcpy(drvinfo->driver, "liquidio_vf");
 462        strcpy(drvinfo->version, LIQUIDIO_VERSION);
 463        strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
 464                ETHTOOL_FWVERS_LEN);
 465        strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
 466}
 467
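/* Send OCTNET_CMD_QUEUE_COUNT_CTL to the firmware so that its
 * per-interface queue count matches the host's.
 */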
 468static int
 469lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
 470{
 471        struct lio *lio = GET_LIO(netdev);
 472        struct octeon_device *oct = lio->oct_dev;
 473        struct octnic_ctrl_pkt nctrl;
 474        int ret = 0;
 475
 476        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 477
 478        nctrl.ncmd.u64 = 0;
 479        nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
 480        nctrl.ncmd.s.param1 = num_queues;
 481        nctrl.ncmd.s.param2 = num_queues;
 482        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 483        nctrl.netpndev = (u64)netdev;
 484        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 485
 486        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 487        if (ret) {
 488                dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
 489                        ret);
 490                return -1;
 491        }
 492
 493        return 0;
 494}
 495
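/* ethtool get_channels handler: CN6XXX devices report separate rx/tx
 * queue limits, while CN23XX PF/VF devices report combined queues only.
 */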
 496static void
 497lio_ethtool_get_channels(struct net_device *dev,
 498                         struct ethtool_channels *channel)
 499{
 500        struct lio *lio = GET_LIO(dev);
 501        struct octeon_device *oct = lio->oct_dev;
 502        u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
 503        u32 combined_count = 0, max_combined = 0;
 504
 505        if (OCTEON_CN6XXX(oct)) {
 506                struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
 507
 508                max_rx = CFG_GET_OQ_MAX_Q(conf6x);
 509                max_tx = CFG_GET_IQ_MAX_Q(conf6x);
 510                rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
 511                tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
 512        } else if (OCTEON_CN23XX_PF(oct)) {
 513                if (oct->sriov_info.sriov_enabled) {
 514                        max_combined = lio->linfo.num_txpciq;
 515                } else {
 516                        struct octeon_config *conf23_pf =
 517                                CHIP_CONF(oct, cn23xx_pf);
 518
 519                        max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
 520                }
 521                combined_count = oct->num_iqs;
 522        } else if (OCTEON_CN23XX_VF(oct)) {
 523                u64 reg_val = 0ULL;
 524                u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
 525
 526                reg_val = octeon_read_csr64(oct, ctrl);
 527                reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
 528                max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
 529                combined_count = oct->num_iqs;
 530        }
 531
 532        channel->max_rx = max_rx;
 533        channel->max_tx = max_tx;
 534        channel->max_combined = max_combined;
 535        channel->rx_count = rx_count;
 536        channel->tx_count = tx_count;
 537        channel->combined_count = combined_count;
 538}
 539
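/* Free the MSI-X vectors currently in use and re-allocate them for the
 * requested number of I/O queues; device interrupts are disabled while
 * the vectors are being rebuilt and re-enabled afterwards.
 */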
 540static int
 541lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
 542{
 543        struct msix_entry *msix_entries;
 544        int num_msix_irqs = 0;
 545        int i;
 546
 547        if (!oct->msix_on)
 548                return 0;
 549
 550        /* Disable the input and output queues now. No more packets will
 551         * arrive from Octeon.
 552         */
 553        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
 554
 555        if (oct->msix_on) {
 556                if (OCTEON_CN23XX_PF(oct))
 557                        num_msix_irqs = oct->num_msix_irqs - 1;
 558                else if (OCTEON_CN23XX_VF(oct))
 559                        num_msix_irqs = oct->num_msix_irqs;
 560
 561                msix_entries = (struct msix_entry *)oct->msix_entries;
 562                for (i = 0; i < num_msix_irqs; i++) {
 563                        if (oct->ioq_vector[i].vector) {
 564                                /* clear the affinity_cpumask */
 565                                irq_set_affinity_hint(msix_entries[i].vector,
 566                                                      NULL);
 567                                free_irq(msix_entries[i].vector,
 568                                         &oct->ioq_vector[i]);
 569                                oct->ioq_vector[i].vector = 0;
 570                        }
 571                }
 572
 573                /* non-iov vector's argument is oct struct */
 574                if (OCTEON_CN23XX_PF(oct))
 575                        free_irq(msix_entries[i].vector, oct);
 576
 577                pci_disable_msix(oct->pci_dev);
 578                kfree(oct->msix_entries);
 579                oct->msix_entries = NULL;
 580        }
 581
 582        kfree(oct->irq_name_storage);
 583        oct->irq_name_storage = NULL;
 584
 585        if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
 586                dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
 587                return -1;
 588        }
 589
 590        if (octeon_setup_interrupt(oct, num_ioqs)) {
 591                dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
 592                return -1;
 593        }
 594
 595        /* Enable Octeon device interrupts */
 596        oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
 597
 598        return 0;
 599}
 600
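/* ethtool set_channels handler: validate the requested combined queue
 * count against the device limits, stop the interface if it is running,
 * and reset the queues to the new count.
 */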
 601static int
 602lio_ethtool_set_channels(struct net_device *dev,
 603                         struct ethtool_channels *channel)
 604{
 605        u32 combined_count, max_combined;
 606        struct lio *lio = GET_LIO(dev);
 607        struct octeon_device *oct = lio->oct_dev;
 608        int stopped = 0;
 609
 610        if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
 611                dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
 612                return -EINVAL;
 613        }
 614
 615        if (!channel->combined_count || channel->other_count ||
 616            channel->rx_count || channel->tx_count)
 617                return -EINVAL;
 618
 619        combined_count = channel->combined_count;
 620
 621        if (OCTEON_CN23XX_PF(oct)) {
 622                if (oct->sriov_info.sriov_enabled) {
 623                        max_combined = lio->linfo.num_txpciq;
 624                } else {
 625                        struct octeon_config *conf23_pf =
 626                                CHIP_CONF(oct,
 627                                          cn23xx_pf);
 628
 629                        max_combined =
 630                                CFG_GET_IQ_MAX_Q(conf23_pf);
 631                }
 632        } else if (OCTEON_CN23XX_VF(oct)) {
 633                u64 reg_val = 0ULL;
 634                u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
 635
 636                reg_val = octeon_read_csr64(oct, ctrl);
 637                reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
 638                max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
 639        } else {
 640                return -EINVAL;
 641        }
 642
 643        if (combined_count > max_combined || combined_count < 1)
 644                return -EINVAL;
 645
 646        if (combined_count == oct->num_iqs)
 647                return 0;
 648
 649        ifstate_set(lio, LIO_IFSTATE_RESETTING);
 650
 651        if (netif_running(dev)) {
 652                dev->netdev_ops->ndo_stop(dev);
 653                stopped = 1;
 654        }
 655
 656        if (lio_reset_queues(dev, combined_count))
 657                return -EINVAL;
 658
 659        if (stopped)
 660                dev->netdev_ops->ndo_open(dev);
 661
 662        ifstate_reset(lio, LIO_IFSTATE_RESETTING);
 663
 664        return 0;
 665}
 666
 667static int lio_get_eeprom_len(struct net_device *netdev)
 668{
 669        u8 buf[192];
 670        struct lio *lio = GET_LIO(netdev);
 671        struct octeon_device *oct_dev = lio->oct_dev;
 672        struct octeon_board_info *board_info;
 673        int len;
 674
 675        board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
 676        len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
 677                      board_info->name, board_info->serial_number,
 678                      board_info->major, board_info->minor);
 679
 680        return len;
 681}
 682
 683static int
 684lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 685               u8 *bytes)
 686{
 687        struct lio *lio = GET_LIO(netdev);
 688        struct octeon_device *oct_dev = lio->oct_dev;
 689        struct octeon_board_info *board_info;
 690
 691        if (eeprom->offset)
 692                return -EINVAL;
 693
 694        eeprom->magic = oct_dev->pci_dev->vendor;
 695        board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
 696        sprintf((char *)bytes,
 697                "boardname:%s serialnum:%s maj:%lld min:%lld\n",
 698                board_info->name, board_info->serial_number,
 699                board_info->major, board_info->minor);
 700
 701        return 0;
 702}
 703
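/* Issue OCTNET_CMD_GPIO_ACCESS to write @val to the GPIO register at
 * @addr; used by lio_set_phys_id() to drive the PHY LED GPIOs on CN66XX.
 */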
 704static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
 705{
 706        struct lio *lio = GET_LIO(netdev);
 707        struct octeon_device *oct = lio->oct_dev;
 708        struct octnic_ctrl_pkt nctrl;
 709        int ret = 0;
 710
 711        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 712
 713        nctrl.ncmd.u64 = 0;
 714        nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
 715        nctrl.ncmd.s.param1 = addr;
 716        nctrl.ncmd.s.param2 = val;
 717        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 718        nctrl.netpndev = (u64)netdev;
 719        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 720
 721        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 722        if (ret) {
 723                dev_err(&oct->pci_dev->dev,
 724                        "Failed to configure gpio value, ret=%d\n", ret);
 725                return -EINVAL;
 726        }
 727
 728        return 0;
 729}
 730
 731static int octnet_id_active(struct net_device *netdev, int val)
 732{
 733        struct lio *lio = GET_LIO(netdev);
 734        struct octeon_device *oct = lio->oct_dev;
 735        struct octnic_ctrl_pkt nctrl;
 736        int ret = 0;
 737
 738        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 739
 740        nctrl.ncmd.u64 = 0;
 741        nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
 742        nctrl.ncmd.s.param1 = val;
 743        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 744        nctrl.netpndev = (u64)netdev;
 745        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 746
 747        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  748        if (ret) {
  749                dev_err(&oct->pci_dev->dev,
  750                        "Failed to configure ID active value, ret=%d\n", ret);
 751                return -EINVAL;
 752        }
 753
 754        return 0;
 755}
 756
  757/* This routine provides PHY register access
  758 * via MDIO clause 45.
  759 */
 760static int
 761octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
 762{
 763        struct octeon_device *oct_dev = lio->oct_dev;
 764        struct octeon_soft_command *sc;
 765        struct oct_mdio_cmd_resp *mdio_cmd_rsp;
 766        struct oct_mdio_cmd *mdio_cmd;
 767        int retval = 0;
 768
 769        sc = (struct octeon_soft_command *)
 770                octeon_alloc_soft_command(oct_dev,
 771                                          sizeof(struct oct_mdio_cmd),
 772                                          sizeof(struct oct_mdio_cmd_resp), 0);
 773
 774        if (!sc)
 775                return -ENOMEM;
 776
 777        mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
 778        mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
 779
 780        mdio_cmd->op = op;
 781        mdio_cmd->mdio_addr = loc;
 782        if (op)
 783                mdio_cmd->value1 = *value;
 784        octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
 785
 786        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
 787
 788        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
 789                                    0, 0, 0);
 790
 791        init_completion(&sc->complete);
 792        sc->sc_status = OCTEON_REQUEST_PENDING;
 793
 794        retval = octeon_send_soft_command(oct_dev, sc);
 795        if (retval == IQ_SEND_FAILED) {
 796                dev_err(&oct_dev->pci_dev->dev,
 797                        "octnet_mdio45_access instruction failed status: %x\n",
 798                        retval);
 799                octeon_free_soft_command(oct_dev, sc);
 800                return -EBUSY;
 801        } else {
  802                /* Sleep on a wait queue until the condition flag indicates
  803                 * that the response has arrived.
  804                 */
 805                retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
 806                if (retval)
 807                        return retval;
 808
 809                retval = mdio_cmd_rsp->status;
 810                if (retval) {
 811                        dev_err(&oct_dev->pci_dev->dev,
 812                                "octnet mdio45 access failed: %x\n", retval);
 813                        WRITE_ONCE(sc->caller_is_done, true);
 814                        return -EBUSY;
 815                }
 816
 817                octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
 818                                    sizeof(struct oct_mdio_cmd) / 8);
 819
 820                if (!op)
 821                        *value = mdio_cmd_rsp->resp.value1;
 822
 823                WRITE_ONCE(sc->caller_is_done, true);
 824        }
 825
 826        return retval;
 827}
 828
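/* ethtool set_phys_id handler: blink the port identification LED. The
 * mechanism depends on the chip: GPIO toggling on CN66XX, MDIO LED/beacon
 * registers on CN68XX, and the ID_ACTIVE firmware command on CN23XX PF.
 */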
 829static int lio_set_phys_id(struct net_device *netdev,
 830                           enum ethtool_phys_id_state state)
 831{
 832        struct lio *lio = GET_LIO(netdev);
 833        struct octeon_device *oct = lio->oct_dev;
 834        struct oct_link_info *linfo;
 835        int value, ret;
 836        u32 cur_ver;
 837
 838        linfo = &lio->linfo;
 839        cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
 840                             oct->fw_info.ver.min,
 841                             oct->fw_info.ver.rev);
 842
 843        switch (state) {
 844        case ETHTOOL_ID_ACTIVE:
 845                if (oct->chip_id == OCTEON_CN66XX) {
 846                        octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
 847                                           VITESSE_PHY_GPIO_DRIVEON);
 848                        return 2;
 849
 850                } else if (oct->chip_id == OCTEON_CN68XX) {
 851                        /* Save the current LED settings */
 852                        ret = octnet_mdio45_access(lio, 0,
 853                                                   LIO68XX_LED_BEACON_ADDR,
 854                                                   &lio->phy_beacon_val);
 855                        if (ret)
 856                                return ret;
 857
 858                        ret = octnet_mdio45_access(lio, 0,
 859                                                   LIO68XX_LED_CTRL_ADDR,
 860                                                   &lio->led_ctrl_val);
 861                        if (ret)
 862                                return ret;
 863
 864                        /* Configure Beacon values */
 865                        value = LIO68XX_LED_BEACON_CFGON;
 866                        ret = octnet_mdio45_access(lio, 1,
 867                                                   LIO68XX_LED_BEACON_ADDR,
 868                                                   &value);
 869                        if (ret)
 870                                return ret;
 871
 872                        value = LIO68XX_LED_CTRL_CFGON;
 873                        ret = octnet_mdio45_access(lio, 1,
 874                                                   LIO68XX_LED_CTRL_ADDR,
 875                                                   &value);
 876                        if (ret)
 877                                return ret;
 878                } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
 879                        octnet_id_active(netdev, LED_IDENTIFICATION_ON);
 880                        if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
 881                            cur_ver > OCT_FW_VER(1, 7, 2))
 882                                return 2;
 883                        else
 884                                return 0;
 885                } else {
 886                        return -EINVAL;
 887                }
 888                break;
 889
 890        case ETHTOOL_ID_ON:
 891                if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
 892                    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
 893                    cur_ver > OCT_FW_VER(1, 7, 2))
 894                        octnet_id_active(netdev, LED_IDENTIFICATION_ON);
 895                else if (oct->chip_id == OCTEON_CN66XX)
 896                        octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
 897                                           VITESSE_PHY_GPIO_HIGH);
 898                else
 899                        return -EINVAL;
 900
 901                break;
 902
 903        case ETHTOOL_ID_OFF:
 904                if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
 905                    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
 906                    cur_ver > OCT_FW_VER(1, 7, 2))
 907                        octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
 908                else if (oct->chip_id == OCTEON_CN66XX)
 909                        octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
 910                                           VITESSE_PHY_GPIO_LOW);
 911                else
 912                        return -EINVAL;
 913
 914                break;
 915
 916        case ETHTOOL_ID_INACTIVE:
 917                if (oct->chip_id == OCTEON_CN66XX) {
 918                        octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
 919                                           VITESSE_PHY_GPIO_DRIVEOFF);
 920                } else if (oct->chip_id == OCTEON_CN68XX) {
 921                        /* Restore LED settings */
 922                        ret = octnet_mdio45_access(lio, 1,
 923                                                   LIO68XX_LED_CTRL_ADDR,
 924                                                   &lio->led_ctrl_val);
 925                        if (ret)
 926                                return ret;
 927
 928                        ret = octnet_mdio45_access(lio, 1,
 929                                                   LIO68XX_LED_BEACON_ADDR,
 930                                                   &lio->phy_beacon_val);
 931                        if (ret)
 932                                return ret;
 933                } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
 934                        octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
 935
 936                        return 0;
 937                } else {
 938                        return -EINVAL;
 939                }
 940                break;
 941
 942        default:
 943                return -EINVAL;
 944        }
 945
 946        return 0;
 947}
 948
 949static void
 950lio_ethtool_get_ringparam(struct net_device *netdev,
 951                          struct ethtool_ringparam *ering)
 952{
 953        struct lio *lio = GET_LIO(netdev);
 954        struct octeon_device *oct = lio->oct_dev;
 955        u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
 956            rx_pending = 0;
 957
 958        if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
 959                return;
 960
 961        if (OCTEON_CN6XXX(oct)) {
 962                struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
 963
 964                tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
 965                rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
 966                rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
 967                tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
 968        } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
 969                tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
 970                rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
 971                rx_pending = oct->droq[0]->max_count;
 972                tx_pending = oct->instr_queue[0]->max_count;
 973        }
 974
 975        ering->tx_pending = tx_pending;
 976        ering->tx_max_pending = tx_max_pending;
 977        ering->rx_pending = rx_pending;
 978        ering->rx_max_pending = rx_max_pending;
 979        ering->rx_mini_pending = 0;
 980        ering->rx_jumbo_pending = 0;
 981        ering->rx_mini_max_pending = 0;
 982        ering->rx_jumbo_max_pending = 0;
 983}
 984
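/* Tell the CN23XX PF firmware about the new ring count via
 * OPCODE_NIC_QCOUNT_UPDATE and refresh the local queue configuration
 * from its response.
 */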
 985static int lio_23xx_reconfigure_queue_count(struct lio *lio)
 986{
 987        struct octeon_device *oct = lio->oct_dev;
 988        u32 resp_size, data_size;
 989        struct liquidio_if_cfg_resp *resp;
 990        struct octeon_soft_command *sc;
 991        union oct_nic_if_cfg if_cfg;
 992        struct lio_version *vdata;
 993        u32 ifidx_or_pfnum;
 994        int retval;
 995        int j;
 996
 997        resp_size = sizeof(struct liquidio_if_cfg_resp);
 998        data_size = sizeof(struct lio_version);
 999        sc = (struct octeon_soft_command *)
1000                octeon_alloc_soft_command(oct, data_size,
1001                                          resp_size, 0);
1002        if (!sc) {
1003                dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
1004                        __func__);
1005                return -1;
1006        }
1007
1008        resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1009        vdata = (struct lio_version *)sc->virtdptr;
1010
1011        vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
1012        vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
1013        vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
1014
1015        ifidx_or_pfnum = oct->pf_num;
1016
1017        if_cfg.u64 = 0;
1018        if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
1019        if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
1020        if_cfg.s.base_queue = oct->sriov_info.pf_srn;
1021        if_cfg.s.gmx_port_id = oct->pf_num;
1022
1023        sc->iq_no = 0;
1024        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1025                                    OPCODE_NIC_QCOUNT_UPDATE, 0,
1026                                    if_cfg.u64, 0);
1027
1028        init_completion(&sc->complete);
1029        sc->sc_status = OCTEON_REQUEST_PENDING;
1030
1031        retval = octeon_send_soft_command(oct, sc);
1032        if (retval == IQ_SEND_FAILED) {
1033                dev_err(&oct->pci_dev->dev,
1034                        "Sending iq/oq config failed status: %x\n",
1035                        retval);
1036                octeon_free_soft_command(oct, sc);
1037                return -EIO;
1038        }
1039
1040        retval = wait_for_sc_completion_timeout(oct, sc, 0);
1041        if (retval)
1042                return retval;
1043
1044        retval = resp->status;
1045        if (retval) {
1046                dev_err(&oct->pci_dev->dev,
1047                        "iq/oq config failed: %x\n", retval);
1048                WRITE_ONCE(sc->caller_is_done, true);
1049                return -1;
1050        }
1051
1052        octeon_swap_8B_data((u64 *)(&resp->cfg_info),
1053                            (sizeof(struct liquidio_if_cfg_info)) >> 3);
1054
1055        lio->ifidx = ifidx_or_pfnum;
1056        lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
1057        lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
1058        for (j = 0; j < lio->linfo.num_rxpciq; j++) {
1059                lio->linfo.rxpciq[j].u64 =
1060                        resp->cfg_info.linfo.rxpciq[j].u64;
1061        }
1062
1063        for (j = 0; j < lio->linfo.num_txpciq; j++) {
1064                lio->linfo.txpciq[j].u64 =
1065                        resp->cfg_info.linfo.txpciq[j].u64;
1066        }
1067
1068        lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1069        lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1070        lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
1071        lio->txq = lio->linfo.txpciq[0].s.q_no;
1072        lio->rxq = lio->linfo.rxpciq[0].s.q_no;
1073
1074        dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
1075                 lio->linfo.num_rxpciq);
1076
1077        WRITE_ONCE(sc->caller_is_done, true);
1078
1079        return 0;
1080}
1081
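/* Tear down and recreate the I/O queues. Used both when the descriptor
 * counts change (set_ringparam) and when the queue count changes
 * (set_channels); in the latter case the IRQs, gather lists and mailbox
 * are rebuilt and the firmware is informed of the new count.
 */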
1082static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
1083{
1084        struct lio *lio = GET_LIO(netdev);
1085        struct octeon_device *oct = lio->oct_dev;
1086        int i, queue_count_update = 0;
1087        struct napi_struct *napi, *n;
1088        int ret;
1089
1090        schedule_timeout_uninterruptible(msecs_to_jiffies(100));
1091
1092        if (wait_for_pending_requests(oct))
1093                dev_err(&oct->pci_dev->dev, "There were pending requests\n");
1094
1095        if (lio_wait_for_instr_fetch(oct))
1096                dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
1097
1098        if (octeon_set_io_queues_off(oct)) {
1099                dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
1100                return -1;
1101        }
1102
1103        /* Disable the input and output queues now. No more packets will
1104         * arrive from Octeon.
1105         */
1106        oct->fn_list.disable_io_queues(oct);
1107        /* Delete NAPI */
1108        list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1109                netif_napi_del(napi);
1110
1111        if (num_qs != oct->num_iqs) {
1112                ret = netif_set_real_num_rx_queues(netdev, num_qs);
1113                if (ret) {
1114                        dev_err(&oct->pci_dev->dev,
1115                                "Setting real number rx failed\n");
1116                        return ret;
1117                }
1118
1119                ret = netif_set_real_num_tx_queues(netdev, num_qs);
1120                if (ret) {
1121                        dev_err(&oct->pci_dev->dev,
1122                                "Setting real number tx failed\n");
1123                        return ret;
1124                }
1125
1126                /* The value of queue_count_update decides whether it is the
1127                 * queue count or the descriptor count that is being
1128                 * re-configured.
1129                 */
1130                queue_count_update = 1;
1131        }
1132
 1133        /* Queues can be re-configured in two scenarios: SRIOV enabled and
 1134         * SRIOV disabled. Steps such as recreating queue zero and resetting
 1135         * the glists and IRQs are required in both cases. When SRIOV is
 1136         * disabled, the octeon device's sriov_info must also be updated.
 1137         */
1138        if (queue_count_update) {
1139                cleanup_rx_oom_poll_fn(netdev);
1140
1141                lio_delete_glists(lio);
1142
 1143                /* Delete the mailbox for a PF with SRIOV disabled, because
 1144                 * sriov_info is about to change.
 1145                 */
1146                if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
1147                        oct->fn_list.free_mbox(oct);
1148        }
1149
1150        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1151                if (!(oct->io_qmask.oq & BIT_ULL(i)))
1152                        continue;
1153                octeon_delete_droq(oct, i);
1154        }
1155
1156        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1157                if (!(oct->io_qmask.iq & BIT_ULL(i)))
1158                        continue;
1159                octeon_delete_instr_queue(oct, i);
1160        }
1161
1162        if (queue_count_update) {
1163                /* For PF re-configure sriov related information */
1164                if ((OCTEON_CN23XX_PF(oct)) &&
1165                    !oct->sriov_info.sriov_enabled) {
1166                        oct->sriov_info.num_pf_rings = num_qs;
1167                        if (cn23xx_sriov_config(oct)) {
1168                                dev_err(&oct->pci_dev->dev,
1169                                        "Queue reset aborted: SRIOV config failed\n");
1170                                return -1;
1171                        }
1172
1173                        num_qs = oct->sriov_info.num_pf_rings;
1174                }
1175        }
1176
1177        if (oct->fn_list.setup_device_regs(oct)) {
1178                dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
1179                return -1;
1180        }
1181
1182        /* The following are needed in case of queue count re-configuration and
1183         * not for descriptor count re-configuration.
1184         */
1185        if (queue_count_update) {
1186                if (octeon_setup_instr_queues(oct))
1187                        return -1;
1188
1189                if (octeon_setup_output_queues(oct))
1190                        return -1;
1191
1192                /* Recreating mbox for PF that is SRIOV disabled */
1193                if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
1194                        if (oct->fn_list.setup_mbox(oct)) {
1195                                dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
1196                                return -1;
1197                        }
1198                }
1199
1200                /* Deleting and recreating IRQs whether the interface is SRIOV
1201                 * enabled or disabled.
1202                 */
1203                if (lio_irq_reallocate_irqs(oct, num_qs)) {
1204                        dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
1205                        return -1;
1206                }
1207
1208                /* Enable the input and output queues for this Octeon device */
1209                if (oct->fn_list.enable_io_queues(oct)) {
1210                        dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
1211                        return -1;
1212                }
1213
1214                for (i = 0; i < oct->num_oqs; i++)
1215                        writel(oct->droq[i]->max_count,
1216                               oct->droq[i]->pkts_credit_reg);
1217
 1218                /* Inform the firmware of the new queue count. This is
 1219                 * required so that the firmware can allocate more queues
 1220                 * than were configured at load time.
 1221                 */
1222                if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
1223                        if (lio_23xx_reconfigure_queue_count(lio))
1224                                return -1;
1225                }
1226        }
1227
1228        /* Once firmware is aware of the new value, queues can be recreated */
1229        if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
1230                dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
1231                return -1;
1232        }
1233
1234        if (queue_count_update) {
1235                if (lio_setup_glists(oct, lio, num_qs)) {
1236                        dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
1237                        return -1;
1238                }
1239
1240                if (setup_rx_oom_poll_fn(netdev)) {
1241                        dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n");
1242                        return 1;
1243                }
1244
1245                /* Send firmware the information about new number of queues
1246                 * if the interface is a VF or a PF that is SRIOV enabled.
1247                 */
1248                if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
1249                        if (lio_send_queue_count_update(netdev, num_qs))
1250                                return -1;
1251        }
1252
1253        return 0;
1254}
1255
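/* ethtool set_ringparam handler (CN23XX PF/VF only): clamp the requested
 * descriptor counts to the hardware limits and reset the queues with the
 * new values, restoring the old counts on failure.
 */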
1256static int lio_ethtool_set_ringparam(struct net_device *netdev,
1257                                     struct ethtool_ringparam *ering)
1258{
1259        u32 rx_count, tx_count, rx_count_old, tx_count_old;
1260        struct lio *lio = GET_LIO(netdev);
1261        struct octeon_device *oct = lio->oct_dev;
1262        int stopped = 0;
1263
1264        if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
1265                return -EINVAL;
1266
1267        if (ering->rx_mini_pending || ering->rx_jumbo_pending)
1268                return -EINVAL;
1269
1270        rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
1271                           CN23XX_MAX_OQ_DESCRIPTORS);
1272        tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
1273                           CN23XX_MAX_IQ_DESCRIPTORS);
1274
1275        rx_count_old = oct->droq[0]->max_count;
1276        tx_count_old = oct->instr_queue[0]->max_count;
1277
1278        if (rx_count == rx_count_old && tx_count == tx_count_old)
1279                return 0;
1280
1281        ifstate_set(lio, LIO_IFSTATE_RESETTING);
1282
1283        if (netif_running(netdev)) {
1284                netdev->netdev_ops->ndo_stop(netdev);
1285                stopped = 1;
1286        }
1287
1288        /* Change RX/TX DESCS  count */
1289        if (tx_count != tx_count_old)
1290                CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1291                                            tx_count);
1292        if (rx_count != rx_count_old)
1293                CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1294                                            rx_count);
1295
1296        if (lio_reset_queues(netdev, oct->num_iqs))
1297                goto err_lio_reset_queues;
1298
1299        if (stopped)
1300                netdev->netdev_ops->ndo_open(netdev);
1301
1302        ifstate_reset(lio, LIO_IFSTATE_RESETTING);
1303
1304        return 0;
1305
1306err_lio_reset_queues:
1307        if (tx_count != tx_count_old)
1308                CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1309                                            tx_count_old);
1310        if (rx_count != rx_count_old)
1311                CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1312                                            rx_count_old);
1313        return -EINVAL;
1314}
1315
1316static u32 lio_get_msglevel(struct net_device *netdev)
1317{
1318        struct lio *lio = GET_LIO(netdev);
1319
1320        return lio->msg_enable;
1321}
1322
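/* ethtool set_msglevel handler for the PF: toggling NETIF_MSG_HW also
 * enables or disables verbose console output in the firmware.
 */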
1323static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
1324{
1325        struct lio *lio = GET_LIO(netdev);
1326
1327        if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
1328                if (msglvl & NETIF_MSG_HW)
1329                        liquidio_set_feature(netdev,
1330                                             OCTNET_CMD_VERBOSE_ENABLE, 0);
1331                else
1332                        liquidio_set_feature(netdev,
1333                                             OCTNET_CMD_VERBOSE_DISABLE, 0);
1334        }
1335
1336        lio->msg_enable = msglvl;
1337}
1338
1339static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
1340{
1341        struct lio *lio = GET_LIO(netdev);
1342
1343        lio->msg_enable = msglvl;
1344}
1345
1346static void
1347lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
1348{
 1349        /* Note: these drivers do not support autonegotiation,
 1350         * so just report pause frame support.
 1351         */
1352        struct lio *lio = GET_LIO(netdev);
1353        struct octeon_device *oct = lio->oct_dev;
1354
1355        pause->autoneg = 0;
1356
1357        pause->tx_pause = oct->tx_pause;
1358        pause->rx_pause = oct->rx_pause;
1359}
1360
1361static int
1362lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
1363{
 1364        /* Note: these drivers do not support
 1365         * autonegotiation.
 1366         */
1367        struct lio *lio = GET_LIO(netdev);
1368        struct octeon_device *oct = lio->oct_dev;
1369        struct octnic_ctrl_pkt nctrl;
1370        struct oct_link_info *linfo = &lio->linfo;
1371
1372        int ret = 0;
1373
1374        if (oct->chip_id != OCTEON_CN23XX_PF_VID)
1375                return -EINVAL;
1376
1377        if (linfo->link.s.duplex == 0) {
1378                /*no flow control for half duplex*/
1379                if (pause->rx_pause || pause->tx_pause)
1380                        return -EINVAL;
1381        }
1382
1383        /*do not support autoneg of link flow control*/
1384        if (pause->autoneg == AUTONEG_ENABLE)
1385                return -EINVAL;
1386
1387        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1388
1389        nctrl.ncmd.u64 = 0;
1390        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
1391        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1392        nctrl.netpndev = (u64)netdev;
1393        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1394
1395        if (pause->rx_pause) {
1396                /*enable rx pause*/
1397                nctrl.ncmd.s.param1 = 1;
1398        } else {
1399                /*disable rx pause*/
1400                nctrl.ncmd.s.param1 = 0;
1401        }
1402
1403        if (pause->tx_pause) {
1404                /*enable tx pause*/
1405                nctrl.ncmd.s.param2 = 1;
1406        } else {
1407                /*disable tx pause*/
1408                nctrl.ncmd.s.param2 = 0;
1409        }
1410
1411        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1412        if (ret) {
1413                dev_err(&oct->pci_dev->dev,
1414                        "Failed to set pause parameter, ret=%d\n", ret);
1415                return -EINVAL;
1416        }
1417
1418        oct->rx_pause = pause->rx_pause;
1419        oct->tx_pause = pause->tx_pause;
1420
1421        return 0;
1422}
1423
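/* Fill the ethtool statistics buffer (e.g. for "ethtool -S <dev>").
 * Values are written in the same order as the corresponding string tables:
 * the per-port oct_stats_strings entries first, then a per-IQ block of
 * oct_iq_stats_strings for each active instruction queue, then a per-OQ
 * block of oct_droq_stats_strings for each active DROQ.
 */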
1424static void
1425lio_get_ethtool_stats(struct net_device *netdev,
1426                      struct ethtool_stats *stats  __attribute__((unused)),
1427                      u64 *data)
1428{
1429        struct lio *lio = GET_LIO(netdev);
1430        struct octeon_device *oct_dev = lio->oct_dev;
1431        struct rtnl_link_stats64 lstats;
1432        int i = 0, j;
1433
1434        if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1435                return;
1436
1437        netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
1438        /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
1439        data[i++] = lstats.rx_packets;
1440        /*sum of oct->instr_queue[iq_no]->stats.tx_done */
1441        data[i++] = lstats.tx_packets;
1442        /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
1443        data[i++] = lstats.rx_bytes;
1444        /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1445        data[i++] = lstats.tx_bytes;
1446        data[i++] = lstats.rx_errors +
1447                        oct_dev->link_stats.fromwire.fcs_err +
1448                        oct_dev->link_stats.fromwire.jabber_err +
1449                        oct_dev->link_stats.fromwire.l2_err +
1450                        oct_dev->link_stats.fromwire.frame_err;
1451        data[i++] = lstats.tx_errors;
1452        /*sum of oct->droq[oq_no]->stats->rx_dropped +
1453         *oct->droq[oq_no]->stats->dropped_nodispatch +
1454         *oct->droq[oq_no]->stats->dropped_toomany +
1455         *oct->droq[oq_no]->stats->dropped_nomem
1456         */
1457        data[i++] = lstats.rx_dropped +
1458                        oct_dev->link_stats.fromwire.fifo_err +
1459                        oct_dev->link_stats.fromwire.dmac_drop +
1460                        oct_dev->link_stats.fromwire.red_drops +
1461                        oct_dev->link_stats.fromwire.fw_err_pko +
1462                        oct_dev->link_stats.fromwire.fw_err_link +
1463                        oct_dev->link_stats.fromwire.fw_err_drop;
1464        /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1465        data[i++] = lstats.tx_dropped +
1466                        oct_dev->link_stats.fromhost.max_collision_fail +
1467                        oct_dev->link_stats.fromhost.max_deferral_fail +
1468                        oct_dev->link_stats.fromhost.total_collisions +
1469                        oct_dev->link_stats.fromhost.fw_err_pko +
1470                        oct_dev->link_stats.fromhost.fw_err_link +
1471                        oct_dev->link_stats.fromhost.fw_err_drop +
1472                        oct_dev->link_stats.fromhost.fw_err_pki;
1473
1474        /* firmware tx stats */
1475        /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
1476         *fromhost.fw_total_sent
1477         */
1478        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
1479        /*per_core_stats[i].link_stats[port].fromhost.fw_total_fwd */
1480        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
1481        /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
1482        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
1483        /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
1484        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
1485        /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
1486        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
1487        /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1488         *fw_err_drop
1489         */
1490        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
1491
1492        /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
1493        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
1494        /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1495         *fw_tso_fwd
1496         */
1497        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
1498        /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1499         *fw_err_tso
1500         */
1501        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
1502        /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1503         *fw_tx_vxlan
1504         */
1505        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
1506
1507        /* Multicast packets sent by this port */
1508        data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
1509        data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
1510
1511        /* mac tx statistics */
1512        /*CVMX_BGXX_CMRX_TX_STAT5 */
1513        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
1514        /*CVMX_BGXX_CMRX_TX_STAT4 */
1515        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
1516        /*CVMX_BGXX_CMRX_TX_STAT15 */
1517        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
1518        /*CVMX_BGXX_CMRX_TX_STAT14 */
1519        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
1520        /*CVMX_BGXX_CMRX_TX_STAT17 */
1521        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
1522        /*CVMX_BGXX_CMRX_TX_STAT0 */
1523        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
1524        /*CVMX_BGXX_CMRX_TX_STAT3 */
1525        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
1526        /*CVMX_BGXX_CMRX_TX_STAT2 */
1527        data[i++] =
1528                CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
1529        /*CVMX_BGXX_CMRX_TX_STAT0 */
1530        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
1531        /*CVMX_BGXX_CMRX_TX_STAT1 */
1532        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
1533        /*CVMX_BGXX_CMRX_TX_STAT16 */
1534        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
1535        /*CVMX_BGXX_CMRX_TX_STAT6 */
1536        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
1537
1538        /* RX firmware stats */
1539        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1540         *fw_total_rcvd
1541         */
1542        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
1543        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1544         *fw_total_fwd
1545         */
1546        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
1547        /* Multicast packets received on this port */
1548        data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
1549        data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
1550        /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
1551        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
1552        /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
1553        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
1554        /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
1555        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
1556        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1557         *fw_err_pko
1558         */
1559        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
1560        /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
1561        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
1562        /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1563         *fromwire.fw_err_drop
1564         */
1565        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
1566
1567        /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1568         *fromwire.fw_rx_vxlan
1569         */
1570        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
1571        /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1572         *fromwire.fw_rx_vxlan_err
1573         */
1574        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
1575
1576        /* LRO */
1577        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1578         *fw_lro_pkts
1579         */
1580        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
1581        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1582         *fw_lro_octs
1583         */
1584        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
1585        /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
1586        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
1587        /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
1588        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
1589        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1590         *fw_lro_aborts_port
1591         */
1592        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
1593        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1594         *fw_lro_aborts_seq
1595         */
1596        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
1597        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1598         *fw_lro_aborts_tsval
1599         */
1600        data[i++] =
1601                CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
1602        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1603         *fw_lro_aborts_timer
1604         */
1605        data[i++] =
1606                CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
1607        /* intrmod: packet forward rate */
1608        /*per_core_stats[j].link_stats[i].fromwire.fwd_rate */
1609        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
1610
1611        /* mac: link-level stats */
1612        /*CVMX_BGXX_CMRX_RX_STAT0 */
1613        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
1614        /*CVMX_BGXX_CMRX_RX_STAT1 */
1615        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
1616        /*CVMX_PKI_STATX_STAT5 */
1617        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
1618        /*CVMX_PKI_STATX_STAT5 */
1619        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
1620        /*wqe->word2.err_code or wqe->word2.err_level */
1621        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
1622        /*CVMX_BGXX_CMRX_RX_STAT2 */
1623        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
1624        /*CVMX_BGXX_CMRX_RX_STAT6 */
1625        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
1626        /*CVMX_BGXX_CMRX_RX_STAT4 */
1627        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
1628        /*wqe->word2.err_code or wqe->word2.err_level */
1629        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
1630        /*lio->link_changes*/
1631        data[i++] = CVM_CAST64(lio->link_changes);
1632
1633        for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
1634                if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
1635                        continue;
1636                /*packets to network port*/
1637                /*# of packets tx to network */
1638                data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1639                /*# of bytes tx to network */
1640                data[i++] =
1641                        CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1642                /*# of packets dropped */
1643                data[i++] =
1644                        CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
1645                /*# of tx fails due to queue full */
1646                data[i++] =
1647                        CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
1648                /*XXX gather entries sent */
1649                data[i++] =
1650                        CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
1651
1652                /*instruction to firmware: data and control */
1653                /*# of instructions to the queue */
1654                data[i++] =
1655                        CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
1656                /*# of instructions processed */
1657                data[i++] = CVM_CAST64(
1658                                oct_dev->instr_queue[j]->stats.instr_processed);
1659                /*# of instructions could not be processed */
1660                data[i++] = CVM_CAST64(
1661                                oct_dev->instr_queue[j]->stats.instr_dropped);
1662                /*bytes sent through the queue */
1663                data[i++] =
1664                        CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
1665
1666                /*tso request*/
1667                data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1668                /*vxlan request*/
1669                data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1670                /*txq restart*/
1671                data[i++] =
1672                        CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
1673        }
1674
1675        /* RX */
1676        for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
1677                if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
1678                        continue;
1679
1680                /*packets sent to the TCP/IP network stack */
1681                /*# of packets to network stack */
1682                data[i++] =
1683                        CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
1684                /*# of bytes to network stack */
1685                data[i++] =
1686                        CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
1687                /*# of packets dropped */
1688                data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1689                                       oct_dev->droq[j]->stats.dropped_toomany +
1690                                       oct_dev->droq[j]->stats.rx_dropped);
1691                data[i++] =
1692                        CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1693                data[i++] =
1694                        CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1695                data[i++] =
1696                        CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1697
1698                /*control and data path*/
1699                data[i++] =
1700                        CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1701                data[i++] =
1702                        CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1703                data[i++] =
1704                        CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1705
1706                data[i++] =
1707                        CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1708                data[i++] =
1709                        CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1710        }
1711}
1712
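/* VF counterpart of lio_get_ethtool_stats(): same layout, but using the
 * shorter oct_vf_stats_strings table and mapping the per-queue blocks
 * through lio->linfo.txpciq[]/rxpciq[] instead of the raw queue mask.
 */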
1713static void lio_vf_get_ethtool_stats(struct net_device *netdev,
1714                                     struct ethtool_stats *stats
1715                                     __attribute__((unused)),
1716                                     u64 *data)
1717{
1718        struct rtnl_link_stats64 lstats;
1719        struct lio *lio = GET_LIO(netdev);
1720        struct octeon_device *oct_dev = lio->oct_dev;
1721        int i = 0, j, vj;
1722
1723        if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1724                return;
1725
1726        netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
1727        /* sum of oct->droq[oq_no]->stats->rx_pkts_received */
1728        data[i++] = lstats.rx_packets;
1729        /* sum of oct->instr_queue[iq_no]->stats.tx_done */
1730        data[i++] = lstats.tx_packets;
1731        /* sum of oct->droq[oq_no]->stats->rx_bytes_received */
1732        data[i++] = lstats.rx_bytes;
1733        /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1734        data[i++] = lstats.tx_bytes;
1735        data[i++] = lstats.rx_errors;
1736        data[i++] = lstats.tx_errors;
1737         /* sum of oct->droq[oq_no]->stats->rx_dropped +
1738          * oct->droq[oq_no]->stats->dropped_nodispatch +
1739          * oct->droq[oq_no]->stats->dropped_toomany +
1740          * oct->droq[oq_no]->stats->dropped_nomem
1741          */
1742        data[i++] = lstats.rx_dropped;
1743        /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1744        data[i++] = lstats.tx_dropped +
1745                oct_dev->link_stats.fromhost.fw_err_drop;
1746
1747        data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
1748        data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
1749        data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
1750        data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
1751
1752        /* lio->link_changes */
1753        data[i++] = CVM_CAST64(lio->link_changes);
1754
1755        for (vj = 0; vj < oct_dev->num_iqs; vj++) {
1756                j = lio->linfo.txpciq[vj].s.q_no;
1757
1758                /* packets to network port */
1759                /* # of packets tx to network */
1760                data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1761                 /* # of bytes tx to network */
1762                data[i++] = CVM_CAST64(
1763                                oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1764                /* # of packets dropped */
1765                data[i++] = CVM_CAST64(
1766                                oct_dev->instr_queue[j]->stats.tx_dropped);
1767                /* # of tx fails due to queue full */
1768                data[i++] = CVM_CAST64(
1769                                oct_dev->instr_queue[j]->stats.tx_iq_busy);
1770                /* XXX gather entries sent */
1771                data[i++] = CVM_CAST64(
1772                                oct_dev->instr_queue[j]->stats.sgentry_sent);
1773
1774                /* instruction to firmware: data and control */
1775                /* # of instructions to the queue */
1776                data[i++] = CVM_CAST64(
1777                                oct_dev->instr_queue[j]->stats.instr_posted);
1778                /* # of instructions processed */
1779                data[i++] =
1780                    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
1781                /* # of instructions could not be processed */
1782                data[i++] =
1783                    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
1784                /* bytes sent through the queue */
1785                data[i++] = CVM_CAST64(
1786                                oct_dev->instr_queue[j]->stats.bytes_sent);
1787                /* tso request */
1788                data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1789                /* vxlan request */
1790                data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1791                /* txq restart */
1792                data[i++] = CVM_CAST64(
1793                                oct_dev->instr_queue[j]->stats.tx_restart);
1794        }
1795
1796        /* RX */
1797        for (vj = 0; vj < oct_dev->num_oqs; vj++) {
1798                j = lio->linfo.rxpciq[vj].s.q_no;
1799
1800                /* packets sent to the TCP/IP network stack */
1801                /* # of packets to network stack */
1802                data[i++] = CVM_CAST64(
1803                                oct_dev->droq[j]->stats.rx_pkts_received);
1804                /* # of bytes to network stack */
1805                data[i++] = CVM_CAST64(
1806                                oct_dev->droq[j]->stats.rx_bytes_received);
1807                data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1808                                       oct_dev->droq[j]->stats.dropped_toomany +
1809                                       oct_dev->droq[j]->stats.rx_dropped);
1810                data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1811                data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1812                data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1813
1814                /* control and data path */
1815                data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1816                data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1817                data[i++] =
1818                        CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1819
1820                data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1821                data[i++] =
1822                    CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1823        }
1824}
1825
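/* Private-flag strings are only advertised on CN23XX PF/VF devices;
 * CN66XX/CN68XX report none.
 */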
1826static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
1827{
1828        struct octeon_device *oct_dev = lio->oct_dev;
1829        int i;
1830
1831        switch (oct_dev->chip_id) {
1832        case OCTEON_CN23XX_PF_VID:
1833        case OCTEON_CN23XX_VF_VID:
1834                for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
1835                        sprintf(data, "%s", oct_priv_flags_strings[i]);
1836                        data += ETH_GSTRING_LEN;
1837                }
1838                break;
1839        case OCTEON_CN68XX:
1840        case OCTEON_CN66XX:
1841                break;
1842        default:
1843                netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1844                break;
1845        }
1846}
1847
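/* Emit the stat name strings for ETH_SS_STATS and ETH_SS_PRIV_FLAGS.
 * The ordering here must stay in lockstep with lio_get_ethtool_stats()
 * and with the counts returned by lio_get_sset_count().
 */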
1848static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1849{
1850        struct lio *lio = GET_LIO(netdev);
1851        struct octeon_device *oct_dev = lio->oct_dev;
1852        int num_iq_stats, num_oq_stats, i, j;
1853        int num_stats;
1854
1855        switch (stringset) {
1856        case ETH_SS_STATS:
1857                num_stats = ARRAY_SIZE(oct_stats_strings);
1858                for (j = 0; j < num_stats; j++) {
1859                        sprintf(data, "%s", oct_stats_strings[j]);
1860                        data += ETH_GSTRING_LEN;
1861                }
1862
1863                num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1864                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1865                        if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1866                                continue;
1867                        for (j = 0; j < num_iq_stats; j++) {
1868                                sprintf(data, "tx-%d-%s", i,
1869                                        oct_iq_stats_strings[j]);
1870                                data += ETH_GSTRING_LEN;
1871                        }
1872                }
1873
1874                num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1875                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1876                        if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1877                                continue;
1878                        for (j = 0; j < num_oq_stats; j++) {
1879                                sprintf(data, "rx-%d-%s", i,
1880                                        oct_droq_stats_strings[j]);
1881                                data += ETH_GSTRING_LEN;
1882                        }
1883                }
1884                break;
1885
1886        case ETH_SS_PRIV_FLAGS:
1887                lio_get_priv_flags_strings(lio, data);
1888                break;
1889        default:
1890                netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1891                break;
1892        }
1893}
1894
1895static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
1896                               u8 *data)
1897{
1898        int num_iq_stats, num_oq_stats, i, j;
1899        struct lio *lio = GET_LIO(netdev);
1900        struct octeon_device *oct_dev = lio->oct_dev;
1901        int num_stats;
1902
1903        switch (stringset) {
1904        case ETH_SS_STATS:
1905                num_stats = ARRAY_SIZE(oct_vf_stats_strings);
1906                for (j = 0; j < num_stats; j++) {
1907                        sprintf(data, "%s", oct_vf_stats_strings[j]);
1908                        data += ETH_GSTRING_LEN;
1909                }
1910
1911                num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1912                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1913                        if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1914                                continue;
1915                        for (j = 0; j < num_iq_stats; j++) {
1916                                sprintf(data, "tx-%d-%s", i,
1917                                        oct_iq_stats_strings[j]);
1918                                data += ETH_GSTRING_LEN;
1919                        }
1920                }
1921
1922                num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1923                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1924                        if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1925                                continue;
1926                        for (j = 0; j < num_oq_stats; j++) {
1927                                sprintf(data, "rx-%d-%s", i,
1928                                        oct_droq_stats_strings[j]);
1929                                data += ETH_GSTRING_LEN;
1930                        }
1931                }
1932                break;
1933
1934        case ETH_SS_PRIV_FLAGS:
1935                lio_get_priv_flags_strings(lio, data);
1936                break;
1937        default:
1938                netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1939                break;
1940        }
1941}
1942
1943static int lio_get_priv_flags_ss_count(struct lio *lio)
1944{
1945        struct octeon_device *oct_dev = lio->oct_dev;
1946
1947        switch (oct_dev->chip_id) {
1948        case OCTEON_CN23XX_PF_VID:
1949        case OCTEON_CN23XX_VF_VID:
1950                return ARRAY_SIZE(oct_priv_flags_strings);
1951        case OCTEON_CN68XX:
1952        case OCTEON_CN66XX:
1953                return -EOPNOTSUPP;
1954        default:
1955                netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1956                return -EOPNOTSUPP;
1957        }
1958}
1959
1960static int lio_get_sset_count(struct net_device *netdev, int sset)
1961{
1962        struct lio *lio = GET_LIO(netdev);
1963        struct octeon_device *oct_dev = lio->oct_dev;
1964
1965        switch (sset) {
1966        case ETH_SS_STATS:
1967                return (ARRAY_SIZE(oct_stats_strings) +
1968                        ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1969                        ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1970        case ETH_SS_PRIV_FLAGS:
1971                return lio_get_priv_flags_ss_count(lio);
1972        default:
1973                return -EOPNOTSUPP;
1974        }
1975}
1976
1977static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
1978{
1979        struct lio *lio = GET_LIO(netdev);
1980        struct octeon_device *oct_dev = lio->oct_dev;
1981
1982        switch (sset) {
1983        case ETH_SS_STATS:
1984                return (ARRAY_SIZE(oct_vf_stats_strings) +
1985                        ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1986                        ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1987        case ETH_SS_PRIV_FLAGS:
1988                return lio_get_priv_flags_ss_count(lio);
1989        default:
1990                return -EOPNOTSUPP;
1991        }
1992}
1993
1994/* Get interrupt moderation parameters */
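/* The parameters are fetched from firmware with a soft command
 * (OPCODE_NIC_INTRMOD_PARAMS): allocate the command, post it on the first
 * tx queue, wait for completion, byte-swap the 8-byte words of the
 * response and copy them into the caller's oct_intrmod_cfg.
 */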
1995static int octnet_get_intrmod_cfg(struct lio *lio,
1996                                  struct oct_intrmod_cfg *intr_cfg)
1997{
1998        struct octeon_soft_command *sc;
1999        struct oct_intrmod_resp *resp;
2000        int retval;
2001        struct octeon_device *oct_dev = lio->oct_dev;
2002
2003        /* Alloc soft command */
2004        sc = (struct octeon_soft_command *)
2005                octeon_alloc_soft_command(oct_dev,
2006                                          0,
2007                                          sizeof(struct oct_intrmod_resp), 0);
2008
2009        if (!sc)
2010                return -ENOMEM;
2011
2012        resp = (struct oct_intrmod_resp *)sc->virtrptr;
2013        memset(resp, 0, sizeof(struct oct_intrmod_resp));
2014
2015        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2016
2017        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
2018                                    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
2019
2020        init_completion(&sc->complete);
2021        sc->sc_status = OCTEON_REQUEST_PENDING;
2022
2023        retval = octeon_send_soft_command(oct_dev, sc);
2024        if (retval == IQ_SEND_FAILED) {
2025                octeon_free_soft_command(oct_dev, sc);
2026                return -EINVAL;
2027        }
2028
2029        /* Sleep on a wait queue until the condition flag indicates that
2030         * the response has arrived or the request has timed out.
2031         */
2032        retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
2033        if (retval)
2034                return -ENODEV;
2035
2036        if (resp->status) {
2037                dev_err(&oct_dev->pci_dev->dev,
2038                        "Get interrupt moderation parameters failed\n");
2039                WRITE_ONCE(sc->caller_is_done, true);
2040                return -ENODEV;
2041        }
2042
2043        octeon_swap_8B_data((u64 *)&resp->intrmod,
2044                            (sizeof(struct oct_intrmod_cfg)) / 8);
2045        memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
2046        WRITE_ONCE(sc->caller_is_done, true);
2047
2048        return 0;
2049}
2050
2051/*  Configure interrupt moderation parameters */
2052static int octnet_set_intrmod_cfg(struct lio *lio,
2053                                  struct oct_intrmod_cfg *intr_cfg)
2054{
2055        struct octeon_soft_command *sc;
2056        struct oct_intrmod_cfg *cfg;
2057        int retval;
2058        struct octeon_device *oct_dev = lio->oct_dev;
2059
2060        /* Alloc soft command */
2061        sc = (struct octeon_soft_command *)
2062                octeon_alloc_soft_command(oct_dev,
2063                                          sizeof(struct oct_intrmod_cfg),
2064                                          16, 0);
2065
2066        if (!sc)
2067                return -ENOMEM;
2068
2069        cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
2070
2071        memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
2072        octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
2073
2074        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2075
2076        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
2077                                    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
2078
2079        init_completion(&sc->complete);
2080        sc->sc_status = OCTEON_REQUEST_PENDING;
2081
2082        retval = octeon_send_soft_command(oct_dev, sc);
2083        if (retval == IQ_SEND_FAILED) {
2084                octeon_free_soft_command(oct_dev, sc);
2085                return -EINVAL;
2086        }
2087
2088        /* Sleep on a wait queue until the condition flag indicates that
2089         * the response has arrived or the request has timed out.
2090         */
2091        retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
2092        if (retval)
2093                return retval;
2094
2095        retval = sc->sc_status;
2096        if (retval == 0) {
2097                dev_info(&oct_dev->pci_dev->dev,
2098                         "Rx-Adaptive Interrupt moderation %s\n",
2099                         (intr_cfg->rx_enable) ?
2100                         "enabled" : "disabled");
2101                WRITE_ONCE(sc->caller_is_done, true);
2102                return 0;
2103        }
2104
2105        dev_err(&oct_dev->pci_dev->dev,
2106                "intrmod config failed. Status: %x\n", retval);
2107        WRITE_ONCE(sc->caller_is_done, true);
2108        return -ENODEV;
2109}
2110
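/* Report interrupt coalescing settings (e.g. for "ethtool -c <dev>").
 * Static usecs/frames values are returned only when adaptive (intrmod)
 * moderation is disabled for that direction; otherwise the adaptive
 * thresholds from the firmware's oct_intrmod_cfg are reported.
 */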
2111static int lio_get_intr_coalesce(struct net_device *netdev,
2112                                 struct ethtool_coalesce *intr_coal)
2113{
2114        struct lio *lio = GET_LIO(netdev);
2115        struct octeon_device *oct = lio->oct_dev;
2116        struct octeon_instr_queue *iq;
2117        struct oct_intrmod_cfg intrmod_cfg;
2118
2119        if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
2120                return -ENODEV;
2121
2122        switch (oct->chip_id) {
2123        case OCTEON_CN23XX_PF_VID:
2124        case OCTEON_CN23XX_VF_VID: {
2125                if (!intrmod_cfg.rx_enable) {
2126                        intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
2127                        intr_coal->rx_max_coalesced_frames =
2128                                oct->rx_max_coalesced_frames;
2129                }
2130                if (!intrmod_cfg.tx_enable)
2131                        intr_coal->tx_max_coalesced_frames =
2132                                oct->tx_max_coalesced_frames;
2133                break;
2134        }
2135        case OCTEON_CN68XX:
2136        case OCTEON_CN66XX: {
2137                struct octeon_cn6xxx *cn6xxx =
2138                        (struct octeon_cn6xxx *)oct->chip;
2139
2140                if (!intrmod_cfg.rx_enable) {
2141                        intr_coal->rx_coalesce_usecs =
2142                                CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
2143                        intr_coal->rx_max_coalesced_frames =
2144                                CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
2145                }
2146                iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
2147                intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
2148                break;
2149        }
2150        default:
2151                netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
2152                return -EINVAL;
2153        }
2154        if (intrmod_cfg.rx_enable) {
2155                intr_coal->use_adaptive_rx_coalesce =
2156                        intrmod_cfg.rx_enable;
2157                intr_coal->rate_sample_interval =
2158                        intrmod_cfg.check_intrvl;
2159                intr_coal->pkt_rate_high =
2160                        intrmod_cfg.maxpkt_ratethr;
2161                intr_coal->pkt_rate_low =
2162                        intrmod_cfg.minpkt_ratethr;
2163                intr_coal->rx_max_coalesced_frames_high =
2164                        intrmod_cfg.rx_maxcnt_trigger;
2165                intr_coal->rx_coalesce_usecs_high =
2166                        intrmod_cfg.rx_maxtmr_trigger;
2167                intr_coal->rx_coalesce_usecs_low =
2168                        intrmod_cfg.rx_mintmr_trigger;
2169                intr_coal->rx_max_coalesced_frames_low =
2170                        intrmod_cfg.rx_mincnt_trigger;
2171        }
2172        if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
2173            (intrmod_cfg.tx_enable)) {
2174                intr_coal->use_adaptive_tx_coalesce =
2175                        intrmod_cfg.tx_enable;
2176                intr_coal->tx_max_coalesced_frames_high =
2177                        intrmod_cfg.tx_maxcnt_trigger;
2178                intr_coal->tx_max_coalesced_frames_low =
2179                        intrmod_cfg.tx_mincnt_trigger;
2180        }
2181        return 0;
2182}
2183
2184/* Enable/disable adaptive (auto) interrupt moderation */
2185static int oct_cfg_adaptive_intr(struct lio *lio,
2186                                 struct oct_intrmod_cfg *intrmod_cfg,
2187                                 struct ethtool_coalesce *intr_coal)
2188{
2189        int ret = 0;
2190
2191        if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
2192                intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
2193                intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
2194                intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
2195        }
2196        if (intrmod_cfg->rx_enable) {
2197                intrmod_cfg->rx_maxcnt_trigger =
2198                        intr_coal->rx_max_coalesced_frames_high;
2199                intrmod_cfg->rx_maxtmr_trigger =
2200                        intr_coal->rx_coalesce_usecs_high;
2201                intrmod_cfg->rx_mintmr_trigger =
2202                        intr_coal->rx_coalesce_usecs_low;
2203                intrmod_cfg->rx_mincnt_trigger =
2204                        intr_coal->rx_max_coalesced_frames_low;
2205        }
2206        if (intrmod_cfg->tx_enable) {
2207                intrmod_cfg->tx_maxcnt_trigger =
2208                        intr_coal->tx_max_coalesced_frames_high;
2209                intrmod_cfg->tx_mincnt_trigger =
2210                        intr_coal->tx_max_coalesced_frames_low;
2211        }
2212
2213        ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
2214
2215        return ret;
2216}
2217
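/* Program the count-based (packet count) RX interrupt threshold. CN6XXX
 * uses the single CN6XXX_SLI_OQ_INT_LEVEL_PKTS CSR; on CN23XX each
 * SLI_OQ_PKT_INT_LEVELS register is updated with a read-modify-write that
 * keeps the time-threshold bits in the upper part of the register.
 */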
2218static int
2219oct_cfg_rx_intrcnt(struct lio *lio,
2220                   struct oct_intrmod_cfg *intrmod,
2221                   struct ethtool_coalesce *intr_coal)
2222{
2223        struct octeon_device *oct = lio->oct_dev;
2224        u32 rx_max_coalesced_frames;
2225
2226        /* Configure count-based interrupt values */
2227        switch (oct->chip_id) {
2228        case OCTEON_CN68XX:
2229        case OCTEON_CN66XX: {
2230                struct octeon_cn6xxx *cn6xxx =
2231                        (struct octeon_cn6xxx *)oct->chip;
2232
2233                if (!intr_coal->rx_max_coalesced_frames)
2234                        rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
2235                else
2236                        rx_max_coalesced_frames =
2237                                intr_coal->rx_max_coalesced_frames;
2238                octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
2239                                 rx_max_coalesced_frames);
2240                CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
2241                break;
2242        }
2243        case OCTEON_CN23XX_PF_VID: {
2244                int q_no;
2245
2246                if (!intr_coal->rx_max_coalesced_frames)
2247                        rx_max_coalesced_frames = intrmod->rx_frames;
2248                else
2249                        rx_max_coalesced_frames =
2250                            intr_coal->rx_max_coalesced_frames;
2251                for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2252                        q_no += oct->sriov_info.pf_srn;
2253                        octeon_write_csr64(
2254                            oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2255                            (octeon_read_csr64(
2256                                 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2257                             (0x3fffff00000000UL)) |
2258                                (rx_max_coalesced_frames - 1));
2259                        /*consider setting resend bit*/
2260                }
2261                intrmod->rx_frames = rx_max_coalesced_frames;
2262                oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2263                break;
2264        }
2265        case OCTEON_CN23XX_VF_VID: {
2266                int q_no;
2267
2268                if (!intr_coal->rx_max_coalesced_frames)
2269                        rx_max_coalesced_frames = intrmod->rx_frames;
2270                else
2271                        rx_max_coalesced_frames =
2272                            intr_coal->rx_max_coalesced_frames;
2273                for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2274                        octeon_write_csr64(
2275                            oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2276                            (octeon_read_csr64(
2277                                 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2278                             (0x3fffff00000000UL)) |
2279                                (rx_max_coalesced_frames - 1));
2280                        /*consider writing to resend bit here*/
2281                }
2282                intrmod->rx_frames = rx_max_coalesced_frames;
2283                oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2284                break;
2285        }
2286        default:
2287                return -EINVAL;
2288        }
2289        return 0;
2290}
2291
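/* Program the time-based RX interrupt threshold. The requested microsecond
 * value is converted to chip ticks with the chip-specific *_get_oq_ticks()
 * helper; CN6XXX writes the global CN6XXX_SLI_OQ_INT_LEVEL_TIME CSR, while
 * CN23XX writes each OQ's PKT_INT_LEVELS register together with the current
 * packet-count threshold (intrmod->rx_frames).
 */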
2292static int oct_cfg_rx_intrtime(struct lio *lio,
2293                               struct oct_intrmod_cfg *intrmod,
2294                               struct ethtool_coalesce *intr_coal)
2295{
2296        struct octeon_device *oct = lio->oct_dev;
2297        u32 time_threshold, rx_coalesce_usecs;
2298
2299        /* Configure time-based interrupt values */
2300        switch (oct->chip_id) {
2301        case OCTEON_CN68XX:
2302        case OCTEON_CN66XX: {
2303                struct octeon_cn6xxx *cn6xxx =
2304                        (struct octeon_cn6xxx *)oct->chip;
2305                if (!intr_coal->rx_coalesce_usecs)
2306                        rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
2307                else
2308                        rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2309
2310                time_threshold = lio_cn6xxx_get_oq_ticks(oct,
2311                                                         rx_coalesce_usecs);
2312                octeon_write_csr(oct,
2313                                 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
2314                                 time_threshold);
2315
2316                CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
2317                break;
2318        }
2319        case OCTEON_CN23XX_PF_VID: {
2320                u64 time_threshold;
2321                int q_no;
2322
2323                if (!intr_coal->rx_coalesce_usecs)
2324                        rx_coalesce_usecs = intrmod->rx_usecs;
2325                else
2326                        rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2327                time_threshold =
2328                    cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2329                for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2330                        q_no += oct->sriov_info.pf_srn;
2331                        octeon_write_csr64(oct,
2332                                           CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2333                                           (intrmod->rx_frames |
2334                                            ((u64)time_threshold << 32)));
2335                        /*consider writing to resend bit here*/
2336                }
2337                intrmod->rx_usecs = rx_coalesce_usecs;
2338                oct->rx_coalesce_usecs = rx_coalesce_usecs;
2339                break;
2340        }
2341        case OCTEON_CN23XX_VF_VID: {
2342                u64 time_threshold;
2343                int q_no;
2344
2345                if (!intr_coal->rx_coalesce_usecs)
2346                        rx_coalesce_usecs = intrmod->rx_usecs;
2347                else
2348                        rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2349
2350                time_threshold =
2351                    cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2352                for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2353                        octeon_write_csr64(
2354                                oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2355                                (intrmod->rx_frames |
2356                                 ((u64)time_threshold << 32)));
2357                        /*consider setting resend bit*/
2358                }
2359                intrmod->rx_usecs = rx_coalesce_usecs;
2360                oct->rx_coalesce_usecs = rx_coalesce_usecs;
2361                break;
2362        }
2363        default:
2364                return -EINVAL;
2365        }
2366
2367        return 0;
2368}
2369
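/* Program the count-based TX (IQ) interrupt threshold. On CN23XX the
 * watermark field of each instruction queue's inst_cnt register is updated
 * (the running count bits are deliberately not written back, per the
 * comment below); CN66XX/CN68XX take no action here.
 */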
2370static int
2371oct_cfg_tx_intrcnt(struct lio *lio,
2372                   struct oct_intrmod_cfg *intrmod,
2373                   struct ethtool_coalesce *intr_coal)
2374{
2375        struct octeon_device *oct = lio->oct_dev;
2376        u32 iq_intr_pkt;
2377        void __iomem *inst_cnt_reg;
2378        u64 val;
2379
2380        /* Configure count-based interrupt values */
2381        switch (oct->chip_id) {
2382        case OCTEON_CN68XX:
2383        case OCTEON_CN66XX:
2384                break;
2385        case OCTEON_CN23XX_VF_VID:
2386        case OCTEON_CN23XX_PF_VID: {
2387                int q_no;
2388
2389                if (!intr_coal->tx_max_coalesced_frames)
2390                        iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
2391                                      CN23XX_PKT_IN_DONE_WMARK_MASK;
2392                else
2393                        iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
2394                                      CN23XX_PKT_IN_DONE_WMARK_MASK;
2395                for (q_no = 0; q_no < oct->num_iqs; q_no++) {
2396                        inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
2397                        val = readq(inst_cnt_reg);
2398                        /* clear wmark and count; don't want to write count back */
2399                        val = (val & 0xFFFF000000000000ULL) |
2400                              ((u64)(iq_intr_pkt - 1)
2401                               << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
2402                        writeq(val, inst_cnt_reg);
2403                        /*consider setting resend bit*/
2404                }
2405                intrmod->tx_frames = iq_intr_pkt;
2406                oct->tx_max_coalesced_frames = iq_intr_pkt;
2407                break;
2408        }
2409        default:
2410                return -EINVAL;
2411        }
2412        return 0;
2413}
2414
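/* Apply interrupt coalescing settings (e.g. "ethtool -C <dev> adaptive-rx on"
 * or "ethtool -C <dev> rx-usecs 64 rx-frames 64 tx-frames 64"; commands shown
 * for illustration). Adaptive settings are sent to firmware via
 * octnet_set_intrmod_cfg(); static thresholds are programmed directly into
 * the chip by the oct_cfg_* helpers above.
 */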
2415static int lio_set_intr_coalesce(struct net_device *netdev,
2416                                 struct ethtool_coalesce *intr_coal)
2417{
2418        struct lio *lio = GET_LIO(netdev);
2419        int ret;
2420        struct octeon_device *oct = lio->oct_dev;
2421        struct oct_intrmod_cfg intrmod = {0};
2422        u32 j, q_no;
2423        int db_max, db_min;
2424
2425        switch (oct->chip_id) {
2426        case OCTEON_CN68XX:
2427        case OCTEON_CN66XX:
2428                db_min = CN6XXX_DB_MIN;
2429                db_max = CN6XXX_DB_MAX;
2430                if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
2431                    (intr_coal->tx_max_coalesced_frames <= db_max)) {
2432                        for (j = 0; j < lio->linfo.num_txpciq; j++) {
2433                                q_no = lio->linfo.txpciq[j].s.q_no;
2434                                oct->instr_queue[q_no]->fill_threshold =
2435                                        intr_coal->tx_max_coalesced_frames;
2436                        }
2437                } else {
2438                        dev_err(&oct->pci_dev->dev,
2439                                "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
2440                                intr_coal->tx_max_coalesced_frames,
2441                                db_min, db_max);
2442                        return -EINVAL;
2443                }
2444                break;
2445        case OCTEON_CN23XX_PF_VID:
2446        case OCTEON_CN23XX_VF_VID:
2447                break;
2448        default:
2449                return -EINVAL;
2450        }
2451
2452        intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
2453        intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
2454        intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2455        intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2456        intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2457
2458        ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
2459
2460        if (!intr_coal->use_adaptive_rx_coalesce) {
2461                ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
2462                if (ret)
2463                        goto ret_intrmod;
2464
2465                ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
2466                if (ret)
2467                        goto ret_intrmod;
2468        } else {
2469                oct->rx_coalesce_usecs =
2470                        CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2471                oct->rx_max_coalesced_frames =
2472                        CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2473        }
2474
2475        if (!intr_coal->use_adaptive_tx_coalesce) {
2476                ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
2477                if (ret)
2478                        goto ret_intrmod;
2479        } else {
2480                oct->tx_max_coalesced_frames =
2481                        CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2482        }
2483
2484        return 0;
2485ret_intrmod:
2486        return ret;
2487}
2488
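/* Report timestamping capabilities (e.g. for "ethtool -T <dev>"). Hardware
 * timestamping modes are only advertised when the driver is built with
 * PTP_HARDWARE_TIMESTAMPING; the PHC index comes from the registered
 * ptp_clock, or -1 if none is present.
 */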
2489static int lio_get_ts_info(struct net_device *netdev,
2490                           struct ethtool_ts_info *info)
2491{
2492        struct lio *lio = GET_LIO(netdev);
2493
2494        info->so_timestamping =
2495#ifdef PTP_HARDWARE_TIMESTAMPING
2496                SOF_TIMESTAMPING_TX_HARDWARE |
2497                SOF_TIMESTAMPING_RX_HARDWARE |
2498                SOF_TIMESTAMPING_RAW_HARDWARE |
2499                SOF_TIMESTAMPING_TX_SOFTWARE |
2500#endif
2501                SOF_TIMESTAMPING_RX_SOFTWARE |
2502                SOF_TIMESTAMPING_SOFTWARE;
2503
2504        if (lio->ptp_clock)
2505                info->phc_index = ptp_clock_index(lio->ptp_clock);
2506        else
2507                info->phc_index = -1;
2508
2509#ifdef PTP_HARDWARE_TIMESTAMPING
2510        info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
2511
2512        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2513                           (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2514                           (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2515                           (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
2516#endif
2517
2518        return 0;
2519}
2520
2521/* Return register dump len. */
2522static int lio_get_regs_len(struct net_device *dev)
2523{
2524        struct lio *lio = GET_LIO(dev);
2525        struct octeon_device *oct = lio->oct_dev;
2526
2527        switch (oct->chip_id) {
2528        case OCTEON_CN23XX_PF_VID:
2529                return OCT_ETHTOOL_REGDUMP_LEN_23XX;
2530        case OCTEON_CN23XX_VF_VID:
2531                return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
2532        default:
2533                return OCT_ETHTOOL_REGDUMP_LEN;
2534        }
2535}
2536
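/* Format a CN23XX CSR dump into the caller-supplied buffer (consumed by the
 * ethtool register-dump path, e.g. "ethtool -d <dev>"). The output must fit
 * within OCT_ETHTOOL_REGDUMP_LEN_23XX as reported by lio_get_regs_len().
 */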
2537static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
2538{
2539        u32 reg;
2540        u8 pf_num = oct->pf_num;
2541        int len = 0;
2542        int i;
2543
2544        /* PCI  Window Registers */
2545
2546        len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2547
2548        /*0x29030 or 0x29040*/
2549        reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
2550        len += sprintf(s + len,
2551                       "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
2552                       reg, oct->pcie_port, oct->pf_num,
2553                       (u64)octeon_read_csr64(oct, reg));
2554
2555        /*0x27080 or 0x27090*/
2556        reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
2557        len +=
2558            sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
2559                    reg, oct->pcie_port, oct->pf_num,
2560                    (u64)octeon_read_csr64(oct, reg));
2561
2562        /*0x27000 or 0x27010*/
2563        reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
2564        len +=
2565            sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
2566                    reg, oct->pcie_port, oct->pf_num,
2567                    (u64)octeon_read_csr64(oct, reg));
2568
2569        /*0x29120*/
2570        reg = 0x29120;
2571        len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
2572                       (u64)octeon_read_csr64(oct, reg));
2573
2574        /*0x27300*/
2575        reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2576              (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2577        len += sprintf(
2578            s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
2579            oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
2580
2581        /*0x27200*/
2582        reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2583              (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2584        len += sprintf(s + len,
2585                       "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
2586                       reg, oct->pcie_port, oct->pf_num,
2587                       (u64)octeon_read_csr64(oct, reg));
2588
2589        /*0x29130*/
2590        reg = CN23XX_SLI_PKT_CNT_INT;
2591        len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
2592                       (u64)octeon_read_csr64(oct, reg));
2593
2594        /*0x29140*/
2595        reg = CN23XX_SLI_PKT_TIME_INT;
2596        len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
2597                       (u64)octeon_read_csr64(oct, reg));
2598
2599        /*0x29160*/
2600        reg = 0x29160;
2601        len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
2602                       (u64)octeon_read_csr64(oct, reg));
2603
2604        /*0x29180*/
2605        reg = CN23XX_SLI_OQ_WMARK;
2606        len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
2607                       reg, (u64)octeon_read_csr64(oct, reg));
2608
2609        /*0x291E0*/
2610        reg = CN23XX_SLI_PKT_IOQ_RING_RST;
2611        len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
2612                       (u64)octeon_read_csr64(oct, reg));
2613
2614        /*0x29210*/
2615        reg = CN23XX_SLI_GBL_CONTROL;
2616        len += sprintf(s + len,
2617                       "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
2618                       (u64)octeon_read_csr64(oct, reg));
2619
2620        /*0x29220*/
2621        reg = 0x29220;
2622        len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
2623                       reg, (u64)octeon_read_csr64(oct, reg));
2624
2625        /*PF only*/
2626        if (pf_num == 0) {
2627                /*0x29260*/
2628                reg = CN23XX_SLI_OUT_BP_EN_W1S;
2629                len += sprintf(s + len,
2630                               "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S):  %016llx\n",
2631                               reg, (u64)octeon_read_csr64(oct, reg));
2632        } else if (pf_num == 1) {
2633                /*0x29270*/
2634                reg = CN23XX_SLI_OUT_BP_EN2_W1S;
2635                len += sprintf(s + len,
2636                               "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
2637                               reg, (u64)octeon_read_csr64(oct, reg));
2638        }
2639
2640        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2641                reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
2642                len +=
2643                    sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2644                            reg, i, (u64)octeon_read_csr64(oct, reg));
2645        }
2646
2647        /*0x10040*/
2648        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2649                reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2650                len += sprintf(s + len,
2651                               "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2652                               reg, i, (u64)octeon_read_csr64(oct, reg));
2653        }
2654
2655        /*0x10080*/
2656        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2657                reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
2658                len += sprintf(s + len,
2659                               "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2660                               reg, i, (u64)octeon_read_csr64(oct, reg));
2661        }
2662
2663        /*0x10090*/
2664        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2665                reg = CN23XX_SLI_OQ_SIZE(i);
2666                len += sprintf(
2667                    s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2668                    reg, i, (u64)octeon_read_csr64(oct, reg));
2669        }
2670
2671        /*0x10050*/
2672        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2673                reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
2674                len += sprintf(
2675                        s + len,
2676                        "\n[%08x] (SLI_PKT%d_OUTPUT_CONTROL): %016llx\n",
2677                        reg, i, (u64)octeon_read_csr64(oct, reg));
2678        }
2679
2680        /*0x10070*/
2681        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2682                reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
2683                len += sprintf(s + len,
2684                               "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2685                               reg, i, (u64)octeon_read_csr64(oct, reg));
2686        }
2687
2688        /*0x100a0*/
2689        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2690                reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
2691                len += sprintf(s + len,
2692                               "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2693                               reg, i, (u64)octeon_read_csr64(oct, reg));
2694        }
2695
2696        /*0x100b0*/
2697        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2698                reg = CN23XX_SLI_OQ_PKTS_SENT(i);
2699                len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2700                               reg, i, (u64)octeon_read_csr64(oct, reg));
2701        }
2702
2703        /*0x100c0*/
2704        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2705                reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
2706                len += sprintf(s + len,
2707                               "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2708                               reg, i, (u64)octeon_read_csr64(oct, reg));
2709        }
2710
2711        /*0x10000*/
2712        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2713                reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
2714                len += sprintf(s + len,
2715                               "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2716                               reg, i, (u64)octeon_read_csr64(oct, reg));
2717        }
2718
2719        /*0x10010*/
2720        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2721                reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
2722                len += sprintf(s + len,
2723                               "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2724                               reg, i, (u64)octeon_read_csr64(oct, reg));
2725        }
2726
2727        /*0x10020*/
2728        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2729                reg = CN23XX_SLI_IQ_DOORBELL(i);
2730                len += sprintf(s + len,
2731                               "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2732                               reg, i, (u64)octeon_read_csr64(oct, reg));
2733        }
2734
2735        /*0x10030*/
2736        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2737                reg = CN23XX_SLI_IQ_SIZE(i);
2738                len += sprintf(s + len,
2739                               "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2740                               reg, i, (u64)octeon_read_csr64(oct, reg));
2741        }
2753
2754        return len;
2755}
2756
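/* Dump the per-ring SLI CSRs visible to a CN23XX VF (one block per ring in
 * sriov_info.rings_per_vf) into 's'; returns the number of bytes written.
 */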
2757static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
2758{
2759        int len = 0;
2760        u32 reg;
2761        int i;
2762
2763        /* PCI  Window Registers */
2764
2765        len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2766
2767        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2768                reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
2769                len += sprintf(s + len,
2770                               "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2771                               reg, i, (u64)octeon_read_csr64(oct, reg));
2772        }
2773
2774        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2775                reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2776                len += sprintf(s + len,
2777                               "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2778                               reg, i, (u64)octeon_read_csr64(oct, reg));
2779        }
2780
2781        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2782                reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
2783                len += sprintf(s + len,
2784                               "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2785                               reg, i, (u64)octeon_read_csr64(oct, reg));
2786        }
2787
2788        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2789                reg = CN23XX_VF_SLI_OQ_SIZE(i);
2790                len += sprintf(s + len,
2791                               "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2792                               reg, i, (u64)octeon_read_csr64(oct, reg));
2793        }
2794
2795        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2796                reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
2797                len += sprintf(s + len,
2798                               "\n[%08x] (SLI_PKT%d_OUTPUT_CONTROL): %016llx\n",
2799                               reg, i, (u64)octeon_read_csr64(oct, reg));
2800        }
2801
2802        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2803                reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
2804                len += sprintf(s + len,
2805                               "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2806                               reg, i, (u64)octeon_read_csr64(oct, reg));
2807        }
2808
2809        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2810                reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
2811                len += sprintf(s + len,
2812                               "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2813                               reg, i, (u64)octeon_read_csr64(oct, reg));
2814        }
2815
2816        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2817                reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
2818                len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2819                               reg, i, (u64)octeon_read_csr64(oct, reg));
2820        }
2821
2822        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2823                reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
2824                len += sprintf(s + len,
2825                               "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2826                               reg, i, (u64)octeon_read_csr64(oct, reg));
2827        }
2828
2829        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2830                reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
2831                len += sprintf(s + len,
2832                               "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
2833                               reg, i, (u64)octeon_read_csr64(oct, reg));
2834        }
2835
2836        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2837                reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
2838                len += sprintf(s + len,
2839                               "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2840                               reg, i, (u64)octeon_read_csr64(oct, reg));
2841        }
2842
2843        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2844                reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
2845                len += sprintf(s + len,
2846                               "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2847                               reg, i, (u64)octeon_read_csr64(oct, reg));
2848        }
2849
2850        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2851                reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
2852                len += sprintf(s + len,
2853                               "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2854                               reg, i, (u64)octeon_read_csr64(oct, reg));
2855        }
2856
2857        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2858                reg = CN23XX_VF_SLI_IQ_SIZE(i);
2859                len += sprintf(s + len,
2860                               "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2861                               reg, i, (u64)octeon_read_csr64(oct, reg));
2862        }
2863
2864        for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2865                reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2866                len += sprintf(s + len,
2867                               "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2868                               reg, i, (u64)octeon_read_csr64(oct, reg));
2869        }
2870
2871        return len;
2872}
2873
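/* Dump the CN66XX/CN68XX window, interrupt, queue, DMA and BAR1 index CSRs
 * into 's'; returns the number of bytes written.
 */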
2874static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2875{
2876        u32 reg;
2877        int i, len = 0;
2878
2879        /* PCI  Window Registers */
2880
2881        len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2882        reg = CN6XXX_WIN_WR_ADDR_LO;
2883        len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2884                       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2885        reg = CN6XXX_WIN_WR_ADDR_HI;
2886        len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2887                       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2888        reg = CN6XXX_WIN_RD_ADDR_LO;
2889        len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2890                       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2891        reg = CN6XXX_WIN_RD_ADDR_HI;
2892        len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2893                       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2894        reg = CN6XXX_WIN_WR_DATA_LO;
2895        len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2896                       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2897        reg = CN6XXX_WIN_WR_DATA_HI;
2898        len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2899                       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2900        len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2901                       CN6XXX_WIN_WR_MASK_REG,
2902                       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2903
2904        /* PCI  Interrupt Register */
2905        len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2906                       CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
2907                                                CN6XXX_SLI_INT_ENB64_PORT0));
2908        len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2909                       CN6XXX_SLI_INT_ENB64_PORT1,
2910                       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2911        len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2912                       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2913
2914        /* PCI  Output queue registers */
2915        for (i = 0; i < oct->num_oqs; i++) {
2916                reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2917                len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2918                               reg, i, octeon_read_csr(oct, reg));
2919                reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2920                len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2921                               reg, i, octeon_read_csr(oct, reg));
2922        }
2923        reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2924        len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2925                       reg, octeon_read_csr(oct, reg));
2926        reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2927        len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2928                       reg, octeon_read_csr(oct, reg));
2929
2930        /* PCI  Input queue registers */
2931        for (i = 0; i <= 3; i++) {
2934                reg = CN6XXX_SLI_IQ_DOORBELL(i);
2935                len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2936                               reg, i, octeon_read_csr(oct, reg));
2937                reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2938                len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2939                               reg, i, octeon_read_csr(oct, reg));
2940        }
2941
2942        /* PCI  DMA registers */
2943
2944        len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2945                       CN6XXX_DMA_CNT(0),
2946                       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2947        reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2948        len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2949                       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2950        reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2951        len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2952                       CN6XXX_DMA_TIME_INT_LEVEL(0),
2953                       octeon_read_csr(oct, reg));
2954
2955        len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2956                       CN6XXX_DMA_CNT(1),
2957                       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2958        reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2959        len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2960                       CN6XXX_DMA_PKT_INT_LEVEL(1),
2961                       octeon_read_csr(oct, reg));
2962        reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
2963        len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2964                       CN6XXX_DMA_TIME_INT_LEVEL(1),
2965                       octeon_read_csr(oct, reg));
2966
2967        /* PCI  Index registers */
2968
2969        len += sprintf(s + len, "\n");
2970
2971        for (i = 0; i < 16; i++) {
2972                reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2973                len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2974                               CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2975        }
2976
2977        return len;
2978}
2979
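/* Dump selected PCI config space dwords (words 0-13 and 30-34) of a CN6XXX
 * device into 's'; returns the number of bytes written.
 */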
2980static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2981{
2982        u32 val;
2983        int i, len = 0;
2984
2985        /* PCI CONFIG Registers */
2986
2987        len += sprintf(s + len,
2988                       "\n\t Octeon Config space Registers\n\n");
2989
2990        for (i = 0; i <= 13; i++) {
2991                pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2992                len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2993                               (i * 4), i, val);
2994        }
2995
2996        for (i = 30; i <= 34; i++) {
2997                pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2998                len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2999                               (i * 4), i, val);
3000        }
3001
3002        return len;
3003}
3004
3005/* Return the register dump to the user application. */
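/* Backs the ETHTOOL_GREGS request ("ethtool -d <iface>"); the ethtool core
 * sizes 'regbuf' from lio_get_regs_len() before calling in.
 */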
3006static void lio_get_regs(struct net_device *dev,
3007                         struct ethtool_regs *regs, void *regbuf)
3008{
3009        struct lio *lio = GET_LIO(dev);
3010        int len = 0;
3011        struct octeon_device *oct = lio->oct_dev;
3012
3013        regs->version = OCT_ETHTOOL_REGSVER;
3014
3015        switch (oct->chip_id) {
3016        case OCTEON_CN23XX_PF_VID:
3017                memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
3018                len += cn23xx_read_csr_reg(regbuf + len, oct);
3019                break;
3020        case OCTEON_CN23XX_VF_VID:
3021                memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
3022                len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
3023                break;
3024        case OCTEON_CN68XX:
3025        case OCTEON_CN66XX:
3026                memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
3027                len += cn6xxx_read_csr_reg(regbuf + len, oct);
3028                len += cn6xxx_read_config_reg(regbuf + len, oct);
3029                break;
3030        default:
3031                dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
3032                        __func__, oct->chip_id);
3033        }
3034}
3035
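/* Driver private flags ("ethtool --show-priv-flags" / "--set-priv-flags").
 * Only OCT_PRIV_FLAG_TX_BYTES (interrupt coalescing driven by TX byte count)
 * is currently settable here.
 */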
3036static u32 lio_get_priv_flags(struct net_device *netdev)
3037{
3038        struct lio *lio = GET_LIO(netdev);
3039
3040        return lio->oct_dev->priv_flags;
3041}
3042
3043static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
3044{
3045        struct lio *lio = GET_LIO(netdev);
3046        bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
3047
3048        lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
3049                          intr_by_tx_bytes);
3050        return 0;
3051}
3052
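/* FEC control ("ethtool --show-fec" / "--set-fec") is only supported on the
 * CN2350/CN2360 25G subsystems; the valid encodings are RS and off.
 */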
3053static int lio_get_fecparam(struct net_device *netdev,
3054                            struct ethtool_fecparam *fec)
3055{
3056        struct lio *lio = GET_LIO(netdev);
3057        struct octeon_device *oct = lio->oct_dev;
3058
3059        fec->active_fec = ETHTOOL_FEC_NONE;
3060        fec->fec = ETHTOOL_FEC_NONE;
3061
3062        if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
3063            oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
3064                if (oct->no_speed_setting == 1)
3065                        return 0;
3066
3067                liquidio_get_fec(lio);
3068                fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF);
3069                if (oct->props[lio->ifidx].fec == 1)
3070                        fec->active_fec = ETHTOOL_FEC_RS;
3071                else
3072                        fec->active_fec = ETHTOOL_FEC_OFF;
3073        }
3074
3075        return 0;
3076}
3077
3078static int lio_set_fecparam(struct net_device *netdev,
3079                            struct ethtool_fecparam *fec)
3080{
3081        struct lio *lio = GET_LIO(netdev);
3082        struct octeon_device *oct = lio->oct_dev;
3083
3084        if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
3085            oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
3086                if (oct->no_speed_setting == 1)
3087                        return -EOPNOTSUPP;
3088
3089                if (fec->fec & ETHTOOL_FEC_OFF)
3090                        liquidio_set_fec(lio, 0);
3091                else if (fec->fec & ETHTOOL_FEC_RS)
3092                        liquidio_set_fec(lio, 1);
3093                else
3094                        return -EOPNOTSUPP;
3095        } else {
3096                return -EOPNOTSUPP;
3097        }
3098
3099        return 0;
3100}
3101
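/* ethtool_ops for PF netdevs; the VF table below omits the operations a VF
 * cannot perform (pause, EEPROM, PHY identify, FEC and link-settings write).
 */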
3102static const struct ethtool_ops lio_ethtool_ops = {
3103        .get_link_ksettings     = lio_get_link_ksettings,
3104        .set_link_ksettings     = lio_set_link_ksettings,
3105        .get_fecparam           = lio_get_fecparam,
3106        .set_fecparam           = lio_set_fecparam,
3107        .get_link               = ethtool_op_get_link,
3108        .get_drvinfo            = lio_get_drvinfo,
3109        .get_ringparam          = lio_ethtool_get_ringparam,
3110        .set_ringparam          = lio_ethtool_set_ringparam,
3111        .get_channels           = lio_ethtool_get_channels,
3112        .set_channels           = lio_ethtool_set_channels,
3113        .set_phys_id            = lio_set_phys_id,
3114        .get_eeprom_len         = lio_get_eeprom_len,
3115        .get_eeprom             = lio_get_eeprom,
3116        .get_strings            = lio_get_strings,
3117        .get_ethtool_stats      = lio_get_ethtool_stats,
3118        .get_pauseparam         = lio_get_pauseparam,
3119        .set_pauseparam         = lio_set_pauseparam,
3120        .get_regs_len           = lio_get_regs_len,
3121        .get_regs               = lio_get_regs,
3122        .get_msglevel           = lio_get_msglevel,
3123        .set_msglevel           = lio_set_msglevel,
3124        .get_sset_count         = lio_get_sset_count,
3125        .get_coalesce           = lio_get_intr_coalesce,
3126        .set_coalesce           = lio_set_intr_coalesce,
3127        .get_priv_flags         = lio_get_priv_flags,
3128        .set_priv_flags         = lio_set_priv_flags,
3129        .get_ts_info            = lio_get_ts_info,
3130};
3131
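/* Reduced ethtool_ops for CN23XX VF netdevs. */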
3132static const struct ethtool_ops lio_vf_ethtool_ops = {
3133        .get_link_ksettings     = lio_get_link_ksettings,
3134        .get_link               = ethtool_op_get_link,
3135        .get_drvinfo            = lio_get_vf_drvinfo,
3136        .get_ringparam          = lio_ethtool_get_ringparam,
3137        .set_ringparam          = lio_ethtool_set_ringparam,
3138        .get_channels           = lio_ethtool_get_channels,
3139        .set_channels           = lio_ethtool_set_channels,
3140        .get_strings            = lio_vf_get_strings,
3141        .get_ethtool_stats      = lio_vf_get_ethtool_stats,
3142        .get_regs_len           = lio_get_regs_len,
3143        .get_regs               = lio_get_regs,
3144        .get_msglevel           = lio_get_msglevel,
3145        .set_msglevel           = lio_vf_set_msglevel,
3146        .get_sset_count         = lio_vf_get_sset_count,
3147        .get_coalesce           = lio_get_intr_coalesce,
3148        .set_coalesce           = lio_set_intr_coalesce,
3149        .get_priv_flags         = lio_get_priv_flags,
3150        .set_priv_flags         = lio_set_priv_flags,
3151        .get_ts_info            = lio_get_ts_info,
3152};
3153
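/* Install the PF or VF ethtool_ops on 'netdev' based on the underlying
 * Octeon device type.
 */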
3154void liquidio_set_ethtool_ops(struct net_device *netdev)
3155{
3156        struct lio *lio = GET_LIO(netdev);
3157        struct octeon_device *oct = lio->oct_dev;
3158
3159        if (OCTEON_CN23XX_VF(oct))
3160                netdev->ethtool_ops = &lio_vf_ethtool_ops;
3161        else
3162                netdev->ethtool_ops = &lio_ethtool_ops;
3163}
3164