linux/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
   1/**********************************************************************
   2* Author: Cavium, Inc.
   3*
   4* Contact: support@cavium.com
   5*          Please include "LiquidIO" in the subject.
   6*
   7* Copyright (c) 2003-2015 Cavium, Inc.
   8*
   9* This file is free software; you can redistribute it and/or modify
  10* it under the terms of the GNU General Public License, Version 2, as
  11* published by the Free Software Foundation.
  12*
  13* This file is distributed in the hope that it will be useful, but
  14* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16* NONINFRINGEMENT.  See the GNU General Public License for more
  17* details.
  18*
  19* This file may also be available under a different license from Cavium.
  20* Contact Cavium, Inc. for more information
  21**********************************************************************/
  22#include <linux/netdevice.h>
  23#include <linux/net_tstamp.h>
  24#include <linux/pci.h>
  25#include "liquidio_common.h"
  26#include "octeon_droq.h"
  27#include "octeon_iq.h"
  28#include "response_manager.h"
  29#include "octeon_device.h"
  30#include "octeon_nic.h"
  31#include "octeon_main.h"
  32#include "octeon_network.h"
  33#include "cn66xx_regs.h"
  34#include "cn66xx_device.h"
  35#include "cn23xx_pf_device.h"
  36
  37static int octnet_get_link_stats(struct net_device *netdev);
  38
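     /* Completion context for an MDIO soft command: the issuing thread
      * sleeps on wc until the response callback sets cond and wakes it.
      */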
  39struct oct_mdio_cmd_context {
  40        int octeon_id;
  41        wait_queue_head_t wc;
  42        int cond;
  43};
  44
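     /* Response buffer for an MDIO soft command: receive header (rh),
      * the MDIO command echoed back by the core (including any value
      * read), and a completion status.
      */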
  45struct oct_mdio_cmd_resp {
  46        u64 rh;
  47        struct oct_mdio_cmd resp;
  48        u64 status;
  49};
  50
  51#define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
  52
  53/* Octeon's interface mode of operation */
  54enum {
  55        INTERFACE_MODE_DISABLED,
  56        INTERFACE_MODE_RGMII,
  57        INTERFACE_MODE_GMII,
  58        INTERFACE_MODE_SPI,
  59        INTERFACE_MODE_PCIE,
  60        INTERFACE_MODE_XAUI,
  61        INTERFACE_MODE_SGMII,
  62        INTERFACE_MODE_PICMG,
  63        INTERFACE_MODE_NPI,
  64        INTERFACE_MODE_LOOP,
  65        INTERFACE_MODE_SRIO,
  66        INTERFACE_MODE_ILK,
  67        INTERFACE_MODE_RXAUI,
  68        INTERFACE_MODE_QSGMII,
  69        INTERFACE_MODE_AGL,
  70        INTERFACE_MODE_XLAUI,
  71        INTERFACE_MODE_XFI,
  72        INTERFACE_MODE_10G_KR,
  73        INTERFACE_MODE_40G_KR4,
  74        INTERFACE_MODE_MIXED,
  75};
  76
  77#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
  78#define OCT_ETHTOOL_REGDUMP_LEN  4096
  79#define OCT_ETHTOOL_REGDUMP_LEN_23XX  (4096 * 11)
  80#define OCT_ETHTOOL_REGSVER  1
  81
  82/* statistics of PF */
  83static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
  84        "rx_packets",
  85        "tx_packets",
  86        "rx_bytes",
  87        "tx_bytes",
  88        "rx_errors",    /*jabber_err+l2_err+frame_err */
  89        "tx_errors",    /*fw_err_pko+fw_err_link+fw_err_drop */
  90        "rx_dropped",   /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
  91                        *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
  92                        */
  93        "tx_dropped",
  94
  95        "tx_total_sent",
  96        "tx_total_fwd",
  97        "tx_err_pko",
  98        "tx_err_link",
  99        "tx_err_drop",
 100
 101        "tx_tso",
 102        "tx_tso_packets",
 103        "tx_tso_err",
 104        "tx_vxlan",
 105
 106        "mac_tx_total_pkts",
 107        "mac_tx_total_bytes",
 108        "mac_tx_mcast_pkts",
 109        "mac_tx_bcast_pkts",
 110        "mac_tx_ctl_packets",   /*oct->link_stats.fromhost.ctl_sent */
 111        "mac_tx_total_collisions",
 112        "mac_tx_one_collision",
  113        "mac_tx_multi_collision",
  114        "mac_tx_max_collision_fail",
  115        "mac_tx_max_deferral_fail",
 116        "mac_tx_fifo_err",
 117        "mac_tx_runts",
 118
 119        "rx_total_rcvd",
 120        "rx_total_fwd",
 121        "rx_jabber_err",
 122        "rx_l2_err",
 123        "rx_frame_err",
 124        "rx_err_pko",
 125        "rx_err_link",
 126        "rx_err_drop",
 127
 128        "rx_vxlan",
 129        "rx_vxlan_err",
 130
 131        "rx_lro_pkts",
 132        "rx_lro_bytes",
 133        "rx_total_lro",
 134
 135        "rx_lro_aborts",
 136        "rx_lro_aborts_port",
 137        "rx_lro_aborts_seq",
 138        "rx_lro_aborts_tsval",
 139        "rx_lro_aborts_timer",
 140        "rx_fwd_rate",
 141
 142        "mac_rx_total_rcvd",
 143        "mac_rx_bytes",
 144        "mac_rx_total_bcst",
 145        "mac_rx_total_mcst",
 146        "mac_rx_runts",
 147        "mac_rx_ctl_packets",
 148        "mac_rx_fifo_err",
 149        "mac_rx_dma_drop",
 150        "mac_rx_fcs_err",
 151
 152        "link_state_changes",
 153};
 154
 155/* statistics of host tx queue */
 156static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
 157        "packets",              /*oct->instr_queue[iq_no]->stats.tx_done*/
 158        "bytes",                /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
 159        "dropped",
 160        "iq_busy",
 161        "sgentry_sent",
 162
 163        "fw_instr_posted",
 164        "fw_instr_processed",
 165        "fw_instr_dropped",
 166        "fw_bytes_sent",
 167
 168        "tso",
 169        "vxlan",
 170        "txq_restart",
 171};
 172
 173/* statistics of host rx queue */
 174static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
 175        "packets",              /*oct->droq[oq_no]->stats.rx_pkts_received */
 176        "bytes",                /*oct->droq[oq_no]->stats.rx_bytes_received */
 177        "dropped",              /*oct->droq[oq_no]->stats.rx_dropped+
 178                                 *oct->droq[oq_no]->stats.dropped_nodispatch+
 179                                 *oct->droq[oq_no]->stats.dropped_toomany+
 180                                 *oct->droq[oq_no]->stats.dropped_nomem
 181                                 */
 182        "dropped_nomem",
 183        "dropped_toomany",
 184        "fw_dropped",
 185        "fw_pkts_received",
 186        "fw_bytes_received",
 187        "fw_dropped_nodispatch",
 188
 189        "vxlan",
 190        "buffer_alloc_failure",
 191};
 192
 193/* LiquidIO driver private flags */
 194static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
 195};
 196
 197#define OCTNIC_NCMD_AUTONEG_ON  0x1
 198#define OCTNIC_NCMD_PHY_ON      0x2
 199
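     /* ethtool get_settings handler: report port type and the
      * supported/advertised modes for the interface mode in use, plus
      * the current speed and duplex from the cached link info.
      */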
 200static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 201{
 202        struct lio *lio = GET_LIO(netdev);
 203        struct octeon_device *oct = lio->oct_dev;
 204        struct oct_link_info *linfo;
 205
 206        linfo = &lio->linfo;
 207
 208        if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
 209            linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
 210            linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
 211                ecmd->port = PORT_FIBRE;
 212                ecmd->supported =
 213                        (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
 214                         SUPPORTED_Pause);
 215                ecmd->advertising =
 216                        (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
 217                ecmd->transceiver = XCVR_EXTERNAL;
 218                ecmd->autoneg = AUTONEG_DISABLE;
 219
 220        } else {
 221                dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
 222                        linfo->link.s.if_mode);
 223        }
 224
 225        if (linfo->link.s.link_up) {
 226                ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
 227                ecmd->duplex = linfo->link.s.duplex;
 228        } else {
 229                ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
 230                ecmd->duplex = DUPLEX_UNKNOWN;
 231        }
 232
 233        return 0;
 234}
 235
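     /* Report driver name/version, firmware version and PCI bus info */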
 236static void
 237lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 238{
 239        struct lio *lio;
 240        struct octeon_device *oct;
 241
 242        lio = GET_LIO(netdev);
 243        oct = lio->oct_dev;
 244
 245        memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
 246        strcpy(drvinfo->driver, "liquidio");
 247        strcpy(drvinfo->version, LIQUIDIO_VERSION);
 248        strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
 249                ETHTOOL_FWVERS_LEN);
 250        strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
 251}
 252
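     /* Report the maximum and currently configured rx/tx queue counts
      * for this interface, taken from the chip-specific configuration.
      */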
 253static void
 254lio_ethtool_get_channels(struct net_device *dev,
 255                         struct ethtool_channels *channel)
 256{
 257        struct lio *lio = GET_LIO(dev);
 258        struct octeon_device *oct = lio->oct_dev;
 259        u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
 260
 261        if (OCTEON_CN6XXX(oct)) {
 262                struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
 263
 264                max_rx = CFG_GET_OQ_MAX_Q(conf6x);
 265                max_tx = CFG_GET_IQ_MAX_Q(conf6x);
 266                rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
 267                tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
 268        } else if (OCTEON_CN23XX_PF(oct)) {
 269                struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
 270
 271                max_rx = CFG_GET_OQ_MAX_Q(conf23);
 272                max_tx = CFG_GET_IQ_MAX_Q(conf23);
 273                rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx);
 274                tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx);
 275        }
 276
 277        channel->max_rx = max_rx;
 278        channel->max_tx = max_tx;
 279        channel->rx_count = rx_count;
 280        channel->tx_count = tx_count;
 281}
 282
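     /* Length of the board-info string returned by lio_get_eeprom() */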
 283static int lio_get_eeprom_len(struct net_device *netdev)
 284{
 285        u8 buf[128];
 286        struct lio *lio = GET_LIO(netdev);
 287        struct octeon_device *oct_dev = lio->oct_dev;
 288        struct octeon_board_info *board_info;
 289        int len;
 290
 291        board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
 292        len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
 293                      board_info->name, board_info->serial_number,
 294                      board_info->major, board_info->minor);
 295
 296        return len;
 297}
 298
 299static int
 300lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 301               u8 *bytes)
 302{
 303        struct lio *lio = GET_LIO(netdev);
 304        struct octeon_device *oct_dev = lio->oct_dev;
 305        struct octeon_board_info *board_info;
 306
 307        if (eeprom->offset)
 308                return -EINVAL;
 309
 310        eeprom->magic = oct_dev->pci_dev->vendor;
 311        board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
 312        sprintf((char *)bytes,
 313                "boardname:%s serialnum:%s maj:%lld min:%lld\n",
 314                board_info->name, board_info->serial_number,
 315                board_info->major, board_info->minor);
 316
 317        return 0;
 318}
 319
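     /* Send a control command asking the firmware to write a GPIO value
      * (used below to drive the PHY LED GPIO on CN66XX).
      */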
 320static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
 321{
 322        struct lio *lio = GET_LIO(netdev);
 323        struct octeon_device *oct = lio->oct_dev;
 324        struct octnic_ctrl_pkt nctrl;
 325        int ret = 0;
 326
 327        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 328
 329        nctrl.ncmd.u64 = 0;
 330        nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
 331        nctrl.ncmd.s.param1 = addr;
 332        nctrl.ncmd.s.param2 = val;
 333        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 334        nctrl.wait_time = 100;
 335        nctrl.netpndev = (u64)netdev;
 336        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 337
 338        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 339        if (ret < 0) {
 340                dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
 341                return -EINVAL;
 342        }
 343
 344        return 0;
 345}
 346
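     /* Send a control command asking the firmware to turn the port
      * identification LED on or off.
      */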
 347static int octnet_id_active(struct net_device *netdev, int val)
 348{
 349        struct lio *lio = GET_LIO(netdev);
 350        struct octeon_device *oct = lio->oct_dev;
 351        struct octnic_ctrl_pkt nctrl;
 352        int ret = 0;
 353
 354        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 355
 356        nctrl.ncmd.u64 = 0;
 357        nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
 358        nctrl.ncmd.s.param1 = val;
 359        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 360        nctrl.wait_time = 100;
 361        nctrl.netpndev = (u64)netdev;
 362        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 363
 364        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 365        if (ret < 0) {
  366                dev_err(&oct->pci_dev->dev, "Failed to configure LED identification state\n");
 367                return -EINVAL;
 368        }
 369
 370        return 0;
 371}
 372
 373/* Callback for when mdio command response arrives
 374 */
 375static void octnet_mdio_resp_callback(struct octeon_device *oct,
 376                                      u32 status,
 377                                      void *buf)
 378{
 379        struct oct_mdio_cmd_context *mdio_cmd_ctx;
 380        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
 381
 382        mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
 383
 384        oct = lio_get_device(mdio_cmd_ctx->octeon_id);
 385        if (status) {
  386                dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
 387                        CVM_CAST64(status));
 388                WRITE_ONCE(mdio_cmd_ctx->cond, -1);
 389        } else {
 390                WRITE_ONCE(mdio_cmd_ctx->cond, 1);
 391        }
 392        wake_up_interruptible(&mdio_cmd_ctx->wc);
 393}
 394
  395/* This routine provides PHY access for
  396 * MDIO clause 45 devices.
  397 */
 398static int
 399octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
 400{
 401        struct octeon_device *oct_dev = lio->oct_dev;
 402        struct octeon_soft_command *sc;
 403        struct oct_mdio_cmd_resp *mdio_cmd_rsp;
 404        struct oct_mdio_cmd_context *mdio_cmd_ctx;
 405        struct oct_mdio_cmd *mdio_cmd;
 406        int retval = 0;
 407
 408        sc = (struct octeon_soft_command *)
 409                octeon_alloc_soft_command(oct_dev,
 410                                          sizeof(struct oct_mdio_cmd),
 411                                          sizeof(struct oct_mdio_cmd_resp),
 412                                          sizeof(struct oct_mdio_cmd_context));
 413
 414        if (!sc)
 415                return -ENOMEM;
 416
 417        mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
 418        mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
 419        mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
 420
 421        WRITE_ONCE(mdio_cmd_ctx->cond, 0);
 422        mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
 423        mdio_cmd->op = op;
 424        mdio_cmd->mdio_addr = loc;
 425        if (op)
 426                mdio_cmd->value1 = *value;
 427        octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
 428
 429        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
 430
 431        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
 432                                    0, 0, 0);
 433
 434        sc->wait_time = 1000;
 435        sc->callback = octnet_mdio_resp_callback;
 436        sc->callback_arg = sc;
 437
 438        init_waitqueue_head(&mdio_cmd_ctx->wc);
 439
 440        retval = octeon_send_soft_command(oct_dev, sc);
 441
 442        if (retval == IQ_SEND_FAILED) {
 443                dev_err(&oct_dev->pci_dev->dev,
 444                        "octnet_mdio45_access instruction failed status: %x\n",
 445                        retval);
 446                retval = -EBUSY;
 447        } else {
 448                /* Sleep on a wait queue till the cond flag indicates that the
 449                 * response arrived
 450                 */
 451                sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
 452                retval = mdio_cmd_rsp->status;
 453                if (retval) {
 454                        dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
 455                        retval = -EBUSY;
 456                } else {
 457                        octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
 458                                            sizeof(struct oct_mdio_cmd) / 8);
 459
 460                        if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
 461                                if (!op)
 462                                        *value = mdio_cmd_rsp->resp.value1;
 463                        } else {
 464                                retval = -EINVAL;
 465                        }
 466                }
 467        }
 468
 469        octeon_free_soft_command(oct_dev, sc);
 470
 471        return retval;
 472}
 473
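     /* ethtool set_phys_id handler: identify the port by its LED.
      * CN66XX toggles a PHY GPIO, CN68XX saves and reprograms the PHY
      * beacon registers over MDIO, and CN23XX PF lets the firmware
      * drive the LED.
      */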
 474static int lio_set_phys_id(struct net_device *netdev,
 475                           enum ethtool_phys_id_state state)
 476{
 477        struct lio *lio = GET_LIO(netdev);
 478        struct octeon_device *oct = lio->oct_dev;
 479        int value, ret;
 480
 481        switch (state) {
 482        case ETHTOOL_ID_ACTIVE:
 483                if (oct->chip_id == OCTEON_CN66XX) {
 484                        octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
 485                                           VITESSE_PHY_GPIO_DRIVEON);
 486                        return 2;
 487
 488                } else if (oct->chip_id == OCTEON_CN68XX) {
 489                        /* Save the current LED settings */
 490                        ret = octnet_mdio45_access(lio, 0,
 491                                                   LIO68XX_LED_BEACON_ADDR,
 492                                                   &lio->phy_beacon_val);
 493                        if (ret)
 494                                return ret;
 495
 496                        ret = octnet_mdio45_access(lio, 0,
 497                                                   LIO68XX_LED_CTRL_ADDR,
 498                                                   &lio->led_ctrl_val);
 499                        if (ret)
 500                                return ret;
 501
 502                        /* Configure Beacon values */
 503                        value = LIO68XX_LED_BEACON_CFGON;
 504                        ret = octnet_mdio45_access(lio, 1,
 505                                                   LIO68XX_LED_BEACON_ADDR,
 506                                                   &value);
 507                        if (ret)
 508                                return ret;
 509
 510                        value = LIO68XX_LED_CTRL_CFGON;
 511                        ret = octnet_mdio45_access(lio, 1,
 512                                                   LIO68XX_LED_CTRL_ADDR,
 513                                                   &value);
 514                        if (ret)
 515                                return ret;
 516                } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
 517                        octnet_id_active(netdev, LED_IDENTIFICATION_ON);
 518
 519                        /* returns 0 since updates are asynchronous */
 520                        return 0;
 521                } else {
 522                        return -EINVAL;
 523                }
 524                break;
 525
 526        case ETHTOOL_ID_ON:
 527                if (oct->chip_id == OCTEON_CN66XX) {
 528                        octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
 529                                           VITESSE_PHY_GPIO_HIGH);
 530
 531                } else if (oct->chip_id == OCTEON_CN68XX) {
 532                        return -EINVAL;
 533                } else {
 534                        return -EINVAL;
 535                }
 536                break;
 537
 538        case ETHTOOL_ID_OFF:
 539                if (oct->chip_id == OCTEON_CN66XX)
 540                        octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
 541                                           VITESSE_PHY_GPIO_LOW);
 542                else if (oct->chip_id == OCTEON_CN68XX)
 543                        return -EINVAL;
 544                else
 545                        return -EINVAL;
 546
 547                break;
 548
 549        case ETHTOOL_ID_INACTIVE:
 550                if (oct->chip_id == OCTEON_CN66XX) {
 551                        octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
 552                                           VITESSE_PHY_GPIO_DRIVEOFF);
 553                } else if (oct->chip_id == OCTEON_CN68XX) {
 554                        /* Restore LED settings */
 555                        ret = octnet_mdio45_access(lio, 1,
 556                                                   LIO68XX_LED_CTRL_ADDR,
 557                                                   &lio->led_ctrl_val);
 558                        if (ret)
 559                                return ret;
 560
 561                        ret = octnet_mdio45_access(lio, 1,
 562                                                   LIO68XX_LED_BEACON_ADDR,
 563                                                   &lio->phy_beacon_val);
 564                        if (ret)
 565                                return ret;
 566                } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
 567                        octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
 568
 569                        return 0;
 570                } else {
 571                        return -EINVAL;
 572                }
 573                break;
 574
 575        default:
 576                return -EINVAL;
 577        }
 578
 579        return 0;
 580}
 581
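     /* Report rx/tx descriptor ring sizes; the rx counts are reported
      * as jumbo when the MTU exceeds the default frame size.
      */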
 582static void
 583lio_ethtool_get_ringparam(struct net_device *netdev,
 584                          struct ethtool_ringparam *ering)
 585{
 586        struct lio *lio = GET_LIO(netdev);
 587        struct octeon_device *oct = lio->oct_dev;
 588        u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
 589            rx_pending = 0;
 590
 591        if (OCTEON_CN6XXX(oct)) {
 592                struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
 593
 594                tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
 595                rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
 596                rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
 597                tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
 598        } else if (OCTEON_CN23XX_PF(oct)) {
 599                struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
 600
 601                tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
 602                rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
 603                rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx);
 604                tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx);
 605        }
 606
 607        if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
 608                ering->rx_pending = 0;
 609                ering->rx_max_pending = 0;
 610                ering->rx_mini_pending = 0;
 611                ering->rx_jumbo_pending = rx_pending;
 612                ering->rx_mini_max_pending = 0;
 613                ering->rx_jumbo_max_pending = rx_max_pending;
 614        } else {
 615                ering->rx_pending = rx_pending;
 616                ering->rx_max_pending = rx_max_pending;
 617                ering->rx_mini_pending = 0;
 618                ering->rx_jumbo_pending = 0;
 619                ering->rx_mini_max_pending = 0;
 620                ering->rx_jumbo_max_pending = 0;
 621        }
 622
 623        ering->tx_pending = tx_pending;
 624        ering->tx_max_pending = tx_max_pending;
 625}
 626
 627static u32 lio_get_msglevel(struct net_device *netdev)
 628{
 629        struct lio *lio = GET_LIO(netdev);
 630
 631        return lio->msg_enable;
 632}
 633
 634static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
 635{
 636        struct lio *lio = GET_LIO(netdev);
 637
 638        if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
 639                if (msglvl & NETIF_MSG_HW)
 640                        liquidio_set_feature(netdev,
 641                                             OCTNET_CMD_VERBOSE_ENABLE, 0);
 642                else
 643                        liquidio_set_feature(netdev,
 644                                             OCTNET_CMD_VERBOSE_DISABLE, 0);
 645        }
 646
 647        lio->msg_enable = msglvl;
 648}
 649
 650static void
 651lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 652{
  653        /* Note: these drivers do not support autonegotiation,
  654         * so just report pause frame support.
  655         */
 656        struct lio *lio = GET_LIO(netdev);
 657        struct octeon_device *oct = lio->oct_dev;
 658
 659        pause->autoneg = 0;
 660
 661        pause->tx_pause = oct->tx_pause;
 662        pause->rx_pause = oct->rx_pause;
 663}
 664
 665static int
 666lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 667{
  668        /* Note: these drivers do not support autonegotiation of
  669         * link flow control.
  670         */
 671        struct lio *lio = GET_LIO(netdev);
 672        struct octeon_device *oct = lio->oct_dev;
 673        struct octnic_ctrl_pkt nctrl;
 674        struct oct_link_info *linfo = &lio->linfo;
 675
 676        int ret = 0;
 677
 678        if (oct->chip_id != OCTEON_CN23XX_PF_VID)
 679                return -EINVAL;
 680
 681        if (linfo->link.s.duplex == 0) {
 682                /*no flow control for half duplex*/
 683                if (pause->rx_pause || pause->tx_pause)
 684                        return -EINVAL;
 685        }
 686
 687        /*do not support autoneg of link flow control*/
 688        if (pause->autoneg == AUTONEG_ENABLE)
 689                return -EINVAL;
 690
 691        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 692
 693        nctrl.ncmd.u64 = 0;
 694        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
 695        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 696        nctrl.wait_time = 100;
 697        nctrl.netpndev = (u64)netdev;
 698        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 699
 700        if (pause->rx_pause) {
 701                /*enable rx pause*/
 702                nctrl.ncmd.s.param1 = 1;
 703        } else {
 704                /*disable rx pause*/
 705                nctrl.ncmd.s.param1 = 0;
 706        }
 707
 708        if (pause->tx_pause) {
 709                /*enable tx pause*/
 710                nctrl.ncmd.s.param2 = 1;
 711        } else {
 712                /*disable tx pause*/
 713                nctrl.ncmd.s.param2 = 0;
 714        }
 715
 716        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 717        if (ret < 0) {
 718                dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
 719                return -EINVAL;
 720        }
 721
 722        oct->rx_pause = pause->rx_pause;
 723        oct->tx_pause = pause->tx_pause;
 724
 725        return 0;
 726}
 727
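     /* Fill the ethtool statistics array in the same order as the
      * string tables above: netdev counters, firmware and MAC link
      * stats, then per-iq and per-oq counters.
      */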
 728static void
 729lio_get_ethtool_stats(struct net_device *netdev,
 730                      struct ethtool_stats *stats  __attribute__((unused)),
 731                      u64 *data)
 732{
 733        struct lio *lio = GET_LIO(netdev);
 734        struct octeon_device *oct_dev = lio->oct_dev;
 735        struct net_device_stats *netstats = &netdev->stats;
 736        int i = 0, j;
 737
 738        netdev->netdev_ops->ndo_get_stats(netdev);
 739        octnet_get_link_stats(netdev);
 740
 741        /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
 742        data[i++] = CVM_CAST64(netstats->rx_packets);
 743        /*sum of oct->instr_queue[iq_no]->stats.tx_done */
 744        data[i++] = CVM_CAST64(netstats->tx_packets);
 745        /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
 746        data[i++] = CVM_CAST64(netstats->rx_bytes);
 747        /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
 748        data[i++] = CVM_CAST64(netstats->tx_bytes);
 749        data[i++] = CVM_CAST64(netstats->rx_errors);
 750        data[i++] = CVM_CAST64(netstats->tx_errors);
 751        /*sum of oct->droq[oq_no]->stats->rx_dropped +
 752         *oct->droq[oq_no]->stats->dropped_nodispatch +
 753         *oct->droq[oq_no]->stats->dropped_toomany +
 754         *oct->droq[oq_no]->stats->dropped_nomem
 755         */
 756        data[i++] = CVM_CAST64(netstats->rx_dropped);
 757        /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
 758        data[i++] = CVM_CAST64(netstats->tx_dropped);
 759
 760        /*data[i++] = CVM_CAST64(stats->multicast); */
 761        /*data[i++] = CVM_CAST64(stats->collisions); */
 762
 763        /* firmware tx stats */
 764        /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
 765         *fromhost.fw_total_sent
 766         */
 767        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
 768        /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
 769        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
 770        /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
 771        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
 772        /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
 773        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
 774        /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
 775         *fw_err_drop
 776         */
 777        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
 778
 779        /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
 780        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
 781        /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
 782         *fw_tso_fwd
 783         */
 784        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
 785        /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
 786         *fw_err_tso
 787         */
 788        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
 789        /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
 790         *fw_tx_vxlan
 791         */
 792        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
 793
 794        /* mac tx statistics */
 795        /*CVMX_BGXX_CMRX_TX_STAT5 */
 796        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
 797        /*CVMX_BGXX_CMRX_TX_STAT4 */
 798        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
 799        /*CVMX_BGXX_CMRX_TX_STAT15 */
 800        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
 801        /*CVMX_BGXX_CMRX_TX_STAT14 */
 802        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
 803        /*CVMX_BGXX_CMRX_TX_STAT17 */
 804        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
 805        /*CVMX_BGXX_CMRX_TX_STAT0 */
 806        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
 807        /*CVMX_BGXX_CMRX_TX_STAT3 */
 808        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
 809        /*CVMX_BGXX_CMRX_TX_STAT2 */
 810        data[i++] =
 811                CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
 812        /*CVMX_BGXX_CMRX_TX_STAT0 */
 813        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
 814        /*CVMX_BGXX_CMRX_TX_STAT1 */
 815        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
 816        /*CVMX_BGXX_CMRX_TX_STAT16 */
 817        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
 818        /*CVMX_BGXX_CMRX_TX_STAT6 */
 819        data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
 820
 821        /* RX firmware stats */
 822        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
 823         *fw_total_rcvd
 824         */
 825        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
 826        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
 827         *fw_total_fwd
 828         */
 829        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
 830        /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
 831        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
 832        /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
 833        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
 834        /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
 835        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
 836        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
 837         *fw_err_pko
 838         */
 839        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
 840        /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
 841        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
 842        /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
 843         *fromwire.fw_err_drop
 844         */
 845        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
 846
 847        /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
 848         *fromwire.fw_rx_vxlan
 849         */
 850        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
 851        /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
 852         *fromwire.fw_rx_vxlan_err
 853         */
 854        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
 855
 856        /* LRO */
 857        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
 858         *fw_lro_pkts
 859         */
 860        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
 861        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
 862         *fw_lro_octs
 863         */
 864        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
 865        /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
 866        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
 867        /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
 868        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
 869        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
 870         *fw_lro_aborts_port
 871         */
 872        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
 873        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
 874         *fw_lro_aborts_seq
 875         */
 876        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
 877        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
 878         *fw_lro_aborts_tsval
 879         */
 880        data[i++] =
 881                CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
 882        /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
 883         *fw_lro_aborts_timer
 884         */
  885        data[i++] =
  886                CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
  887        /* intrmod: packet forward rate */
  888        /*per_core_stats[j].link_stats[i].fromwire.fwd_rate */
  889        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
 890
 891        /* mac: link-level stats */
 892        /*CVMX_BGXX_CMRX_RX_STAT0 */
 893        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
 894        /*CVMX_BGXX_CMRX_RX_STAT1 */
 895        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
 896        /*CVMX_PKI_STATX_STAT5 */
 897        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
 898        /*CVMX_PKI_STATX_STAT5 */
 899        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
 900        /*wqe->word2.err_code or wqe->word2.err_level */
 901        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
 902        /*CVMX_BGXX_CMRX_RX_STAT2 */
 903        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
 904        /*CVMX_BGXX_CMRX_RX_STAT6 */
 905        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
 906        /*CVMX_BGXX_CMRX_RX_STAT4 */
 907        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
 908        /*wqe->word2.err_code or wqe->word2.err_level */
 909        data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
 910        /*lio->link_changes*/
 911        data[i++] = CVM_CAST64(lio->link_changes);
 912
 913        /* TX  -- lio_update_stats(lio); */
 914        for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
 915                if (!(oct_dev->io_qmask.iq & (1ULL << j)))
 916                        continue;
 917                /*packets to network port*/
 918                /*# of packets tx to network */
 919                data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
 920                /*# of bytes tx to network */
 921                data[i++] =
 922                        CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
 923                /*# of packets dropped */
 924                data[i++] =
 925                        CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
 926                /*# of tx fails due to queue full */
 927                data[i++] =
 928                        CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
 929                /*XXX gather entries sent */
 930                data[i++] =
 931                        CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
 932
 933                /*instruction to firmware: data and control */
 934                /*# of instructions to the queue */
 935                data[i++] =
 936                        CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
 937                /*# of instructions processed */
 938                data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
 939                                       stats.instr_processed);
 940                /*# of instructions could not be processed */
 941                data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
 942                                       stats.instr_dropped);
 943                /*bytes sent through the queue */
 944                data[i++] =
 945                        CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
 946
 947                /*tso request*/
 948                data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
 949                /*vxlan request*/
 950                data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
 951                /*txq restart*/
 952                data[i++] =
 953                        CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
 954        }
 955
 956        /* RX */
 957        /* for (j = 0; j < oct_dev->num_oqs; j++) { */
 958        for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
 959                if (!(oct_dev->io_qmask.oq & (1ULL << j)))
 960                        continue;
 961
 962                /*packets send to TCP/IP network stack */
 963                /*# of packets to network stack */
 964                data[i++] =
 965                        CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
 966                /*# of bytes to network stack */
 967                data[i++] =
 968                        CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
 969                /*# of packets dropped */
 970                data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
 971                                       oct_dev->droq[j]->stats.dropped_toomany +
 972                                       oct_dev->droq[j]->stats.rx_dropped);
 973                data[i++] =
 974                        CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
 975                data[i++] =
 976                        CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
 977                data[i++] =
 978                        CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
 979
 980                /*control and data path*/
 981                data[i++] =
 982                        CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
 983                data[i++] =
 984                        CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
 985                data[i++] =
 986                        CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
 987
 988                data[i++] =
 989                        CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
 990                data[i++] =
 991                        CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
 992        }
 993}
 994
 995static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
 996{
 997        struct octeon_device *oct_dev = lio->oct_dev;
 998        int i;
 999
1000        switch (oct_dev->chip_id) {
1001        case OCTEON_CN23XX_PF_VID:
1002                for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
1003                        sprintf(data, "%s", oct_priv_flags_strings[i]);
1004                        data += ETH_GSTRING_LEN;
1005                }
1006                break;
1007        case OCTEON_CN68XX:
1008        case OCTEON_CN66XX:
1009                break;
1010        default:
1011                netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1012                break;
1013        }
1014}
1015
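     /* Emit the stat name strings (port stats, per-iq "tx-<n>-...",
      * per-oq "rx-<n>-...") or the private-flags strings for the
      * requested string set.
      */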
1016static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1017{
1018        struct lio *lio = GET_LIO(netdev);
1019        struct octeon_device *oct_dev = lio->oct_dev;
1020        int num_iq_stats, num_oq_stats, i, j;
1021        int num_stats;
1022
1023        switch (stringset) {
1024        case ETH_SS_STATS:
1025                num_stats = ARRAY_SIZE(oct_stats_strings);
1026                for (j = 0; j < num_stats; j++) {
1027                        sprintf(data, "%s", oct_stats_strings[j]);
1028                        data += ETH_GSTRING_LEN;
1029                }
1030
1031                num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1032                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1033                        if (!(oct_dev->io_qmask.iq & (1ULL << i)))
1034                                continue;
1035                        for (j = 0; j < num_iq_stats; j++) {
1036                                sprintf(data, "tx-%d-%s", i,
1037                                        oct_iq_stats_strings[j]);
1038                                data += ETH_GSTRING_LEN;
1039                        }
1040                }
1041
1042                num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1043                /* for (i = 0; i < oct_dev->num_oqs; i++) { */
1044                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1045                        if (!(oct_dev->io_qmask.oq & (1ULL << i)))
1046                                continue;
1047                        for (j = 0; j < num_oq_stats; j++) {
1048                                sprintf(data, "rx-%d-%s", i,
1049                                        oct_droq_stats_strings[j]);
1050                                data += ETH_GSTRING_LEN;
1051                        }
1052                }
1053                break;
1054
1055        case ETH_SS_PRIV_FLAGS:
1056                lio_get_priv_flags_strings(lio, data);
1057                break;
1058        default:
1059                netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1060                break;
1061        }
1062}
1063
1064static int lio_get_priv_flags_ss_count(struct lio *lio)
1065{
1066        struct octeon_device *oct_dev = lio->oct_dev;
1067
1068        switch (oct_dev->chip_id) {
1069        case OCTEON_CN23XX_PF_VID:
1070                return ARRAY_SIZE(oct_priv_flags_strings);
1071        case OCTEON_CN68XX:
1072        case OCTEON_CN66XX:
1073                return -EOPNOTSUPP;
1074        default:
1075                netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1076                return -EOPNOTSUPP;
1077        }
1078}
1079
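     /* Number of strings returned for the requested string set */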
1080static int lio_get_sset_count(struct net_device *netdev, int sset)
1081{
1082        struct lio *lio = GET_LIO(netdev);
1083        struct octeon_device *oct_dev = lio->oct_dev;
1084
1085        switch (sset) {
1086        case ETH_SS_STATS:
1087                return (ARRAY_SIZE(oct_stats_strings) +
1088                        ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1089                        ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1090        case ETH_SS_PRIV_FLAGS:
1091                return lio_get_priv_flags_ss_count(lio);
1092        default:
1093                return -EOPNOTSUPP;
1094        }
1095}
1096
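     /* ethtool get_coalesce handler: report the static coalescing
      * settings when adaptive moderation is off, and the adaptive
      * thresholds when it is on.
      */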
1097static int lio_get_intr_coalesce(struct net_device *netdev,
1098                                 struct ethtool_coalesce *intr_coal)
1099{
1100        struct lio *lio = GET_LIO(netdev);
1101        struct octeon_device *oct = lio->oct_dev;
1102        struct octeon_instr_queue *iq;
1103        struct oct_intrmod_cfg *intrmod_cfg;
1104
1105        intrmod_cfg = &oct->intrmod;
1106
1107        switch (oct->chip_id) {
1108        case OCTEON_CN23XX_PF_VID:
1109                if (!intrmod_cfg->rx_enable) {
1110                        intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
1111                        intr_coal->rx_max_coalesced_frames =
1112                                intrmod_cfg->rx_frames;
1113                }
1114                if (!intrmod_cfg->tx_enable)
1115                        intr_coal->tx_max_coalesced_frames =
1116                                intrmod_cfg->tx_frames;
1117                break;
1118        case OCTEON_CN68XX:
1119        case OCTEON_CN66XX: {
1120                struct octeon_cn6xxx *cn6xxx =
1121                        (struct octeon_cn6xxx *)oct->chip;
1122
1123                if (!intrmod_cfg->rx_enable) {
1124                        intr_coal->rx_coalesce_usecs =
1125                                CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
1126                        intr_coal->rx_max_coalesced_frames =
1127                                CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
1128                }
1129                iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
1130                intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
1131                break;
1132        }
1133        default:
1134                netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1135                return -EINVAL;
1136        }
1137        if (intrmod_cfg->rx_enable) {
1138                intr_coal->use_adaptive_rx_coalesce =
1139                        intrmod_cfg->rx_enable;
1140                intr_coal->rate_sample_interval =
1141                        intrmod_cfg->check_intrvl;
1142                intr_coal->pkt_rate_high =
1143                        intrmod_cfg->maxpkt_ratethr;
1144                intr_coal->pkt_rate_low =
1145                        intrmod_cfg->minpkt_ratethr;
1146                intr_coal->rx_max_coalesced_frames_high =
1147                        intrmod_cfg->rx_maxcnt_trigger;
1148                intr_coal->rx_coalesce_usecs_high =
1149                        intrmod_cfg->rx_maxtmr_trigger;
1150                intr_coal->rx_coalesce_usecs_low =
1151                        intrmod_cfg->rx_mintmr_trigger;
1152                intr_coal->rx_max_coalesced_frames_low =
1153                    intrmod_cfg->rx_mincnt_trigger;
1154        }
1155        if (OCTEON_CN23XX_PF(oct) &&
1156            (intrmod_cfg->tx_enable)) {
1157                intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
1158                intr_coal->tx_max_coalesced_frames_high =
1159                    intrmod_cfg->tx_maxcnt_trigger;
1160                intr_coal->tx_max_coalesced_frames_low =
1161                    intrmod_cfg->tx_mincnt_trigger;
1162        }
1163        return 0;
1164}
1165
1166/* Callback function for intrmod */
1167static void octnet_intrmod_callback(struct octeon_device *oct_dev,
1168                                    u32 status,
1169                                    void *ptr)
1170{
1171        struct oct_intrmod_cmd *cmd = ptr;
1172        struct octeon_soft_command *sc = cmd->sc;
1173
1174        oct_dev = cmd->oct_dev;
1175
1176        if (status)
1177                dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
1178                        CVM_CAST64(status));
1179        else
1180                dev_info(&oct_dev->pci_dev->dev,
1181                         "Rx-Adaptive Interrupt moderation enabled:%llx\n",
1182                         oct_dev->intrmod.rx_enable);
1183
1184        octeon_free_soft_command(oct_dev, sc);
1185}
1186
1187/*  Configure interrupt moderation parameters */
1188static int octnet_set_intrmod_cfg(struct lio *lio,
1189                                  struct oct_intrmod_cfg *intr_cfg)
1190{
1191        struct octeon_soft_command *sc;
1192        struct oct_intrmod_cmd *cmd;
1193        struct oct_intrmod_cfg *cfg;
1194        int retval;
1195        struct octeon_device *oct_dev = lio->oct_dev;
1196
1197        /* Alloc soft command */
1198        sc = (struct octeon_soft_command *)
1199                octeon_alloc_soft_command(oct_dev,
1200                                          sizeof(struct oct_intrmod_cfg),
1201                                          0,
1202                                          sizeof(struct oct_intrmod_cmd));
1203
1204        if (!sc)
1205                return -ENOMEM;
1206
1207        cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
1208        cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
1209
1210        memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
1211        octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
1212        cmd->sc = sc;
1213        cmd->cfg = cfg;
1214        cmd->oct_dev = oct_dev;
1215
1216        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1217
1218        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1219                                    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
1220
1221        sc->callback = octnet_intrmod_callback;
1222        sc->callback_arg = cmd;
1223        sc->wait_time = 1000;
1224
1225        retval = octeon_send_soft_command(oct_dev, sc);
1226        if (retval == IQ_SEND_FAILED) {
1227                octeon_free_soft_command(oct_dev, sc);
1228                return -EINVAL;
1229        }
1230
1231        return 0;
1232}
1233
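     /* Completion callback for OPCODE_NIC_PORT_STATS: byte-swap the
      * response, copy the firmware's link statistics into
      * oct_dev->link_stats, and complete the waiting request.
      */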
1234static void
1235octnet_nic_stats_callback(struct octeon_device *oct_dev,
1236                          u32 status, void *ptr)
1237{
1238        struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1239        struct oct_nic_stats_resp *resp =
1240            (struct oct_nic_stats_resp *)sc->virtrptr;
1241        struct oct_nic_stats_ctrl *ctrl =
1242            (struct oct_nic_stats_ctrl *)sc->ctxptr;
1243        struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1244        struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1245
1246        struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1247        struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1248
1249        if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
1250                octeon_swap_8B_data((u64 *)&resp->stats,
1251                                    (sizeof(struct oct_link_stats)) >> 3);
1252
1253                /* RX link-level stats */
1254                rstats->total_rcvd = rsp_rstats->total_rcvd;
1255                rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1256                rstats->total_bcst = rsp_rstats->total_bcst;
1257                rstats->total_mcst = rsp_rstats->total_mcst;
1258                rstats->runts      = rsp_rstats->runts;
1259                rstats->ctl_rcvd   = rsp_rstats->ctl_rcvd;
1260                /* Accounts for over/under-run of buffers */
1261                rstats->fifo_err  = rsp_rstats->fifo_err;
1262                rstats->dmac_drop = rsp_rstats->dmac_drop;
1263                rstats->fcs_err   = rsp_rstats->fcs_err;
1264                rstats->jabber_err = rsp_rstats->jabber_err;
1265                rstats->l2_err    = rsp_rstats->l2_err;
1266                rstats->frame_err = rsp_rstats->frame_err;
1267
1268                /* RX firmware stats */
1269                rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1270                rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1271                rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1272                rstats->fw_err_link = rsp_rstats->fw_err_link;
1273                rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1274                rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1275                rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1276
1277                /* Number of packets that are LROed      */
1278                rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1279                /* Number of octets that are LROed       */
1280                rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1281                /* Number of LRO packets formed          */
1282                rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1283                /* Number of times lRO of packet aborted */
1284                rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1285                rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1286                rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1287                rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1288                rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1289                /* intrmod: packet forward rate */
1290                rstats->fwd_rate = rsp_rstats->fwd_rate;
1291
1292                /* TX link-level stats */
1293                tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1294                tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1295                tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1296                tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1297                tstats->ctl_sent = rsp_tstats->ctl_sent;
1298                /* Packets sent after one collision*/
1299                tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1300                /* Packets sent after multiple collision*/
1301                tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1302                /* Packets not sent due to max collisions */
1303                tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1304                /* Packets not sent due to max deferrals */
1305                tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1306                /* Accounts for over/under-run of buffers */
1307                tstats->fifo_err = rsp_tstats->fifo_err;
1308                tstats->runts = rsp_tstats->runts;
1309                /* Total number of collisions detected */
1310                tstats->total_collisions = rsp_tstats->total_collisions;
1311
1312                /* firmware stats */
1313                tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1314                tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1315                tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1316                tstats->fw_err_link = rsp_tstats->fw_err_link;
1317                tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1318                tstats->fw_tso = rsp_tstats->fw_tso;
1319                tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1320                tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1321                tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1322
1323                resp->status = 1;
1324        } else {
1325                resp->status = -1;
1326        }
1327        complete(&ctrl->complete);
1328}
1329
 1330/*  Fetch link-level and firmware statistics from the NIC */
1331static int octnet_get_link_stats(struct net_device *netdev)
1332{
1333        struct lio *lio = GET_LIO(netdev);
1334        struct octeon_device *oct_dev = lio->oct_dev;
1335
1336        struct octeon_soft_command *sc;
1337        struct oct_nic_stats_ctrl *ctrl;
1338        struct oct_nic_stats_resp *resp;
1339
1340        int retval;
1341
1342        /* Alloc soft command */
1343        sc = (struct octeon_soft_command *)
1344                octeon_alloc_soft_command(oct_dev,
1345                                          0,
1346                                          sizeof(struct oct_nic_stats_resp),
1347                                          sizeof(struct octnic_ctrl_pkt));
1348
1349        if (!sc)
1350                return -ENOMEM;
1351
1352        resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1353        memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1354
1355        ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1356        memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1357        ctrl->netdev = netdev;
1358        init_completion(&ctrl->complete);
1359
1360        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1361
1362        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1363                                    OPCODE_NIC_PORT_STATS, 0, 0, 0);
1364
1365        sc->callback = octnet_nic_stats_callback;
1366        sc->callback_arg = sc;
1367        sc->wait_time = 500;    /* in milliseconds */
1368
1369        retval = octeon_send_soft_command(oct_dev, sc);
1370        if (retval == IQ_SEND_FAILED) {
1371                octeon_free_soft_command(oct_dev, sc);
1372                return -EINVAL;
1373        }
1374
1375        wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1376
1377        if (resp->status != 1) {
1378                octeon_free_soft_command(oct_dev, sc);
1379
1380                return -EINVAL;
1381        }
1382
1383        octeon_free_soft_command(oct_dev, sc);
1384
1385        return 0;
1386}
1387
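/* Copies the adaptive-coalescing knobs from the ethtool request into the
 * device's intrmod config (sample interval, packet-rate thresholds and the
 * per-direction count/time triggers), substituting the LIO_INTRMOD_*
 * defaults for any field left at zero, then pushes the whole structure to
 * firmware via octnet_set_intrmod_cfg().
 */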
1388/* Enable/disable adaptive (auto) interrupt moderation */
1389static int oct_cfg_adaptive_intr(struct lio *lio,
1390                                 struct ethtool_coalesce *intr_coal)
1391{
1392        int ret = 0;
1393        struct octeon_device *oct = lio->oct_dev;
1394        struct oct_intrmod_cfg *intrmod_cfg;
1395
1396        intrmod_cfg = &oct->intrmod;
1397
1398        if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
1399                if (intr_coal->rate_sample_interval)
1400                        intrmod_cfg->check_intrvl =
1401                                intr_coal->rate_sample_interval;
1402                else
1403                        intrmod_cfg->check_intrvl =
1404                                LIO_INTRMOD_CHECK_INTERVAL;
1405
1406                if (intr_coal->pkt_rate_high)
1407                        intrmod_cfg->maxpkt_ratethr =
1408                                intr_coal->pkt_rate_high;
1409                else
1410                        intrmod_cfg->maxpkt_ratethr =
1411                                LIO_INTRMOD_MAXPKT_RATETHR;
1412
1413                if (intr_coal->pkt_rate_low)
1414                        intrmod_cfg->minpkt_ratethr =
1415                                intr_coal->pkt_rate_low;
1416                else
1417                        intrmod_cfg->minpkt_ratethr =
1418                                LIO_INTRMOD_MINPKT_RATETHR;
1419        }
1420        if (oct->intrmod.rx_enable) {
1421                if (intr_coal->rx_max_coalesced_frames_high)
1422                        intrmod_cfg->rx_maxcnt_trigger =
1423                                intr_coal->rx_max_coalesced_frames_high;
1424                else
1425                        intrmod_cfg->rx_maxcnt_trigger =
1426                                LIO_INTRMOD_RXMAXCNT_TRIGGER;
1427
1428                if (intr_coal->rx_coalesce_usecs_high)
1429                        intrmod_cfg->rx_maxtmr_trigger =
1430                                intr_coal->rx_coalesce_usecs_high;
1431                else
1432                        intrmod_cfg->rx_maxtmr_trigger =
1433                                LIO_INTRMOD_RXMAXTMR_TRIGGER;
1434
1435                if (intr_coal->rx_coalesce_usecs_low)
1436                        intrmod_cfg->rx_mintmr_trigger =
1437                                intr_coal->rx_coalesce_usecs_low;
1438                else
1439                        intrmod_cfg->rx_mintmr_trigger =
1440                                LIO_INTRMOD_RXMINTMR_TRIGGER;
1441
1442                if (intr_coal->rx_max_coalesced_frames_low)
1443                        intrmod_cfg->rx_mincnt_trigger =
1444                                intr_coal->rx_max_coalesced_frames_low;
1445                else
1446                        intrmod_cfg->rx_mincnt_trigger =
1447                                LIO_INTRMOD_RXMINCNT_TRIGGER;
1448        }
1449        if (oct->intrmod.tx_enable) {
1450                if (intr_coal->tx_max_coalesced_frames_high)
1451                        intrmod_cfg->tx_maxcnt_trigger =
1452                                intr_coal->tx_max_coalesced_frames_high;
1453                else
1454                        intrmod_cfg->tx_maxcnt_trigger =
1455                                LIO_INTRMOD_TXMAXCNT_TRIGGER;
1456                if (intr_coal->tx_max_coalesced_frames_low)
1457                        intrmod_cfg->tx_mincnt_trigger =
1458                                intr_coal->tx_max_coalesced_frames_low;
1459                else
1460                        intrmod_cfg->tx_mincnt_trigger =
1461                                LIO_INTRMOD_TXMINCNT_TRIGGER;
1462        }
1463
1464        ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
1465
1466        return ret;
1467}
1468
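/* Packet-count based RX coalescing.  On CN66XX/CN68XX the threshold goes
 * into the single SLI_OQ_INT_LEVEL_PKTS CSR; on the CN23XX PF it is
 * written into each ring's PKT_INT_LEVELS CSR, with the read-modify-write
 * below preserving bits 53:32, which appear to hold the time threshold
 * (interpretation inferred from the 0x3fffff00000000 mask used here).
 */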
1469static int
1470oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
1471{
1472        struct octeon_device *oct = lio->oct_dev;
1473        u32 rx_max_coalesced_frames;
1474
1475        /* Config Cnt based interrupt values */
1476        switch (oct->chip_id) {
1477        case OCTEON_CN68XX:
1478        case OCTEON_CN66XX: {
1479                struct octeon_cn6xxx *cn6xxx =
1480                        (struct octeon_cn6xxx *)oct->chip;
1481
1482                if (!intr_coal->rx_max_coalesced_frames)
1483                        rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
1484                else
1485                        rx_max_coalesced_frames =
1486                                intr_coal->rx_max_coalesced_frames;
1487                octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
1488                                 rx_max_coalesced_frames);
1489                CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
1490                break;
1491        }
1492        case OCTEON_CN23XX_PF_VID: {
1493                int q_no;
1494
1495                if (!intr_coal->rx_max_coalesced_frames)
1496                        rx_max_coalesced_frames = oct->intrmod.rx_frames;
1497                else
1498                        rx_max_coalesced_frames =
1499                            intr_coal->rx_max_coalesced_frames;
1500                for (q_no = oct->sriov_info.pf_srn;
1501                     q_no < oct->sriov_info.pf_srn + oct->num_oqs; q_no++) {
1502                        octeon_write_csr64(
1503                            oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
1504                            (octeon_read_csr64(
1505                                 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
1506                             (0x3fffff00000000UL)) |
1507                                rx_max_coalesced_frames);
1508                        /* consider setting the resend bit */
1509                }
1510                oct->intrmod.rx_frames = rx_max_coalesced_frames;
1511                break;
1512        }
1513        default:
1514                return -EINVAL;
1515        }
1516        return 0;
1517}
1518
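/* Time based RX coalescing.  The requested microseconds are converted to
 * device ticks (lio_cn6xxx_get_oq_ticks() / cn23xx_pf_get_oq_ticks()) and
 * written either to SLI_OQ_INT_LEVEL_TIME (CN66XX/CN68XX) or into the
 * upper 32 bits of each ring's PKT_INT_LEVELS CSR alongside the previously
 * programmed frame count (CN23XX PF).
 */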
1519static int oct_cfg_rx_intrtime(struct lio *lio,
1520                               struct ethtool_coalesce *intr_coal)
1521{
1522        struct octeon_device *oct = lio->oct_dev;
1523        u32 time_threshold, rx_coalesce_usecs;
1524
1525        /* Config Time based interrupt values */
1526        switch (oct->chip_id) {
1527        case OCTEON_CN68XX:
1528        case OCTEON_CN66XX: {
1529                struct octeon_cn6xxx *cn6xxx =
1530                        (struct octeon_cn6xxx *)oct->chip;
1531                if (!intr_coal->rx_coalesce_usecs)
1532                        rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
1533                else
1534                        rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1535
1536                time_threshold = lio_cn6xxx_get_oq_ticks(oct,
1537                                                         rx_coalesce_usecs);
1538                octeon_write_csr(oct,
1539                                 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
1540                                 time_threshold);
1541
1542                CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
1543                break;
1544        }
1545        case OCTEON_CN23XX_PF_VID: {
1546                u64 time_threshold;
1547                int q_no;
1548
1549                if (!intr_coal->rx_coalesce_usecs)
1550                        rx_coalesce_usecs = oct->intrmod.rx_usecs;
1551                else
1552                        rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1553                time_threshold =
1554                    cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
1555                for (q_no = oct->sriov_info.pf_srn;
1556                     q_no < oct->sriov_info.pf_srn + oct->num_oqs; q_no++) {
1557                        octeon_write_csr64(oct,
1558                                           CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
1559                                           (oct->intrmod.rx_frames |
1560                                            (time_threshold << 32)));
1561                        /* consider writing to the resend bit here */
1562                }
1563                oct->intrmod.rx_usecs = rx_coalesce_usecs;
1564                break;
1565        }
1566        default:
1567                return -EINVAL;
1568        }
1569
1570        return 0;
1571}
1572
1573static int
1574oct_cfg_tx_intrcnt(struct lio *lio,
1575                   struct ethtool_coalesce *intr_coal)
1576{
1577        struct octeon_device *oct = lio->oct_dev;
1578        u32 iq_intr_pkt;
1579        void __iomem *inst_cnt_reg;
1580        u64 val;
1581
1582        /* Config Cnt based interrupt values */
1583        switch (oct->chip_id) {
1584        case OCTEON_CN68XX:
1585        case OCTEON_CN66XX:
1586                break;
1587        case OCTEON_CN23XX_PF_VID: {
1588                int q_no;
1589
1590                if (!intr_coal->tx_max_coalesced_frames)
1591                        iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
1592                                      CN23XX_PKT_IN_DONE_WMARK_MASK;
1593                else
1594                        iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
1595                                      CN23XX_PKT_IN_DONE_WMARK_MASK;
1596                for (q_no = 0; q_no < oct->num_iqs; q_no++) {
1597                        inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
1598                        val = readq(inst_cnt_reg);
1599                        /* clear wmark and count; don't write the count back */
1600                        val = (val & 0xFFFF000000000000ULL) |
1601                              ((u64)iq_intr_pkt
1602                               << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
1603                        writeq(val, inst_cnt_reg);
1604                        /* consider setting the resend bit */
1605                }
1606                oct->intrmod.tx_frames = iq_intr_pkt;
1607                break;
1608        }
1609        default:
1610                return -EINVAL;
1611        }
1612        return 0;
1613}
1614
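/* ethtool .set_coalesce handler.  On CN66XX/CN68XX, tx-frames is applied
 * as the instruction-queue fill threshold and must lie within
 * [CN6XXX_DB_MIN, CN6XXX_DB_MAX].  The adaptive RX/TX flags are latched
 * into oct->intrmod and sent to firmware; the static time/count thresholds
 * are only programmed for a direction whose adaptive mode is off.  Typical
 * invocations from user space (the interface name is only an example):
 *
 *   ethtool -C eth0 adaptive-rx on adaptive-tx on
 *   ethtool -C eth0 adaptive-rx off rx-usecs 64 rx-frames 64 tx-frames 64
 */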
1615static int lio_set_intr_coalesce(struct net_device *netdev,
1616                                 struct ethtool_coalesce *intr_coal)
1617{
1618        struct lio *lio = GET_LIO(netdev);
1619        int ret;
1620        struct octeon_device *oct = lio->oct_dev;
1621        u32 j, q_no;
1622        int db_max, db_min;
1623
1624        switch (oct->chip_id) {
1625        case OCTEON_CN68XX:
1626        case OCTEON_CN66XX:
1627                db_min = CN6XXX_DB_MIN;
1628                db_max = CN6XXX_DB_MAX;
1629                if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
1630                    (intr_coal->tx_max_coalesced_frames <= db_max)) {
1631                        for (j = 0; j < lio->linfo.num_txpciq; j++) {
1632                                q_no = lio->linfo.txpciq[j].s.q_no;
1633                                oct->instr_queue[q_no]->fill_threshold =
1634                                        intr_coal->tx_max_coalesced_frames;
1635                        }
1636                } else {
1637                        dev_err(&oct->pci_dev->dev,
1638                                "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
1639                                intr_coal->tx_max_coalesced_frames, db_min,
1640                                db_max);
1641                        return -EINVAL;
1642                }
1643                break;
1644        case OCTEON_CN23XX_PF_VID:
1645                break;
1646        default:
1647                return -EINVAL;
1648        }
1649
1650        oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
1651        oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
1652
1653        ret = oct_cfg_adaptive_intr(lio, intr_coal);
1654
1655        if (!intr_coal->use_adaptive_rx_coalesce) {
1656                ret = oct_cfg_rx_intrtime(lio, intr_coal);
1657                if (ret)
1658                        goto ret_intrmod;
1659
1660                ret = oct_cfg_rx_intrcnt(lio, intr_coal);
1661                if (ret)
1662                        goto ret_intrmod;
1663        }
1664        if (!intr_coal->use_adaptive_tx_coalesce) {
1665                ret = oct_cfg_tx_intrcnt(lio, intr_coal);
1666                if (ret)
1667                        goto ret_intrmod;
1668        }
1669
1670        return 0;
1671ret_intrmod:
1672        return ret;
1673}
1674
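/* ethtool .get_ts_info handler: software timestamping is always reported;
 * the hardware timestamping capabilities, tx_types and rx_filters are only
 * advertised when the driver is built with PTP_HARDWARE_TIMESTAMPING, and
 * the PHC index is filled in whenever a PTP clock is registered for this
 * interface.
 */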
1675static int lio_get_ts_info(struct net_device *netdev,
1676                           struct ethtool_ts_info *info)
1677{
1678        struct lio *lio = GET_LIO(netdev);
1679
1680        info->so_timestamping =
1681#ifdef PTP_HARDWARE_TIMESTAMPING
1682                SOF_TIMESTAMPING_TX_HARDWARE |
1683                SOF_TIMESTAMPING_RX_HARDWARE |
1684                SOF_TIMESTAMPING_RAW_HARDWARE |
1685                SOF_TIMESTAMPING_TX_SOFTWARE |
1686#endif
1687                SOF_TIMESTAMPING_RX_SOFTWARE |
1688                SOF_TIMESTAMPING_SOFTWARE;
1689
1690        if (lio->ptp_clock)
1691                info->phc_index = ptp_clock_index(lio->ptp_clock);
1692        else
1693                info->phc_index = -1;
1694
1695#ifdef PTP_HARDWARE_TIMESTAMPING
1696        info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1697
1698        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1699                           (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1700                           (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1701                           (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
1702#endif
1703
1704        return 0;
1705}
1706
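/* ethtool .set_settings handler: accepts either autoneg on (the advertised
 * mask is passed through in param1) or a forced 10/100 half/full setting,
 * rejects interfaces that run at a fixed rate (XAUI, RXAUI, XFI), and
 * forwards the request to firmware as an OCTNET_CMD_SET_SETTINGS control
 * packet.
 */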
1707static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
1708{
1709        struct lio *lio = GET_LIO(netdev);
1710        struct octeon_device *oct = lio->oct_dev;
1711        struct oct_link_info *linfo;
1712        struct octnic_ctrl_pkt nctrl;
1713        int ret = 0;
1714
1715        /* get the link info */
1716        linfo = &lio->linfo;
1717
1718        if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
1719                return -EINVAL;
1720
1721        if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
1722                                                  ecmd->speed != SPEED_10) ||
1723                                                 (ecmd->duplex != DUPLEX_HALF &&
1724                                                  ecmd->duplex != DUPLEX_FULL)))
1725                return -EINVAL;
1726
1727        /* Ethtool support is not provided for XAUI, RXAUI, and XFI interfaces
1728         * as they operate at fixed speed and duplex settings.
1729         */
1730        if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
1731            linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
1732            linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
1733                dev_info(&oct->pci_dev->dev,
1734                         "Autonegotiation, duplex and speed settings cannot be modified.\n");
1735                return -EINVAL;
1736        }
1737
1738        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1739
1740        nctrl.ncmd.u64 = 0;
1741        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
1742        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1743        nctrl.wait_time = 1000;
1744        nctrl.netpndev = (u64)netdev;
1745        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1746
1747        /* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
1748         * to SE core application using ncmd.s.more & ncmd.s.param
1749         */
1750        if (ecmd->autoneg == AUTONEG_ENABLE) {
1751                /* Autoneg ON */
1752                nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
1753                                     OCTNIC_NCMD_AUTONEG_ON;
1754                nctrl.ncmd.s.param1 = ecmd->advertising;
1755        } else {
1756                /* Autoneg OFF */
1757                nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
1758
1759                nctrl.ncmd.s.param2 = ecmd->duplex;
1760
1761                nctrl.ncmd.s.param1 = ecmd->speed;
1762        }
1763
1764        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1765        if (ret < 0) {
1766                dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
1767                return -1;
1768        }
1769
1770        return 0;
1771}
1772
1773static int lio_nway_reset(struct net_device *netdev)
1774{
1775        if (netif_running(netdev)) {
1776                struct ethtool_cmd ecmd;
1777
1778                memset(&ecmd, 0, sizeof(struct ethtool_cmd));
1779                ecmd.autoneg = 0;
1780                ecmd.speed = 0;
1781                ecmd.duplex = 0;
1782                lio_set_settings(netdev, &ecmd);
1783        }
1784        return 0;
1785}
1786
1787/* Return register dump len. */
1788static int lio_get_regs_len(struct net_device *dev)
1789{
1790        struct lio *lio = GET_LIO(dev);
1791        struct octeon_device *oct = lio->oct_dev;
1792
1793        switch (oct->chip_id) {
1794        case OCTEON_CN23XX_PF_VID:
1795                return OCT_ETHTOOL_REGDUMP_LEN_23XX;
1796        default:
1797                return OCT_ETHTOOL_REGDUMP_LEN;
1798        }
1799}
1800
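/* Format the CN23XX SLI CSRs (MAC/PF interrupt, packet I/O and per-queue
 * registers) into the ethtool register-dump buffer sized by
 * OCT_ETHTOOL_REGDUMP_LEN_23XX; returns the number of characters written.
 */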
1801static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
1802{
1803        u32 reg;
1804        u8 pf_num = oct->pf_num;
1805        int len = 0;
1806        int i;
1807
1808        /* PCI  Window Registers */
1809
1810        len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
1811
1812        /*0x29030 or 0x29040*/
1813        reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
1814        len += sprintf(s + len,
1815                       "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
1816                       reg, oct->pcie_port, oct->pf_num,
1817                       (u64)octeon_read_csr64(oct, reg));
1818
1819        /*0x27080 or 0x27090*/
1820        reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
1821        len +=
1822            sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
1823                    reg, oct->pcie_port, oct->pf_num,
1824                    (u64)octeon_read_csr64(oct, reg));
1825
1826        /*0x27000 or 0x27010*/
1827        reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
1828        len +=
1829            sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
1830                    reg, oct->pcie_port, oct->pf_num,
1831                    (u64)octeon_read_csr64(oct, reg));
1832
1833        /*0x29120*/
1834        reg = 0x29120;
1835        len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
1836                       (u64)octeon_read_csr64(oct, reg));
1837
1838        /*0x27300*/
1839        reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
1840              (oct->pf_num) * CN23XX_PF_INT_OFFSET;
1841        len += sprintf(
1842            s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
1843            oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
1844
1845        /*0x27200*/
1846        reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
1847              (oct->pf_num) * CN23XX_PF_INT_OFFSET;
1848        len += sprintf(s + len,
1849                       "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
1850                       reg, oct->pcie_port, oct->pf_num,
1851                       (u64)octeon_read_csr64(oct, reg));
1852
1853        /*0x29130*/
1854        reg = CN23XX_SLI_PKT_CNT_INT;
1855        len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
1856                       (u64)octeon_read_csr64(oct, reg));
1857
1858        /*0x29140*/
1859        reg = CN23XX_SLI_PKT_TIME_INT;
1860        len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
1861                       (u64)octeon_read_csr64(oct, reg));
1862
1863        /*0x29160*/
1864        reg = 0x29160;
1865        len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
1866                       (u64)octeon_read_csr64(oct, reg));
1867
1868        /*0x29180*/
1869        reg = CN23XX_SLI_OQ_WMARK;
1870        len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
1871                       reg, (u64)octeon_read_csr64(oct, reg));
1872
1873        /*0x291E0*/
1874        reg = CN23XX_SLI_PKT_IOQ_RING_RST;
1875        len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
1876                       (u64)octeon_read_csr64(oct, reg));
1877
1878        /*0x29210*/
1879        reg = CN23XX_SLI_GBL_CONTROL;
1880        len += sprintf(s + len,
1881                       "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
1882                       (u64)octeon_read_csr64(oct, reg));
1883
1884        /*0x29220*/
1885        reg = 0x29220;
1886        len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
1887                       reg, (u64)octeon_read_csr64(oct, reg));
1888
1889        /*PF only*/
1890        if (pf_num == 0) {
1891                /*0x29260*/
1892                reg = CN23XX_SLI_OUT_BP_EN_W1S;
1893                len += sprintf(s + len,
1894                               "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S):  %016llx\n",
1895                               reg, (u64)octeon_read_csr64(oct, reg));
1896        } else if (pf_num == 1) {
1897                /*0x29270*/
1898                reg = CN23XX_SLI_OUT_BP_EN2_W1S;
1899                len += sprintf(s + len,
1900                               "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
1901                               reg, (u64)octeon_read_csr64(oct, reg));
1902        }
1903
1904        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1905                reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
1906                len +=
1907                    sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
1908                            reg, i, (u64)octeon_read_csr64(oct, reg));
1909        }
1910
1911        /*0x10040*/
1912        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
1913                reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
1914                len += sprintf(s + len,
1915                               "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
1916                               reg, i, (u64)octeon_read_csr64(oct, reg));
1917        }
1918
1919        /*0x10080*/
1920        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1921                reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
1922                len += sprintf(s + len,
1923                               "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
1924                               reg, i, (u64)octeon_read_csr64(oct, reg));
1925        }
1926
1927        /*0x10090*/
1928        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1929                reg = CN23XX_SLI_OQ_SIZE(i);
1930                len += sprintf(
1931                    s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
1932                    reg, i, (u64)octeon_read_csr64(oct, reg));
1933        }
1934
1935        /*0x10050*/
1936        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1937                reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
1938                len += sprintf(
1939                        s + len,
1940                        "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
1941                        reg, i, (u64)octeon_read_csr64(oct, reg));
1942        }
1943
1944        /*0x10070*/
1945        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1946                reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
1947                len += sprintf(s + len,
1948                               "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
1949                               reg, i, (u64)octeon_read_csr64(oct, reg));
1950        }
1951
1952        /*0x100a0*/
1953        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1954                reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
1955                len += sprintf(s + len,
1956                               "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
1957                               reg, i, (u64)octeon_read_csr64(oct, reg));
1958        }
1959
1960        /*0x100b0*/
1961        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1962                reg = CN23XX_SLI_OQ_PKTS_SENT(i);
1963                len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
1964                               reg, i, (u64)octeon_read_csr64(oct, reg));
1965        }
1966
1967        /*0x100c0*/
1968        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1969                reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
1970                len += sprintf(s + len,
1971                               "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
1972                               reg, i, (u64)octeon_read_csr64(oct, reg));
1973        }
1974
1975        /*0x10000*/
1976        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
1977                reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
1978                len += sprintf(s + len,
1979                               "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
1980                               reg, i, (u64)octeon_read_csr64(oct, reg));
1981        }
1982
1983        /*0x10010*/
1984        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
1985                reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
1986                len += sprintf(s + len,
1987                               "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
1988                               reg, i, (u64)octeon_read_csr64(oct, reg));
1989        }
1990
1991        /*0x10020*/
1992        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
1993                reg = CN23XX_SLI_IQ_DOORBELL(i);
1994                len += sprintf(s + len,
1995                               "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
1996                               reg, i, (u64)octeon_read_csr64(oct, reg));
1997        }
1998
1999        /*0x10030*/
2000        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2001                reg = CN23XX_SLI_IQ_SIZE(i);
2002                len += sprintf(s + len,
2003                               "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2004                               reg, i, (u64)octeon_read_csr64(oct, reg));
2005        }
2017
2018        return len;
2019}
2020
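/* CN66XX/CN68XX equivalent of the above: dump the PCI window, interrupt,
 * output/input queue, DMA and BAR1 index registers into the regdump
 * buffer and return the number of characters written.
 */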
2021static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2022{
2023        u32 reg;
2024        int i, len = 0;
2025
2026        /* PCI  Window Registers */
2027
2028        len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2029        reg = CN6XXX_WIN_WR_ADDR_LO;
2030        len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2031                       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2032        reg = CN6XXX_WIN_WR_ADDR_HI;
2033        len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2034                       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2035        reg = CN6XXX_WIN_RD_ADDR_LO;
2036        len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2037                       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2038        reg = CN6XXX_WIN_RD_ADDR_HI;
2039        len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2040                       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2041        reg = CN6XXX_WIN_WR_DATA_LO;
2042        len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2043                       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2044        reg = CN6XXX_WIN_WR_DATA_HI;
2045        len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2046                       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2047        len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2048                       CN6XXX_WIN_WR_MASK_REG,
2049                       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2050
2051        /* PCI  Interrupt Register */
2052        len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2053                       CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
2054                                                CN6XXX_SLI_INT_ENB64_PORT0));
2055        len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2056                       CN6XXX_SLI_INT_ENB64_PORT1,
2057                       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2058        len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2059                       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2060
2061        /* PCI  Output queue registers */
2062        for (i = 0; i < oct->num_oqs; i++) {
2063                reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2064                len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2065                               reg, i, octeon_read_csr(oct, reg));
2066                reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2067                len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2068                               reg, i, octeon_read_csr(oct, reg));
2069        }
2070        reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2071        len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2072                       reg, octeon_read_csr(oct, reg));
2073        reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2074        len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2075                       reg, octeon_read_csr(oct, reg));
2076
2077        /* PCI  Input queue registers */
2078        for (i = 0; i <= 3; i++) {
2079                u32 reg;
2080
2081                reg = CN6XXX_SLI_IQ_DOORBELL(i);
2082                len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2083                               reg, i, octeon_read_csr(oct, reg));
2084                reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2085                len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2086                               reg, i, octeon_read_csr(oct, reg));
2087        }
2088
2089        /* PCI  DMA registers */
2090
2091        len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2092                       CN6XXX_DMA_CNT(0),
2093                       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2094        reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2095        len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2096                       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2097        reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2098        len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2099                       CN6XXX_DMA_TIME_INT_LEVEL(0),
2100                       octeon_read_csr(oct, reg));
2101
2102        len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2103                       CN6XXX_DMA_CNT(1),
2104                       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2105        reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2106        len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2107                       CN6XXX_DMA_PKT_INT_LEVEL(1),
2108                       octeon_read_csr(oct, reg));
2109        reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
2110        len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2111                       CN6XXX_DMA_TIME_INT_LEVEL(1),
2112                       octeon_read_csr(oct, reg));
2113
2114        /* PCI  Index registers */
2115
2116        len += sprintf(s + len, "\n");
2117
2118        for (i = 0; i < 16; i++) {
2119                reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2120                len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2121                               CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2122        }
2123
2124        return len;
2125}
2126
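/* Append PCI config-space dwords 0-13 and 30-34 of the CN6XXX device to
 * the register dump; returns the number of characters written.
 */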
2127static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2128{
2129        u32 val;
2130        int i, len = 0;
2131
2132        /* PCI CONFIG Registers */
2133
2134        len += sprintf(s + len,
2135                       "\n\t Octeon Config space Registers\n\n");
2136
2137        for (i = 0; i <= 13; i++) {
2138                pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2139                len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2140                               (i * 4), i, val);
2141        }
2142
2143        for (i = 30; i <= 34; i++) {
2144                pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2145                len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2146                               (i * 4), i, val);
2147        }
2148
2149        return len;
2150}
2151
2152/* Return the register dump to the user app. */
2153static void lio_get_regs(struct net_device *dev,
2154                         struct ethtool_regs *regs, void *regbuf)
2155{
2156        struct lio *lio = GET_LIO(dev);
2157        int len = 0;
2158        struct octeon_device *oct = lio->oct_dev;
2159
2160        regs->version = OCT_ETHTOOL_REGSVER;
2161
2162        switch (oct->chip_id) {
2163        case OCTEON_CN23XX_PF_VID:
2164                memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
2165                len += cn23xx_read_csr_reg(regbuf + len, oct);
2166                break;
2167        case OCTEON_CN68XX:
2168        case OCTEON_CN66XX:
2169                memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
2170                len += cn6xxx_read_csr_reg(regbuf + len, oct);
2171                len += cn6xxx_read_config_reg(regbuf + len, oct);
2172                break;
2173        default:
2174                dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
2175                        __func__, oct->chip_id);
2176        }
2177}
2178
2179static u32 lio_get_priv_flags(struct net_device *netdev)
2180{
2181        struct lio *lio = GET_LIO(netdev);
2182
2183        return lio->oct_dev->priv_flags;
2184}
2185
2186static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
2187{
2188        struct lio *lio = GET_LIO(netdev);
2189        bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
2190
2191        lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
2192                          intr_by_tx_bytes);
2193        return 0;
2194}
2195
2196static const struct ethtool_ops lio_ethtool_ops = {
2197        .get_settings           = lio_get_settings,
2198        .get_link               = ethtool_op_get_link,
2199        .get_drvinfo            = lio_get_drvinfo,
2200        .get_ringparam          = lio_ethtool_get_ringparam,
2201        .get_channels           = lio_ethtool_get_channels,
2202        .set_phys_id            = lio_set_phys_id,
2203        .get_eeprom_len         = lio_get_eeprom_len,
2204        .get_eeprom             = lio_get_eeprom,
2205        .get_strings            = lio_get_strings,
2206        .get_ethtool_stats      = lio_get_ethtool_stats,
2207        .get_pauseparam         = lio_get_pauseparam,
2208        .set_pauseparam         = lio_set_pauseparam,
2209        .get_regs_len           = lio_get_regs_len,
2210        .get_regs               = lio_get_regs,
2211        .get_msglevel           = lio_get_msglevel,
2212        .set_msglevel           = lio_set_msglevel,
2213        .get_sset_count         = lio_get_sset_count,
2214        .nway_reset             = lio_nway_reset,
2215        .set_settings           = lio_set_settings,
2216        .get_coalesce           = lio_get_intr_coalesce,
2217        .set_coalesce           = lio_set_intr_coalesce,
2218        .get_priv_flags         = lio_get_priv_flags,
2219        .set_priv_flags         = lio_set_priv_flags,
2220        .get_ts_info            = lio_get_ts_info,
2221};
2222
2223void liquidio_set_ethtool_ops(struct net_device *netdev)
2224{
2225        netdev->ethtool_ops = &lio_ethtool_ops;
2226}
2227