dpdk/drivers/net/ngbe/ngbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

static const struct reg_info ngbe_regs_general[] = {
        {NGBE_RST, 1, 1, "NGBE_RST"},
        {NGBE_STAT, 1, 1, "NGBE_STAT"},
        {NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
        {NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
        {NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
        {NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_tx[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
        {0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
        {0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
                                ngbe_regs_general,
                                ngbe_regs_nvm,
                                ngbe_regs_interrupt,
                                ngbe_regs_fctl_others,
                                ngbe_regs_rxdma,
                                ngbe_regs_rx,
                                ngbe_regs_tx,
                                ngbe_regs_wakeup,
                                ngbe_regs_mac,
                                ngbe_regs_diagnostic,
                                NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
                                        uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
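
/*
 * Worked example for the bitmap math above (a sketch, assuming 32-bit
 * bitmap words, i.e. sizeof(bitmap[0]) * NBBY == 32): for queue q = 35,
 * idx = 35 / 32 = 1 and bit = 35 % 32 = 3, so NGBE_SET_HWSTRIP() sets
 * bit 3 of bitmap[1] and NGBE_GET_HWSTRIP() reads it back as 0 or 1.
 */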

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_TXD_ALIGN,
        .nb_seg_max = NGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
        /* MNG RxTx */
        HW_XSTAT(mng_bmc2host_packets),
        HW_XSTAT(mng_host2bmc_packets),
        /* Basic RxTx */
        HW_XSTAT(rx_packets),
        HW_XSTAT(tx_packets),
        HW_XSTAT(rx_bytes),
        HW_XSTAT(tx_bytes),
        HW_XSTAT(rx_total_bytes),
        HW_XSTAT(rx_total_packets),
        HW_XSTAT(tx_total_packets),
        HW_XSTAT(rx_total_missed_packets),
        HW_XSTAT(rx_broadcast_packets),
        HW_XSTAT(rx_multicast_packets),
        HW_XSTAT(rx_management_packets),
        HW_XSTAT(tx_management_packets),
        HW_XSTAT(rx_management_dropped),
        HW_XSTAT(rx_dma_drop),
        HW_XSTAT(tx_secdrp_packets),

        /* Basic Error */
        HW_XSTAT(rx_crc_errors),
        HW_XSTAT(rx_illegal_byte_errors),
        HW_XSTAT(rx_error_bytes),
        HW_XSTAT(rx_mac_short_packet_dropped),
        HW_XSTAT(rx_length_errors),
        HW_XSTAT(rx_undersize_errors),
        HW_XSTAT(rx_fragment_errors),
        HW_XSTAT(rx_oversize_errors),
        HW_XSTAT(rx_jabber_errors),
        HW_XSTAT(rx_l3_l4_xsum_error),
        HW_XSTAT(mac_local_errors),
        HW_XSTAT(mac_remote_errors),

        /* PB Stats */
        HW_XSTAT(rx_up_dropped),
        HW_XSTAT(rdb_pkt_cnt),
        HW_XSTAT(rdb_repli_cnt),
        HW_XSTAT(rdb_drp_cnt),

        /* MACSEC */
        HW_XSTAT(tx_macsec_pkts_untagged),
        HW_XSTAT(tx_macsec_pkts_encrypted),
        HW_XSTAT(tx_macsec_pkts_protected),
        HW_XSTAT(tx_macsec_octets_encrypted),
        HW_XSTAT(tx_macsec_octets_protected),
        HW_XSTAT(rx_macsec_pkts_untagged),
        HW_XSTAT(rx_macsec_pkts_badtag),
        HW_XSTAT(rx_macsec_pkts_nosci),
        HW_XSTAT(rx_macsec_pkts_unknownsci),
        HW_XSTAT(rx_macsec_octets_decrypted),
        HW_XSTAT(rx_macsec_octets_validated),
        HW_XSTAT(rx_macsec_sc_pkts_unchecked),
        HW_XSTAT(rx_macsec_sc_pkts_delayed),
        HW_XSTAT(rx_macsec_sc_pkts_late),
        HW_XSTAT(rx_macsec_sa_pkts_ok),
        HW_XSTAT(rx_macsec_sa_pkts_invalid),
        HW_XSTAT(rx_macsec_sa_pkts_notvalid),
        HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
        HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

        /* MAC RxTx */
        HW_XSTAT(rx_size_64_packets),
        HW_XSTAT(rx_size_65_to_127_packets),
        HW_XSTAT(rx_size_128_to_255_packets),
        HW_XSTAT(rx_size_256_to_511_packets),
        HW_XSTAT(rx_size_512_to_1023_packets),
        HW_XSTAT(rx_size_1024_to_max_packets),
        HW_XSTAT(tx_size_64_packets),
        HW_XSTAT(tx_size_65_to_127_packets),
        HW_XSTAT(tx_size_128_to_255_packets),
        HW_XSTAT(tx_size_256_to_511_packets),
        HW_XSTAT(tx_size_512_to_1023_packets),
        HW_XSTAT(tx_size_1024_to_max_packets),

        /* Flow Control */
        HW_XSTAT(tx_xon_packets),
        HW_XSTAT(rx_xon_packets),
        HW_XSTAT(tx_xoff_packets),
        HW_XSTAT(rx_xoff_packets),

        HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
        HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
        HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
        HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
                           sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
        QP_XSTAT(rx_qp_packets),
        QP_XSTAT(tx_qp_packets),
        QP_XSTAT(rx_qp_bytes),
        QP_XSTAT(tx_qp_bytes),
        QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
                           sizeof(rte_ngbe_qp_strings[0]))
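
/*
 * Note: QP_XSTAT records the offset of queue 0's counter inside
 * struct ngbe_hw_stats; per-queue offsets are presumably derived by
 * adding q * sizeof(hw_stats->qp[0]) when the xstats are expanded
 * (the expansion code is not part of this section).
 */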

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = hw->mac.reset_hw(hw);

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        if (status == NGBE_ERR_SFP_NOT_PRESENT)
                status = 0;
        return status;
}
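
/*
 * NGBE_ERR_SFP_NOT_PRESENT is mapped to success above: a port without
 * an SFP module plugged in can still complete a PF reset, so a missing
 * module is treated as non-fatal here.
 */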

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);

        wr32(hw, NGBE_IENMISC, intr->mask_misc);
        wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
        ngbe_flush(hw);
}
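
/*
 * A note on the register pair used here and in ngbe_disable_intr()
 * below: judging from the code, IMC(0) is the mask-clear register
 * (writing 1s unmasks vectors) while IMS(0) is the mask-set register
 * (writing NGBE_IMS_MASK masks everything), the usual set/clear
 * convention for interrupt mask registers.
 */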

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
        ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
        uint16_t mask;

        /*
         * These locks are trickier since they are shared by all ports; but
         * swfw_sync retries long enough (1s) to be almost sure that, if the
         * lock cannot be taken, it is due to an improperly held semaphore.
         */
        mask = NGBE_MNGSEM_SWPHY |
               NGBE_MNGSEM_SWMBX |
               NGBE_MNGSEM_SWFLASH;
        if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");

        hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        u32 led_conf = 0;
        int err, ret;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ngbe_eth_dev_ops;
        eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
        eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
        eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
        eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * Rx and Tx function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ngbe_tx_queue *txq;
                /* The Tx queue function in the primary process was set by the
                 * last queue initialized; the Tx queue may not have been
                 * initialized by the primary process.
                 */
                if (eth_dev->data->tx_queues) {
                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
                        ngbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default Tx function if we get here */
                        PMD_INIT_LOG(NOTICE,
                                "No Tx queues configured yet. Using default Tx function.");
                }

                ngbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->back = pci_dev;
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
                hw->sub_system_id = pci_dev->id.subsystem_device_id;
        } else {
                u32 ssid;

                ssid = ngbe_flash_read_dword(hw, 0xFFFDC);
                if (ssid == 0x1) {
                        PMD_INIT_LOG(ERR,
                                "Read of internal subsystem device id failed\n");
                        return -ENODEV;
                }
                hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8;
        }
        ngbe_map_device_id(hw);

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
                NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = ngbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* Set default Hardware Flow Control settings */
        hw->fc.requested_mode = ngbe_fc_full;
        hw->fc.current_mode = ngbe_fc_full;
        hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
        hw->fc.low_water = NGBE_FC_XON_LOTH;
        hw->fc.high_water = NGBE_FC_XOFF_HITH;
        hw->fc.send_xon = 1;

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, NULL);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->phy.led_oem_chk(hw, &led_conf);
        if (err == 0)
                hw->led_conf = led_conf;
        else
                hw->led_conf = 0xFFFF;

        err = hw->mac.init_hw(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* Reset the hw statistics */
        ngbe_dev_stats_reset(eth_dev);

        /* disable interrupt */
        ngbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
                        RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                return -ENOMEM;
        }

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* initialize the hw strip bitmap */
        memset(hwstrip, 0, sizeof(*hwstrip));

        /* initialize PF if max_vfs not zero */
        ret = ngbe_pf_host_init(eth_dev);
        if (ret) {
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                rte_free(eth_dev->data->hash_mac_addrs);
                eth_dev->data->hash_mac_addrs = NULL;
                return ret;
        }

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                        (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   ngbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable supported interrupts */
        ngbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ngbe_dev_close(eth_dev);

        return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct ngbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (ethdev == NULL)
                return 0;

        return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
        .id_table = pci_id_ngbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_ngbe_pci_probe,
        .remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

        vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
        vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
        vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;

        return 0;
}
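
/*
 * Worked example for the VFTA indexing above: ngbe_vlan_filter_set(dev,
 * 100, 1) computes vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit =
 * 1 << (100 & 0x1F) = 1 << 4, i.e. it sets bit 4 of NGBE_VLANTBL(3).
 * The 128 words of 32 bits each cover all 4096 possible VLAN IDs.
 */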

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_rx_queue *rxq;
        bool restart;
        uint32_t rxcfg, rxbal, rxbah;

        if (on)
                ngbe_vlan_hw_strip_enable(dev, queue);
        else
                ngbe_vlan_hw_strip_disable(dev, queue);

        rxq = dev->data->rx_queues[queue];
        rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
        if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        !(rxcfg & NGBE_RXCFG_VLAN);
                rxcfg |= NGBE_RXCFG_VLAN;
        } else {
                restart = (rxcfg & NGBE_RXCFG_ENA) &&
                        (rxcfg & NGBE_RXCFG_VLAN);
                rxcfg &= ~NGBE_RXCFG_VLAN;
        }
        rxcfg &= ~NGBE_RXCFG_ENA;

        if (restart) {
                /* set vlan strip for ring */
                ngbe_dev_rx_queue_stop(dev, queue);
                wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
                wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
                wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
                ngbe_dev_rx_queue_start(dev, queue);
        }
}
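
/*
 * The save/stop/rewrite/restart sequence above exists because,
 * judging from the code, the VLAN strip bit in NGBE_RXCFG only takes
 * effect while the ring is disabled: "restart" is true only when the
 * queue is currently enabled and the strip setting actually changes,
 * in which case the queue is stopped, the saved base-address and
 * config registers are written back with the new bit, and the queue
 * is started again.
 */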

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
                    enum rte_vlan_type vlan_type,
                    uint16_t tpid)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        int ret = 0;
        uint32_t portctrl, vlan_ext, qinq;

        portctrl = rd32(hw, NGBE_PORTCTL);

        vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
        switch (vlan_type) {
        case RTE_ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                } else {
                        ret = -ENOTSUP;
                        PMD_DRV_LOG(ERR,
                                "Inner type is not supported by single VLAN");
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_LSB_MASK,
                                NGBE_TAGTPID_LSB(tpid));
                }
                break;
        case RTE_ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the high 16 bits are valid */
                        wr32m(hw, NGBE_EXTAG,
                                NGBE_EXTAG_VLAN_MASK,
                                NGBE_EXTAG_VLAN(tpid));
                } else {
                        wr32m(hw, NGBE_VLANCTL,
                                NGBE_VLANCTL_TPID_MASK,
                                NGBE_VLANCTL_TPID(tpid));
                        wr32m(hw, NGBE_DMATXCTRL,
                                NGBE_DMATXCTRL_TPID_MASK,
                                NGBE_DMATXCTRL_TPID(tpid));
                }

                if (qinq) {
                        wr32m(hw, NGBE_TAGTPID(0),
                                NGBE_TAGTPID_MSB_MASK,
                                NGBE_TAGTPID_MSB(tpid));
                }
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
                return -EINVAL;
        }

        return ret;
}
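
/*
 * Register usage in the TPID paths above, as inferred from the code:
 * in single-VLAN mode the only TPID lives in NGBE_VLANCTL (Rx) and
 * NGBE_DMATXCTRL (Tx); with VLANEXT set, the outer tag moves to
 * NGBE_EXTAG; and when QinQ is active, NGBE_TAGTPID(0) additionally
 * holds the inner TPID in its LSB half and the outer TPID in its MSB
 * half.
 */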

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t vlnctrl;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Disable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
        uint32_t vlnctrl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Enable */
        vlnctrl = rd32(hw, NGBE_VLANCTL);
        vlnctrl &= ~NGBE_VLANCTL_CFIENA;
        vlnctrl |= NGBE_VLANCTL_VFE;
        wr32(hw, NGBE_VLANCTL, vlnctrl);

        /* write whatever is in local vfta copy */
        for (i = 0; i < NGBE_VFTA_SIZE; i++)
                wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
        struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
        struct ngbe_rx_queue *rxq;

        if (queue >= NGBE_MAX_RX_QUEUE_NUM)
                return;

        if (on)
                NGBE_SET_HWSTRIP(hwstrip, queue);
        else
                NGBE_CLEAR_HWSTRIP(hwstrip, queue);

        if (queue >= dev->data->nb_rx_queues)
                return;

        rxq = dev->data->rx_queues[queue];

        if (on) {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        } else {
                rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl &= ~NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record the per-queue setting for HW strip */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_RXCFG(queue));
        ctrl |= NGBE_RXCFG_VLAN;
        wr32(hw, NGBE_RXCFG(queue), ctrl);

        /* record the per-queue setting for HW strip */
        ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_VLANEXT;
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl = rd32(hw, NGBE_PORTCTL);
        ctrl &= ~NGBE_PORTCTL_QINQ;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();

        ctrl  = rd32(hw, NGBE_PORTCTL);
        ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
        wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
        struct ngbe_rx_queue *rxq;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ngbe_vlan_hw_strip_enable(dev, i);
                else
                        ngbe_vlan_hw_strip_disable(dev, i);
        }
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
        uint16_t i;
        struct rte_eth_rxmode *rxmode;
        struct ngbe_rx_queue *rxq;

        if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
                                rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;

        if (mask & RTE_ETH_VLAN_STRIP_MASK)
                ngbe_vlan_hw_strip_config(dev);

        if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ngbe_vlan_hw_filter_enable(dev);
                else
                        ngbe_vlan_hw_filter_disable(dev);
        }

        if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        ngbe_vlan_hw_extend_enable(dev);
                else
                        ngbe_vlan_hw_extend_disable(dev);
        }

        if (mask & RTE_ETH_QINQ_STRIP_MASK) {
                if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
                        ngbe_qinq_hw_strip_enable(dev);
                else
                        ngbe_qinq_hw_strip_disable(dev);
        }

        return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        ngbe_config_vlan_strip_on_all_queues(dev, mask);

        ngbe_vlan_offload_config(dev, mask);

        return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

        /* set flag to update link status after init */
        intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue doesn't meet the bulk
         * allocation preconditions, this flag will be reset to false.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

        wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
        wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
        wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
        if (hw->phy.type == ngbe_phy_yt8521s_sfi)
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
        else
                wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

        intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        int err;
        bool link_up = false, negotiate = false;
        uint32_t speed = 0;
        uint32_t allowed_speeds = 0;
        int mask = 0;
        int status;
        uint32_t *link_speeds;

        PMD_INIT_FUNC_TRACE();

        /* Stop the link setup handler before resetting the HW. */
        rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        /* stop adapter */
        hw->adapter_stopped = 0;

        /* reinitialize adapter, this calls reset and start */
        hw->nb_rx_queues = dev->data->nb_rx_queues;
        hw->nb_tx_queues = dev->data->nb_tx_queues;
        status = ngbe_pf_reset_hw(hw);
        if (status != 0)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;

        ngbe_set_pcie_master(hw, true);

        /* configure PF module if SRIOV enabled */
        ngbe_pf_host_configure(dev);

        ngbe_dev_phy_intr_setup(dev);

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(dev).active) &&
            dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle)) {
                if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
                                                   dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR,
                                     "Failed to allocate %d rx_queues intr_vec",
                                     dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        /* configure MSI-X for sleep until Rx interrupt */
        ngbe_configure_msix(dev);

        /* initialize transmission unit */
        ngbe_dev_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        err = ngbe_dev_rx_init(dev);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
                goto error;
        }

        mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
                RTE_ETH_VLAN_EXTEND_MASK;
        err = ngbe_vlan_offload_config(dev, mask);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
        }

        hw->mac.setup_pba(hw);
        ngbe_configure_port(dev);

        err = ngbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
                goto error;
        }

        /* Skip link setup if loopback mode is enabled. */
        if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
                goto skip_link_setup;

        err = hw->mac.check_link(hw, &speed, &link_up, 0);
        if (err != 0)
                goto error;
        dev->data->dev_link.link_status = link_up;

        link_speeds = &dev->data->dev_conf.link_speeds;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
                negotiate = true;

        err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
        if (err != 0)
                goto error;

        allowed_speeds = 0;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
        if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
                allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

        if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }

        speed = 0x0;
        if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                speed = hw->mac.default_speeds;
        } else {
                if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
                        speed |= NGBE_LINK_SPEED_1GB_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
                        speed |= NGBE_LINK_SPEED_100M_FULL;
                if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
                        speed |= NGBE_LINK_SPEED_10M_FULL;
        }

        err = hw->phy.init_hw(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "PHY init failed");
                goto error;
        }
        err = hw->mac.setup_link(hw, speed, link_up);
        if (err != 0)
                goto error;

skip_link_setup:

        if (rte_intr_allow_others(intr_handle)) {
                ngbe_dev_misc_interrupt_setup(dev);
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        ngbe_dev_lsc_interrupt_setup(dev, TRUE);
                else
                        ngbe_dev_lsc_interrupt_setup(dev, FALSE);
                ngbe_dev_macsec_interrupt_setup(dev);
                ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
        } else {
                rte_intr_callback_unregister(intr_handle,
                                             ngbe_dev_interrupt_handler, dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO,
                                     "LSC interrupt cannot be enabled: no interrupt multiplexing");
        }

        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0 &&
            rte_intr_dp_is_en(intr_handle))
                ngbe_dev_rxq_interrupt_setup(dev);

        /* enable UIO/VFIO intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* resume enabled intr since HW reset */
        ngbe_enable_intr(dev);

        if (hw->gpio_ctl) {
                /* GPIO 0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, 0);
        }

        /*
         * Update link status right before return, because it may
         * start link configuration process in a separate thread.
         */
        ngbe_dev_link_update(dev, 0);

        ngbe_read_stats_registers(hw, hw_stats);
        hw->offset_loaded = 1;

        return 0;

error:
        PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
        ngbe_dev_clear_queues(dev);
        return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int vf;

        if (hw->adapter_stopped)
                return 0;

        PMD_INIT_FUNC_TRACE();

        rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);

        if (hw->gpio_ctl) {
                /* GPIO 0 is used for power on/off control */
                wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
        }

        /* disable interrupts */
        ngbe_disable_intr(hw);

        /* reset the NIC */
        ngbe_pf_reset_hw(hw);
        hw->adapter_stopped = 0;

        /* stop adapter */
        ngbe_stop_hw(hw);

        for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
                vfinfo[vf].clear_to_send = false;

        hw->phy.set_phy_power(hw, false);

        ngbe_dev_clear_queues(dev);

        /* Clear stored conf */
        dev->data->scattered_rx = 0;

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);

        if (!rte_intr_allow_others(intr_handle))
                /* resume to the default handler */
                rte_intr_callback_register(intr_handle,
                                           ngbe_dev_interrupt_handler,
                                           (void *)dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_intr_vec_list_free(intr_handle);

        ngbe_set_pcie_master(hw, true);

        adapter->rss_reta_updated = 0;

        hw->adapter_stopped = true;
        dev->data->dev_started = 0;

        return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        ngbe_pf_reset_hw(hw);

        ngbe_dev_stop(dev);

        ngbe_dev_free_queues(dev);

        ngbe_set_pcie_master(hw, false);

        /* reprogram the RAR[0] in case user changed it. */
        ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                ngbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + NGBE_LINK_UP_TIME));

        /* uninitialize PF if max_vfs not zero */
        ngbe_pf_host_uninit(dev);

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        /* When a DPDK PMD PF begins to reset a PF port, it should notify all
         * of its VFs so that they stay aligned with it. The detailed
         * notification mechanism is PMD-specific; for the ngbe PF it is
         * rather complex. To avoid unexpected behavior in VFs, resetting the
         * PF with SR-IOV activated is currently not supported. It might be
         * supported later.
         */
        if (dev->data->sriov.active)
                return -ENOTSUP;

        ret = eth_ngbe_dev_uninit(dev);
        if (ret != 0)
                return ret;

        ret = eth_ngbe_dev_init(dev, NULL);

        return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
        {                                                       \
                uint64_t current_counter = rd32(hw, reg);       \
                if (current_counter < last_counter)             \
                        current_counter += 0x100000000LL;       \
                if (!hw->offset_loaded)                         \
                        last_counter = current_counter;         \
                counter = current_counter - last_counter;       \
                counter &= 0xFFFFFFFFLL;                        \
        }

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
        {                                                                \
                uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
                uint64_t current_counter_msb = rd32(hw, reg_msb);        \
                uint64_t current_counter = (current_counter_msb << 32) | \
                        current_counter_lsb;                             \
                if (current_counter < last_counter)                      \
                        current_counter += 0x1000000000LL;               \
                if (!hw->offset_loaded)                                  \
                        last_counter = current_counter;                  \
                counter = current_counter - last_counter;                \
                counter &= 0xFFFFFFFFFLL;                                \
        }
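
/*
 * Both macros tolerate one counter wrap between the saved snapshot
 * (last_counter) and the current read; the raw read is widened to 64
 * bits so the wrap adjustment is effective. Sketch of the arithmetic:
 * with last = 0xFFFFFFF0 and a new raw 32-bit read of 0x10, the read
 * is below the snapshot, so 0x100000000 is added and the credited
 * delta becomes 0x100000010 - 0xFFFFFFF0 = 0x20 instead of a bogus
 * huge value. The 36-bit variant does the same with a 0x1000000000
 * wrap constant for the split LSB/MSB register pair.
 */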
1327
1328void
1329ngbe_read_stats_registers(struct ngbe_hw *hw,
1330                           struct ngbe_hw_stats *hw_stats)
1331{
1332        unsigned int i;
1333
1334        /* QP Stats */
1335        for (i = 0; i < hw->nb_rx_queues; i++) {
1336                UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
1337                        hw->qp_last[i].rx_qp_packets,
1338                        hw_stats->qp[i].rx_qp_packets);
1339                UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
1340                        hw->qp_last[i].rx_qp_bytes,
1341                        hw_stats->qp[i].rx_qp_bytes);
1342                UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
1343                        hw->qp_last[i].rx_qp_mc_packets,
1344                        hw_stats->qp[i].rx_qp_mc_packets);
1345                UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
1346                        hw->qp_last[i].rx_qp_bc_packets,
1347                        hw_stats->qp[i].rx_qp_bc_packets);
1348        }
1349
1350        for (i = 0; i < hw->nb_tx_queues; i++) {
1351                UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
1352                        hw->qp_last[i].tx_qp_packets,
1353                        hw_stats->qp[i].tx_qp_packets);
1354                UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
1355                        hw->qp_last[i].tx_qp_bytes,
1356                        hw_stats->qp[i].tx_qp_bytes);
1357                UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
1358                        hw->qp_last[i].tx_qp_mc_packets,
1359                        hw_stats->qp[i].tx_qp_mc_packets);
1360                UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
1361                        hw->qp_last[i].tx_qp_bc_packets,
1362                        hw_stats->qp[i].tx_qp_bc_packets);
1363        }
1364
1365        /* PB Stats */
1366        hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
1367        hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
1368        hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
1369        hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
1370        hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
1371        hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
1372
1373        hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
1374        hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
1375
1376        /* DMA Stats */
1377        hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
1378        hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
1379        hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
1380        hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
1381        hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
1382        hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
1383        hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
1384        hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
1385
1386        /* MAC Stats */
1387        hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
1388        hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
1389        hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
1390
1391        hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
1392        hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
1393        hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
1394
1395        hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
1396        hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
1397
1398        hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
1399        hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
1400        hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
1401        hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
1402        hw_stats->rx_size_512_to_1023_packets +=
1403                        rd64(hw, NGBE_MACRX512TO1023L);
1404        hw_stats->rx_size_1024_to_max_packets +=
1405                        rd64(hw, NGBE_MACRX1024TOMAXL);
1406        hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
1407        hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
1408        hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
1409        hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
1410        hw_stats->tx_size_512_to_1023_packets +=
1411                        rd64(hw, NGBE_MACTX512TO1023L);
1412        hw_stats->tx_size_1024_to_max_packets +=
1413                        rd64(hw, NGBE_MACTX1024TOMAXL);
1414
1415        hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
1416        hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
1417        hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
1418
1419        /* MNG Stats */
1420        hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
1421        hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
1422        hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
1423        hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
1424
1425        /* MACsec Stats */
1426        hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
1427        hw_stats->tx_macsec_pkts_encrypted +=
1428                        rd32(hw, NGBE_LSECTX_ENCPKT);
1429        hw_stats->tx_macsec_pkts_protected +=
1430                        rd32(hw, NGBE_LSECTX_PROTPKT);
1431        hw_stats->tx_macsec_octets_encrypted +=
1432                        rd32(hw, NGBE_LSECTX_ENCOCT);
1433        hw_stats->tx_macsec_octets_protected +=
1434                        rd32(hw, NGBE_LSECTX_PROTOCT);
1435        hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
1436        hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
1437        hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
1438        hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
1439        hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
1440        hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
1441        hw_stats->rx_macsec_sc_pkts_unchecked +=
1442                        rd32(hw, NGBE_LSECRX_UNCHKPKT);
1443        hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
1444        hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
1445        for (i = 0; i < 2; i++) {
1446                hw_stats->rx_macsec_sa_pkts_ok +=
1447                        rd32(hw, NGBE_LSECRX_OKPKT(i));
1448                hw_stats->rx_macsec_sa_pkts_invalid +=
1449                        rd32(hw, NGBE_LSECRX_INVPKT(i));
1450                hw_stats->rx_macsec_sa_pkts_notvalid +=
1451                        rd32(hw, NGBE_LSECRX_BADPKT(i));
1452        }
1453        for (i = 0; i < 4; i++) {
1454                hw_stats->rx_macsec_sa_pkts_unusedsa +=
1455                        rd32(hw, NGBE_LSECRX_INVSAPKT(i));
1456                hw_stats->rx_macsec_sa_pkts_notusingsa +=
1457                        rd32(hw, NGBE_LSECRX_BADSAPKT(i));
1458        }
1459        hw_stats->rx_total_missed_packets =
1460                        hw_stats->rx_up_dropped;
1461}
1462
1463static int
1464ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1465{
1466        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1467        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1468        struct ngbe_stat_mappings *stat_mappings =
1469                        NGBE_DEV_STAT_MAPPINGS(dev);
1470        uint32_t i, j;
1471
1472        ngbe_read_stats_registers(hw, hw_stats);
1473
1474        if (stats == NULL)
1475                return -EINVAL;
1476
1477        /* Fill out the rte_eth_stats statistics structure */
1478        stats->ipackets = hw_stats->rx_packets;
1479        stats->ibytes = hw_stats->rx_bytes;
1480        stats->opackets = hw_stats->tx_packets;
1481        stats->obytes = hw_stats->tx_bytes;
1482
1483        memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1484        memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1485        memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1486        memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1487        memset(&stats->q_errors, 0, sizeof(stats->q_errors));
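            /*
             * Each RQSM/TQSM register packs NB_QMAP_FIELDS_PER_QSM_REG
             * one-byte map fields; field i steers queue i's counters into
             * one of the RTE_ETHDEV_QUEUE_STAT_CNTRS slots exposed through
             * rte_eth_stats.
             */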
1488        for (i = 0; i < NGBE_MAX_QP; i++) {
1489                uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1490                uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1491                uint32_t q_map;
1492
1493                q_map = (stat_mappings->rqsm[n] >> offset)
1494                                & QMAP_FIELD_RESERVED_BITS_MASK;
1495                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1496                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1497                stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1498                stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1499
1500                q_map = (stat_mappings->tqsm[n] >> offset)
1501                                & QMAP_FIELD_RESERVED_BITS_MASK;
1502                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1503                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1504                stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1505                stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1506        }
1507
1508        /* Rx Errors */
1509        stats->imissed  = hw_stats->rx_total_missed_packets +
1510                          hw_stats->rx_dma_drop;
1511        stats->ierrors  = hw_stats->rx_crc_errors +
1512                          hw_stats->rx_mac_short_packet_dropped +
1513                          hw_stats->rx_length_errors +
1514                          hw_stats->rx_undersize_errors +
1515                          hw_stats->rx_oversize_errors +
1516                          hw_stats->rx_illegal_byte_errors +
1517                          hw_stats->rx_error_bytes +
1518                          hw_stats->rx_fragment_errors;
1519
1520        /* Tx Errors */
1521        stats->oerrors  = 0;
1522        return 0;
1523}
1524
1525static int
1526ngbe_dev_stats_reset(struct rte_eth_dev *dev)
1527{
1528        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1529        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1530
1531        /* HW registers are cleared on read */
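            /* a NULL stats pointer still reads, and thus clears, the counters */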
1532        hw->offset_loaded = 0;
1533        ngbe_dev_stats_get(dev, NULL);
1534        hw->offset_loaded = 1;
1535
1536        /* Reset software totals */
1537        memset(hw_stats, 0, sizeof(*hw_stats));
1538
1539        return 0;
1540}
1541
1542/* This function calculates the number of xstats based on the current config */
1543static unsigned
1544ngbe_xstats_calc_num(struct rte_eth_dev *dev)
1545{
1546        int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1547        return NGBE_NB_HW_STATS +
1548               NGBE_NB_QP_STATS * nb_queues;
1549}
1550
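    /*
     * The xstats id space is flat: ids in [0, NGBE_NB_HW_STATS) index the
     * global hw stats table; the ids after that are grouped per queue,
     * with NGBE_NB_QP_STATS entries for each of up to NGBE_MAX_QP queues.
     */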
1551static inline int
1552ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1553{
1554        int nb, st;
1555
1556        /* Extended stats from ngbe_hw_stats */
1557        if (id < NGBE_NB_HW_STATS) {
1558                snprintf(name, size, "[hw]%s",
1559                        rte_ngbe_stats_strings[id].name);
1560                return 0;
1561        }
1562        id -= NGBE_NB_HW_STATS;
1563
1564        /* Queue Stats */
1565        if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1566                nb = id / NGBE_NB_QP_STATS;
1567                st = id % NGBE_NB_QP_STATS;
1568                snprintf(name, size, "[q%u]%s", nb,
1569                        rte_ngbe_qp_strings[st].name);
1570                return 0;
1571        }
1572        id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
1573
1574        return -(int)(id + 1);
1575}
1576
1577static inline int
1578ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1579{
1580        int nb, st;
1581
1582        /* Extended stats from ngbe_hw_stats */
1583        if (id < NGBE_NB_HW_STATS) {
1584                *offset = rte_ngbe_stats_strings[id].offset;
1585                return 0;
1586        }
1587        id -= NGBE_NB_HW_STATS;
1588
1589        /* Queue Stats */
1590        if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1591                nb = id / NGBE_NB_QP_STATS;
1592                st = id % NGBE_NB_QP_STATS;
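                    /* each queue contributes a contiguous block of
                     * NGBE_NB_QP_STATS uint64_t counters in ngbe_hw_stats
                     */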
1593                *offset = rte_ngbe_qp_strings[st].offset +
1594                        nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1595                return 0;
1596        }
1597
1598        return -1;
1599}
1600
1601static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1602        struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1603{
1604        unsigned int i, count;
1605
1606        count = ngbe_xstats_calc_num(dev);
1607        if (xstats_names == NULL)
1608                return count;
1609
1610        /* Note: limit >= cnt_stats checked upstream
1611         * in rte_eth_xstats_get_names()
1612         */
1613        limit = min(limit, count);
1614
1615        /* Extended stats from ngbe_hw_stats */
1616        for (i = 0; i < limit; i++) {
1617                if (ngbe_get_name_by_id(i, xstats_names[i].name,
1618                        sizeof(xstats_names[i].name))) {
1619                        PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1620                        break;
1621                }
1622        }
1623
1624        return i;
1625}
1626
1627static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1628        const uint64_t *ids,
1629        struct rte_eth_xstat_name *xstats_names,
1630        unsigned int limit)
1631{
1632        unsigned int i;
1633
1634        if (ids == NULL)
1635                return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1636
1637        for (i = 0; i < limit; i++) {
1638                if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1639                                sizeof(xstats_names[i].name))) {
1640                        PMD_INIT_LOG(WARNING, "id value %u isn't valid", (unsigned int)ids[i]);
1641                        return -1;
1642                }
1643        }
1644
1645        return i;
1646}
1647
1648static int
1649ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1650                                         unsigned int limit)
1651{
1652        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1653        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1654        unsigned int i, count;
1655
1656        ngbe_read_stats_registers(hw, hw_stats);
1657
1658        /* If xstats is NULL, this is a reset request; the read above
1659         * has already cleared the hardware registers.
1660         */
1661        count = ngbe_xstats_calc_num(dev);
1662        if (xstats == NULL)
1663                return count;
1664
1665        limit = min(limit, count);
1666
1667        /* Extended stats from ngbe_hw_stats */
1668        for (i = 0; i < limit; i++) {
1669                uint32_t offset = 0;
1670
1671                if (ngbe_get_offset_by_id(i, &offset)) {
1672                        PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1673                        break;
1674                }
1675                xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1676                xstats[i].id = i;
1677        }
1678
1679        return i;
1680}
1681
1682static int
1683ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1684                                         unsigned int limit)
1685{
1686        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1687        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1688        unsigned int i, count;
1689
1690        ngbe_read_stats_registers(hw, hw_stats);
1691
1692        /* If values is NULL, this is a reset request; the read above
1693         * has already cleared the hardware registers.
1694         */
1695        count = ngbe_xstats_calc_num(dev);
1696        if (values == NULL)
1697                return count;
1698
1699        limit = min(limit, count);
1700
1701        /* Extended stats from ngbe_hw_stats */
1702        for (i = 0; i < limit; i++) {
1703                uint32_t offset;
1704
1705                if (ngbe_get_offset_by_id(i, &offset)) {
1706                        PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1707                        break;
1708                }
1709                values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1710        }
1711
1712        return i;
1713}
1714
1715static int
1716ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1717                uint64_t *values, unsigned int limit)
1718{
1719        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1720        unsigned int i;
1721
1722        if (ids == NULL)
1723                return ngbe_dev_xstats_get_(dev, values, limit);
1724
1725        for (i = 0; i < limit; i++) {
1726                uint32_t offset;
1727
1728                if (ngbe_get_offset_by_id(ids[i], &offset)) {
1729                        PMD_INIT_LOG(WARNING, "id value %u isn't valid", (unsigned int)ids[i]);
1730                        break;
1731                }
1732                values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1733        }
1734
1735        return i;
1736}
1737
1738static int
1739ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1740{
1741        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1742        struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1743
1744        /* HW registers are cleared on read */
1745        hw->offset_loaded = 0;
1746        ngbe_read_stats_registers(hw, hw_stats);
1747        hw->offset_loaded = 1;
1748
1749        /* Reset software totals */
1750        memset(hw_stats, 0, sizeof(*hw_stats));
1751
1752        return 0;
1753}
1754
1755static int
1756ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1757{
1758        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1759        int ret;
1760
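        /* ethdev contract: return 0 on success, or the required buffer
         * size (including the terminating '\0') when fw_size is too small
         */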
1761        ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1762
1763        if (ret < 0)
1764                return -EINVAL;
1765
1766        ret += 1; /* add the size of '\0' */
1767        if (fw_size < (size_t)ret)
1768                return ret;
1769
1770        return 0;
1771}
1772
1773static int
1774ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1775{
1776        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1777        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1778
1779        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1780        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1781        dev_info->min_rx_bufsize = 1024;
1782        dev_info->max_rx_pktlen = 15872;
1783        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1784        dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1785        dev_info->max_vfs = pci_dev->max_vfs;
1786        dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1787        dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1788                                     dev_info->rx_queue_offload_capa);
1789        dev_info->tx_queue_offload_capa = 0;
1790        dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1791
1792        dev_info->default_rxconf = (struct rte_eth_rxconf) {
1793                .rx_thresh = {
1794                        .pthresh = NGBE_DEFAULT_RX_PTHRESH,
1795                        .hthresh = NGBE_DEFAULT_RX_HTHRESH,
1796                        .wthresh = NGBE_DEFAULT_RX_WTHRESH,
1797                },
1798                .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1799                .rx_drop_en = 0,
1800                .offloads = 0,
1801        };
1802
1803        dev_info->default_txconf = (struct rte_eth_txconf) {
1804                .tx_thresh = {
1805                        .pthresh = NGBE_DEFAULT_TX_PTHRESH,
1806                        .hthresh = NGBE_DEFAULT_TX_HTHRESH,
1807                        .wthresh = NGBE_DEFAULT_TX_WTHRESH,
1808                },
1809                .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1810                .offloads = 0,
1811        };
1812
1813        dev_info->rx_desc_lim = rx_desc_lim;
1814        dev_info->tx_desc_lim = tx_desc_lim;
1815
1816        dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1817        dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1818        dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1819
1820        dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1821                                RTE_ETH_LINK_SPEED_10M;
1822
1823        /* Driver-preferred Rx/Tx parameters */
1824        dev_info->default_rxportconf.burst_size = 32;
1825        dev_info->default_txportconf.burst_size = 32;
1826        dev_info->default_rxportconf.nb_queues = 1;
1827        dev_info->default_txportconf.nb_queues = 1;
1828        dev_info->default_rxportconf.ring_size = 256;
1829        dev_info->default_txportconf.ring_size = 256;
1830
1831        return 0;
1832}
1833
1834const uint32_t *
1835ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1836{
1837        if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1838            dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1839            dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1840            dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1841                return ngbe_get_supported_ptypes();
1842
1843        return NULL;
1844}
1845
1846void
1847ngbe_dev_setup_link_alarm_handler(void *param)
1848{
1849        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1850        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1851        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1852        u32 speed;
1853        bool autoneg = false;
1854
1855        speed = hw->phy.autoneg_advertised;
1856        if (!speed)
1857                hw->mac.get_link_capabilities(hw, &speed, &autoneg);
1858
1859        hw->mac.setup_link(hw, speed, true);
1860
1861        intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1862}
1863
1864/* return 0 means link status changed, -1 means not changed */
1865int
1866ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1867                            int wait_to_complete)
1868{
1869        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1870        struct rte_eth_link link;
1871        u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1872        u32 lan_speed = 0;
1873        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1874        bool link_up;
1875        int err;
1876        int wait = 1;
1877
1878        memset(&link, 0, sizeof(link));
1879        link.link_status = RTE_ETH_LINK_DOWN;
1880        link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1881        link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1882        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1883                        ~RTE_ETH_LINK_SPEED_AUTONEG);
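        /* autoneg is reported unless specific link speeds were requested */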
1884
1885        hw->mac.get_link_status = true;
1886
1887        if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1888                return rte_eth_linkstatus_set(dev, &link);
1889
1890        /* don't wait for completion if not requested or if the LSC interrupt is enabled */
1891        if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1892                wait = 0;
1893
1894        err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1895        if (err != 0) {
1896                link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1897                link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1898                return rte_eth_linkstatus_set(dev, &link);
1899        }
1900
1901        if (!link_up)
1902                return rte_eth_linkstatus_set(dev, &link);
1903
1904        intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1905        link.link_status = RTE_ETH_LINK_UP;
1906        link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1907
1908        switch (link_speed) {
1909        default:
1910        case NGBE_LINK_SPEED_UNKNOWN:
1911                link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1912                break;
1913
1914        case NGBE_LINK_SPEED_10M_FULL:
1915                link.link_speed = RTE_ETH_SPEED_NUM_10M;
1916                lan_speed = 0;
1917                break;
1918
1919        case NGBE_LINK_SPEED_100M_FULL:
1920                link.link_speed = RTE_ETH_SPEED_NUM_100M;
1921                lan_speed = 1;
1922                break;
1923
1924        case NGBE_LINK_SPEED_1GB_FULL:
1925                link.link_speed = RTE_ETH_SPEED_NUM_1G;
1926                lan_speed = 2;
1927                break;
1928        }
1929
1930        if (hw->is_pf) {
1931                wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1932                if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1933                                NGBE_LINK_SPEED_100M_FULL |
1934                                NGBE_LINK_SPEED_10M_FULL)) {
1935                        wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1936                                NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1937                }
1938        }
1939
1940        return rte_eth_linkstatus_set(dev, &link);
1941}
1942
1943static int
1944ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1945{
1946        return ngbe_dev_link_update_share(dev, wait_to_complete);
1947}
1948
1949static int
1950ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1951{
1952        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1953        uint32_t fctrl;
1954
1955        fctrl = rd32(hw, NGBE_PSRCTL);
1956        fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1957        wr32(hw, NGBE_PSRCTL, fctrl);
1958
1959        return 0;
1960}
1961
1962static int
1963ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1964{
1965        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1966        uint32_t fctrl;
1967
1968        fctrl = rd32(hw, NGBE_PSRCTL);
1969        fctrl &= (~NGBE_PSRCTL_UCP);
1970        if (dev->data->all_multicast == 1)
1971                fctrl |= NGBE_PSRCTL_MCP;
1972        else
1973                fctrl &= (~NGBE_PSRCTL_MCP);
1974        wr32(hw, NGBE_PSRCTL, fctrl);
1975
1976        return 0;
1977}
1978
1979static int
1980ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1981{
1982        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1983        uint32_t fctrl;
1984
1985        fctrl = rd32(hw, NGBE_PSRCTL);
1986        fctrl |= NGBE_PSRCTL_MCP;
1987        wr32(hw, NGBE_PSRCTL, fctrl);
1988
1989        return 0;
1990}
1991
1992static int
1993ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1994{
1995        struct ngbe_hw *hw = ngbe_dev_hw(dev);
1996        uint32_t fctrl;
1997
1998        if (dev->data->promiscuous == 1)
1999                return 0; /* must remain in all_multicast mode */
2000
2001        fctrl = rd32(hw, NGBE_PSRCTL);
2002        fctrl &= (~NGBE_PSRCTL_MCP);
2003        wr32(hw, NGBE_PSRCTL, fctrl);
2004
2005        return 0;
2006}
2007
2008/**
2009 * It clears the interrupt causes and enables the interrupt.
2010 * It will be called only once during NIC initialization.
2011 *
2012 * @param dev
2013 *  Pointer to struct rte_eth_dev.
2014 * @param on
2015 *  Enable or Disable.
2016 *
2017 * @return
2018 *  - On success, zero.
2019 *  - On failure, a negative value.
2020 */
2021static int
2022ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2023{
2024        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2025
2026        ngbe_dev_link_status_print(dev);
2027        if (on != 0) {
2028                intr->mask_misc |= NGBE_ICRMISC_PHY;
2029                intr->mask_misc |= NGBE_ICRMISC_GPIO;
2030        } else {
2031                intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2032                intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
2033        }
2034
2035        return 0;
2036}
2037
2038/**
2039 * It clears the interrupt causes and enables the interrupt.
2040 * It will be called only once during NIC initialization.
2041 *
2042 * @param dev
2043 *  Pointer to struct rte_eth_dev.
2044 *
2045 * @return
2046 *  - On success, zero.
2047 *  - On failure, a negative value.
2048 */
2049static int
2050ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2051{
2052        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2053        u64 mask;
2054
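        /* keep only the interrupt-cause bit owned by the misc vector */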
2055        mask = NGBE_ICR_MASK;
2056        mask &= (1ULL << NGBE_MISC_VEC_ID);
2057        intr->mask |= mask;
2058        intr->mask_misc |= NGBE_ICRMISC_GPIO;
2059
2060        return 0;
2061}
2062
2063/**
2064 * It clears the interrupt causes and enables the interrupt.
2065 * It will be called only once during NIC initialization.
2066 *
2067 * @param dev
2068 *  Pointer to struct rte_eth_dev.
2069 *
2070 * @return
2071 *  - On success, zero.
2072 *  - On failure, a negative value.
2073 */
2074static int
2075ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2076{
2077        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2078        u64 mask;
2079
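        /* enable only the cause bits at or above the first Rx queue vector */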
2080        mask = NGBE_ICR_MASK;
2081        mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2082        intr->mask |= mask;
2083
2084        return 0;
2085}
2086
2087/**
2088 * It clears the interrupt causes and enables the interrupt.
2089 * It will be called only once during NIC initialization.
2090 *
2091 * @param dev
2092 *  Pointer to struct rte_eth_dev.
2093 *
2094 * @return
2095 *  - On success, zero.
2096 *  - On failure, a negative value.
2097 */
2098static int
2099ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2100{
2101        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2102
2103        intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2104
2105        return 0;
2106}
2107
2108/*
2109 * It reads the interrupt causes and sets flags for the link update.
2110 *
2111 * @param dev
2112 *  Pointer to struct rte_eth_dev.
2113 *
2114 * @return
2115 *  - On success, zero.
2116 *  - On failure, a negative value.
2117 */
2118static int
2119ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2120{
2121        uint32_t eicr;
2122        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2123        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2124
2125        /* read the misc interrupt causes from the in-memory status block */
2126        eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2127        PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2128
2129        intr->flags = 0;
2130
2131        /* set flag for async link update */
2132        if (eicr & NGBE_ICRMISC_PHY)
2133                intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2134
2135        if (eicr & NGBE_ICRMISC_VFMBX)
2136                intr->flags |= NGBE_FLAG_MAILBOX;
2137
2138        if (eicr & NGBE_ICRMISC_LNKSEC)
2139                intr->flags |= NGBE_FLAG_MACSEC;
2140
2141        if (eicr & NGBE_ICRMISC_GPIO)
2142                intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2143
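        /* consume the causes: clear the misc entry in the ISB */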
2144        ((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
2145
2146        return 0;
2147}
2148
2149/**
2150 * It gets and then prints the link status.
2151 *
2152 * @param dev
2153 *  Pointer to struct rte_eth_dev.
2158 */
2159static void
2160ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2161{
2162        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2163        struct rte_eth_link link;
2164
2165        rte_eth_linkstatus_get(dev, &link);
2166
2167        if (link.link_status == RTE_ETH_LINK_UP) {
2168                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2169                                        (int)(dev->data->port_id),
2170                                        (unsigned int)link.link_speed,
2171                        link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2172                                        "full-duplex" : "half-duplex");
2173        } else {
2174                PMD_INIT_LOG(INFO, "Port %d: Link Down",
2175                                (int)(dev->data->port_id));
2176        }
2177        PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2178                                pci_dev->addr.domain,
2179                                pci_dev->addr.bus,
2180                                pci_dev->addr.devid,
2181                                pci_dev->addr.function);
2182}
2183
2184/*
2185 * It executes link_update after knowing an interrupt occurred.
2186 *
2187 * @param dev
2188 *  Pointer to struct rte_eth_dev.
2189 *
2190 * @return
2191 *  - On success, zero.
2192 *  - On failure, a negative value.
2193 */
2194static int
2195ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2196{
2197        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2198
2199        PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2200
2201        if (intr->flags & NGBE_FLAG_MAILBOX) {
2202                ngbe_pf_mbx_process(dev);
2203                intr->flags &= ~NGBE_FLAG_MAILBOX;
2204        }
2205
2206        if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2207                struct rte_eth_link link;
2208
2209                /* get the link status before the update, to detect a speed change below */
2210                rte_eth_linkstatus_get(dev, &link);
2211
2212                ngbe_dev_link_update(dev, 0);
2213                intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2214                ngbe_dev_link_status_print(dev);
2215                if (dev->data->dev_link.link_speed != link.link_speed)
2216                        rte_eth_dev_callback_process(dev,
2217                                RTE_ETH_EVENT_INTR_LSC, NULL);
2218        }
2219
2220        PMD_DRV_LOG(DEBUG, "enable intr immediately");
2221        ngbe_enable_intr(dev);
2222
2223        return 0;
2224}
2225
2226/**
2227 * Interrupt handler triggered by the NIC for handling
2228 * a specific interrupt.
2229 *
2230 * @param param
2231 *  The address of parameter (struct rte_eth_dev *) registered before.
2232 */
2233static void
2234ngbe_dev_interrupt_handler(void *param)
2235{
2236        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2237
2238        ngbe_dev_interrupt_get_status(dev);
2239        ngbe_dev_interrupt_action(dev);
2240}
2241
2242static int
2243ngbe_dev_led_on(struct rte_eth_dev *dev)
2244{
2245        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2246        return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2247}
2248
2249static int
2250ngbe_dev_led_off(struct rte_eth_dev *dev)
2251{
2252        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2253        return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2254}
2255
2256static int
2257ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2258{
2259        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2260        uint32_t mflcn_reg;
2261        uint32_t fccfg_reg;
2262        int rx_pause;
2263        int tx_pause;
2264
2265        fc_conf->pause_time = hw->fc.pause_time;
2266        fc_conf->high_water = hw->fc.high_water;
2267        fc_conf->low_water = hw->fc.low_water;
2268        fc_conf->send_xon = hw->fc.send_xon;
2269        fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2270
2271        /*
2272         * Return rx_pause status according to actual setting of
2273         * RXFCCFG register.
2274         */
2275        mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2276        if (mflcn_reg & NGBE_RXFCCFG_FC)
2277                rx_pause = 1;
2278        else
2279                rx_pause = 0;
2280
2281        /*
2282         * Return tx_pause status according to actual setting of
2283         * TXFCCFG register.
2284         */
2285        fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2286        if (fccfg_reg & NGBE_TXFCCFG_FC)
2287                tx_pause = 1;
2288        else
2289                tx_pause = 0;
2290
2291        if (rx_pause && tx_pause)
2292                fc_conf->mode = RTE_ETH_FC_FULL;
2293        else if (rx_pause)
2294                fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2295        else if (tx_pause)
2296                fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2297        else
2298                fc_conf->mode = RTE_ETH_FC_NONE;
2299
2300        return 0;
2301}
2302
2303static int
2304ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2305{
2306        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2307        int err;
2308        uint32_t rx_buf_size;
2309        uint32_t max_high_water;
2310        enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2311                ngbe_fc_none,
2312                ngbe_fc_rx_pause,
2313                ngbe_fc_tx_pause,
2314                ngbe_fc_full
2315        };
2316
2317        PMD_INIT_FUNC_TRACE();
2318
2319        rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2320        PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2321
2322        /*
2323         * Reserve at least one Ethernet frame for the watermark;
2324         * high_water/low_water are in kilobytes for ngbe.
2325         */
2326        max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2327        if (fc_conf->high_water > max_high_water ||
2328            fc_conf->high_water < fc_conf->low_water) {
2329                PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2330                PMD_INIT_LOG(ERR, "high_water must be <= 0x%x", max_high_water);
2331                return -EINVAL;
2332        }
2333
2334        hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2335        hw->fc.pause_time     = fc_conf->pause_time;
2336        hw->fc.high_water     = fc_conf->high_water;
2337        hw->fc.low_water      = fc_conf->low_water;
2338        hw->fc.send_xon       = fc_conf->send_xon;
2339        hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2340
2341        err = hw->mac.fc_enable(hw);
2342
2343        /* Not negotiated is not an error case */
2344        if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2345                wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2346                      (fc_conf->mac_ctrl_frame_fwd
2347                       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2348                ngbe_flush(hw);
2349
2350                return 0;
2351        }
2352
2353        PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2354        return -EIO;
2355}
2356
2357int
2358ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2359                          struct rte_eth_rss_reta_entry64 *reta_conf,
2360                          uint16_t reta_size)
2361{
2362        uint8_t i, j, mask;
2363        uint32_t reta;
2364        uint16_t idx, shift;
2365        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2366        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2367
2368        PMD_INIT_FUNC_TRACE();
2369
2370        if (!hw->is_pf) {
2371                PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2372                        "NIC.");
2373                return -ENOTSUP;
2374        }
2375
2376        if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2377                PMD_DRV_LOG(ERR, "The size of the RSS redirection table "
2378                        "configured (%d) doesn't match the number hardware "
2379                        "can support (%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2380                return -EINVAL;
2381        }
2382
2383        for (i = 0; i < reta_size; i += 4) {
2384                idx = i / RTE_ETH_RETA_GROUP_SIZE;
2385                shift = i % RTE_ETH_RETA_GROUP_SIZE;
2386                mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2387                if (!mask)
2388                        continue;
2389
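                /* each 32-bit RSS table entry packs four 8-bit queue ids */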
2390                reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2391                for (j = 0; j < 4; j++) {
2392                        if (RS8(mask, j, 0x1)) {
2393                                reta  &= ~(MS32(8 * j, 0xFF));
2394                                reta |= LS32(reta_conf[idx].reta[shift + j],
2395                                                8 * j, 0xFF);
2396                        }
2397                }
2398                wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2399        }
2400        adapter->rss_reta_updated = 1;
2401
2402        return 0;
2403}
2404
2405int
2406ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2407                         struct rte_eth_rss_reta_entry64 *reta_conf,
2408                         uint16_t reta_size)
2409{
2410        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2411        uint8_t i, j, mask;
2412        uint32_t reta;
2413        uint16_t idx, shift;
2414
2415        PMD_INIT_FUNC_TRACE();
2416
2417        if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2418                PMD_DRV_LOG(ERR, "The size of the RSS redirection table "
2419                        "configured (%d) doesn't match the number hardware "
2420                        "can support (%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2421                return -EINVAL;
2422        }
2423
2424        for (i = 0; i < reta_size; i += 4) {
2425                idx = i / RTE_ETH_RETA_GROUP_SIZE;
2426                shift = i % RTE_ETH_RETA_GROUP_SIZE;
2427                mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2428                if (!mask)
2429                        continue;
2430
2431                reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2432                for (j = 0; j < 4; j++) {
2433                        if (RS8(mask, j, 0x1))
2434                                reta_conf[idx].reta[shift + j] =
2435                                        (uint16_t)RS32(reta, 8 * j, 0xFF);
2436                }
2437        }
2438
2439        return 0;
2440}
2441
2442static int
2443ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2444                                uint32_t index, uint32_t pool)
2445{
2446        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2447        uint32_t enable_addr = 1;
2448
2449        return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2450                             pool, enable_addr);
2451}
2452
2453static void
2454ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2455{
2456        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2457
2458        ngbe_clear_rar(hw, index);
2459}
2460
2461static int
2462ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2463{
2464        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2465
2466        ngbe_remove_rar(dev, 0);
2467        ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2468
2469        return 0;
2470}
2471
2472static int
2473ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2474{
2475        struct ngbe_hw *hw = ngbe_dev_hw(dev);
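        /* worst-case frame: MTU plus L2 header, CRC and a 4-byte VLAN tag */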
2476        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
2477        struct rte_eth_dev_data *dev_data = dev->data;
2478
2479        /* If the device is started, refuse an MTU that requires scattered
2480         * Rx support when that feature has not already been enabled.
2481         */
2482        if (dev_data->dev_started && !dev_data->scattered_rx &&
2483            (frame_size + 2 * RTE_VLAN_HLEN >
2484             dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2485                PMD_INIT_LOG(ERR, "Stop port first.");
2486                return -EINVAL;
2487        }
2488
2489        if (hw->mode)
2490                wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2491                        NGBE_FRAME_SIZE_MAX);
2492        else
2493                wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2494                        NGBE_FRMSZ_MAX(frame_size));
2495
2496        return 0;
2497}
2498
2499static uint32_t
2500ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2501{
2502        uint32_t vector = 0;
2503
2504        switch (hw->mac.mc_filter_type) {
2505        case 0:   /* use bits [47:36] of the address */
2506                vector = ((uc_addr->addr_bytes[4] >> 4) |
2507                        (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2508                break;
2509        case 1:   /* use bits [46:35] of the address */
2510                vector = ((uc_addr->addr_bytes[4] >> 3) |
2511                        (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2512                break;
2513        case 2:   /* use bits [45:34] of the address */
2514                vector = ((uc_addr->addr_bytes[4] >> 2) |
2515                        (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2516                break;
2517        case 3:   /* use bits [43:32] of the address */
2518                vector = ((uc_addr->addr_bytes[4]) |
2519                        (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2520                break;
2521        default:  /* Invalid mc_filter_type */
2522                break;
2523        }
2524
2525        /* the vector can only be 12 bits wide or the table boundary will be exceeded */
2526        vector &= 0xFFF;
2527        return vector;
2528}
2529
2530static int
2531ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2532                        struct rte_ether_addr *mac_addr, uint8_t on)
2533{
2534        uint32_t vector;
2535        uint32_t uta_idx;
2536        uint32_t reg_val;
2537        uint32_t uta_mask;
2538        uint32_t psrctl;
2539
2540        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2541        struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2542
2543        vector = ngbe_uta_vector(hw, mac_addr);
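        /* the 12-bit vector addresses a single bit in the UTA: bits [11:5]
         * select one of the 128 32-bit registers, bits [4:0] the bit in it
         */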
2544        uta_idx = (vector >> 5) & 0x7F;
2545        uta_mask = 0x1UL << (vector & 0x1F);
2546
2547        if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2548                return 0;
2549
2550        reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2551        if (on) {
2552                uta_info->uta_in_use++;
2553                reg_val |= uta_mask;
2554                uta_info->uta_shadow[uta_idx] |= uta_mask;
2555        } else {
2556                uta_info->uta_in_use--;
2557                reg_val &= ~uta_mask;
2558                uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2559        }
2560
2561        wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2562
2563        psrctl = rd32(hw, NGBE_PSRCTL);
2564        if (uta_info->uta_in_use > 0)
2565                psrctl |= NGBE_PSRCTL_UCHFENA;
2566        else
2567                psrctl &= ~NGBE_PSRCTL_UCHFENA;
2568
2569        psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2570        psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2571        wr32(hw, NGBE_PSRCTL, psrctl);
2572
2573        return 0;
2574}
2575
2576static int
2577ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2578{
2579        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2580        struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2581        uint32_t psrctl;
2582        int i;
2583
2584        if (on) {
2585                for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2586                        uta_info->uta_shadow[i] = ~0;
2587                        wr32(hw, NGBE_UCADDRTBL(i), ~0);
2588                }
2589        } else {
2590                for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2591                        uta_info->uta_shadow[i] = 0;
2592                        wr32(hw, NGBE_UCADDRTBL(i), 0);
2593                }
2594        }
2595
2596        psrctl = rd32(hw, NGBE_PSRCTL);
2597        if (on)
2598                psrctl |= NGBE_PSRCTL_UCHFENA;
2599        else
2600                psrctl &= ~NGBE_PSRCTL_UCHFENA;
2601
2602        psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2603        psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2604        wr32(hw, NGBE_PSRCTL, psrctl);
2605
2606        return 0;
2607}
2608
2609/**
2610 * Set the IVAR registers, mapping interrupt causes to vectors
2611 * @param hw
2612 *  pointer to ngbe_hw struct
2613 * @param direction
2614 *  0 for Rx, 1 for Tx, -1 for other causes
2615 * @param queue
2616 *  queue to map the corresponding interrupt to
2617 * @param msix_vector
2618 *  the vector to map to the corresponding queue
2619 */
2620void
2621ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2622                   uint8_t queue, uint8_t msix_vector)
2623{
2624        uint32_t tmp, idx;
2625
2626        if (direction == -1) {
2627                /* other causes */
2628                msix_vector |= NGBE_IVARMISC_VLD;
2629                idx = 0;
2630                tmp = rd32(hw, NGBE_IVARMISC);
2631                tmp &= ~(0xFF << idx);
2632                tmp |= (msix_vector << idx);
2633                wr32(hw, NGBE_IVARMISC, tmp);
2634        } else {
2635                /* rx or tx causes */
2636                /* Workaround for ICR lost */
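                    /* each 32-bit IVAR register covers two queues: queue
                     * parity selects the 16-bit half and direction (0=Rx,
                     * 1=Tx) selects the byte within that half
                     */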
2637                idx = ((16 * (queue & 1)) + (8 * direction));
2638                tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2639                tmp &= ~(0xFF << idx);
2640                tmp |= (msix_vector << idx);
2641                wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2642        }
2643}
2644
2645/**
2646 * Sets up the hardware to properly generate MSI-X interrupts
2647 * @param dev
2648 *  pointer to the rte_eth_dev structure
2649 */
2650static void
2651ngbe_configure_msix(struct rte_eth_dev *dev)
2652{
2653        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2654        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2655        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2656        uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2657        uint32_t vec = NGBE_MISC_VEC_ID;
2658        uint32_t gpie;
2659
2660        /*
2661         * Don't configure the MSI-X registers if no mapping has been
2662         * done between interrupt vectors and event fds; but if MSI-X
2663         * has already been enabled, we still need to configure auto
2664         * clean, auto mask and throttling.
2665         */
2666        gpie = rd32(hw, NGBE_GPIE);
2667        if (!rte_intr_dp_is_en(intr_handle) &&
2668            !(gpie & NGBE_GPIE_MSIX))
2669                return;
2670
2671        if (rte_intr_allow_others(intr_handle)) {
2672                base = NGBE_RX_VEC_START;
2673                vec = base;
2674        }
2675
2676        /* setup GPIE for MSI-X mode */
2677        gpie = rd32(hw, NGBE_GPIE);
2678        gpie |= NGBE_GPIE_MSIX;
2679        wr32(hw, NGBE_GPIE, gpie);
2680
2681        /* Populate the IVAR table and set the ITR values to the
2682         * corresponding register.
2683         */
2684        if (rte_intr_dp_is_en(intr_handle)) {
2685                for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2686                        queue_id++) {
2687                        /* by default, 1:1 mapping */
2688                        ngbe_set_ivar_map(hw, 0, queue_id, vec);
2689                        rte_intr_vec_list_index_set(intr_handle,
2690                                                           queue_id, vec);
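                            /* once every event fd has a queue, the
                             * remaining queues share the last vector
                             */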
2691                        if (vec < base + rte_intr_nb_efd_get(intr_handle)
2692                            - 1)
2693                                vec++;
2694                }
2695
2696                ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2697        }
2698        wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2699                        NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2700                        | NGBE_ITR_WRDSA);
2701}
2702
2703static u8 *
2704ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2705                        u8 **mc_addr_ptr, u32 *vmdq)
2706{
2707        u8 *mc_addr;
2708
2709        *vmdq = 0;
2710        mc_addr = *mc_addr_ptr;
2711        *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2712        return mc_addr;
2713}
2714
2715int
2716ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2717                          struct rte_ether_addr *mc_addr_set,
2718                          uint32_t nb_mc_addr)
2719{
2720        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2721        u8 *mc_addr_list;
2722
2723        mc_addr_list = (u8 *)mc_addr_set;
2724        return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2725                                         ngbe_dev_addr_list_itr, TRUE);
2726}
2727
2728static uint64_t
2729ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2730{
2731        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2732        uint64_t systime_cycles;
2733
2734        systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2735        systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2736
2737        return systime_cycles;
2738}
2739
2740static uint64_t
2741ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2742{
2743        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2744        uint64_t rx_tstamp_cycles;
2745
2746        /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
2747        rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2748        rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2749
2750        return rx_tstamp_cycles;
2751}
2752
2753static uint64_t
2754ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2755{
2756        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2757        uint64_t tx_tstamp_cycles;
2758
2759        /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
2760        tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2761        tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2762
2763        return tx_tstamp_cycles;
2764}
2765
2766static void
2767ngbe_start_timecounters(struct rte_eth_dev *dev)
2768{
2769        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2770        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2771        uint32_t incval = 0;
2772        uint32_t shift = 0;
2773
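        /* the SYSTIME increment is fixed at the 1Gb values on ngbe */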
2774        incval = NGBE_INCVAL_1GB;
2775        shift = NGBE_INCVAL_SHIFT_1GB;
2776
2777        wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2778
2779        memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2780        memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2781        memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2782
2783        adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2784        adapter->systime_tc.cc_shift = shift;
2785        adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2786
2787        adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2788        adapter->rx_tstamp_tc.cc_shift = shift;
2789        adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2790
2791        adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2792        adapter->tx_tstamp_tc.cc_shift = shift;
2793        adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2794}
2795
2796static int
2797ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2798{
2799        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2800
2801        adapter->systime_tc.nsec += delta;
2802        adapter->rx_tstamp_tc.nsec += delta;
2803        adapter->tx_tstamp_tc.nsec += delta;
2804
2805        return 0;
2806}
2807
2808static int
2809ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2810{
2811        uint64_t ns;
2812        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2813
2814        ns = rte_timespec_to_ns(ts);
2815        /* Set the timecounters to a new value. */
2816        adapter->systime_tc.nsec = ns;
2817        adapter->rx_tstamp_tc.nsec = ns;
2818        adapter->tx_tstamp_tc.nsec = ns;
2819
2820        return 0;
2821}
2822
2823static int
2824ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2825{
2826        uint64_t ns, systime_cycles;
2827        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2828
2829        systime_cycles = ngbe_read_systime_cyclecounter(dev);
2830        ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
2831        *ts = rte_ns_to_timespec(ns);
2832
2833        return 0;
2834}
2835
2836static int
2837ngbe_timesync_enable(struct rte_eth_dev *dev)
2838{
2839        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2840        uint32_t tsync_ctl;
2841
2842        /* Stop the timesync system time. */
2843        wr32(hw, NGBE_TSTIMEINC, 0x0);
2844        /* Reset the timesync system time value. */
2845        wr32(hw, NGBE_TSTIMEL, 0x0);
2846        wr32(hw, NGBE_TSTIMEH, 0x0);
2847
2848        ngbe_start_timecounters(dev);
2849
2850        /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2851        wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
2852                RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
2853
2854        /* Enable timestamping of received PTP packets. */
2855        tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2856        tsync_ctl |= NGBE_TSRXCTL_ENA;
2857        wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2858
2859        /* Enable timestamping of transmitted PTP packets. */
2860        tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2861        tsync_ctl |= NGBE_TSTXCTL_ENA;
2862        wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2863
2864        ngbe_flush(hw);
2865
2866        return 0;
2867}
2868
2869static int
2870ngbe_timesync_disable(struct rte_eth_dev *dev)
2871{
2872        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2873        uint32_t tsync_ctl;
2874
2875        /* Disable timestamping of transmitted PTP packets. */
2876        tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2877        tsync_ctl &= ~NGBE_TSTXCTL_ENA;
2878        wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2879
2880        /* Disable timestamping of received PTP packets. */
2881        tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2882        tsync_ctl &= ~NGBE_TSRXCTL_ENA;
2883        wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2884
2885        /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2886        wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
2887
2888        /* Stop incrementing the System Time registers. */
2889        wr32(hw, NGBE_TSTIMEINC, 0);
2890
2891        return 0;
2892}
2893
2894static int
2895ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2896                                 struct timespec *timestamp,
2897                                 uint32_t flags __rte_unused)
2898{
2899        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2900        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2901        uint32_t tsync_rxctl;
2902        uint64_t rx_tstamp_cycles;
2903        uint64_t ns;
2904
2905        tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
2906        if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
2907                return -EINVAL;
2908
2909        rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
2910        ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
2911        *timestamp = rte_ns_to_timespec(ns);
2912
2913        return  0;
2914}
2915
2916static int
2917ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2918                                 struct timespec *timestamp)
2919{
2920        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2921        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2922        uint32_t tsync_txctl;
2923        uint64_t tx_tstamp_cycles;
2924        uint64_t ns;
2925
2926        tsync_txctl = rd32(hw, NGBE_TSTXCTL);
2927        if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
2928                return -EINVAL;
2929
2930        tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
2931        ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
2932        *timestamp = rte_ns_to_timespec(ns);
2933
2934        return 0;
2935}
2936
2937static int
2938ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
2939{
2940        int count = 0;
2941        int g_ind = 0;
2942        const struct reg_info *reg_group;
2943        const struct reg_info **reg_set = ngbe_regs_others;
2944
2945        while ((reg_group = reg_set[g_ind++]))
2946                count += ngbe_regs_group_count(reg_group);
2947
2948        return count;
2949}
2950
2951static int
2952ngbe_get_regs(struct rte_eth_dev *dev,
2953              struct rte_dev_reg_info *regs)
2954{
2955        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2956        uint32_t *data = regs->data;
2957        int g_ind = 0;
2958        int count = 0;
2959        const struct reg_info *reg_group;
2960        const struct reg_info **reg_set = ngbe_regs_others;
2961
2962        if (data == NULL) {
2963                regs->length = ngbe_get_reg_length(dev);
2964                regs->width = sizeof(uint32_t);
2965                return 0;
2966        }
2967
2968        /* Support only full register dump */
2969        if (regs->length == 0 ||
2970            regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
2971                regs->version = hw->mac.type << 24 |
2972                                hw->revision_id << 16 |
2973                                hw->device_id;
2974                while ((reg_group = reg_set[g_ind++]))
2975                        count += ngbe_read_regs_group(dev, &data[count],
2976                                                      reg_group);
2977                return 0;
2978        }
2979
2980        return -ENOTSUP;
2981}
2982
2983static int
2984ngbe_get_eeprom_length(struct rte_eth_dev *dev)
2985{
2986        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2987
2988        /* Return unit is byte count */
2989        return hw->rom.word_size * 2;
2990}
2991
2992static int
2993ngbe_get_eeprom(struct rte_eth_dev *dev,
2994                struct rte_dev_eeprom_info *in_eeprom)
2995{
2996        struct ngbe_hw *hw = ngbe_dev_hw(dev);
2997        struct ngbe_rom_info *eeprom = &hw->rom;
2998        uint16_t *data = in_eeprom->data;
2999        int first, length;
3000
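        /* the EEPROM is addressed in 16-bit words; offset/length are bytes */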
3001        first = in_eeprom->offset >> 1;
3002        length = in_eeprom->length >> 1;
3003        if (first > hw->rom.word_size ||
3004            ((first + length) > hw->rom.word_size))
3005                return -EINVAL;
3006
3007        in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3008
3009        return eeprom->readw_buffer(hw, first, length, data);
3010}
3011
3012static int
3013ngbe_set_eeprom(struct rte_eth_dev *dev,
3014                struct rte_dev_eeprom_info *in_eeprom)
3015{
3016        struct ngbe_hw *hw = ngbe_dev_hw(dev);
3017        struct ngbe_rom_info *eeprom = &hw->rom;
3018        uint16_t *data = in_eeprom->data;
3019        int first, length;
3020
3021        first = in_eeprom->offset >> 1;
3022        length = in_eeprom->length >> 1;
3023        if (first > hw->rom.word_size ||
3024            ((first + length) > hw->rom.word_size))
3025                return -EINVAL;
3026
3027        in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3028
3029        return eeprom->writew_buffer(hw,  first, length, data);
3030}
3031
3032static const struct eth_dev_ops ngbe_eth_dev_ops = {
3033        .dev_configure              = ngbe_dev_configure,
3034        .dev_infos_get              = ngbe_dev_info_get,
3035        .dev_start                  = ngbe_dev_start,
3036        .dev_stop                   = ngbe_dev_stop,
3037        .dev_close                  = ngbe_dev_close,
3038        .dev_reset                  = ngbe_dev_reset,
3039        .promiscuous_enable         = ngbe_dev_promiscuous_enable,
3040        .promiscuous_disable        = ngbe_dev_promiscuous_disable,
3041        .allmulticast_enable        = ngbe_dev_allmulticast_enable,
3042        .allmulticast_disable       = ngbe_dev_allmulticast_disable,
3043        .link_update                = ngbe_dev_link_update,
3044        .stats_get                  = ngbe_dev_stats_get,
3045        .xstats_get                 = ngbe_dev_xstats_get,
3046        .xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3047        .stats_reset                = ngbe_dev_stats_reset,
3048        .xstats_reset               = ngbe_dev_xstats_reset,
3049        .xstats_get_names           = ngbe_dev_xstats_get_names,
3050        .xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3051        .fw_version_get             = ngbe_fw_version_get,
3052        .dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
3053        .mtu_set                    = ngbe_dev_mtu_set,
3054        .vlan_filter_set            = ngbe_vlan_filter_set,
3055        .vlan_tpid_set              = ngbe_vlan_tpid_set,
3056        .vlan_offload_set           = ngbe_vlan_offload_set,
3057        .vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
3058        .rx_queue_start             = ngbe_dev_rx_queue_start,
3059        .rx_queue_stop              = ngbe_dev_rx_queue_stop,
3060        .tx_queue_start             = ngbe_dev_tx_queue_start,
3061        .tx_queue_stop              = ngbe_dev_tx_queue_stop,
3062        .rx_queue_setup             = ngbe_dev_rx_queue_setup,
3063        .rx_queue_release           = ngbe_dev_rx_queue_release,
3064        .tx_queue_setup             = ngbe_dev_tx_queue_setup,
3065        .tx_queue_release           = ngbe_dev_tx_queue_release,
3066        .dev_led_on                 = ngbe_dev_led_on,
3067        .dev_led_off                = ngbe_dev_led_off,
3068        .flow_ctrl_get              = ngbe_flow_ctrl_get,
3069        .flow_ctrl_set              = ngbe_flow_ctrl_set,
3070        .mac_addr_add               = ngbe_add_rar,
3071        .mac_addr_remove            = ngbe_remove_rar,
3072        .mac_addr_set               = ngbe_set_default_mac_addr,
3073        .uc_hash_table_set          = ngbe_uc_hash_table_set,
3074        .uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
3075        .reta_update                = ngbe_dev_rss_reta_update,
3076        .reta_query                 = ngbe_dev_rss_reta_query,
3077        .rss_hash_update            = ngbe_dev_rss_hash_update,
3078        .rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3079        .set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3080        .rxq_info_get               = ngbe_rxq_info_get,
3081        .txq_info_get               = ngbe_txq_info_get,
3082        .rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3083        .tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3084        .timesync_enable            = ngbe_timesync_enable,
3085        .timesync_disable           = ngbe_timesync_disable,
3086        .timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3087        .timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3088        .get_reg                    = ngbe_get_regs,
3089        .get_eeprom_length          = ngbe_get_eeprom_length,
3090        .get_eeprom                 = ngbe_get_eeprom,
3091        .set_eeprom                 = ngbe_set_eeprom,
3092        .timesync_adjust_time       = ngbe_timesync_adjust_time,
3093        .timesync_read_time         = ngbe_timesync_read_time,
3094        .timesync_write_time        = ngbe_timesync_write_time,
3095        .tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3096};
3097
3098RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3099RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3100RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3101
3102RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3103RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3104
3105#ifdef RTE_ETHDEV_DEBUG_RX
3106        RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3107#endif
3108#ifdef RTE_ETHDEV_DEBUG_TX
3109        RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3110#endif
3111